Skip to content

Commit 723a249

Browse files
Merge branch 'master' into slides
2 parents 2ea3503 + 46f46de commit 723a249

83 files changed

Lines changed: 1314 additions & 379 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

README.md

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,21 +33,38 @@ git clone git@github.com:jonathanrocher/ets_tutorial.git
3333

3434
### EDM users (recommended)
3535
First, download and install EDM from https://www.enthought.com/edm/. Then,
36-
open a `Terminal`/`Powershell`/`Cmd Prompt`/ and navigate to the folder
37-
where the repo was cloned. Enter the following command to create a
38-
dedicated Python environment and install all dependencies in it:
36+
open a `Terminal`/`Powershell`/`Cmd Prompt`/ and create a lightweight bootstrap environment to run the installation commands.
3937
```commandline
40-
python ci build
38+
edm envs create bootstrap
39+
edm install --environment bootstrap click
40+
```
41+
Next, enter the following commands to create a
42+
dedicated Python environment called `ets_tutorial` and install all dependencies in it:
43+
```commandline
44+
edm run -e bootstrap -- python ci build --environment ets_tutorial
45+
```
46+
All application related `python` commands are assumed to run in this
47+
environment. You can activate the environment with:
48+
```commandline
49+
edm shell -e ets_tutorial
4150
```
4251

4352
### Conda users
44-
[TODO]
53+
Create a new conda environment called `ets_tutorial` and install local
54+
dependencies with the following commands:
55+
```commandline
56+
conda create -n ets_tutorial python=3.6 pandas matplotlib traits traitsui scikit-image pillow pyqt ipython importlib_resources importlib_metadata
57+
conda activate ets_tutorial
58+
python setup.py install
59+
```
60+
Activate the new environment with `conda activate ets_tutorial`.
4561

4662
### pip users
4763
Assuming a Python environment is created and activated on your machine, for
4864
example from https://www.python.org/,
4965
```commandline
5066
pip install pandas matplotlib traits traitsui scikit-image pillow pyqt5 ipython
67+
python setup.py install
5168
```
5269

5370
## Getting help
@@ -78,11 +95,11 @@ are able to reach the end goal.
7895
- step 3: GUI: first traitsUI views
7996
- step 4: pyface application: tree navigator and double-click on an image to
8097
display the traitsUI view of the image.
98+
- INTERLUDE: code structure for scalability
8199
- step 5: Fuller pyface application:
82100
- add folder editor to display a table of metadata for all images inside
83101
- add button to launch the face detection on all images
84102
- add widgets to filter images
85-
- INTERLUDE: code structure for scalability
86103
- step 6: pyface application: adding menu and branding
87104
- step 7: pyface application: advanced features [OPTIONAL]
88105
- step 8: 1-click installer

sample_images/20220121_080128.jpg

2.07 MB
Loading
2.3 MB
Loading

stage2.1_traited_script/traited_face_detect.py

Lines changed: 29 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -7,33 +7,50 @@
77
from skimage.feature import Cascade
88
import matplotlib.pyplot as plt
99
from matplotlib import patches
10-
from os.path import join
10+
from os.path import join, splitext
1111
import numpy as np
1212

1313
# ETS imports
14-
from traits.api import Dict, File, HasStrictTraits, List, observe
14+
from traits.api import (
15+
Array, cached_property, Dict, File, HasStrictTraits, List, Property
16+
)
17+
18+
SUPPORTED_FORMATS = [".png", ".jpg", ".jpeg", ".PNG", ".JPG", ".JPEG"]
1519

1620

1721
class ImageFile(HasStrictTraits):
1822
""" Model to hold an image file.
1923
"""
2024
filepath = File
2125

26+
metadata = Property(Dict, depends_on="filepath")
27+
28+
data = Property(Array, depends_on="filepath")
29+
2230
faces = List
2331

24-
metadata = Dict
32+
def _is_valid_file(self):
33+
return (
34+
bool(self.filepath) and
35+
splitext(self.filepath)[1].lower() in SUPPORTED_FORMATS
36+
)
2537

26-
def to_array(self):
38+
@cached_property
39+
def _get_data(self):
40+
if not self._is_valid_file():
41+
return np.array([])
2742
with PIL.Image.open(self.filepath) as img:
2843
return np.asarray(img)
2944

30-
@observe("filepath")
31-
def update_metadata(self, event):
32-
45+
@cached_property
46+
def _get_metadata(self):
47+
if not self._is_valid_file():
48+
return {}
3349
with PIL.Image.open(self.filepath) as img:
3450
exif = img._getexif()
35-
self.metadata = {TAGS[k]: v for k, v in exif.items()
36-
if k in TAGS}
51+
if not exif:
52+
return {}
53+
return {TAGS[k]: v for k, v in exif.items() if k in TAGS}
3754

3855
def detect_faces(self):
3956
# Load the trained file from the module root.
@@ -42,14 +59,13 @@ def detect_faces(self):
4259
# Initialize the detector cascade.
4360
detector = Cascade(trained_file)
4461

45-
detected = detector.detect_multi_scale(img=self.to_array(),
62+
detected = detector.detect_multi_scale(img=self.data,
4663
scale_factor=1.2,
4764
step_ratio=1,
4865
min_size=(60, 60),
4966
max_size=(600, 600))
5067
self.faces = detected
51-
52-
self.metadata["Number of faces detected"] = len(detected)
68+
return self.faces
5369

5470

5571
if __name__ == "__main__":
@@ -72,7 +88,7 @@ def detect_faces(self):
7288

7389
# Visualize results ---------------------------------------------------
7490

75-
plt.imshow(img.to_array())
91+
plt.imshow(img.data)
7692
img_desc = plt.gca()
7793
plt.set_cmap('gray')
7894

stage3.0_first_views/test_traited_face_detect.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,21 +17,19 @@ class TestImageFile(TestCase):
1717
def test_no_image_file(self):
1818
img = ImageFile()
1919
self.assertEqual(img.metadata, {})
20-
data = img.to_array()
21-
self.assertIsInstance(data, np.ndarray)
22-
self.assertEqual(data.shape, (0,))
20+
self.assertIsInstance(img.data, np.ndarray)
21+
self.assertEqual(img.data.shape, (0,))
2322

2423
def test_image_metadata(self):
2524
img = ImageFile(filepath=SAMPLE_IMG1)
2625
self.assertNotEqual(img.metadata, {})
2726
for key in ['ExifVersion', 'ExifImageWidth', 'ExifImageHeight']:
2827
self.assertIn(key, img.metadata.keys())
29-
data = img.to_array()
3028
expected_shape = (img.metadata['ExifImageHeight'],
3129
img.metadata['ExifImageWidth'], 3)
32-
self.assertEqual(data.shape, expected_shape)
30+
self.assertEqual(img.data.shape, expected_shape)
3331

3432
def test_detects_faces(self):
3533
img = ImageFile(filepath=SAMPLE_IMG1)
34+
img.detect_faces()
3635
self.assertEqual(len(img.faces), 5)
37-
self.assertEqual(img.metadata["Number of faces"], 5)

stage3.0_first_views/traited_face_detect.py

Lines changed: 36 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
# General imports
2+
from os.path import splitext
23
import PIL.Image
34
import matplotlib.pyplot as plt
45
import numpy as np
@@ -9,23 +10,24 @@
910

1011
# ETS imports
1112
from traits.api import (
12-
Dict,
13-
File,
14-
HasStrictTraits,
15-
List,
16-
observe,
13+
Array, cached_property, Dict, File, HasStrictTraits, List,
14+
Property
1715
)
1816
from traitsui.api import Item, OKButton, View
1917

18+
SUPPORTED_FORMATS = [".png", ".jpg", ".jpeg", ".PNG", ".JPG", ".JPEG"]
19+
2020

2121
class ImageFile(HasStrictTraits):
2222
""" Model to hold an image file.
2323
"""
2424
filepath = File
2525

26-
faces = List
26+
metadata = Property(Dict, depends_on="filepath")
27+
28+
data = Property(Array, depends_on="filepath")
2729

28-
metadata = Dict
30+
faces = List
2931

3032
traits_view = View(
3133
Item(name='filepath', show_label=False),
@@ -34,58 +36,52 @@ class ImageFile(HasStrictTraits):
3436
width=640
3537
)
3638

37-
def to_array(self):
38-
if not self.filepath:
39-
return np.array([])
39+
def _is_valid_file(self):
40+
return (
41+
bool(self.filepath) and
42+
splitext(self.filepath)[1].lower() in SUPPORTED_FORMATS
43+
)
4044

45+
@cached_property
46+
def _get_data(self):
47+
if not self._is_valid_file():
48+
return np.array([])
4149
with PIL.Image.open(self.filepath) as img:
4250
return np.asarray(img)
4351

44-
@observe("filepath")
45-
def _update_faces_and_metadata(self, event):
46-
self.metadata = {}
47-
self._update_metadata_with_exif()
48-
self._detect_faces()
49-
print(self.metadata)
50-
print(f"Number of faces: {self.metadata['Number of faces']}")
51-
52-
def _update_metadata_with_exif(self):
53-
if not self.filepath:
54-
return
52+
@cached_property
53+
def _get_metadata(self):
54+
if not self._is_valid_file():
55+
return {}
5556
with PIL.Image.open(self.filepath) as img:
5657
exif = img._getexif()
5758
if not exif:
58-
return
59-
self.metadata.update(
60-
{TAGS[k]: v for k, v in exif.items() if k in TAGS}
61-
)
59+
return {}
60+
return {TAGS[k]: v for k, v in exif.items() if k in TAGS}
6261

63-
def _detect_faces(self):
64-
self.faces = []
65-
if not self.filepath:
66-
return
62+
def detect_faces(self):
6763
# Load the trained file from the module root.
6864
trained_file = data.lbp_frontal_face_cascade_filename()
65+
6966
# Initialize the detector cascade.
7067
detector = Cascade(trained_file)
71-
faces = detector.detect_multi_scale(
72-
img=self.to_array(),
73-
scale_factor=1.2,
74-
step_ratio=1,
75-
min_size=(60, 60),
76-
max_size=(600, 600)
77-
)
78-
self.faces.extend(faces)
79-
self.metadata['Number of faces'] = len(self.faces)
68+
69+
detected = detector.detect_multi_scale(img=self.data,
70+
scale_factor=1.2,
71+
step_ratio=1,
72+
min_size=(60, 60),
73+
max_size=(600, 600))
74+
self.faces = detected
75+
return self.faces
8076

8177

8278
if __name__ == '__main__':
8379
img = ImageFile()
8480
img.configure_traits()
8581

86-
plt.imshow(img.to_array())
82+
plt.imshow(img.data)
8783
img_desc = plt.gca()
88-
for patch in img.faces:
84+
for patch in img.detect_faces():
8985
img_desc.add_patch(
9086
patches.Rectangle(
9187
(patch['c'], patch['r']),

stage3.1_first_views/test_traited_face_detect.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,24 +19,22 @@ class TestImageFile(TestCase):
1919
def test_no_image_file(self):
2020
img = ImageFile()
2121
self.assertEqual(img.metadata, {})
22-
data = img.to_array()
23-
self.assertIsInstance(data, np.ndarray)
24-
self.assertEqual(data.shape, (0,))
22+
self.assertIsInstance(img.data, np.ndarray)
23+
self.assertEqual(img.data.shape, (0,))
2524

2625
def test_image_metadata(self):
2726
img = ImageFile(filepath=SAMPLE_IMG1)
2827
self.assertNotEqual(img.metadata, {})
2928
for key in ['ExifVersion', 'ExifImageWidth', 'ExifImageHeight']:
3029
self.assertIn(key, img.metadata.keys())
31-
data = img.to_array()
3230
expected_shape = (img.metadata['ExifImageHeight'],
3331
img.metadata['ExifImageWidth'], 3)
34-
self.assertEqual(data.shape, expected_shape)
32+
self.assertEqual(img.data.shape, expected_shape)
3533

3634
def test_detects_faces(self):
3735
img = ImageFile(filepath=SAMPLE_IMG1)
36+
img.detect_faces()
3837
self.assertEqual(len(img.faces), 5)
39-
self.assertEqual(img.metadata["Number of faces"], 5)
4038

4139

4240
class TestImageFileView(TestCase):

0 commit comments

Comments
 (0)