Minor updates (and more Pexels images)
ignaciohrdz committed Mar 12, 2023
1 parent 0e9f813 commit 8495a3f
Showing 2 changed files with 18 additions and 16 deletions.
4 changes: 2 additions & 2 deletions Readme.md
@@ -15,7 +15,7 @@ For this experiment I'm using the following sources:
- [Menpo2D dataset](https://github.com/jiankangdeng/MenpoBenchmark)
- [AFW (Annotated Faces in the Wild)](https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/)
- Custom datasets:
-  - [Pexels](https://pexels.com): I downloaded 85 images from this website and annotated them using [CVAT](https://app.cvat.ai/). I am currently working on another batch of images.
+  - [Pexels](https://pexels.com): I downloaded 85 images from this website and annotated them using [CVAT](https://app.cvat.ai/). I am currently working on another two batches of images.

## Results

@@ -27,6 +27,6 @@ Some datasets such as Helen may generate noisy examples when the images have more

### Performance

-The model I've trained (nano) struggles with eyebrows, but it works really well on eyes and noses.
+The model I've trained (nano) struggles with eyebrows, but it works really well on eyes and noses. I need to add more close-up images of each part to increase the number of incomplete faces.

![F1 curve](images/F1_curve.png)
30 changes: 16 additions & 14 deletions prepare_data.py
@@ -50,8 +50,6 @@ def process_names(names, split, path_data, path_dest, skip):

# Original data from Pexels
path_pexels_dataset = os.path.join(PATH_HOME, "Documents", "Datasets", "Pexels-face-parts")
-path_pexels_images = os.path.join(path_pexels_dataset, "images")
-path_pexels_annotations = os.path.join(path_pexels_dataset, "annotations", "obj_train_data")

# Original data from the AFW dataset
path_afw_dataset = os.path.join(PATH_HOME, "Documents", "Datasets", "AFW-dataset")
@@ -154,23 +152,27 @@ def process_names(names, split, path_data, path_dest, skip):
# PART 2: PROCESSING THE PEXELS IMAGES #
########################################

-# TODO: Add the images of the new batch
-
# Copying the images and the labels to the final folder
-pexels_labels = os.listdir(path_pexels_annotations)
-for l in pexels_labels:
-    img_name = os.path.splitext(l)[0] + ".jpg"
-    img_source = os.path.join(path_pexels_images, img_name)
-    img_dest = os.path.join(path_processed_images, img_name)
-    shutil.copy(img_source, img_dest)
+pexels_sets = os.listdir(path_pexels_dataset)
+pexels_names = []
+for s in pexels_sets:
+    path_pexels_annotations = os.path.join(path_pexels_dataset, s, "annotations", "obj_train_data")
+    path_pexels_images = os.path.join(path_pexels_dataset, s, "images")
+    pexels_labels = os.listdir(path_pexels_annotations)
+    pexels_names.extend([os.path.splitext(l)[0] for l in pexels_labels])
+
+    for l in pexels_labels:
+        img_name = os.path.splitext(l)[0] + ".jpg"
+        img_source = os.path.join(path_pexels_images, img_name)
+        img_dest = os.path.join(path_processed_images, img_name)
+        shutil.copy(img_source, img_dest)

-    label_source = os.path.join(path_pexels_annotations, l)
-    label_dest = os.path.join(path_processed_labels, l)
-    shutil.copy(label_source, label_dest)
+        label_source = os.path.join(path_pexels_annotations, l)
+        label_dest = os.path.join(path_processed_labels, l)
+        shutil.copy(label_source, label_dest)

# Separate the Pexels dataset into training and validation
train_pct = 0.7
-pexels_names = [os.path.splitext(l)[0] for l in pexels_labels]
random.shuffle(pexels_names)
train_size = int(train_pct*len(pexels_names))
pexels_train_names = pd.DataFrame({0: pexels_names[:train_size]})
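
A note on the layout this change assumes: each batch of Pexels images now lives in its own subfolder of `Pexels-face-parts`, each with CVAT-style `images` and `annotations/obj_train_data` folders. Below is a minimal sanity-check sketch for that layout; the folder names come from the diff above, while the script itself is illustrative and not part of the repository (`os.path.expanduser("~")` stands in for the script's `PATH_HOME`):

```python
import os

# PATH_HOME in prepare_data.py points at the user's home directory;
# os.path.expanduser("~") stands in for it here.
path_pexels_dataset = os.path.join(
    os.path.expanduser("~"), "Documents", "Datasets", "Pexels-face-parts"
)

for s in sorted(os.listdir(path_pexels_dataset)):
    path_set = os.path.join(path_pexels_dataset, s)
    if not os.path.isdir(path_set):
        continue
    path_images = os.path.join(path_set, "images")
    path_annotations = os.path.join(path_set, "annotations", "obj_train_data")
    labels = [f for f in os.listdir(path_annotations) if f.endswith(".txt")]
    # Every YOLO label file should have a matching .jpg image
    missing = [
        l for l in labels
        if not os.path.isfile(os.path.join(path_images, os.path.splitext(l)[0] + ".jpg"))
    ]
    print(f"{s}: {len(labels)} labels, {len(missing)} without a matching image")
```

Running this before `prepare_data.py` catches images that were renamed or skipped during annotation, which would otherwise make `shutil.copy` fail partway through the copy loop.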
