Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🎨 Rework Probabilistic Regression and add Packed-Transformer layers #126

Merged
merged 37 commits into from
Jan 6, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
463c053
:sparkles: Add codecov test analytics
o-laurent Nov 19, 2024
8650ef3
:shirt: Start improving eval loop code & simplify segformer tests & a…
o-laurent Nov 19, 2024
49b67da
:sparkles: Add the CUB dataset
o-laurent Nov 20, 2024
86eec13
:books: Fix docstring typo in SWAG
alafage Nov 20, 2024
d80fe8c
:hammer: Rework Distribution Layers and distribution utils
alafage Nov 28, 2024
16e1d44
:books: Update API Reference
alafage Nov 28, 2024
7cff743
:hammer: Rework Regression baselines and Routine
alafage Nov 28, 2024
c646d35
:bug: Fix NLL loss and metric
alafage Nov 28, 2024
fb51f03
:hammer: Rework Pixel Regression Routine
alafage Nov 28, 2024
3ff3056
:construction: Update BTS baseline
alafage Nov 28, 2024
fbae040
:hammer: Update DER Cubic Tutorial
alafage Nov 28, 2024
11afd5b
:hammer: Update the tests
alafage Nov 28, 2024
9d4ec19
:art: Update typing in _RegDeepEnsembles
alafage Nov 28, 2024
2586e37
:fire: Remove test code from CUB
o-laurent Nov 29, 2024
240eabd
:art: Improve Regression experiments on UCI datasets
alafage Dec 4, 2024
16138d9
:hammer: Enable the choosing of the distribution estimate
alafage Dec 9, 2024
5bfb234
:hammer: Update distribution linear layer docstrings
alafage Dec 9, 2024
2c402cb
:sparkles: Add Deep Probabilistic Regression tutorial
alafage Dec 9, 2024
8a51f5d
:white_check_mark: Add tests for Linear distribution layers
alafage Dec 9, 2024
45693a9
:white_check_mark: Add tests for convolutional distribution layers
alafage Dec 9, 2024
9fde14f
:white_check_mark: Add test for failure case in `get_dist_conv_layer()`
alafage Dec 9, 2024
d13e0c6
:sparkles: Add small version of MUAD dataset
alafage Dec 10, 2024
eed446e
:bug: Fix Deep Ensembles reset_parameters option
alafage Dec 16, 2024
0d2b7d3
:bug: Fix MIoU metric
alafage Dec 16, 2024
cd2b872
:white_check_mark: Improve coverage (hopefully)
alafage Dec 17, 2024
5130778
:hammer: Rework BTS model
alafage Dec 17, 2024
0917984
:art: Update upon review feedback
alafage Dec 18, 2024
ffc94e9
:white_check_mark: Improve coverage
alafage Dec 18, 2024
12b2f1c
:book: Add missing explanation on Independent use
alafage Dec 18, 2024
7747a7a
Merge pull request #125 from ENSTA-U2IS-AI/density_layers
o-laurent Dec 18, 2024
63e9ece
:construction: Add Packed-Transformer layers
alafage Dec 19, 2024
5e5635b
🐛 Fix small muad dataset url
sofiia-chorna Dec 25, 2024
ec2d921
Merge pull request #127 from sofiia-chorna/fix-small-muad-url
alafage Dec 26, 2024
1a53be2
:bug: Fix packed_linear functional when ``bias`` is ``None``
alafage Dec 26, 2024
f484403
:white_check_mark: Add tests for Packed Transformer Layer classes
alafage Dec 28, 2024
a09fdd9
:books: Add documentation for Packed Transformer Layers
alafage Jan 6, 2025
c16d53e
:wrench: Increase package version
alafage Jan 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
🐛 Fix MIoU metric
Update MUAD dataset
  • Loading branch information
alafage committed Dec 16, 2024
commit 0d2b7d3e71afe5620433dd1079248ea5b594357b
86 changes: 35 additions & 51 deletions torch_uncertainty/datasets/muad.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
import json
import logging
import os
import shutil
from collections.abc import Callable
from importlib import util
from pathlib import Path
from typing import Literal
from typing import Literal, NamedTuple

from huggingface_hub import hf_hub_download
from PIL import Image

if util.find_spec("cv2"):
import cv2
Expand All @@ -16,17 +16,17 @@
else: # coverage: ignore
cv2_installed = False
import numpy as np
import torch
from einops import rearrange
from PIL import Image
from torchvision import tv_tensors
from torchvision.datasets import VisionDataset
from torchvision.datasets.utils import (
check_integrity,
download_and_extract_archive,
download_url,
)
from torchvision.transforms.v2 import functional as F


class MUADClass(NamedTuple):
    """Metadata record for one MUAD semantic-segmentation class."""

    # Human-readable class name (may hold several merged labels,
    # e.g. "bear deer cow").
    name: str
    # Integer label id used in the target masks.
    id: int
    # RGB palette color used to render this class.
    color: tuple[int, int, int]


class MUAD(VisionDataset):
Expand Down Expand Up @@ -57,6 +57,31 @@ class MUAD(VisionDataset):
},
}

# Static class table replacing the previously downloaded ``classes.json``:
# one ``MUADClass(name, id, color)`` entry per semantic class, indexed by id.
classes = [
    MUADClass("road", 0, (128, 64, 128)),
    MUADClass("sidewalk", 1, (244, 35, 232)),
    MUADClass("building", 2, (70, 70, 70)),
    MUADClass("wall", 3, (102, 102, 156)),
    MUADClass("fence", 4, (190, 153, 153)),
    MUADClass("pole", 5, (153, 153, 153)),
    MUADClass("traffic_light", 6, (250, 170, 30)),
    MUADClass("traffic_sign", 7, (220, 220, 0)),
    MUADClass("vegetation", 8, (107, 142, 35)),
    MUADClass("terrain", 9, (152, 251, 152)),
    MUADClass("sky", 10, (70, 130, 180)),
    MUADClass("person", 11, (220, 20, 60)),
    MUADClass("rider", 12, (255, 0, 0)),
    MUADClass("car", 13, (0, 0, 142)),
    MUADClass("truck", 14, (0, 0, 70)),
    MUADClass("bus", 15, (0, 60, 100)),
    MUADClass("train", 16, (0, 80, 100)),
    MUADClass("motorcycle", 17, (0, 0, 230)),
    MUADClass("bicycle", 18, (119, 11, 32)),
    MUADClass("bear deer cow", 19, (255, 228, 196)),
    MUADClass("garbage_bag stand_food trash_can", 20, (128, 128, 0)),
    MUADClass("unlabeled", 21, (0, 0, 0)),  # id 255 or 21
]

# Paths to the target (label/depth) files; populated by ``_make_dataset``.
targets: list[Path] = []

def __init__(
Expand Down Expand Up @@ -155,49 +180,8 @@ def __init__(
f"MUAD {split} split not found or incomplete. Set download=True to download it."
)

# Load classes metadata
cls_path = self.root / "classes.json"
if (not check_integrity(cls_path, self.classes_md5)) and download:
download_url(
self.classes_url,
self.root,
"classes.json",
self.classes_md5,
)

with (self.root / "classes.json").open() as file:
self.classes = json.load(file)

train_id_to_color = [c["object_id"] for c in self.classes if c["train_id"] not in [-1, 255]]
train_id_to_color.append([0, 0, 0])
self.train_id_to_color = np.array(train_id_to_color)

self._make_dataset(self.root / split)

def encode_target(self, target: Image.Image) -> Image.Image:
    """Encode a raw target image into a train-id label image.

    Each pixel of ``target`` is compared against the ``id`` of every entry
    in ``self.classes`` (loaded from ``classes.json``) and replaced by the
    corresponding ``train_id``.

    Args:
        target (Image.Image): Target PIL image.

    Returns:
        Image.Image: Encoded (train-id) target, converted back to a PIL
            image (not a tensor).
    """
    target = F.pil_to_tensor(target)
    # (C, H, W) -> (H, W, C) so the per-pixel comparison below can reduce
    # over the channel dimension with ``all(dim=-1)``.
    target = rearrange(target, "c h w -> h w c")
    # Single-channel output, initialized to train id 0.
    out = torch.zeros_like(target[..., :1])
    # Convert each target pixel value to its train-id index.
    # NOTE(review): this matches pixels whose channels ALL equal the scalar
    # class ``id`` — presumably the raw target stores the class id in every
    # channel rather than an RGB color; confirm against the dataset format.
    for muad_class in self.classes:
        out[(target == torch.tensor(muad_class["id"], dtype=target.dtype)).all(dim=-1)] = (
            muad_class["train_id"]
        )

    return F.to_pil_image(rearrange(out, "h w c -> c h w"))

def decode_target(self, target: Image.Image) -> np.ndarray:
    """Decode a train-id target into its RGB color representation.

    NOTE(review): despite the ``Image.Image`` annotation, the boolean-mask
    assignment below requires an array-like target (e.g. ``np.ndarray``),
    and the input is modified in place — confirm the expected type.
    """
    # Remap label 255 to train id 19 before the palette lookup
    # (presumably the void/ignore index — TODO confirm).
    target[target == 255] = 19
    return self.train_id_to_color[target]

def __getitem__(self, index: int) -> tuple[tv_tensors.Image, tv_tensors.Mask]:
"""Get the sample at the given index.

Expand All @@ -210,7 +194,7 @@ def __getitem__(self, index: int) -> tuple[tv_tensors.Image, tv_tensors.Mask]:
"""
image = tv_tensors.Image(Image.open(self.samples[index]).convert("RGB"))
if self.target_type == "semantic":
target = tv_tensors.Mask(self.encode_target(Image.open(self.targets[index])))
target = tv_tensors.Mask(Image.open(self.targets[index]))
else:
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
target = Image.fromarray(
Expand Down Expand Up @@ -274,4 +258,4 @@ def _download(self, split: str) -> None:

@property
def color_palette(self) -> np.ndarray:
return self.train_id_to_color.tolist()
return [c.color for c in self.classes]
2 changes: 1 addition & 1 deletion torch_uncertainty/metrics/classification/mean_iou.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,4 @@ def __init__(
def compute(self) -> Tensor:
    """Compute the Mean Intersection over Union (MIoU) based on saved inputs."""
    # True positives, false positives, false negatives per class
    # (true negatives are unused for IoU).
    tp, fp, _, fn = self._final_state()
    # NOTE(review): the two return lines below are old/new revision residue
    # from the quoted diff — only the first is reachable. The new revision
    # marks empty classes as NaN and averages with ``nanmean`` so absent
    # classes do not drag the score down.
    return _safe_divide(tp, tp + fp + fn).mean()
    return _safe_divide(tp, tp + fp + fn, zero_division=float("nan")).nanmean()
Loading