Skip to content

Commit

Permalink
Merge branch 'develop' into fix/value-init
Browse files Browse the repository at this point in the history
  • Loading branch information
mdbenito authored Oct 5, 2023
2 parents a1ef695 + ac3ed99 commit 0d8893c
Show file tree
Hide file tree
Showing 14 changed files with 2,065 additions and 353 deletions.
21 changes: 19 additions & 2 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -170,12 +170,29 @@ Or:
]
```

To only hide the input
To only hide the input, and use

```yaml
"tags": [
"hide-output"
]
```
for hiding the output only.

It is important to leave a warning at the top of the document to avoid confusion.
Examples for hidden imports and plots are available in the notebooks, e.g. in
[Shapley for data valuation](https://aai-institute.github.io/pyDVL/examples/shapley_basic_spotify.ipynb).
*notebooks/shapley_basic_spotify.ipynb*.

### Plots in Notebooks
If you add a plot to a notebook that should also render nicely in the
browser's dark mode, add the tag *invertible-output*, i.e.

```yaml
"tags": [
"invertible-output"
]
```
This applies a simple CSS filter to the output image of the cell.

## Documentation

Expand Down
5 changes: 4 additions & 1 deletion docs/css/extra.css
Original file line number Diff line number Diff line change
Expand Up @@ -107,14 +107,17 @@ a.autorefs-external:hover::after {
color: black;
}


/* Light scheme: images tagged invertible are left untouched (rule kept as an
   explicit placeholder so both schemes are visibly covered). */
body[data-md-color-scheme="default"] .invertible img {
}

/* Dark (slate) scheme: invert colors and rotate hue back 180° so plots with
   white backgrounds blend into dark mode while hues stay recognizable. */
body[data-md-color-scheme="slate"] .invertible img {
filter: invert(100%) hue-rotate(180deg);
}

/* Same dark-mode inversion for notebook cells tagged "invertible-output"
   (mkdocs-jupyter emits a celltag_<tag> class on the cell output). */
body[data-md-color-scheme="slate"] .celltag_invertible-output img {
filter: invert(100%) hue-rotate(180deg);
}

/* Rendered dataframe from jupyter */
table.dataframe {
display: block;
Expand Down
2 changes: 2 additions & 0 deletions mkdocs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ plugins:
- hide
remove_input_tags:
- hide-input
remove_all_outputs_tags:
- hide-output
binder: true
binder_service_name: "gh"
binder_branch: "develop"
Expand Down
41 changes: 30 additions & 11 deletions notebooks/data_oob.ipynb

Large diffs are not rendered by default.

401 changes: 296 additions & 105 deletions notebooks/influence_imagenet.ipynb

Large diffs are not rendered by default.

279 changes: 208 additions & 71 deletions notebooks/influence_synthetic.ipynb

Large diffs are not rendered by default.

259 changes: 209 additions & 50 deletions notebooks/influence_wine.ipynb

Large diffs are not rendered by default.

1,185 changes: 1,155 additions & 30 deletions notebooks/least_core_basic.ipynb

Large diffs are not rendered by default.

115 changes: 73 additions & 42 deletions notebooks/shapley_basic_spotify.ipynb

Large diffs are not rendered by default.

61 changes: 34 additions & 27 deletions notebooks/shapley_knn_flowers.ipynb

Large diffs are not rendered by default.

28 changes: 24 additions & 4 deletions notebooks/shapley_utility_learning.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -374,8 +374,13 @@
"cell_type": "code",
"execution_count": 12,
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": [
"hide-input"
"hide-input",
"invertible-output"
]
},
"outputs": [
Expand Down Expand Up @@ -436,8 +441,13 @@
"cell_type": "code",
"execution_count": 14,
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": [
"hide-input"
"hide-input",
"invertible-output"
]
},
"outputs": [
Expand Down Expand Up @@ -477,8 +487,13 @@
"cell_type": "code",
"execution_count": 16,
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": [
"hide-input"
"hide-input",
"invertible-output"
]
},
"outputs": [
Expand Down Expand Up @@ -628,8 +643,13 @@
"cell_type": "code",
"execution_count": 21,
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": [
"hide-input"
"hide-input",
"invertible-output"
]
},
"outputs": [
Expand Down
10 changes: 6 additions & 4 deletions src/pydvl/influence/general.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,8 +104,9 @@ def test_grads() -> Generator[TensorType, None, None]:
) # type:ignore

try:
# if provided input_data implements __len__, pre-allocate the result tensor to reduce memory consumption
resulting_shape = (len(test_data), model.num_params) # type:ignore
# in case input_data is a torch DataLoader created from a Dataset,
# we can pre-allocate the result tensor to reduce memory consumption
resulting_shape = (len(test_data.dataset), model.num_params) # type:ignore
rhs = cat_gen(
test_grads(), resulting_shape, model # type:ignore
) # type:ignore
Expand Down Expand Up @@ -174,8 +175,9 @@ def train_grads() -> Generator[TensorType, None, None]:
) # type:ignore

try:
# if provided input_data implements __len__, pre-allocate the result tensor to reduce memory consumption
resulting_shape = (len(input_data), model.num_params) # type:ignore
# in case input_data is a torch DataLoader created from a Dataset,
# we can pre-allocate the result tensor to reduce memory consumption
resulting_shape = (len(input_data.dataset), model.num_params) # type:ignore
train_grad_tensor = cat_gen(
train_grads(), resulting_shape, model # type:ignore
) # type:ignore
Expand Down
10 changes: 5 additions & 5 deletions src/pydvl/influence/torch/functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def batch_hvp_gen(

for inputs, targets in iter(data_loader):
batch_loss = batch_loss_function(model, loss, inputs, targets)
model_params = dict(model.named_parameters())
model_params = {k: p for k, p in model.named_parameters() if p.requires_grad}

def batch_hvp(vec: torch.Tensor):
return flatten_tensors_to_vector(
Expand Down Expand Up @@ -166,9 +166,7 @@ def batch_loss_function(
"""

def batch_loss(params: Dict[str, torch.Tensor]):
outputs = functional_call(
model, params, (to_model_device(x, model),), strict=True
)
outputs = functional_call(model, params, (to_model_device(x, model),))
return loss(outputs, y)

return batch_loss
Expand Down Expand Up @@ -209,7 +207,9 @@ def get_hvp_function(
"""

params = {
k: p if track_gradients else p.detach() for k, p in model.named_parameters()
k: p if track_gradients else p.detach()
for k, p in model.named_parameters()
if p.requires_grad
}

def hvp_function(vec: torch.Tensor) -> torch.Tensor:
Expand Down
1 change: 0 additions & 1 deletion src/pydvl/influence/torch/torch_differentiable.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,6 @@ def model_func(param):
param,
),
(x.to(self.device),),
strict=True,
)
return self.loss(outputs, y.to(self.device))

Expand Down

0 comments on commit 0d8893c

Please sign in to comment.