|
60 | 60 | from imod.typing import GridDataArray, GridDataset |
61 | 61 | from imod.typing.grid import ( |
62 | 62 | concat, |
63 | | - is_equal, |
| 63 | + is_same_domain, |
64 | 64 | is_unstructured, |
65 | 65 | merge_partitions, |
66 | 66 | ) |
@@ -1037,12 +1037,14 @@ def dump( |
1037 | 1037 | _, filename, _, _ = exchange_package.get_specification() |
1038 | 1038 | exchange_class_short = type(exchange_package).__name__ |
1039 | 1039 | path = f"{filename}.nc" |
1040 | | - exchange_package.dataset.to_netcdf(directory / path) |
| 1040 | + exchange_package.dataset.to_netcdf( |
| 1041 | + directory / path, format="NETCDF4" |
| 1042 | + ) |
1041 | 1043 | toml_content[key][exchange_class_short].append(path) |
1042 | 1044 |
|
1043 | 1045 | else: |
1044 | 1046 | path = f"{key}.nc" |
1045 | | - value.dataset.to_netcdf(directory / path) |
| 1047 | + value.dataset.to_netcdf(directory / path, format="NETCDF4") |
1046 | 1048 | toml_content[cls_name][key] = path |
1047 | 1049 |
|
1048 | 1050 | with open(directory / f"{self.name}.toml", "wb") as f: |
@@ -1620,10 +1622,16 @@ def _get_transport_models_per_flow_model(self) -> dict[str, list[str]]: |
1620 | 1622 |
|
1621 | 1623 | for flow_model_name in flow_models: |
1622 | 1624 | flow_model = self[flow_model_name] |
| 1625 | + |
| 1626 | + matched_tsp_models = [] |
1623 | 1627 | for tpt_model_name in transport_models: |
1624 | 1628 | tpt_model = self[tpt_model_name] |
1625 | | - if is_equal(tpt_model.domain, flow_model.domain): |
| 1629 | + if is_same_domain(tpt_model.domain, flow_model.domain): |
1626 | 1630 | result[flow_model_name].append(tpt_model_name) |
| 1631 | + matched_tsp_models.append(tpt_model_name) |
| 1632 | + for tpt_model_name in matched_tsp_models: |
| 1633 | + transport_models.pop(tpt_model_name) |
| 1634 | + |
1627 | 1635 | return result |
1628 | 1636 |
|
1629 | 1637 | def _generate_gwfgwt_exchanges(self) -> list[GWFGWT]: |
|
0 commit comments