 import warnings
 from collections.abc import Iterable
 from datetime import datetime
-from typing import Any, Callable, Optional, Self, Tuple, Union, cast
+from typing import Any, Callable, Optional, Self, Sequence, Tuple, Union, cast

 import cftime
 import numpy as np

 import xugrid as xu

 import imod
-import imod.mf6.utilities
 from imod.common.interfaces.ipointdatapackage import IPointDataPackage
 from imod.common.utilities.grid import broadcast_to_full_domain
 from imod.common.utilities.layer import create_layered_top
@@ -82,36 +81,25 @@ def mask_2D(package: GridAgnosticWell, domain_2d: GridDataArray) -> GridAgnostic


 def _df_groups_to_da_rates(
-    unique_well_groups: pd.api.typing.DataFrameGroupBy,
+    unique_well_groups: Sequence[pd.api.typing.DataFrameGroupBy],
 ) -> xr.DataArray:
     # Convert dataframes all groups to DataArrays
-    is_steady_state = "time" not in unique_well_groups[0].columns
-    if is_steady_state:
-        da_groups = [
-            xr.DataArray(df_group["rate"].sum()) for df_group in unique_well_groups
-        ]
+    columns = list(unique_well_groups[0].columns)
+    columns.remove("rate")
+    is_transient = "time" in columns
+    gb_and_summed = pd.concat(unique_well_groups).groupby(columns).sum()
+    if is_transient:
+        index_names = ["time", "index"]
     else:
-        da_groups = [
-            xr.DataArray(
-                df_group["rate"], dims=("time"), coords={"time": df_group["time"]}
-            )
-            for df_group in unique_well_groups
-        ]
-        # Groupby time and sum to aggregate wells with the exact same x, y, and
-        # filter top/bottom.
-        da_groups = [da_group.groupby("time").sum() for da_group in da_groups]
-    # Assign index coordinates
-    da_groups = [
-        da_group.expand_dims(dim="index").assign_coords(index=[i])
-        for i, da_group in enumerate(da_groups)
-    ]
-    # Concatenate datarrays along index dimension
-    return xr.concat(da_groups, dim="index")
+        index_names = ["index"]
+    # Unset multi-index, then set index to index_names
+    df_temp = gb_and_summed.reset_index().set_index(index_names)
+    return df_temp["rate"].to_xarray()


 def _prepare_well_rates_from_groups(
     pkg_data: dict,
-    unique_well_groups: pd.api.typing.DataFrameGroupBy,
+    unique_well_groups: Sequence[pd.api.typing.DataFrameGroupBy],
     start_times: StressPeriodTimesType,
 ) -> xr.DataArray:
     """
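As a rough sketch of what the refactored _df_groups_to_da_rates does, assuming two transient well groups that were already tagged with an "index" column (the data, the group names, and the reduced column set below are made up; the real groups also carry x, y and filter top/bottom columns, which end up in the groupby as well):

    import pandas as pd

    # Two hypothetical well groups, already tagged with a constant "index"
    # column and carrying transient rates.
    group_a = pd.DataFrame(
        {"index": 0, "time": ["2000-01-01", "2000-02-01"], "rate": [1.0, 2.0]}
    )
    group_b = pd.DataFrame(
        {"index": 1, "time": ["2000-01-01", "2000-02-01"], "rate": [3.0, 4.0]}
    )

    # Mirror the refactored function: group on every column except "rate",
    # sum duplicate rows, then convert the "rate" column to a DataArray.
    columns = ["index", "time"]
    summed = pd.concat([group_a, group_b]).groupby(columns).sum()
    rates = summed.reset_index().set_index(["time", "index"])["rate"].to_xarray()
    print(rates.dims)  # ('time', 'index')

Aggregating once over the concatenated dataframe replaces the per-group DataArray construction and concat of the old code, while the "index" level keeps wells with identical coordinates apart.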
@@ -690,8 +678,12 @@ def from_imod5_data(
         # Associated wells need additional grouping by id
         if pkg_data["has_associated"]:
             colnames_group.append("id")
-        wel_index, unique_well_groups = zip(*df.groupby(colnames_group))
-
+        wel_index, well_groups_untagged = zip(*df.groupby(colnames_group))
+        # Explicitly assign an index to each group, so that the
+        # DataArray of rates can be created with a unique index.
+        unique_well_groups = [
+            group.assign(index=i) for i, group in enumerate(well_groups_untagged)
+        ]
         # Unpack wel indices by zipping
         varnames = [("x", float), ("y", float)] + cls._depth_colnames
         index_values = zip(*wel_index)
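A small illustration of the tagging step, with made-up data and column names (only "id" and "rate" here, whereas the real grouping also uses x, y and the depth columns):

    import pandas as pd

    # Hypothetical dataframe of well entries; groupby yields (key, group) pairs.
    df = pd.DataFrame({"id": ["w1", "w1", "w2"], "rate": [1.0, 2.0, 3.0]})
    wel_index, well_groups_untagged = zip(*df.groupby(["id"]))

    # Tag each group with a constant "index" column, as the new code does, so
    # that _df_groups_to_da_rates can build a DataArray with a unique index.
    unique_well_groups = [
        group.assign(index=i) for i, group in enumerate(well_groups_untagged)
    ]
    print(unique_well_groups[1]["index"].tolist())  # [1]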