Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

implement op that reads multiple rows from dataframe #395

Merged
merged 1 commit into from
Feb 4, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 65 additions & 0 deletions fuse/data/ops/ops_read.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,71 @@ def get_all_keys(self) -> List[Hashable]:
return list(self.data.keys())


class OpReadMultiFromDataframe(OpReadDataframe):
    """
    Read multiple entries from a dataframe at once.

    The sample id is expected to be a single string built from multiple
    dataframe indices joined by a separator (default ``"@SEP@"``).
    A separator-joined string (rather than a tuple of ids) is used because
    sample ids must be hashable and simple to concatenate/split.

    Example:
        df = pd.DataFrame({
            "sample_id": [0, 1, 2, 3, 4],
            "my_data": [10, 11, 12, 13, 14],
        })
        sample_dict = {
            "data.sample_id": "3@SEP@4",
        }
        will read row 3 from the dataframe into sample_dict["my_data.0"] = 13
        and row 4 into sample_dict["my_data.1"] = 14
    """

    def __init__(
        self,
        data: Optional[pd.DataFrame] = None,
        data_filename: Optional[str] = None,
        columns_to_extract: Optional[List[str]] = None,
        rename_columns: Optional[Dict[str, str]] = None,
        key_name: str = "data.sample_id",
        key_column: str = "sample_id",
        multi_key_sep: str = "@SEP@",
    ):
        """
        :param multi_key_sep: separator used to join/split multiple dataframe indices
            within a single sample id string.
        All other parameters are forwarded unchanged to OpReadDataframe.
        """
        super().__init__(
            data,
            data_filename,
            columns_to_extract,
            rename_columns,
            key_name,
            key_column,
        )

        self._multi_key_sep = multi_key_sep

        # convert ids to strings to support simple split and concat.
        # guard against an empty dataframe - next(iter(...)) would raise StopIteration
        if self._data and not isinstance(next(iter(self._data)), str):
            self._data = {str(k): v for k, v in self._data.items()}

    def __call__(self, sample_dict: NDict, prefix: Optional[str] = None) -> NDict:
        multi_key = sample_dict[self._key_name]

        # explicit validation: `assert` would be silently stripped under `python -O`
        if not isinstance(multi_key, str):
            raise TypeError(
                f"Error: only str sample ids are supported, got {type(multi_key).__name__}"
            )

        # str.split returns [multi_key] when the separator is absent,
        # so a single-key id is handled by the same code path
        keys = multi_key.split(self._multi_key_sep)

        for key_index, key in enumerate(keys):
            # locate the required item (copy so the cached row is not mutated)
            sample_data = self._data[key].copy()

            # add values to sample_dict, suffixing each name with the key position
            for name, value in sample_data.items():
                if prefix is None:
                    sample_dict[f"{name}.{key_index}"] = value
                else:
                    sample_dict[f"{prefix}.{name}.{key_index}"] = value

        return sample_dict


class OpReadHDF5(OpBase):
"""
Op reading data from hd5f based dataset
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1137,7 +1137,7 @@ def encode_list(
# of input mapped to unk.
if on_unknown == "raise":
raise RuntimeError(
f"Encountered {unk_count} unknown tokens out of {len(merged_encoding.ids)} in input starting with {typed_input_list[0].input_string}"
f"Encountered {unk_count} unknown tokens out of {len(merged_encoding.ids)} in input starting with {[typed_input.input_string for typed_input in typed_input_list]}"
)
elif on_unknown == "warn":
if verbose == 0:
Expand Down