Migrate to correct logger interface #2996

Open · wants to merge 1 commit into main
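Background: in Python's standard `logging` module, `Logger.warn` is a deprecated alias of `Logger.warning`; on recent CPython versions the alias still forwards to `warning` but also emits a `DeprecationWarning`. A minimal sketch of the difference (the logger name below is illustrative, not taken from TorchRec):

```python
import logging
import warnings

# DeprecationWarning is hidden by default outside __main__, so surface it here.
warnings.simplefilter("always", DeprecationWarning)

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("torchrec.example")  # illustrative logger name

logger.warn("duplicate argument")     # deprecated alias; emits DeprecationWarning
logger.warning("duplicate argument")  # supported interface this PR migrates to
```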
2 changes: 1 addition & 1 deletion torchrec/distributed/benchmark/benchmark_utils.py
@@ -498,7 +498,7 @@ def rtf(**kwargs):
if field.name not in names:
names.add(field.name)
else:
- logger.warn(f"WARNING: duplicate argument {field.name}")
+ logger.warning(f"WARNING: duplicate argument {field.name}")
continue
rtf = click.option(
f"--{field.name}", type=field.type, default=field.default
4 changes: 2 additions & 2 deletions torchrec/distributed/mc_modules.py
@@ -413,7 +413,7 @@ def _create_managed_collision_modules(
), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

if self._sharding_features[-1] != sharding.feature_names():
- logger.warn(
+ logger.warning(
"The order of tables of this sharding is altered due to grouping: "
f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
)
@@ -1105,7 +1105,7 @@ def _create_managed_collision_modules(
), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

if self._sharding_features[-1] != sharding.feature_names():
- logger.warn(
+ logger.warning(
"The order of tables of this sharding is altered due to grouping: "
f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
)
4 changes: 2 additions & 2 deletions torchrec/distributed/planner/enumerators.py
@@ -253,7 +253,7 @@ def _filter_sharding_types(
set(constrained_sharding_types) & set(allowed_sharding_types)
)
if not filtered_sharding_types:
- logger.warn(
+ logger.warning(
"No available sharding types after applying user provided "
f"constraints for {name}. Constrained sharding types: "
f"{constrained_sharding_types}, allowed sharding types: "
@@ -300,7 +300,7 @@ def _filter_compute_kernels(
filtered_compute_kernels.remove(EmbeddingComputeKernel.DENSE.value)

if not filtered_compute_kernels:
- logger.warn(
+ logger.warning(
"No available compute kernels after applying user provided "
f"constraints for {name}. Constrained compute kernels: "
f"{constrained_compute_kernels}, allowed compute kernels: "
2 changes: 1 addition & 1 deletion torchrec/distributed/train_pipeline/utils.py
@@ -1583,7 +1583,7 @@ def _rewrite_model( # noqa C901
input_model.module = graph_model

if non_pipelined_sharded_modules:
- logger.warn(
+ logger.warning(
"Sharded modules were not pipelined: %s. "
+ "This should be fixed for pipelining to work to the full extent.",
", ".join(non_pipelined_sharded_modules),
2 changes: 1 addition & 1 deletion torchrec/metrics/throughput.py
@@ -99,7 +99,7 @@ def __init__(
)

if window_seconds > MAX_WINDOW_TS:
- logger.warn(
+ logger.warning(
f"window_seconds is greater than {MAX_WINDOW_TS}, capping to {MAX_WINDOW_TS} to make sure window_qps is not staled"
)
window_seconds = MAX_WINDOW_TS
2 changes: 1 addition & 1 deletion torchrec/sparse/jagged_tensor.py
@@ -2704,7 +2704,7 @@ def to_dict(self) -> Dict[str, JaggedTensor]:
Dict[str, JaggedTensor]: dictionary of JaggedTensor for each key.
"""
if not torch.jit.is_scripting() and is_non_strict_exporting():
- logger.warn(
+ logger.warning(
"Trying to non-strict torch.export KJT to_dict, which is extremely slow and not recommended!"
)
_jt_dict = _maybe_compute_kjt_to_jt_dict(
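A follow-up option for keeping the deprecated spelling from reappearing is to escalate the warning in the test environment; this is a sketch under the assumption that tests run on CPython, and the placement (e.g. a conftest.py) is up to the project:

```python
import warnings

# Turn the DeprecationWarning emitted by Logger.warn into a hard error.
# The message prefix matches the text emitted by recent CPython versions.
warnings.filterwarnings(
    "error",
    message="The 'warn' method is deprecated",
    category=DeprecationWarning,
)
```

Equivalently, running the interpreter with `-W error::DeprecationWarning` turns every deprecation warning into an error.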