Commit

fix & ignore failures
mroeschke committed Oct 9, 2024
1 parent 936fe66 commit cb2eb54
Showing 22 changed files with 37 additions and 37 deletions.
6 changes: 1 addition & 5 deletions pandas/core/apply.py
@@ -246,12 +246,8 @@ def transform(self) -> DataFrame | Series:
and not obj.empty
):
raise ValueError("Transform function failed")
- # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
- # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
- # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
- # Series]"
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
- obj.index # type: ignore[arg-type]
+ obj.index
):
raise ValueError("Function did not transform")

4 changes: 2 additions & 2 deletions pandas/core/arraylike.py
@@ -403,12 +403,12 @@ def _reconstruct(result):
# for np.<ufunc>(..) calls
# kwargs cannot necessarily be handled block-by-block, so only
# take this path if there are no kwargs
- mgr = inputs[0]._mgr
+ mgr = inputs[0]._mgr # pyright: ignore[reportGeneralTypeIssues]
result = mgr.apply(getattr(ufunc, method))
else:
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
# Those can have an axis keyword and thus can't be called block-by-block
- result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
+ result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) # pyright: ignore[reportGeneralTypeIssues]
# e.g. np.negative (only one reached), with "where" and "out" in kwargs

result = reconstruct(result)
2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/array.py
@@ -2426,7 +2426,7 @@ def _str_rindex(self, sub: str, start: int = 0, end: int | None = None) -> Self:
result = self._apply_elementwise(predicate)
return type(self)(pa.chunked_array(result))

- def _str_normalize(self, form: str) -> Self:
+ def _str_normalize(self, form: Literal["NFC", "NFD", "NFKC", "NFKD"]) -> Self:
predicate = lambda val: unicodedata.normalize(form, val)
result = self._apply_elementwise(predicate)
return type(self)(pa.chunked_array(result))
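The narrowed annotation matches the four normalization forms `unicodedata.normalize` accepts. A minimal standalone sketch of the same typing pattern (the helper name is hypothetical, not pandas code):

```python
from typing import Literal
import unicodedata

def normalize_text(form: Literal["NFC", "NFD", "NFKC", "NFKD"], val: str) -> str:
    # unicodedata.normalize also rejects any other form name at runtime
    return unicodedata.normalize(form, val)

print(normalize_text("NFC", "e\u0301"))  # combining accent folded into one code point
```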
2 changes: 1 addition & 1 deletion pandas/core/arrays/boolean.py
@@ -369,7 +369,7 @@ def _coerce_to_array(
assert dtype == "boolean"
return coerce_to_array(value, copy=copy)

- def _logical_method(self, other, op):
+ def _logical_method(self, other, op): # type: ignore[override]
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_scalar = lib.is_scalar(other)
mask = None
2 changes: 1 addition & 1 deletion pandas/core/computation/expr.py
@@ -168,7 +168,7 @@ def _preparse(
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), "f must be callable"
- return tokenize.untokenize(f(x) for x in tokenize_string(source))
+ return tokenize.untokenize(f(x) for x in tokenize_string(source)) # pyright: ignore[reportArgumentType]


def _is_type(t):
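`_preparse` feeds `(toknum, tokval)` 2-tuples back into `tokenize.untokenize`, which is what the pyright suppression covers. A small standalone round-trip in the same spirit (the helper name and sample source are made up):

```python
import io
import tokenize

def shout_names(tok: tuple) -> tuple:
    # Keep only (toknum, tokval), upper-casing NAME tokens along the way.
    toknum, tokval = tok[:2]
    if toknum == tokenize.NAME:
        return toknum, tokval.upper()
    return toknum, tokval

source = "price * quantity"
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
# untokenize accepts 2-tuples; spacing in this compatibility mode may differ from the input.
print(tokenize.untokenize(shout_names(t) for t in tokens))
```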
2 changes: 1 addition & 1 deletion pandas/core/frame.py
@@ -2306,7 +2306,7 @@ def maybe_reorder(

if any(exclude):
arr_exclude = (x for x in exclude if x in arr_columns)
- to_remove = {arr_columns.get_loc(col) for col in arr_exclude}
+ to_remove = {arr_columns.get_loc(col) for col in arr_exclude} # pyright: ignore[reportUnhashable]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]

columns = columns.drop(exclude)
8 changes: 5 additions & 3 deletions pandas/core/groupby/groupby.py
@@ -3719,7 +3719,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
mask = isna(values)
if values.ndim == 1:
indexer = np.empty(values.shape, dtype=np.intp)
- col_func(out=indexer, mask=mask)
+ col_func(out=indexer, mask=mask) # type: ignore[arg-type]
return algorithms.take_nd(values, indexer)

else:
@@ -4081,7 +4081,9 @@ def _nth(
def quantile(
self,
q: float | AnyArrayLike = 0.5,
interpolation: str = "linear",
interpolation: Literal[
"linear", "lower", "higher", "nearest", "midpoint"
] = "linear",
numeric_only: bool = False,
):
"""
@@ -4270,7 +4272,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
func(
out[0],
values=vals,
- mask=mask,
+ mask=mask, # type: ignore[arg-type]
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
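With `interpolation` typed as a `Literal`, a checker can flag misspelled strategies at the call site. An illustrative call against the public groupby API (the frame contents are made up):

```python
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 2, 3, 4]})
# Only "linear", "lower", "higher", "nearest" and "midpoint" are valid here.
print(df.groupby("key")["val"].quantile(0.5, interpolation="nearest"))
```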
4 changes: 2 additions & 2 deletions pandas/core/indexes/interval.py
@@ -991,7 +991,7 @@ def length(self) -> Index:
# --------------------------------------------------------------------
# Set Operations

- def _intersection(self, other, sort):
+ def _intersection(self, other, sort: bool = False):
"""
intersection specialized to the case with matching dtypes.
"""
@@ -1006,7 +1006,7 @@ def _intersection(self, other, sort):
# duplicates
taken = self._intersection_non_unique(other)

- if sort is None:
+ if sort:
taken = taken.sort_values()

return taken
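`_intersection` now takes a plain `sort: bool = False` and sorts only when it is truthy. An illustrative use of the public `intersection` API (the intervals are made up):

```python
import pandas as pd

left = pd.IntervalIndex.from_tuples([(2, 3), (0, 1), (1, 2)])
right = pd.IntervalIndex.from_tuples([(1, 2), (0, 1)])
# sort=False keeps the calling index's order instead of sorting the result.
print(left.intersection(right, sort=False))
```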
2 changes: 1 addition & 1 deletion pandas/core/resample.py
@@ -404,7 +404,7 @@ def transform(self, arg, *args, **kwargs):
arg, *args, **kwargs
)

- def _downsample(self, f, **kwargs):
+ def _downsample(self, how, **kwargs):
raise AbstractMethodError(self)

def _upsample(self, f, limit: int | None = None, fill_value=None):
3 changes: 1 addition & 2 deletions pandas/core/series.py
@@ -813,8 +813,7 @@ def _values(self):
def _references(self) -> BlockValuesRefs:
return self._mgr._block.refs

- # error: Decorated property not supported
- @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]
+ @Appender(base.IndexOpsMixin.array.__doc__)
@property
def array(self) -> ExtensionArray:
return self._mgr.array_values()
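The property being decorated is the public `Series.array` accessor; a short illustration of what it returns (the data is made up):

```python
import pandas as pd

s = pd.Series([1, 2, 3], dtype="Int64")
# .array exposes the backing ExtensionArray rather than converting to a NumPy ndarray.
print(type(s.array).__name__)  # IntegerArray for this nullable dtype
```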
2 changes: 1 addition & 1 deletion pandas/core/tools/datetimes.py
@@ -1000,7 +1000,7 @@ def to_datetime(
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
- exact=exact,
+ exact=exact, # type: ignore[arg-type]
)
result: Timestamp | NaTType | Series | Index

2 changes: 1 addition & 1 deletion pandas/core/window/rolling.py
@@ -1507,7 +1507,7 @@ def _generate_cython_apply_func(
window_aggregations.roll_apply,
args=args,
kwargs=kwargs,
- raw=raw,
+ raw=bool(raw),
function=function,
)

2 changes: 1 addition & 1 deletion pandas/io/excel/_odswriter.py
@@ -34,7 +34,7 @@ class ODSWriter(ExcelWriter):
_engine = "odf"
_supported_extensions = (".ods",)

- def __init__(
+ def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
2 changes: 1 addition & 1 deletion pandas/io/excel/_openpyxl.py
@@ -42,7 +42,7 @@ class OpenpyxlWriter(ExcelWriter):
_engine = "openpyxl"
_supported_extensions = (".xlsx", ".xlsm")

- def __init__(
+ def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
2 changes: 1 addition & 1 deletion pandas/io/excel/_xlsxwriter.py
@@ -181,7 +181,7 @@ class XlsxWriter(ExcelWriter):
_engine = "xlsxwriter"
_supported_extensions = (".xlsx",)

- def __init__(
+ def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
2 changes: 1 addition & 1 deletion pandas/io/parsers/python_parser.py
@@ -1203,7 +1203,7 @@ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
if callable(self.on_bad_lines):
new_l = self.on_bad_lines(_content)
if new_l is not None:
- content.append(new_l)
+ content.append(new_l) # pyright: ignore[reportArgumentType]
elif self.on_bad_lines in (
self.BadLineHandleMethod.ERROR,
self.BadLineHandleMethod.WARN,
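The branch above handles a user-supplied callable for `on_bad_lines`; a small illustrative use of the public `read_csv` hook (the CSV content is made up):

```python
import io
import pandas as pd

data = "a,b\n1,2\n3,4,5\n6,7\n"
# The callable receives each malformed row as a list of strings and returns a fixed
# row (or None to drop it); callable handlers require engine="python".
df = pd.read_csv(io.StringIO(data), engine="python", on_bad_lines=lambda row: row[:2])
print(df)
```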
2 changes: 1 addition & 1 deletion pandas/io/sas/sas7bdat.py
@@ -516,7 +516,7 @@ def _process_columntext_subheader(self, offset: int, length: int) -> None:
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0 : self._lcp]
if hasattr(self, "creator_proc"):
- self.creator_proc = self._convert_header_text(self.creator_proc)
+ self.creator_proc = self._convert_header_text(self.creator_proc) # pyright: ignore[reportArgumentType]

def _process_columnname_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
6 changes: 3 additions & 3 deletions pandas/io/sql.py
@@ -233,7 +233,7 @@ def _wrap_result_adbc(


@overload
- def read_sql_table(
+ def read_sql_table( # pyright: ignore[reportOverlappingOverload]
table_name: str,
con,
schema=...,
@@ -364,7 +364,7 @@ def read_sql_table(


@overload
- def read_sql_query(
+ def read_sql_query( # pyright: ignore[reportOverlappingOverload]
sql,
con,
index_col: str | list[str] | None = ...,
@@ -500,7 +500,7 @@ def read_sql_query(


@overload
- def read_sql(
+ def read_sql( # pyright: ignore[reportOverlappingOverload]
sql,
con,
index_col: str | list[str] | None = ...,
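All three `read_sql*` overload stacks receive the same suppression. A minimal, self-contained sketch of what pyright's `reportOverlappingOverload` flags (the function is hypothetical, not from pandas):

```python
from typing import overload

# The first overload accepts the same arguments as the second but declares an
# incompatible return type, so pyright reports that the second can never be selected.
@overload
def fetch(key: int) -> int: ...
@overload
def fetch(key: int) -> str: ...
def fetch(key: int) -> int | str:
    return key
```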
6 changes: 3 additions & 3 deletions pandas/plotting/_matplotlib/converter.py
@@ -527,7 +527,7 @@ def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]:

ppd = -1 # placeholder for above-day freqs

- if dtype_code >= FreqGroup.FR_HR.value:
+ if dtype_code >= FreqGroup.FR_HR.value: # pyright: ignore[reportAttributeAccessIssue]
# error: "BaseOffset" has no attribute "_creso"
ppd = periods_per_day(freq._creso) # type: ignore[attr-defined]
ppm = 28 * ppd
@@ -684,7 +684,7 @@ def _second_finder(label_interval: int) -> None:
elif span <= periodsperyear // 4:
month_start = _period_break(dates_, "month")
info_maj[month_start] = True
- if dtype_code < FreqGroup.FR_HR.value:
+ if dtype_code < FreqGroup.FR_HR.value: # pyright: ignore[reportAttributeAccessIssue]
info["min"] = True
else:
day_start = _period_break(dates_, "day")
@@ -910,7 +910,7 @@ def get_finder(freq: BaseOffset):
return _quarterly_finder
elif fgroup == FreqGroup.FR_MTH:
return _monthly_finder
- elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK:
+ elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: # pyright: ignore[reportAttributeAccessIssue]
return _daily_finder
else: # pragma: no cover
raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
4 changes: 2 additions & 2 deletions pandas/tests/frame/test_ufunc.py
@@ -66,14 +66,14 @@ def test_binary_input_dispatch_binop(dtype):
[
(np.add, 1, [2, 3, 4, 5]),
(
- partial(np.add, where=[[False, True], [True, False]]),
+ partial(np.add, where=[[False, True], [True, False]]), # type: ignore[misc]
np.array([[1, 1], [1, 1]]),
[0, 3, 4, 0],
),
(np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]),
(np.subtract, 2, [-1, 0, 1, 2]),
(
- partial(np.negative, where=np.array([[False, True], [True, False]])),
+ partial(np.negative, where=np.array([[False, True], [True, False]])), # type: ignore[misc]
None,
[0, -2, -3, 0],
),
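The `where=` keyword exercised by those partials masks which output elements the ufunc writes, which is what the expected `[0, 3, 4, 0]` row encodes. A standalone NumPy illustration:

```python
import numpy as np

out = np.zeros(4)
# Positions where the mask is False are left untouched and keep the zeros in `out`.
np.add([1, 2, 3, 4], 1, out=out, where=[False, True, True, False])
print(out)  # [0. 3. 4. 0.]
```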
2 changes: 1 addition & 1 deletion pandas/tests/indexes/test_base.py
@@ -1635,7 +1635,7 @@ def test_generated_op_names(opname, index):
partial(DatetimeIndex, data=["2020-01-01"]),
partial(PeriodIndex, data=["2020-01-01"]),
partial(TimedeltaIndex, data=["1 day"]),
- partial(RangeIndex, data=range(1)),
+ partial(RangeIndex, start=range(1)),
partial(IntervalIndex, data=[pd.Interval(0, 1)]),
partial(Index, data=["a"], dtype=object),
partial(MultiIndex, levels=[1], codes=[0]),
7 changes: 5 additions & 2 deletions pandas/util/_exceptions.py
@@ -4,7 +4,10 @@
import inspect
import os
import re
- from typing import TYPE_CHECKING
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+ )
import warnings

if TYPE_CHECKING:
@@ -24,7 +27,7 @@ def rewrite_exception(old_name: str, new_name: str) -> Generator[None]:
raise
msg = str(err.args[0])
msg = msg.replace(old_name, new_name)
- args: tuple[str, ...] = (msg,)
+ args: tuple[Any, ...] = (msg,)
if len(err.args) > 1:
args = args + err.args[1:]
err.args = args
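Widening the annotation to `tuple[Any, ...]` reflects that trailing exception args need not be strings. The same pattern as a standalone sketch (the helper name is made up):

```python
from typing import Any

def rename_in_error(err: Exception, old: str, new: str) -> Exception:
    # Rewrite only the message argument; keep any extra, possibly non-string args intact.
    msg = str(err.args[0]).replace(old, new)
    args: tuple[Any, ...] = (msg,)
    if len(err.args) > 1:
        args = args + err.args[1:]
    err.args = args
    return err

err = KeyError("OldIndex not found", 404)
print(rename_in_error(err, "OldIndex", "NewIndex").args)  # ('NewIndex not found', 404)
```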
