Skip to content

Commit

Permalink
fix: update deps, fix unit tests
Browse files Browse the repository at this point in the history
  • Loading branch information
kaloster committed Aug 30, 2024
1 parent 3c2d2c8 commit 9479c06
Show file tree
Hide file tree
Showing 6 changed files with 25 additions and 26 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/compatibility_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ jobs:
matrix:
# note: The `macos-latest` is latest Catalina version, and not Big Sur. So we explicitly ask for Big Sur (`macos-11`)
os: [ubuntu-latest, macos-latest, macos-11]
python-version: ["3.8", "3.9", "3.10", "3.11"]
python-version: ["3.10", "3.11"]
cellxgene_build: [main, latest]
# add anndata pinned version test for subset of matrix configurations,
# in order to reduce matrix cross-product explosion
Expand Down
3 changes: 1 addition & 2 deletions server/common/utils/type_conversion_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def _get_type_info(array: Union[np.ndarray, pd.Series, pd.Index]) -> Tuple[np.dt
raise TypeError("Unsupported data type.")

dtype = array.dtype

res = _get_type_info_from_dtype(dtype)
if res is not None:
return res
Expand All @@ -140,7 +140,6 @@ def _get_type_info(array: Union[np.ndarray, pd.Series, pd.Index]) -> Tuple[np.dt

if dtype.kind in ["i", "u"] and _can_cast_array_values_to_int32(array):
return (np.int32, {"type": "int32"})

if dtype.kind == "f":
_float64_warning(array.dtype)
return (np.float32, {"type": "float32"})
Expand Down
6 changes: 3 additions & 3 deletions server/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@ flatten-dict>=0.2.0
fsspec>0.8.0
gunicorn>=20.0.4
h5py>=3.0.0
numba>=0.51.2
numpy>1.22
numba>=0.60.0
numpy==2.0.1
packaging>=20.0
pandas<2.0.0
pandas>=2.2.2
PyYAML>=5.4 # CVE-2020-14343
requests>=2.22.0
s3fs==0.4.2
Expand Down
20 changes: 10 additions & 10 deletions test/unit/compute/test_est_dist.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,13 +65,13 @@ def put(arr, ind, vals):

# non-finites
self.assertEqual(estimate_approximate_distribution(np.array([np.nan])), XApproximateDistribution.NORMAL)
self.assertEqual(estimate_approximate_distribution(np.array([np.PINF])), XApproximateDistribution.NORMAL)
self.assertEqual(estimate_approximate_distribution(np.array([np.NINF])), XApproximateDistribution.NORMAL)
self.assertEqual(estimate_approximate_distribution(np.array([np.inf])), XApproximateDistribution.NORMAL)
self.assertEqual(estimate_approximate_distribution(np.array([-np.inf])), XApproximateDistribution.NORMAL)
self.assertEqual(
estimate_approximate_distribution(np.array([np.PINF, np.NINF, 0])), XApproximateDistribution.NORMAL
estimate_approximate_distribution(np.array([np.inf, -np.inf, 0])), XApproximateDistribution.NORMAL
)
self.assertEqual(
estimate_approximate_distribution(np.array([np.nan, np.PINF, np.NINF])), XApproximateDistribution.NORMAL
estimate_approximate_distribution(np.array([np.nan, np.inf, -np.inf])), XApproximateDistribution.NORMAL
)

raw = np.random.exponential(scale=1000, size=(50, 3))
Expand All @@ -82,15 +82,15 @@ def put(arr, ind, vals):
XApproximateDistribution.COUNT,
)
self.assertEqual(
estimate_approximate_distribution(put(raw, [1], [np.PINF])),
estimate_approximate_distribution(put(raw, [1], [np.inf])),
XApproximateDistribution.COUNT,
)
self.assertEqual(
estimate_approximate_distribution(put(raw, [1], [np.NINF])),
estimate_approximate_distribution(put(raw, [1], [-np.inf])),
XApproximateDistribution.COUNT,
)
self.assertEqual(
estimate_approximate_distribution(put(raw, [1, 3, 88], [np.nan, np.PINF, np.NINF])),
estimate_approximate_distribution(put(raw, [1, 3, 88], [np.nan, np.inf, -np.inf])),
XApproximateDistribution.COUNT,
)
self.assertEqual(
Expand All @@ -103,15 +103,15 @@ def put(arr, ind, vals):
XApproximateDistribution.NORMAL,
)
self.assertEqual(
estimate_approximate_distribution(put(logged, [1], [np.PINF])),
estimate_approximate_distribution(put(logged, [1], [np.inf])),
XApproximateDistribution.NORMAL,
)
self.assertEqual(
estimate_approximate_distribution(put(logged, [1], [np.NINF])),
estimate_approximate_distribution(put(logged, [1], [-np.inf])),
XApproximateDistribution.NORMAL,
)
self.assertEqual(
estimate_approximate_distribution(put(logged, [1, 3, 88], [np.nan, np.PINF, np.NINF])),
estimate_approximate_distribution(put(logged, [1, 3, 88], [np.nan, np.inf, -np.inf])),
XApproximateDistribution.NORMAL,
)
self.assertEqual(
Expand Down
4 changes: 2 additions & 2 deletions test/unit/utils/test_jsonify_strict.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@ def test_jsonify_numpy_float_edges(self):
jsonify_strict({"nan": [np.nan]})

with self.assertRaises(ValueError):
jsonify_strict({"pinf": [np.PINF]})
jsonify_strict({"pinf": [np.inf]})

with self.assertRaises(ValueError):
jsonify_strict({"ninf": [np.NINF]})
jsonify_strict({"ninf": [np.inf]})

def test_jsonify_numpy_ndarray(self):
values = {
Expand Down
16 changes: 8 additions & 8 deletions test/unit/utils/test_type_conversion_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def test__get_schema_type_hint_from_dtype(self):
with self.assertRaises(TypeError):
get_schema_type_hint_from_dtype(np.dtype(dtype))

for dtype in [np.float16, np.float32, np.float64]:
for dtype in [np.float32, np.float64]:
self.assertEqual(get_schema_type_hint_from_dtype(np.dtype(dtype)), {"type": "float32"})

for dtype in [np.dtype(object), np.dtype(str)]:
Expand Down Expand Up @@ -126,14 +126,14 @@ def __exit__(self, exc_type, exc_val, exc_tb):
"data": data,
"expected_encoding_dtype": np.float32,
"expected_schema_hint": {"type": "float32"},
"logs": None if data.dtype != np.float64 else {"level": logging.WARNING, "output": "may lose precision"},
"logs": None if data.dtype == np.float32 else {"level": logging.WARNING, "output": "may lose precision"},
}
for dtype in [np.float16, np.float32, np.float64]
for dtype in [np.float32, np.float64]
for data in [
np.arange(-128, 1000, dtype=dtype),
pd.Series(np.arange(-128, 1000, dtype=dtype)),
pd.Index(np.arange(-129, 1000, dtype=dtype)),
np.array([-np.nan, np.NINF, -1, np.NZERO, 0, np.PZERO, 1, np.PINF, np.nan], dtype=dtype),
np.array([-np.nan, -np.inf, -1, -0.0, 0, 0.0, 1, np.inf, np.nan], dtype=dtype),
np.array([np.finfo(dtype).min, 0, np.finfo(dtype).max], dtype=dtype),
sparse.csr_matrix((10, 100), dtype=dtype),
]
Expand Down Expand Up @@ -201,9 +201,9 @@ def __exit__(self, exc_type, exc_val, exc_tb):
"data": data,
"expected_encoding_dtype": np.float32,
"expected_schema_hint": {"type": "categorical"},
"logs": {"level": logging.WARNING, "output": "may lose precision"},
"logs": None if data.dtype == np.float32 else {"level": logging.WARNING, "output": "may lose precision"},
}
for dtype in [np.float16, np.float32, np.float64]
for dtype in [np.float32, np.float64]
for data in [
pd.Series(np.array([0, 1, 2], dtype=dtype), dtype="category"),
pd.Series(np.array([0, 1, 2], dtype=dtype), dtype="category").cat.remove_categories([1]),
Expand All @@ -216,7 +216,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
"data": data,
"expected_encoding_dtype": np.float32,
"expected_schema_hint": {"type": "categorical"},
"logs": {"level": logging.WARNING, "output": "may lose precision"},
"logs": None if data.dtype == np.float32 else {"level": logging.WARNING, "output": "may lose precision"},
}
for dtype in [
np.int8,
Expand All @@ -227,7 +227,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
np.uint32,
np.int64,
np.uint64,
np.float16,
np.float32,
np.float64,
]
Expand Down Expand Up @@ -312,6 +311,7 @@ def test_type_inference(self):
self.assertEqual(encoding_dtype, self.expected_encoding_dtype)
self.assertEqual(schema_hint, self.expected_schema_hint)
self.assertIn(logs["output"], logger.output[0])


else:
with self.assertNoLogs(logging.getLogger(), logging.WARNING):
Expand Down

0 comments on commit 9479c06

Please sign in to comment.