Skip to content

Commit 68d6819

Browse files
authored
chore: move unit test utilities to bigframes.testing module, remove unnecessary skip_legacy_pandas (#1623)
1 parent 4bc9519 commit 68d6819

20 files changed

+198
-145
lines changed

bigframes/testing/__init__.py

Lines changed: 19 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,19 @@
1+
# Copyright 2025 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
"""[Experimental] Utilities for testing BigQuery DataFrames.
16+
17+
These modules are provided for testing the BigQuery DataFrames package. The
18+
interface is not considered stable.
19+
"""

tests/unit/resources.py renamed to bigframes/testing/mocks.py

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -36,12 +36,17 @@
3636

3737

3838
def create_bigquery_session(
39+
*,
3940
bqclient: Optional[mock.Mock] = None,
4041
session_id: str = "abcxyz",
4142
table_schema: Sequence[google.cloud.bigquery.SchemaField] = TEST_SCHEMA,
4243
anonymous_dataset: Optional[google.cloud.bigquery.DatasetReference] = None,
4344
location: str = "test-region",
4445
) -> bigframes.Session:
46+
"""[Experimental] Create a mock BigQuery DataFrames session that avoids making Google Cloud API calls.
47+
48+
Intended for unit test environments that don't have access to the network.
49+
"""
4550
credentials = mock.create_autospec(
4651
google.auth.credentials.Credentials, instance=True
4752
)
@@ -108,8 +113,12 @@ def query_and_wait_mock(query, *args, **kwargs):
108113

109114

110115
def create_dataframe(
111-
monkeypatch: pytest.MonkeyPatch, session: Optional[bigframes.Session] = None
116+
monkeypatch: pytest.MonkeyPatch, *, session: Optional[bigframes.Session] = None
112117
) -> bigframes.dataframe.DataFrame:
118+
"""[Experimental] Create a mock DataFrame that avoids making Google Cloud API calls.
119+
120+
Intended for unit test environments that don't have access to the network.
121+
"""
113122
if session is None:
114123
session = create_bigquery_session()
115124

tests/system/small/operations/test_datetimes.py

Lines changed: 31 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -20,7 +20,7 @@
2020
import pytest
2121

2222
import bigframes.series
23-
from tests.system.utils import assert_series_equal, skip_legacy_pandas
23+
from tests.system.utils import assert_series_equal
2424

2525
DATETIME_COL_NAMES = [("datetime_col",), ("timestamp_col",)]
2626
DATE_COLUMNS = [
@@ -34,8 +34,9 @@
3434
("col_name",),
3535
DATE_COLUMNS,
3636
)
37-
@skip_legacy_pandas
3837
def test_dt_day(scalars_dfs, col_name):
38+
# TODO: supply a reason why this isn't compatible with pandas 1.x
39+
pytest.importorskip("pandas", minversion="2.0.0")
3940
scalars_df, scalars_pandas_df = scalars_dfs
4041
bf_series: bigframes.series.Series = scalars_df[col_name]
4142
bf_result = bf_series.dt.day.to_pandas()
@@ -51,8 +52,9 @@ def test_dt_day(scalars_dfs, col_name):
5152
("col_name",),
5253
DATETIME_COL_NAMES,
5354
)
54-
@skip_legacy_pandas
5555
def test_dt_date(scalars_dfs, col_name):
56+
# TODO: supply a reason why this isn't compatible with pandas 1.x
57+
pytest.importorskip("pandas", minversion="2.0.0")
5658
scalars_df, scalars_pandas_df = scalars_dfs
5759
bf_series: bigframes.series.Series = scalars_df[col_name]
5860
bf_result = bf_series.dt.date.to_pandas()
@@ -68,8 +70,9 @@ def test_dt_date(scalars_dfs, col_name):
6870
("col_name",),
6971
DATE_COLUMNS,
7072
)
71-
@skip_legacy_pandas
7273
def test_dt_dayofweek(scalars_dfs, col_name):
74+
# TODO: supply a reason why this isn't compatible with pandas 1.x
75+
pytest.importorskip("pandas", minversion="2.0.0")
7376
scalars_df, scalars_pandas_df = scalars_dfs
7477
bf_series: bigframes.series.Series = scalars_df[col_name]
7578
bf_result = bf_series.dt.dayofweek.to_pandas()
@@ -82,8 +85,9 @@ def test_dt_dayofweek(scalars_dfs, col_name):
8285
("col_name",),
8386
DATETIME_COL_NAMES,
8487
)
85-
@skip_legacy_pandas
8688
def test_dt_hour(scalars_dfs, col_name):
89+
# TODO: supply a reason why this isn't compatible with pandas 1.x
90+
pytest.importorskip("pandas", minversion="2.0.0")
8791
scalars_df, scalars_pandas_df = scalars_dfs
8892
bf_series: bigframes.series.Series = scalars_df[col_name]
8993
bf_result = bf_series.dt.hour.to_pandas()
@@ -99,8 +103,9 @@ def test_dt_hour(scalars_dfs, col_name):
99103
("col_name",),
100104
DATETIME_COL_NAMES,
101105
)
102-
@skip_legacy_pandas
103106
def test_dt_minute(scalars_dfs, col_name):
107+
# TODO: supply a reason why this isn't compatible with pandas 1.x
108+
pytest.importorskip("pandas", minversion="2.0.0")
104109
scalars_df, scalars_pandas_df = scalars_dfs
105110
bf_series: bigframes.series.Series = scalars_df[col_name]
106111
bf_result = bf_series.dt.minute.to_pandas()
@@ -116,8 +121,9 @@ def test_dt_minute(scalars_dfs, col_name):
116121
("col_name",),
117122
DATE_COLUMNS,
118123
)
119-
@skip_legacy_pandas
120124
def test_dt_month(scalars_dfs, col_name):
125+
# TODO: supply a reason why this isn't compatible with pandas 1.x
126+
pytest.importorskip("pandas", minversion="2.0.0")
121127
scalars_df, scalars_pandas_df = scalars_dfs
122128
bf_series: bigframes.series.Series = scalars_df[col_name]
123129
bf_result = bf_series.dt.month.to_pandas()
@@ -133,8 +139,9 @@ def test_dt_month(scalars_dfs, col_name):
133139
("col_name",),
134140
DATE_COLUMNS,
135141
)
136-
@skip_legacy_pandas
137142
def test_dt_quarter(scalars_dfs, col_name):
143+
# TODO: supply a reason why this isn't compatible with pandas 1.x
144+
pytest.importorskip("pandas", minversion="2.0.0")
138145
scalars_df, scalars_pandas_df = scalars_dfs
139146
bf_series: bigframes.series.Series = scalars_df[col_name]
140147
bf_result = bf_series.dt.quarter.to_pandas()
@@ -150,8 +157,9 @@ def test_dt_quarter(scalars_dfs, col_name):
150157
("col_name",),
151158
DATETIME_COL_NAMES,
152159
)
153-
@skip_legacy_pandas
154160
def test_dt_second(scalars_dfs, col_name):
161+
# TODO: supply a reason why this isn't compatible with pandas 1.x
162+
pytest.importorskip("pandas", minversion="2.0.0")
155163
scalars_df, scalars_pandas_df = scalars_dfs
156164
bf_series: bigframes.series.Series = scalars_df[col_name]
157165
bf_result = bf_series.dt.second.to_pandas()
@@ -167,8 +175,9 @@ def test_dt_second(scalars_dfs, col_name):
167175
("col_name",),
168176
DATETIME_COL_NAMES,
169177
)
170-
@skip_legacy_pandas
171178
def test_dt_time(scalars_dfs, col_name):
179+
# TODO: supply a reason why this isn't compatible with pandas 1.x
180+
pytest.importorskip("pandas", minversion="2.0.0")
172181
scalars_df, scalars_pandas_df = scalars_dfs
173182
bf_series: bigframes.series.Series = scalars_df[col_name]
174183
bf_result = bf_series.dt.time.to_pandas()
@@ -184,8 +193,9 @@ def test_dt_time(scalars_dfs, col_name):
184193
("col_name",),
185194
DATE_COLUMNS,
186195
)
187-
@skip_legacy_pandas
188196
def test_dt_year(scalars_dfs, col_name):
197+
# TODO: supply a reason why this isn't compatible with pandas 1.x
198+
pytest.importorskip("pandas", minversion="2.0.0")
189199
scalars_df, scalars_pandas_df = scalars_dfs
190200
bf_series: bigframes.series.Series = scalars_df[col_name]
191201
bf_result = bf_series.dt.year.to_pandas()
@@ -201,8 +211,9 @@ def test_dt_year(scalars_dfs, col_name):
201211
("col_name",),
202212
DATETIME_COL_NAMES,
203213
)
204-
@skip_legacy_pandas
205214
def test_dt_tz(scalars_dfs, col_name):
215+
# TODO: supply a reason why this isn't compatible with pandas 1.x
216+
pytest.importorskip("pandas", minversion="2.0.0")
206217
scalars_df, scalars_pandas_df = scalars_dfs
207218
bf_series: bigframes.series.Series = scalars_df[col_name]
208219
bf_result = bf_series.dt.tz
@@ -215,8 +226,9 @@ def test_dt_tz(scalars_dfs, col_name):
215226
("col_name",),
216227
DATETIME_COL_NAMES,
217228
)
218-
@skip_legacy_pandas
219229
def test_dt_unit(scalars_dfs, col_name):
230+
# TODO: supply a reason why this isn't compatible with pandas 1.x
231+
pytest.importorskip("pandas", minversion="2.0.0")
220232
scalars_df, scalars_pandas_df = scalars_dfs
221233
bf_series: bigframes.series.Series = scalars_df[col_name]
222234
bf_result = bf_series.dt.unit
@@ -234,8 +246,9 @@ def test_dt_unit(scalars_dfs, col_name):
234246
("datetime_col", "%H:%M"),
235247
],
236248
)
237-
@skip_legacy_pandas
238249
def test_dt_strftime(scalars_df_index, scalars_pandas_df_index, column, date_format):
250+
# TODO: supply a reason why this isn't compatible with pandas 1.x
251+
pytest.importorskip("pandas", minversion="2.0.0")
239252
bf_result = scalars_df_index[column].dt.strftime(date_format).to_pandas()
240253
pd_result = scalars_pandas_df_index[column].dt.strftime(date_format)
241254
pd.testing.assert_series_equal(bf_result, pd_result, check_dtype=False)
@@ -276,8 +289,9 @@ def test_dt_strftime_time():
276289
("col_name",),
277290
DATETIME_COL_NAMES,
278291
)
279-
@skip_legacy_pandas
280292
def test_dt_normalize(scalars_dfs, col_name):
293+
# TODO: supply a reason why this isn't compatible with pandas 1.x
294+
pytest.importorskip("pandas", minversion="2.0.0")
281295
scalars_df, scalars_pandas_df = scalars_dfs
282296
bf_result = scalars_df[col_name].dt.normalize().to_pandas()
283297
pd_result = scalars_pandas_df[col_name].dt.normalize()
@@ -297,8 +311,9 @@ def test_dt_normalize(scalars_dfs, col_name):
297311
("datetime_col", "us"),
298312
],
299313
)
300-
@skip_legacy_pandas
301314
def test_dt_floor(scalars_dfs, col_name, freq):
315+
# TODO: supply a reason why this isn't compatible with pandas 1.x
316+
pytest.importorskip("pandas", minversion="2.0.0")
302317
scalars_df, scalars_pandas_df = scalars_dfs
303318
bf_result = scalars_df[col_name].dt.floor(freq).to_pandas()
304319
pd_result = scalars_pandas_df[col_name].dt.floor(freq)

0 commit comments

Comments (0)