Skip to content

chore: move unit test utilities to bigframes.testing module, remove unnecessary skip_legacy_pandas #1623

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged 1 commit on Apr 17, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions bigframes/testing/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""[Experimental] Utilities for testing BigQuery DataFrames.

These modules are provided for testing the BigQuery DataFrames package. The
interface is not considered stable.
"""
11 changes: 10 additions & 1 deletion tests/unit/resources.py → bigframes/testing/mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,17 @@


def create_bigquery_session(
*,
bqclient: Optional[mock.Mock] = None,
session_id: str = "abcxyz",
table_schema: Sequence[google.cloud.bigquery.SchemaField] = TEST_SCHEMA,
anonymous_dataset: Optional[google.cloud.bigquery.DatasetReference] = None,
location: str = "test-region",
) -> bigframes.Session:
"""[Experimental] Create a mock BigQuery DataFrames session that avoids making Google Cloud API calls.

Intended for unit test environments that don't have access to the network.
"""
credentials = mock.create_autospec(
google.auth.credentials.Credentials, instance=True
)
Expand Down Expand Up @@ -108,8 +113,12 @@ def query_and_wait_mock(query, *args, **kwargs):


def create_dataframe(
monkeypatch: pytest.MonkeyPatch, session: Optional[bigframes.Session] = None
monkeypatch: pytest.MonkeyPatch, *, session: Optional[bigframes.Session] = None
) -> bigframes.dataframe.DataFrame:
"""[Experimental] Create a mock DataFrame that avoids making Google Cloud API calls.

Intended for unit test environments that don't have access to the network.
"""
if session is None:
session = create_bigquery_session()

Expand Down
47 changes: 31 additions & 16 deletions tests/system/small/operations/test_datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
import pytest

import bigframes.series
from tests.system.utils import assert_series_equal, skip_legacy_pandas
from tests.system.utils import assert_series_equal

DATETIME_COL_NAMES = [("datetime_col",), ("timestamp_col",)]
DATE_COLUMNS = [
Expand All @@ -34,8 +34,9 @@
("col_name",),
DATE_COLUMNS,
)
@skip_legacy_pandas
def test_dt_day(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.day.to_pandas()
Expand All @@ -51,8 +52,9 @@ def test_dt_day(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_date(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.date.to_pandas()
Expand All @@ -68,8 +70,9 @@ def test_dt_date(scalars_dfs, col_name):
("col_name",),
DATE_COLUMNS,
)
@skip_legacy_pandas
def test_dt_dayofweek(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.dayofweek.to_pandas()
Expand All @@ -82,8 +85,9 @@ def test_dt_dayofweek(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_hour(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.hour.to_pandas()
Expand All @@ -99,8 +103,9 @@ def test_dt_hour(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_minute(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.minute.to_pandas()
Expand All @@ -116,8 +121,9 @@ def test_dt_minute(scalars_dfs, col_name):
("col_name",),
DATE_COLUMNS,
)
@skip_legacy_pandas
def test_dt_month(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.month.to_pandas()
Expand All @@ -133,8 +139,9 @@ def test_dt_month(scalars_dfs, col_name):
("col_name",),
DATE_COLUMNS,
)
@skip_legacy_pandas
def test_dt_quarter(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.quarter.to_pandas()
Expand All @@ -150,8 +157,9 @@ def test_dt_quarter(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_second(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.second.to_pandas()
Expand All @@ -167,8 +175,9 @@ def test_dt_second(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_time(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.time.to_pandas()
Expand All @@ -184,8 +193,9 @@ def test_dt_time(scalars_dfs, col_name):
("col_name",),
DATE_COLUMNS,
)
@skip_legacy_pandas
def test_dt_year(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.year.to_pandas()
Expand All @@ -201,8 +211,9 @@ def test_dt_year(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_tz(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.tz
Expand All @@ -215,8 +226,9 @@ def test_dt_tz(scalars_dfs, col_name):
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_unit(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_series: bigframes.series.Series = scalars_df[col_name]
bf_result = bf_series.dt.unit
Expand All @@ -234,8 +246,9 @@ def test_dt_unit(scalars_dfs, col_name):
("datetime_col", "%H:%M"),
],
)
@skip_legacy_pandas
def test_dt_strftime(scalars_df_index, scalars_pandas_df_index, column, date_format):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
bf_result = scalars_df_index[column].dt.strftime(date_format).to_pandas()
pd_result = scalars_pandas_df_index[column].dt.strftime(date_format)
pd.testing.assert_series_equal(bf_result, pd_result, check_dtype=False)
Expand Down Expand Up @@ -276,8 +289,9 @@ def test_dt_strftime_time():
("col_name",),
DATETIME_COL_NAMES,
)
@skip_legacy_pandas
def test_dt_normalize(scalars_dfs, col_name):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_result = scalars_df[col_name].dt.normalize().to_pandas()
pd_result = scalars_pandas_df[col_name].dt.normalize()
Expand All @@ -297,8 +311,9 @@ def test_dt_normalize(scalars_dfs, col_name):
("datetime_col", "us"),
],
)
@skip_legacy_pandas
def test_dt_floor(scalars_dfs, col_name, freq):
# TODO: supply a reason why this isn't compatible with pandas 1.x
pytest.importorskip("pandas", minversion="2.0.0")
scalars_df, scalars_pandas_df = scalars_dfs
bf_result = scalars_df[col_name].dt.floor(freq).to_pandas()
pd_result = scalars_pandas_df[col_name].dt.floor(freq)
Expand Down
Loading