Skip to content

chore: refactor IsNullOp and NotNullOp logic to make scalar ops generation easier #1822

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions bigframes/core/array_value.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,10 @@
from bigframes.core.window_spec import WindowSpec
import bigframes.dtypes
import bigframes.exceptions as bfe
import bigframes.operations as ops
import bigframes.operations.aggregations as agg_ops

if typing.TYPE_CHECKING:
# Avoid circular imports.
import bigframes.operations.aggregations as agg_ops
from bigframes.session import Session

ORDER_ID_COLUMN = "bigframes_ordering_id"
Expand Down Expand Up @@ -185,6 +185,8 @@ def get_column_type(self, key: str) -> bigframes.dtypes.Dtype:

def row_count(self) -> ArrayValue:
"""Get number of rows in ArrayValue as a single-entry ArrayValue."""
import bigframes.operations.aggregations as agg_ops # Avoid circular imports.

return ArrayValue(
nodes.AggregateNode(
child=self.node,
Expand All @@ -200,6 +202,8 @@ def row_count(self) -> ArrayValue:
# Operations
def filter_by_id(self, predicate_id: str, keep_null: bool = False) -> ArrayValue:
"""Filter the table on a given expression, the predicate must be a boolean series aligned with the table expression."""
import bigframes.operations as ops # Avoid circular imports.

predicate: ex.Expression = ex.deref(predicate_id)
if keep_null:
predicate = ops.fillna_op.as_expr(predicate, ex.const(True))
Expand Down
11 changes: 9 additions & 2 deletions bigframes/core/compile/compiled.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@
import bigframes.core.compile.googlesql
import bigframes.core.compile.ibis_types
import bigframes.core.compile.scalar_op_compiler as op_compilers
import bigframes.core.compile.scalar_op_compiler as scalar_op_compiler
import bigframes.core.expression as ex
from bigframes.core.ordering import OrderingExpression
import bigframes.core.sql
Expand All @@ -45,6 +44,12 @@
op_compiler = op_compilers.scalar_op_compiler


# This must be the last import. Currently depending on side-effects.
# TODO(tswast): Refactor all ops to register in the same file as where they are
# defined so we don't need this.
import bigframes.core.compile.scalar_op_registry # noqa: F401,E402


# Ibis Implementations
class UnorderedIR:
def __init__(
Expand Down Expand Up @@ -679,13 +684,15 @@ def _join_condition(


def _as_groupable(value: ibis_types.Value):
    """Return *value* cast to a representation that can be used in a groupby.

    Per the inline comment, some ibis column types cannot be grouped on
    directly, so surrogate keys are produced: float64 -> string,
    geospatial -> binary, JSON -> JSON string. All other types pass
    through unchanged.
    """
    # Local import — presumably to avoid a circular import at module load
    # time (matches the local-import pattern used elsewhere in this PR);
    # TODO confirm against the module graph.
    from bigframes.core.compile import scalar_op_registry

    # Some types need to be converted to another type to enable groupby
    if value.type().is_float64():
        # Cast floats to their string form to use as the group key.
        return value.cast(ibis_dtypes.str)
    elif value.type().is_geospatial():
        return typing.cast(ibis_types.GeoSpatialColumn, value).as_binary()
    elif value.type().is_json():
        # NOTE(review): the next two lines are a before/after pair from the
        # scraped diff view — only the scalar_op_registry call is the live
        # (post-change) code; the scalar_op_compiler line is the removed one.
        return scalar_op_compiler.to_json_string(value)
        return scalar_op_registry.to_json_string(value)
    else:
        return value

Expand Down
5 changes: 3 additions & 2 deletions bigframes/core/compile/compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
import bigframes.core.compile.concat as concat_impl
import bigframes.core.compile.configs as configs
import bigframes.core.compile.explode
import bigframes.core.compile.scalar_op_compiler as compile_scalar
import bigframes.core.nodes as nodes
import bigframes.core.ordering as bf_ordering
import bigframes.core.rewrite as rewrites
Expand Down Expand Up @@ -178,6 +177,8 @@ def compile_readlocal(node: nodes.ReadLocalNode, *args):

@_compile_node.register
def compile_readtable(node: nodes.ReadTableNode, *args):
from bigframes.core.compile import scalar_op_registry

ibis_table = _table_to_ibis(
node.source, scan_cols=[col.source_id for col in node.scan_list.items]
)
Expand All @@ -188,7 +189,7 @@ def compile_readtable(node: nodes.ReadTableNode, *args):
scan_item.dtype == dtypes.JSON_DTYPE
and ibis_table[scan_item.source_id].type() == ibis_dtypes.string
):
json_column = compile_scalar.parse_json(
json_column = scalar_op_registry.parse_json(
ibis_table[scan_item.source_id]
).name(scan_item.source_id)
ibis_table = ibis_table.mutate(json_column)
Expand Down
Loading