Skip to content

Remove more unused config options #948

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jul 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 1 addition & 44 deletions doc/library/config.rst
Original file line number Diff line number Diff line change
Expand Up @@ -103,14 +103,6 @@ import ``pytensor`` and print the config variable, as in:

String value: either ``'cpu'``

.. attribute:: force_device

Bool value: either ``True`` or ``False``

Default: ``False``

This flag's value cannot be modified during the program execution.

.. attribute:: print_active_device

Bool value: either ``True`` or ``False``
Expand Down Expand Up @@ -139,16 +131,6 @@ import ``pytensor`` and print the config variable, as in:
equal to ``float64`` is created.
This can be used to help find upcasts to ``float64`` in user code.

.. attribute:: deterministic

String value: either ``'default'``, ``'more'``

Default: ``'default'``

If ``more``, sometimes PyTensor will select :class:`Op` implementations that
are more "deterministic", but slower. See the ``dnn.conv.algo*``
flags for more cases.

.. attribute:: allow_gc

Bool value: either ``True`` or ``False``
Expand Down Expand Up @@ -373,7 +355,7 @@ import ``pytensor`` and print the config variable, as in:

When ``True``, ignore the first call to a PyTensor function while profiling.

.. attribute:: config.lib__amblibm
.. attribute:: config.lib__amdlibm

Bool value: either ``True`` or ``False``

Expand Down Expand Up @@ -412,16 +394,6 @@ import ``pytensor`` and print the config variable, as in:
ignore it (i.e. ``'ignore'``).
We suggest never using ``'ignore'`` except during testing.

.. attribute:: assert_no_cpu_op

String value: ``'ignore'`` or ``'warn'`` or ``'raise'`` or ``'pdb'``

Default: ``'ignore'``

If there is a CPU :class:`Op` in the computational graph, depending on its value,
this flag can either raise a warning, an exception or drop into the frame
with ``pdb``.

.. attribute:: on_shape_error

String value: ``'warn'`` or ``'raise'``
Expand Down Expand Up @@ -797,18 +769,3 @@ import ``pytensor`` and print the config variable, as in:
The verbosity level of the meta-rewriter: ``0`` for silent, ``1`` to only
warn when PyTensor cannot meta-rewrite an :class:`Op`, ``2`` for full output (e.g.
timings and the rewrites selected).


.. attribute:: config.metaopt__optimizer_excluding

Default: ``""``

A list of rewrite tags that we don't want included in the meta-rewriter.
Multiple tags are separated by ``':'``.

.. attribute:: config.metaopt__optimizer_including

Default: ``""``

A list of rewriter tags to be included during meta-rewriting.
Multiple tags are separated by ``':'``.
8 changes: 4 additions & 4 deletions pytensor/compile/profiling.py
Original file line number Diff line number Diff line change
Expand Up @@ -1566,26 +1566,26 @@ def exp_float32_op(op):
printed_tip = True

# tip 2
if not config.lib__amblibm and any(
if not config.lib__amdlibm and any(
amdlibm_speed_up(a.op) for (fgraph, a) in self.apply_time
):
print(
" - Try installing amdlibm and set the PyTensor flag "
"lib__amblibm=True. This speeds up only some Elemwise "
"lib__amdlibm=True. This speeds up only some Elemwise "
"operation.",
file=file,
)
printed_tip = True

# tip 3
if not config.lib__amblibm and any(
if not config.lib__amdlibm and any(
exp_float32_op(a.op) and a.inputs[0].dtype == "float32"
for (fgraph, a) in self.apply_time
):
print(
" - With the default gcc libm, exp in float32 is slower "
"than in float64! Try PyTensor flag floatX=float64, or "
"install amdlibm and set the pytensor flags lib__amblibm=True",
"install amdlibm and set the pytensor flags lib__amdlibm=True",
file=file,
)
printed_tip = True
Expand Down
116 changes: 10 additions & 106 deletions pytensor/configdefaults.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,14 +258,6 @@
# was expected, so it is currently not available.
# numpy,
),
)

config.add(
"deterministic",
"If `more`, sometimes we will select some implementation that "
"are more deterministic, but slower. Also see "
"the dnn.conv.algo* flags to cover more cases.",
EnumStr("default", ["more"]),
in_c_key=False,
)

Expand All @@ -276,13 +268,6 @@
in_c_key=False,
)

config.add(
"force_device",
"Raise an error if we can't use the specified device",
BoolParam(False, mutable=False),
in_c_key=False,
)

config.add(
"conv__assert_shape",
"If True, AbstractConv* ops will verify that user-provided"
Expand All @@ -299,14 +284,6 @@
in_c_key=False,
)

# This flag determines whether or not to raise error/warning message if
# there is a CPU Op in the computational graph.
config.add(
"assert_no_cpu_op",
"Raise an error/warning if there is a CPU op in the computational graph.",
EnumStr("ignore", ["warn", "raise", "pdb"], mutable=True),
in_c_key=False,
)
config.add(
"unpickle_function",
(
Expand Down Expand Up @@ -394,23 +371,11 @@

if rc == 0 and config.cxx != "":
# Keep the default linker the same as the one for the mode FAST_RUN
config.add(
"linker",
"Default linker used if the pytensor flags mode is Mode",
EnumStr(
"cvm", ["c|py", "py", "c", "c|py_nogc", "vm", "vm_nogc", "cvm_nogc"]
),
in_c_key=False,
)
linker_options = ["c|py", "py", "c", "c|py_nogc", "vm", "vm_nogc", "cvm_nogc"]
else:
# g++ is not present or the user disabled it,
# linker should default to python only.
config.add(
"linker",
"Default linker used if the pytensor flags mode is Mode",
EnumStr("vm", ["py", "vm_nogc"]),
in_c_key=False,
)
linker_options = ["py", "vm_nogc"]

Check warning on line 378 in pytensor/configdefaults.py

View check run for this annotation

Codecov / codecov/patch

pytensor/configdefaults.py#L378

Added line #L378 was not covered by tests
if type(config).cxx.is_default:
# If the user provided an empty value for cxx, do not warn.
_logger.warning(
Expand All @@ -420,6 +385,13 @@
"To remove this warning, set PyTensor flags cxx to an empty string."
)

config.add(
"linker",
"Default linker used if the pytensor flags mode is Mode",
EnumStr("cvm", linker_options),
in_c_key=False,
)

# Keep the default value the same as the one for the mode FAST_RUN
config.add(
"allow_gc",
Expand Down Expand Up @@ -570,7 +542,7 @@

# http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
config.add(
"lib__amblibm",
"lib__amdlibm",
"Use amd's amdlibm numerical library",
BoolParam(False),
# Added elsewhere in the c key only when needed.
Expand Down Expand Up @@ -609,10 +581,6 @@
)


def add_experimental_configvars():
return


def add_error_and_warning_configvars():
###
# To disable some warning about old bug that are fixed now.
Expand Down Expand Up @@ -1043,20 +1011,6 @@
in_c_key=False,
)

config.add(
"metaopt__optimizer_excluding",
("exclude optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)

config.add(
"metaopt__optimizer_including",
("include optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)


def add_vm_configvars():
config.add(
Expand Down Expand Up @@ -1295,55 +1249,6 @@
)


# Those are the options provided by PyTensor to choose algorithms at runtime.
SUPPORTED_DNN_CONV_ALGO_RUNTIME = (
"guess_once",
"guess_on_shape_change",
"time_once",
"time_on_shape_change",
)

# Those are the supported algorithm by PyTensor,
# The tests will reference those lists.
SUPPORTED_DNN_CONV_ALGO_FWD = (
"small",
"none",
"large",
"fft",
"fft_tiling",
"winograd",
"winograd_non_fused",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)

SUPPORTED_DNN_CONV_ALGO_BWD_DATA = (
"none",
"deterministic",
"fft",
"fft_tiling",
"winograd",
"winograd_non_fused",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)

SUPPORTED_DNN_CONV_ALGO_BWD_FILTER = (
"none",
"deterministic",
"fft",
"small",
"winograd_non_fused",
"fft_tiling",
*SUPPORTED_DNN_CONV_ALGO_RUNTIME,
)

SUPPORTED_DNN_CONV_PRECISION = (
"as_input_f32",
"as_input",
"float16",
"float32",
"float64",
)

# Eventually, the instance of `PyTensorConfigParser` should be created right here,
# where it is also populated with settings.
config = _create_default_config()
Expand All @@ -1353,7 +1258,6 @@
add_compile_configvars()
add_tensor_configvars()
add_traceback_configvars()
add_experimental_configvars()
add_error_and_warning_configvars()
add_testvalue_and_checking_configvars()
add_multiprocessing_configvars()
Expand Down
Loading
Loading