[MINOR][PYTHON][DOCS] Remove duplicated versionchanged per versionadded #42602

Closed · wants to merge 2 commits
3 changes: 3 additions & 0 deletions python/docs/source/reference/pyspark.sql/functions.rst
@@ -21,6 +21,9 @@ Functions
=========
.. currentmodule:: pyspark.sql.functions

A collection of builtin functions available for DataFrame operations.
From Apache Spark 3.5.0, all functions support Spark Connect.

Normal Functions
----------------
.. autosummary::
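The new paragraph above documents that every builtin in `pyspark.sql.functions` works under Spark Connect as of 3.5.0. A minimal sketch of calling builtins through the module, assuming an active `SparkSession` named `spark` (the data and column names are illustrative):

```python
from pyspark.sql import functions as F

# Works identically against a classic session or a Spark Connect session.
df = spark.createDataFrame([(1, "a"), (4, "b")], ["id", "label"])
df.select(F.upper("label").alias("label"), F.sqrt("id").alias("root")).show()
```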
18 changes: 2 additions & 16 deletions python/pyspark/sql/conf.py
@@ -39,11 +39,7 @@ def __init__(self, jconf: JavaObject) -> None:

@since(2.0)
def set(self, key: str, value: Union[str, int, bool]) -> None:
"""Sets the given Spark runtime configuration property.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""

[Review comment from the Member Author: ".. versionchanged:: 3.4.0" is mentioned at the class level above.]
"""Sets the given Spark runtime configuration property."""
self._jconf.set(key, value)

@since(2.0)
@@ -52,9 +48,6 @@ def get(
) -> Optional[str]:
"""Returns the value of Spark runtime configuration property for the given key,
assuming it is set.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
self._checkType(key, "key")
if default is _NoValue:
@@ -66,11 +59,7 @@

@since(2.0)
def unset(self, key: str) -> None:
"""Resets the configuration property for the given key.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
"""Resets the configuration property for the given key."""
yaooqinn marked this conversation as resolved.
self._jconf.unset(key)

def _checkType(self, obj: Any, identifier: str) -> None:
@@ -84,9 +73,6 @@ def _checkType(self, obj: Any, identifier: str) -> None:
def isModifiable(self, key: str) -> bool:
"""Indicates whether the configuration property with the given key
is modifiable in the current session.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
return self._jconf.isModifiable(key)

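The four docstrings trimmed above all belong to the `RuntimeConfig` methods reachable as `spark.conf`. A minimal usage sketch, assuming an existing `SparkSession` named `spark` (the property name is just an illustrative, session-modifiable one):

```python
# set() accepts str, int, or bool values; they are stored as strings.
spark.conf.set("spark.sql.shuffle.partitions", 50)

# get() returns the value for a set key; with a second argument it
# falls back to that default instead of raising for unset keys.
spark.conf.get("spark.sql.shuffle.partitions")           # '50'

# isModifiable() reports whether the key can be changed per session.
spark.conf.isModifiable("spark.sql.shuffle.partitions")  # True

# unset() restores the property to its default.
spark.conf.unset("spark.sql.shuffle.partitions")
```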
33 changes: 18 additions & 15 deletions python/pyspark/sql/dataframe.py
@@ -2266,9 +2266,6 @@ def to(self, schema: StructType) -> "DataFrame":

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
schema : :class:`StructType`
@@ -2297,6 +2294,8 @@ def to(self, schema: StructType) -> "DataFrame":
* Fail if the nullability is not compatible. For example, the column and/or inner field
is nullable but the specified schema requires them to be not nullable.

Supports Spark Connect.

Examples
--------
>>> from pyspark.sql.types import StructField, StringType
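The Examples block in the diff is truncated at the import line above. A hedged sketch of the schema reconciliation `DataFrame.to` performs, assuming a `SparkSession` named `spark` (the data and target schema are illustrative):

```python
from pyspark.sql.types import StructType, StructField, StringType, LongType

df = spark.createDataFrame([("Alice", 2)], ["name", "age"])
target = StructType([
    StructField("age", LongType()),     # columns are reordered to match
    StructField("name", StringType()),
])
df.to(target).printSchema()
# root
#  |-- age: long (nullable = true)
#  |-- name: string (nullable = true)
```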
@@ -3575,9 +3574,6 @@ def unpivot(

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
ids : str, Column, tuple, list
@@ -3597,6 +3593,10 @@
:class:`DataFrame`
Unpivoted DataFrame.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame(
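The example above is likewise cut off by the collapsed diff. A minimal sketch of the `unpivot` call the docstring describes (the values are illustrative; the two value columns are cast to their least common type, here double):

```python
df = spark.createDataFrame(
    [(1, 11, 1.1), (2, 12, 1.2)],
    ["id", "int", "double"],
)
# Keep `id` as the identifier; turn the remaining columns into (var, val) rows.
df.unpivot("id", ["int", "double"], "var", "val").show()
# +---+------+----+
# | id|   var| val|
# +---+------+----+
# |  1|   int|11.0|
# |  1|double| 1.1|
# ...
```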
@@ -3661,9 +3661,6 @@ def melt(

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
ids : str, Column, tuple, list, optional
@@ -3686,6 +3683,10 @@
See Also
--------
DataFrame.unpivot

Notes
-----
Supports Spark Connect.
"""
return self.unpivot(ids, values, variableColumnName, valueColumnName)
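Since `melt` simply delegates to `unpivot`, as the `return` line above shows, the two are interchangeable. A one-line sketch using the same illustrative frame as before:

```python
# Identical to df.unpivot("id", ["int", "double"], "var", "val")
df.melt(ids="id", values=["int", "double"],
        variableColumnName="var", valueColumnName="val")
```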

@@ -4263,9 +4264,6 @@ def dropDuplicatesWithinWatermark(self, subset: Optional[List[str]] = None) -> "

.. versionadded:: 3.5.0

.. versionchanged:: 3.5.0
Supports Spark Connect.

Parameters
----------
subset : List of column names, optional
@@ -4276,6 +4274,10 @@ def dropDuplicatesWithinWatermark(self, subset: Optional[List[str]] = None) -> "
:class:`DataFrame`
DataFrame without duplicates.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql import Row
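The Examples block is again truncated. A hedged sketch of the streaming usage this method targets; the `rate` source and the 10-minute watermark are illustrative, and the method requires a streaming DataFrame with an event-time watermark already set:

```python
events = spark.readStream.format("rate").load()  # columns: timestamp, value

deduped = (
    events
    .withWatermark("timestamp", "10 minutes")
    # Drop rows whose `value` duplicates one seen within the watermark window.
    .dropDuplicatesWithinWatermark(["value"])
)
```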
@@ -5241,9 +5243,6 @@ def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
.. versionadded:: 3.4.0
Added support for renaming multiple columns.

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
colsMap : dict
@@ -5259,6 +5258,10 @@ def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
--------
:meth:`withColumnRenamed`

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
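Completing the truncated example, a minimal sketch of the multi-column rename the docstring covers (the replacement names are illustrative):

```python
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
# colsMap maps each existing column name to its new name.
df.withColumnsRenamed({"age": "age2", "name": "first_name"}).show()
# +----+----------+
# |age2|first_name|
# +----+----------+
# |   2|     Alice|
# |   5|       Bob|
# +----+----------+
```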