[MINOR][PYTHON][DOCS] Remove duplicated versionchanged per versionadded #42602

Closed · wants to merge 2 commits
18 changes: 2 additions & 16 deletions python/pyspark/sql/conf.py
@@ -39,11 +39,7 @@ def __init__(self, jconf: JavaObject) -> None:

@since(2.0)
def set(self, key: str, value: Union[str, int, bool]) -> None:
"""Sets the given Spark runtime configuration property.

.. versionchanged:: 3.4.0
Member Author: .. versionchanged:: 3.4.0 is mentioned at the class level above.

Supports Spark Connect.
"""
"""Sets the given Spark runtime configuration property."""
self._jconf.set(key, value)

@since(2.0)
@@ -52,9 +48,6 @@ def get(
) -> Optional[str]:
"""Returns the value of Spark runtime configuration property for the given key,
assuming it is set.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
self._checkType(key, "key")
if default is _NoValue:
@@ -66,11 +59,7 @@

@since(2.0)
def unset(self, key: str) -> None:
"""Resets the configuration property for the given key.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
"""Resets the configuration property for the given key."""
yaooqinn marked this conversation as resolved.
self._jconf.unset(key)

def _checkType(self, obj: Any, identifier: str) -> None:
@@ -84,9 +73,6 @@ def _checkType(self, obj: Any, identifier: str) -> None:
def isModifiable(self, key: str) -> bool:
"""Indicates whether the configuration property with the given key
is modifiable in the current session.

.. versionchanged:: 3.4.0
Supports Spark Connect.
"""
return self._jconf.isModifiable(key)

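For context (not part of this diff), a minimal usage sketch of the RuntimeConfig methods whose docstrings are trimmed above, assuming an active SparkSession named `spark`:

```python
# Hypothetical usage sketch, not from the PR: exercising spark.conf
# set / get / isModifiable / unset against a well-known SQL property.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

spark.conf.set("spark.sql.shuffle.partitions", "64")            # set a runtime property
print(spark.conf.get("spark.sql.shuffle.partitions"))            # '64'
print(spark.conf.isModifiable("spark.sql.shuffle.partitions"))   # True (session-level property)
spark.conf.unset("spark.sql.shuffle.partitions")                 # reset to the default
```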
33 changes: 18 additions & 15 deletions python/pyspark/sql/dataframe.py
@@ -2266,9 +2266,6 @@ def to(self, schema: StructType) -> "DataFrame":

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
schema : :class:`StructType`
@@ -2297,6 +2294,8 @@ def to(self, schema: StructType) -> "DataFrame":
* Fail if the nullability is not compatible. For example, the column and/or inner field
is nullable but the specified schema requires them to be not nullable.

Supports Spark Connect.

Examples
--------
>>> from pyspark.sql.types import StructField, StringType
@@ -3575,9 +3574,6 @@ def unpivot(

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
ids : str, Column, tuple, list
@@ -3597,6 +3593,10 @@
:class:`DataFrame`
Unpivoted DataFrame.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame(
@@ -3661,9 +3661,6 @@ def melt(

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
ids : str, Column, tuple, list, optional
@@ -3686,6 +3683,10 @@
See Also
--------
DataFrame.unpivot

Notes
-----
Supports Spark Connect.
"""
return self.unpivot(ids, values, variableColumnName, valueColumnName)

@@ -4263,9 +4264,6 @@ def dropDuplicatesWithinWatermark(self, subset: Optional[List[str]] = None) -> "

.. versionadded:: 3.5.0

.. versionchanged:: 3.5.0
Supports Spark Connect.

Parameters
----------
subset : List of column names, optional
@@ -4276,6 +4274,10 @@
:class:`DataFrame`
DataFrame without duplicates.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql import Row
@@ -5241,9 +5243,6 @@ def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
.. versionadded:: 3.4.0
Added support for multiple columns renaming

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
colsMap : dict
@@ -5259,6 +5258,10 @@
--------
:meth:`withColumnRenamed`

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
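For context (not part of this diff), a short sketch of two of the DataFrame APIs touched above, unpivot and withColumnsRenamed, assuming PySpark 3.4+ and an active SparkSession named `spark`:

```python
# Hypothetical sketch, not from the PR: unpivot and withColumnsRenamed.
df = spark.createDataFrame([(1, 11, 1.1), (2, 12, 1.2)], ["id", "int", "double"])

# Wide to long: one row per (id, variable, value) pair.
df.unpivot("id", ["int", "double"], "var", "val").show()

# Rename several columns in a single call (added in 3.4.0).
df.withColumnsRenamed({"int": "int_col", "double": "double_col"}).show()
```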
75 changes: 40 additions & 35 deletions python/pyspark/sql/functions.py
@@ -613,9 +613,6 @@ def mode(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -626,6 +623,10 @@ def mode(col: "ColumnOrName") -> Column:
:class:`~pyspark.sql.Column`
the most frequent value in a group.

HyukjinKwon marked this conversation as resolved.
Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame([
@@ -930,9 +931,6 @@ def median(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -943,6 +941,10 @@ def median(col: "ColumnOrName") -> Column:
:class:`~pyspark.sql.Column`
the median of the values in a group.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame([
@@ -3372,9 +3374,6 @@ def pmod(dividend: Union["ColumnOrName", float], divisor: Union["ColumnOrName",

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
dividend : str, :class:`~pyspark.sql.Column` or float
Expand All @@ -3387,6 +3386,10 @@ def pmod(dividend: Union["ColumnOrName", float], divisor: Union["ColumnOrName",
:class:`~pyspark.sql.Column`
positive value of dividend mod divisor.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql.functions import pmod
@@ -3674,9 +3677,6 @@ def approx_count_distinct(col: "ColumnOrName", rsd: Optional[float] = None) -> C

.. versionadded:: 2.1.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

.. versionchanged:: 3.4.0
Supports Spark Connect.

@@ -7671,9 +7671,6 @@ def window_time(

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
windowColumn : :class:`~pyspark.sql.Column`
Expand All @@ -7684,6 +7681,10 @@ def window_time(
:class:`~pyspark.sql.Column`
the column for computed results.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> import datetime
@@ -11181,9 +11182,6 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -11199,6 +11197,7 @@ def get(col: "ColumnOrName", index: Union["ColumnOrName", int]) -> Column:
Notes
-----
The position is not 1 based, but 0 based index.
Supports Spark Connect.

See Also
--------
@@ -11347,9 +11346,6 @@ def array_insert(arr: "ColumnOrName", pos: Union["ColumnOrName", int], value: An

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
arr : :class:`~pyspark.sql.Column` or str
Expand All @@ -11365,6 +11361,10 @@ def array_insert(arr: "ColumnOrName", pos: Union["ColumnOrName", int], value: An
:class:`~pyspark.sql.Column`
an array of values, including the new specified value

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame(
@@ -11487,9 +11487,6 @@ def array_compact(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -11500,6 +11497,10 @@ def array_compact(col: "ColumnOrName") -> Column:
:class:`~pyspark.sql.Column`
an array by excluding the null values.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> df = spark.createDataFrame([([1, None, 2, 3],), ([4, 5, None, 4],)], ['data'])
@@ -11517,9 +11518,6 @@ def array_append(col: "ColumnOrName", value: Any) -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -11532,6 +11530,10 @@ def array_append(col: "ColumnOrName", value: Any) -> Column:
:class:`~pyspark.sql.Column`
an array of values from first array along with the element.

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql import Row
@@ -11635,9 +11637,6 @@ def inline(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -11652,6 +11651,10 @@ def inline(col: "ColumnOrName") -> Column:
--------
:meth:`explode`

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql import Row
@@ -11776,9 +11779,6 @@ def inline_outer(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.

Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Expand All @@ -11794,6 +11794,10 @@ def inline_outer(col: "ColumnOrName") -> Column:
:meth:`explode_outer`
:meth:`inline`

Notes
-----
Supports Spark Connect.

Examples
--------
>>> from pyspark.sql import Row
@@ -14473,8 +14477,9 @@ def unwrap_udt(col: "ColumnOrName") -> Column:

.. versionadded:: 3.4.0

.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
Supports Spark Connect.
"""
return _invoke_function("unwrap_udt", _to_java_column(col))

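For context (not part of this diff), a small sketch exercising several of the functions whose docstrings are edited above, assuming PySpark 3.4+ and an active SparkSession named `spark`:

```python
# Hypothetical sketch, not from the PR: a few of the 3.4.0 functions listed above.
from pyspark.sql import Row
from pyspark.sql import functions as sf

df = spark.createDataFrame(
    [("a", 1.0), ("a", 2.0), ("a", 2.0), ("b", 4.0)], ["key", "value"]
)
# Aggregates: most frequent value, median, and approximate distinct count per key.
df.groupBy("key").agg(
    sf.mode("value"), sf.median("value"), sf.approx_count_distinct("value")
).show()

arrays = spark.createDataFrame([([1, None, 2, 3],)], ["data"])
arrays.select(
    sf.get("data", 0),                      # 0-based element access
    sf.array_compact("data"),               # drop null entries
    sf.array_append("data", 99),            # append a value at the end
    sf.array_insert("data", 1, 0),          # insert 0 at 1-based position 1
    sf.pmod(sf.lit(-7.0), sf.lit(3.0)),     # positive modulus -> 2.0
).show(truncate=False)

# inline explodes an array of structs into one row per struct.
structs = spark.createDataFrame([Row(structlist=[Row(a=1, b=2), Row(a=3, b=4)])])
structs.select(sf.inline("structlist")).show()
```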
7 changes: 4 additions & 3 deletions python/pyspark/sql/session.py
@@ -851,12 +851,13 @@ def udtf(self) -> "UDTFRegistration":

.. versionadded:: 3.5.0

.. versionchanged:: 3.5.0
Supports Spark Connect.

Returns
-------
:class:`UDTFRegistration`

Notes
-----
Supports Spark Connect.
"""
from pyspark.sql.udtf import UDTFRegistration

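For context (not part of this diff), a hedged sketch of the `spark.udtf` accessor documented above, assuming PySpark 3.5+ and an active SparkSession named `spark`:

```python
# Hypothetical sketch, not from the PR: defining and registering a Python UDTF
# through the UDTFRegistration returned by spark.udtf.
from pyspark.sql.functions import udtf

@udtf(returnType="num: int, squared: int")
class SquareNumbers:
    def eval(self, start: int, end: int):
        # Emit one row per number in the inclusive range.
        for num in range(start, end + 1):
            yield (num, num * num)

spark.udtf.register("square_numbers", SquareNumbers)
spark.sql("SELECT * FROM square_numbers(1, 3)").show()
```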