Skip to content

Commit db43fde

Browse files
authored
DOCS: Numpydocs1 (#5578)
* baseline * added some noqa. * api contents ordering to alphabetical * remove duplicate note * updated string to str for rendering in docs * ensured spaces around colons for listed parameters.
1 parent 40cb3d1 commit db43fde

File tree

10 files changed

+634
-573
lines changed

10 files changed

+634
-573
lines changed

docs/src/conf.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
#
1616
# All configuration values have a default; values that are commented out
1717
# serve to show the default.
18-
1918
# ----------------------------------------------------------------------------
2019

2120
import datetime
@@ -195,7 +194,7 @@ def _dotv(version):
195194
todo_include_todos = True
196195

197196
# api generation configuration
198-
autodoc_member_order = "groupwise"
197+
autodoc_member_order = "alphabetical"
199198
autodoc_default_flags = ["show-inheritance"]
200199

201200
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_typehints

lib/iris/config.py

Lines changed: 26 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
The [optional] name of the logger to notify when first imported.
2828
2929
----------
30+
3031
"""
3132

3233
import configparser
@@ -42,41 +43,37 @@ def get_logger(
4243
name, datefmt=None, fmt=None, level=None, propagate=None, handler=True
4344
):
4445
"""
46+
Create a custom logger for logging.
47+
4548
Create a :class:`logging.Logger` with a :class:`logging.StreamHandler`
4649
and custom :class:`logging.Formatter`.
4750
48-
Args:
49-
50-
* name:
51+
Parameters
52+
----------
53+
name
5154
The name of the logger. Typically this is the module filename that
5255
owns the logger.
53-
54-
Kwargs:
55-
56-
* datefmt:
56+
datefmt: optional
5757
The date format string of the :class:`logging.Formatter`.
5858
Defaults to ``%d-%m-%Y %H:%M:%S``.
59-
60-
* fmt:
59+
fmt: optional
6160
The additional format string of the :class:`logging.Formatter`.
6261
This is appended to the default format string
6362
``%(asctime)s %(name)s %(levelname)s - %(message)s``.
64-
65-
* level:
63+
level: optional
6664
The threshold level of the logger. Defaults to ``INFO``.
67-
68-
* propagate:
65+
propagate: optional
6966
Sets the ``propagate`` attribute of the :class:`logging.Logger`,
7067
which determines whether events logged to this logger will be
7168
passed to the handlers of higher level loggers. Defaults to
7269
``False``.
73-
74-
* handler:
70+
handler: optional
7571
Create and attach a :class:`logging.StreamHandler` to the
7672
logger. Defaults to ``True``.
7773
78-
Returns:
79-
A :class:`logging.Logger`.
74+
Returns
75+
-------
76+
:class:`logging.Logger`.
8077
8178
"""
8279
if level is None:
@@ -118,6 +115,8 @@ def get_logger(
118115
# Returns simple string options
119116
def get_option(section, option, default=None):
120117
"""
118+
Return the option value for the given section.
119+
121120
Returns the option value for the given section, or the default value
122121
if the section/option is not present.
123122
@@ -131,6 +130,8 @@ def get_option(section, option, default=None):
131130
# Returns directory path options
132131
def get_dir_option(section, option, default=None):
133132
"""
133+
Return the directory path from the given option and section.
134+
134135
Returns the directory path from the given option and section, or
135136
returns the given default value if the section/option is not present
136137
or does not represent a valid directory.
@@ -196,20 +197,19 @@ def __init__(self, conventions_override=None):
196197
"""
197198
Set up NetCDF processing options for Iris.
198199
199-
Currently accepted kwargs:
200-
201-
* conventions_override (bool):
200+
Parameters
201+
----------
202+
conventions_override : bool, optional
202203
Define whether the CF Conventions version (e.g. `CF-1.6`) set when
203204
saving a cube to a NetCDF file should be defined by
204-
Iris (the default) or the cube being saved.
205-
206-
If `False` (the default), specifies that Iris should set the
205+
Iris (the default) or the cube being saved. If `False`
206+
(the default), specifies that Iris should set the
207207
CF Conventions version when saving cubes as NetCDF files.
208208
If `True`, specifies that the cubes being saved to NetCDF should
209209
set the CF Conventions version for the saved NetCDF files.
210210
211-
Example usages:
212-
211+
Examples
212+
--------
213213
* Specify, for the lifetime of the session, that we want all cubes
214214
written to NetCDF to define their own CF Conventions versions::
215215
@@ -276,6 +276,7 @@ def _defaults_dict(self):
276276
def context(self, **kwargs):
277277
"""
278278
Allow temporary modification of the options via a context manager.
279+
279280
Accepted kwargs are the same as can be supplied to the Option.
280281
281282
"""

lib/iris/fileformats/netcdf/__init__.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33
# This file is part of Iris and is released under the BSD license.
44
# See LICENSE in the root of the repository for full licensing details.
55
"""
6-
Module to support the loading and saving of NetCDF files, also using the CF conventions
7-
for metadata interpretation.
6+
Support loading and saving NetCDF files using CF conventions for metadata interpretation.
87
98
See : `NetCDF User's Guide <https://docs.unidata.ucar.edu/nug/current/>`_
109
and `netCDF4 python module <https://github.com/Unidata/netcdf4-python>`_.

lib/iris/fileformats/netcdf/_dask_locks.py

Lines changed: 45 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -5,45 +5,49 @@
55
"""
66
Module containing code to create locks enabling dask workers to co-operate.
77
8-
This matter is complicated by needing different solutions for different dask scheduler
9-
types, i.e. local 'threads' scheduler, local 'processes' or distributed.
8+
This matter is complicated by needing different solutions for different dask
9+
scheduler types, i.e. local 'threads' scheduler, local 'processes' or
10+
distributed.
1011
11-
In any case, an "iris.fileformats.netcdf.saver.Saver" object contains a netCDF4.Dataset
12-
targeting an output file, and creates a Saver.file_write_lock object to serialise
13-
write-accesses to the file from dask tasks : All dask-task file writes go via a
14-
"iris.fileformats.netcdf.saver.NetCDFWriteProxy" object, which also contains a link
15-
to the Saver.file_write_lock, and uses it to prevent workers from fouling each other.
12+
In any case, an "iris.fileformats.netcdf.saver.Saver" object contains a
13+
netCDF4.Dataset targeting an output file, and creates a Saver.file_write_lock
14+
object to serialise write-accesses to the file from dask tasks : All dask-task
15+
file writes go via a "iris.fileformats.netcdf.saver.NetCDFWriteProxy" object,
16+
which also contains a link to the Saver.file_write_lock, and uses it to prevent
17+
workers from fouling each other.
1618
1719
For each chunk written, the NetCDFWriteProxy acquires the common per-file lock;
18-
opens a Dataset on the file; performs a write to the relevant variable; closes the
19-
Dataset and then releases the lock. This process is obviously very similar to what the
20-
NetCDFDataProxy does for reading lazy chunks.
20+
opens a Dataset on the file; performs a write to the relevant variable; closes
21+
the Dataset and then releases the lock. This process is obviously very similar
22+
to what the NetCDFDataProxy does for reading lazy chunks.
2123
22-
For a threaded scheduler, the Saver.lock is a simple threading.Lock(). The workers
23-
(threads) execute tasks which contain a NetCDFWriteProxy, as above. All of those
24-
contain the common lock, and this is simply **the same object** for all workers, since
25-
they share an address space.
24+
For a threaded scheduler, the Saver.lock is a simple threading.Lock(). The
25+
workers (threads) execute tasks which contain a NetCDFWriteProxy, as above.
26+
All of those contain the common lock, and this is simply **the same object**
27+
for all workers, since they share an address space.
2628
2729
For a distributed scheduler, the Saver.lock is a `distributed.Lock()` which is
2830
identified with the output filepath. This is distributed to the workers by
29-
serialising the task function arguments, which will include the NetCDFWriteProxy.
30-
A worker behaves like a process, though it may execute on a remote machine. When a
31-
distributed.Lock is deserialised to reconstruct the worker task, this creates an object
32-
that communicates with the scheduler. These objects behave as a single common lock,
33-
as they all have the same string 'identity', so the scheduler implements inter-process
34-
communication so that they can mutually exclude each other.
31+
serialising the task function arguments, which will include the
32+
NetCDFWriteProxy. A worker behaves like a process, though it may execute on a
33+
remote machine. When a distributed.Lock is deserialised to reconstruct the
34+
worker task, this creates an object that communicates with the scheduler.
35+
These objects behave as a single common lock, as they all have the same string
36+
'identity', so the scheduler implements inter-process communication so that
37+
they can mutually exclude each other.
3538
3639
It is also *conceivable* that multiple processes could write to the same file in
37-
parallel, if the operating system supports it. However, this also requires that the
38-
libnetcdf C library is built with parallel access option, which is not common.
39-
With the "ordinary" libnetcdf build, a process which attempts to open for writing a file
40-
which is _already_ open for writing simply raises an access error.
41-
In any case, Iris netcdf saver will not support this mode of operation, at present.
40+
parallel, if the operating system supports it. However, this also requires
41+
that the libnetcdf C library is built with parallel access option, which is
42+
not common. With the "ordinary" libnetcdf build, a process which attempts to
43+
open for writing a file which is _already_ open for writing simply raises an
44+
access error. In any case, Iris netcdf saver will not support this mode of
45+
operation, at present.
4246
4347
We don't currently support a local "processes" type scheduler. If we did, the
44-
behaviour should be very similar to a distributed scheduler. It would need to use some
45-
other serialisable shared-lock solution in place of 'distributed.Lock', which requires
46-
a distributed scheduler to function.
48+
behaviour should be very similar to a distributed scheduler. It would need to
49+
use some other serialisable shared-lock solution in place of
50+
'distributed.Lock', which requires a distributed scheduler to function.
4751
4852
"""
4953
import threading
@@ -55,7 +59,7 @@
5559

5660

5761
# A dedicated error class, allowing filtering and testing of errors raised here.
58-
class DaskSchedulerTypeError(ValueError):
62+
class DaskSchedulerTypeError(ValueError): # noqa: D101
5963
pass
6064

6165

@@ -82,11 +86,13 @@ def get_dask_array_scheduler_type():
8286
8387
Returns one of 'distributed', 'threads' or 'processes'.
8488
The return value is a valid argument for dask.config.set(scheduler=<type>).
85-
This cannot distinguish between distributed local and remote clusters -- both of
86-
those simply return 'distributed'.
89+
This cannot distinguish between distributed local and remote clusters --
90+
both of those simply return 'distributed'.
8791
88-
NOTE: this takes account of how dask is *currently* configured. It will be wrong
89-
if the config changes before the compute actually occurs.
92+
Notes
93+
-----
94+
This takes account of how dask is *currently* configured. It will
95+
be wrong if the config changes before the compute actually occurs.
9096
9197
"""
9298
if dask_scheduler_is_distributed():
@@ -114,8 +120,12 @@ def get_worker_lock(identity: str):
114120
"""
115121
Return a mutex Lock which can be shared by multiple Dask workers.
116122
117-
The type of Lock generated depends on the dask scheduler type, which must therefore
118-
be set up before this is called.
123+
The type of Lock generated depends on the dask scheduler type, which must
124+
therefore be set up before this is called.
125+
126+
Parameters
127+
----------
128+
identity : str
119129
120130
"""
121131
scheduler_type = get_dask_array_scheduler_type()

0 commit comments

Comments
 (0)