Skip to content

Commit

Permalink
Merge branch 'master' into mobt_161_wxcode_tree_check
Browse files Browse the repository at this point in the history
* master:
  MOBT-199: mosg__model_run in metadata interpreter (metoppv#1644)
  Minor correction of docstring listing truth dataframe columns (metoppv#1643)
  Remove __repr__ methods from all neighbourhood plugins (metoppv#1648)
  ENH: Avoiding lazy loading in select command calls (metoppv#1617)
  MOBT-180: Weather symbol speed up (metoppv#1638)
  IM-1621: Make ECC error and warning tests more rigorous (metoppv#1641)
  Make flake8 report that it is okay when running improver-tests. (metoppv#1645)
  Update checksums after updating the title of files in apply-emos-coefficients/sites. (metoppv#1640)
  Fixes bug in spot-extraction for multi-time inputs (metoppv#1633)
  Updates checksums for threshold landmask fix (metoppv#1636)
  Update interpret-metadata (metoppv#1632)
  Weather code tree update (metoppv#1635)
  Fix noise in precip accumulation thresholds (metoppv#1627)

# Conflicts:
#	improver_tests/acceptance/SHA256SUMS
  • Loading branch information
MoseleyS committed Jan 21, 2022
2 parents 4c8bbae + 9cc3998 commit 59faf7d
Show file tree
Hide file tree
Showing 38 changed files with 651 additions and 331 deletions.
1 change: 1 addition & 0 deletions bin/improver-tests
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ function improver_test_isort {

function improver_test_flake8 {
# Run flake8 (binary overridable via $FLAKE8) over the target files,
# then report success so the test wrapper shows an OK for this step.
${FLAKE8:-flake8} $FILES_TO_TEST
echo_ok "flake8"
}

function improver_test_doc {
Expand Down
9 changes: 5 additions & 4 deletions improver/calibration/dataframe_utilities.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,8 +280,9 @@ def _prepare_dataframes(
forecast_df:
DataFrame expected to contain the following columns: forecast,
blend_time, forecast_period, forecast_reference_time, time,
wmo_id, percentile, diagnostic, latitude, longitude, period,
height, cf_name, units. Any other columns are ignored.
wmo_id, percentile, diagnostic, latitude, longitude, altitude,
period, height, cf_name, units and experiment. Any other
columns are ignored.
truth_df:
DataFrame expected to contain the following columns: ob_value,
time, wmo_id, diagnostic, latitude, longitude and altitude.
Expand Down Expand Up @@ -481,8 +482,8 @@ def truth_dataframe_to_cube(df: DataFrame, training_dates: DatetimeIndex,) -> Cu
Args:
df:
DataFrame expected to contain the following columns: ob_value,
time, wmo_id, diagnostic, latitude, longitude and altitude.
Any other columns are ignored.
time, wmo_id, diagnostic, latitude, longitude, altitude, cf_name,
height, period and units. Any other columns are ignored.
training_dates:
Datetimes spanning the training period.
Expand Down
21 changes: 20 additions & 1 deletion improver/cli/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,25 @@ def inputcube(to_convert):
return maybe_coerce_with(load_cube, to_convert)


@value_converter
def inputcube_nolazy(to_convert):
    """Loads cube from file or returns passed object.

    Where a load is performed, the returned cube will not have lazy data.
    If a cube object is passed in, any lazy data it holds is realised
    (loaded into memory) before it is returned.

    Args:
        to_convert (string or iris.cube.Cube):
            File name or Cube object.

    Returns:
        Loaded cube or passed object.
    """
    from improver.utilities.load import load_cube

    # NOTE: the original `getattr(to_convert, "has_lazy_data", False)` returned
    # the bound method itself (always truthy for a cube) without calling it,
    # so the laziness test was never actually performed. Call the method.
    has_lazy_data = getattr(to_convert, "has_lazy_data", None)
    if callable(has_lazy_data) and has_lazy_data():
        # Touching .data realises the lazy data in place.
        to_convert.data

    return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)


@value_converter
def inputcubelist(to_convert):
"""Loads a cubelist from file or returns passed object.
Expand Down Expand Up @@ -483,7 +502,7 @@ def main(
To write to stdout, use a hyphen (-)
memprofile (str):
            Creates 2 files by adding a suffix to the provided argument -
a tracemalloc snapsot at the point of highest memory consumption
a tracemalloc snapshot at the point of highest memory consumption
of your program (suffixed with _SNAPSHOT)
and a track of the maximum memory used by your program
over time (suffixed with _MAX_TRACKER).
Expand Down
2 changes: 1 addition & 1 deletion improver/cli/blend_adjacent_points.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@
@cli.clizefy
@cli.with_output
def process(
*cubes: cli.inputcube,
*cubes: cli.inputcube_nolazy,
coordinate,
central_point: float,
units=None,
Expand Down
40 changes: 34 additions & 6 deletions improver/cli/nowcast_accumulate.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,26 +31,54 @@
# POSSIBILITY OF SUCH DAMAGE.
"""Script to accumulate input data given advection velocity fields."""

from typing import Callable, List

from improver import cli

# The accumulation frequency in minutes.
ACCUMULATION_FIDELITY = 1


def name_constraint(names: List[str]) -> Callable:
    """
    Generates a callable constraint for matching cube names.

    The callable constraint will realise the data of those cubes matching the
    constraint.

    Args:
        names:
            List of cube names to constrain our cubes.

    Returns:
        A callable which when called, returns True or False for the provided cube,
        depending on whether it matches the names provided. A matching cube
        will also have its data realised by the callable.
    """

    def constraint(cube):
        matches = cube.name() in names
        if matches:
            # Realise the data of matching cubes as a deliberate side effect,
            # so later processing does not trigger lazy loading.
            cube.data
        return matches

    return constraint


# Creates the value_converter that clize needs.
inputadvection = cli.create_constrained_inputcubelist_converter(
lambda cube: cube.name()
in ["precipitation_advection_x_velocity", "grid_eastward_wind"],
lambda cube: cube.name()
in ["precipitation_advection_y_velocity", "grid_northward_wind"],
name_constraint(["precipitation_advection_x_velocity", "grid_eastward_wind"]),
name_constraint(["precipitation_advection_y_velocity", "grid_northward_wind"]),
)


@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube,
cube: cli.inputcube_nolazy,
advection_velocity: inputadvection,
orographic_enhancement: cli.inputcube,
orographic_enhancement: cli.inputcube_nolazy,
*,
attributes_config: cli.inputjson = None,
max_lead_time=360,
Expand Down
2 changes: 1 addition & 1 deletion improver/cli/phase_change_level.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
@cli.clizefy
@cli.with_output
def process(
*cubes: cli.inputcube,
*cubes: cli.inputcube_nolazy,
phase_change,
grid_point_radius=2,
horizontal_interpolation=True,
Expand Down
10 changes: 6 additions & 4 deletions improver/cli/threshold.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,6 @@ def process(
ValueError: If threshold_config and threshold_values are both set
ValueError: If threshold_config is used for fuzzy thresholding
"""
import numpy as np

from improver.metadata.probabilistic import in_vicinity_name_format
from improver.threshold import BasicThreshold
from improver.utilities.cube_manipulation import collapse_realizations
Expand All @@ -129,15 +127,19 @@ def process(
thresholds = []
fuzzy_bounds = []
for key in threshold_config.keys():
thresholds.append(np.float32(key))
# Ensure thresholds are float64 to avoid rounding errors during
# possible unit conversion.
thresholds.append(float(key))
# If the first threshold has no bounds, fuzzy_bounds is
# set to None and subsequent bounds checks are skipped
if threshold_config[key] == "None":
fuzzy_bounds = None
continue
fuzzy_bounds.append(tuple(threshold_config[key]))
else:
thresholds = [np.float32(x) for x in threshold_values]
# Ensure thresholds are float64 to avoid rounding errors during possible
# unit conversion.
thresholds = [float(x) for x in threshold_values]
fuzzy_bounds = None

each_threshold_func_list = []
Expand Down
Loading

0 comments on commit 59faf7d

Please sign in to comment.