Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion aurora/config/processing_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,6 @@ def to_json(self, json_fn=None, indent=" " * 4):
json_dict["local_station_id"] = self.local_station_id
json_dict["reference_station_id"] = self.reference_station_id
json_dict["channel_scale_factors"] = self.channel_scale_factors
print(self.decimation_level_ids)
for dec_level_id in self.decimation_level_ids:
json_dict[dec_level_id] = self_dict[dec_level_id].__dict__

Expand Down
5 changes: 1 addition & 4 deletions aurora/pipelines/process_mth5.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,9 +179,6 @@ def process_mth5_decimation_level(config, local, remote, units="MT"):
)

transfer_function_obj.apparent_resistivity(units=units)
# print(transfer_function_obj.rho.shape)
# print(transfer_function_obj.rho[0])
# print(transfer_function_obj.rho[-1])
return transfer_function_obj


Expand Down Expand Up @@ -327,7 +324,7 @@ def process_mth5_run(
processing_config, mth5_obj, run_id
)

print("APPLY TIMING CORRECTIONS HERE")
# APPLY TIMING CORRECTIONS HERE
else:
local = prototype_decimate(processing_config, local)
if processing_config.reference_station_id:
Expand Down
2 changes: 0 additions & 2 deletions aurora/pipelines/time_series_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,9 +177,7 @@ def calibrate_stft_obj(stft_obj, run_obj, units="MT", channel_scale_factors=None
channel_filter = mth5_channel.channel_response_filter
if not channel_filter.filters_list:
print("WARNING UNEXPECTED CHANNEL WITH NO FILTERS")
# ONE OFF HACK FOR SAO missing data
if channel_id == "hy":
print("WARNING ONE-OFF PKD SAO RR")
channel_filter = run_obj.get_channel("hx").channel_response_filter
calibration_response = channel_filter.complex_response(stft_obj.frequency.data)
if channel_scale_factors:
Expand Down
1 change: 0 additions & 1 deletion aurora/pipelines/transfer_function_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,6 @@ def process_transfer_functions(
)
regression_estimator.estimate()
transfer_function_obj.set_tf(regression_estimator, band.center_period)
print("Add method for compute residuals and noise covariance")
else:
X, Y, RR = handle_nan(X, Y, RR, drop_dim="observation")
regression_estimator = regression_class(
Expand Down
2 changes: 1 addition & 1 deletion aurora/time_series/frequency_band_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def configure_frequency_bands(config):
)
elif config["band_setup_style"] == "band edges":
frequency_bands.band_edges = config["band_edges"]
print("Not Yet Supported")
# "Not Yet Supported"
raise NotImplementedError
elif config["band_setup_style"] == "logarithmic range":
lower_bound = config["frequency_bands_lower_bound"]
Expand Down
6 changes: 1 addition & 5 deletions aurora/time_series/windowed_time_series.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,16 +152,13 @@ def detrend(data=None, detrend_axis=None, detrend_type=None, inplace=True):
if detrend_axis is None:
detrend_axis = get_time_coordinate_axis(data)
if not inplace:
print("deep copy dataset and then overwrite")
raise NotImplementedError

for channel in data.keys():
print(f"channel {channel}")

# windowed_array = data[key].data
nanless_data = data[channel].dropna(dim="time")
ensembles = nanless_data.data
print("Nan Checker goes here")
if detrend_type: # neither False nor None
ensembles = ssig.detrend(
ensembles, axis=detrend_axis, type=detrend_type
Expand All @@ -180,7 +177,6 @@ def detrend(data=None, detrend_axis=None, detrend_type=None, inplace=True):
else:
data[channel].data = ensembles
else:
print("deep copy dataset and then overwrite")
raise NotImplementedError
return data

Expand Down Expand Up @@ -225,7 +221,7 @@ def delay_correction(self, dataset, run_obj):
-------

"""
print("NOT TESTED - PSEUDOCODE ONLY")
# "NOT TESTED - PSEUDOCODE ONLY"
for channel_id in dataset.keys():
mth5_channel = run_obj.get_channel(channel_id)
channel_filter = mth5_channel.channel_response_filter
Expand Down
10 changes: 4 additions & 6 deletions aurora/time_series/windowing_scheme.py
Original file line number Diff line number Diff line change
Expand Up @@ -420,11 +420,10 @@ def fft_xr_ds(dataset, sample_rate, detrend_type=None, prewhitening=None):
# TODO:
from aurora.time_series.frequency_domain_helpers import get_fft_harmonics

print(
"Modify this so that demeaning and detrending is happening before "
"the application of the tapering window. Add a second demean right "
"before the FFT"
)
# TODO: Modify this so that demeaning and detrending
# happen before the application of the tapering
# window. Add a second demean right
# before the FFT.

samples_per_window = len(dataset.coords["within-window time"])
n_fft_harmonics = int(samples_per_window / 2) # no bin at Nyquist,
Expand All @@ -442,7 +441,6 @@ def fft_xr_ds(dataset, sample_rate, detrend_type=None, prewhitening=None):
data=dataset, detrend_axis=time_coordinate_index, detrend_type="linear"
)
for channel_id in dataset.keys():
print(f"channel_id {channel_id}")
data = dataset[channel_id].data
# Here is where you would add segment-by-segment prewhitening
fspec_array = np.fft.fft(data, axis=time_coordinate_index)
Expand Down
1 change: 0 additions & 1 deletion aurora/transfer_function/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ def __init__(self, tf_header, frequency_bands, **kwargs):
tf_header
frequency_bands
"""
print("TODO: change self.T to self.period")
self.tf_header = tf_header
self.frequency_bands = frequency_bands
self.num_segments = None
Expand Down
24 changes: 13 additions & 11 deletions aurora/transfer_function/iter_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,17 +98,19 @@ def converged(self, b, b0):
iteration_cond = self.number_of_iterations >= self.max_number_of_iterations
if tolerance_cond or iteration_cond:
converged = True
if tolerance_cond:
print(
f"Converged Due to MaxChange < Tolerance after "
f" {self.number_of_iterations} of "
f" {self.max_number_of_iterations} iterations"
)
elif iteration_cond:
print(
f"Converged Due to maximum number_of_iterations "
f" {self.max_number_of_iterations}"
)
# These print statements are not very clear and
# should be reworded.
# if tolerance_cond:
# print(
# f"Converged Due to MaxChange < Tolerance after "
# f" {self.number_of_iterations} of "
# f" {self.max_number_of_iterations} iterations"
# )
# elif iteration_cond:
# print(
# f"Converged Due to maximum number_of_iterations "
# f" {self.max_number_of_iterations}"
# )
else:
converged = False

Expand Down
2 changes: 0 additions & 2 deletions aurora/transfer_function/regression/TRME.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,6 @@ def sigma(self, QHY_or_QHYc, Y_or_Yc, correction_factor=1.0):
assert (sigma > 0).all()
except AssertionError:
print("WARNING - Negative error variances observed")
print(sigma)
print("Setting sigma to zero - Negative sigma_squared observed")
sigma *= 0
# raise Exception
Expand Down Expand Up @@ -296,7 +295,6 @@ def estimate(self):
b0 = self.b

if self.iter_control.max_number_of_redescending_iterations:
print(b0)
# self.iter_control.number_of_redescending_iterations = 0;
while self.iter_control.continue_redescending:
self.iter_control.number_of_redescending_iterations += 1
Expand Down
3 changes: 0 additions & 3 deletions aurora/transfer_function/regression/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,6 @@ def noise_covariance(self):
return self.cov_nn

def b_to_xarray(self):
print("TEST IMPLEMENTATION")
xra = xr.DataArray(
np.transpose(self.b),
dims=["output_channel", "input_channel"],
Expand Down Expand Up @@ -302,7 +301,5 @@ def estimate_ols(self, mode="solve"):
return b

def estimate(self):
print("this method is not defined for the abstract base class")
print("But we put OLS in here for dev")
Z = self.estimate_ols(mode="qr")
return Z
2 changes: 0 additions & 2 deletions aurora/transfer_function/transfer_function_collection.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,6 @@ def check_all_channels_present(self):
tmp = tmp.sel(output_channel=output_channels)
self.merged_tf = tmp

print("NOW ADD Hz to cov_nn")
n_output_ch = len(self.merged_tf.output_channel) # 3
n_periods = len(self.merged_tf.period)
cov_nn_dims = (n_output_ch, n_output_ch, n_periods)
Expand Down Expand Up @@ -379,7 +378,6 @@ def write_emtf_z_file(self, z_file_path, run_obj=None, orientation_strs=None):
line = ""
for out_ch in tf.tf_header.output_channels:
for inp_ch in tf.tf_header.input_channels:
print(out_ch, inp_ch)
chchtf = tf_xr.loc[out_ch, inp_ch, :]
real_part = np.real(chchtf.data[period_index])
imag_part = np.imag(chchtf.data[period_index])
Expand Down
Loading