Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
baed4c6
Update FrequencyBand(s) to use Band
kkappler Sep 23, 2023
c54be58
fix import
kkappler Sep 23, 2023
f3d9f2e
remove unused df_from_bands (it is also in mt_metadata)
kkappler Sep 24, 2023
c8352c7
minor change to doc only
kkappler Sep 25, 2023
7a0c726
add notebook
kkappler Sep 25, 2023
063c091
try add ipynb tester to actions
kkappler Sep 25, 2023
f9c8d26
modify kernel name in ipynb to agree with tests.yml
kkappler Sep 25, 2023
840a37b
try to add environment to jupyter
kkappler Sep 25, 2023
c1326d7
access harmonic_indices as prop, not method
kkappler Sep 26, 2023
9ec2b7e
Merge branch 'fourier_coefficients' into fix_issue_194
kkappler Sep 26, 2023
d47464f
try add pole zero example
kkappler Sep 26, 2023
a37cb3b
remove commit message from ipynb flow
kkappler Sep 26, 2023
cd54266
add (updated and working) dataset_definition.ipynb to gh actions
kkappler Sep 26, 2023
456f7ff
merge selected changes from issue/branch #283
kkappler Sep 27, 2023
02b97b3
update ipynb
kkappler Sep 27, 2023
c095d9d
add cas04 test to notebooks
kkappler Sep 27, 2023
1308869
merge some fixes from #283
kkappler Sep 27, 2023
697130a
revert tests to master branch of mth5
kkappler Sep 27, 2023
adf4878
fix bug
kkappler Sep 27, 2023
e3c9ef8
merge in from fc branch
kkappler Sep 27, 2023
682ab3a
update readback to support survey arg
kkappler Sep 27, 2023
6b43c2c
update operate_aurora to run, add to tests
kkappler Sep 27, 2023
127d3d0
running all notebooks
kkappler Sep 27, 2023
061e786
fix bug - no data for test
kkappler Sep 27, 2023
429a7d6
Merge pull request #297 from simpeg/fix_issue_194
kkappler Sep 27, 2023
b920a1e
remove unused imports
kkappler Sep 27, 2023
93b288e
use builtin method for freqs rather than import
kkappler Sep 27, 2023
c3ada03
replace aurora import with mt_metadata
kkappler Sep 27, 2023
7de30d4
remove unused module
kkappler Sep 27, 2023
506ebde
fix typo
kkappler Sep 27, 2023
3b7e0ac
add test
kkappler Sep 27, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@ ignore = E501, W605, W503, E203, F401, E722
#,E402, E203,E722
#F841, E402, E722
#ignore = E203, E266, E501, W503, F403, F401
max-line-length = 120
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
29 changes: 28 additions & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,39 @@ jobs:
conda install -c conda-forge pytest pytest-cov certifi">=2017.4.17" pandoc
pip install -r requirements-dev.txt
pip install git+https://github.com/kujaku11/mt_metadata.git@fcs
pip install git+https://github.com/kujaku11/mth5.git@fc
pip install git+https://github.com/kujaku11/mth5.git

- name: Install Our Package
run: |
pip install -e .
conda list

- name: Install Jupyter and dependencies
run: |
pip install jupyter
pip install ipykernel
python -m ipykernel install --user --name aurora-test
# Install any other dependencies you need

- name: Execute Jupyter Notebooks
run: |
jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
jupyter nbconvert --to notebook --execute tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
jupyter nbconvert --to notebook --execute tutorials/processing_configuration.ipynb
jupyter nbconvert --to notebook --execute tutorials/synthetic_data_processing.ipynb
# Replace "notebook.ipynb" with your notebook's filename

# - name: Commit changes (if any)
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git commit -a -m "Execute Jupyter notebook"
# git push
# if: ${{ success() }}


- name: Run Tests
run: |
Expand Down
10 changes: 2 additions & 8 deletions aurora/pipelines/time_series_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,9 @@
import scipy.signal as ssig
import xarray as xr

from aurora.time_series.frequency_domain_helpers import get_fft_harmonics
from aurora.time_series.windowed_time_series import WindowedTimeSeries
from aurora.time_series.windowing_scheme import window_scheme_from_decimation


def validate_sample_rate(run_ts, expected_sample_rate, tol=1e-4):
"""

Expand Down Expand Up @@ -69,7 +67,7 @@ def apply_recoloring(decimation_obj, stft_obj):
"""
Parameters
----------
decimation_obj : mt_metadata.transfer_functions.processing.aurora.DecimationLevel
decimation_obj : mt_metadata.transfer_functions.processing.fourier_coefficients.decimation.Decimation
Information about how the decimation level is to be processed
stft_obj : xarray.core.dataset.Dataset
Time series of Fourier coefficients to be recoloured
Expand All @@ -87,11 +85,7 @@ def apply_recoloring(decimation_obj, stft_obj):
return stft_obj

if decimation_obj.prewhitening_type == "first difference":
# replace below with decimation_obj.get_fft_harmonics() ?
freqs = get_fft_harmonics(
decimation_obj.window.num_samples,
decimation_obj.sample_rate_decimation,
)
freqs = decimation_obj.fft_frequencies
prewhitening_correction = 1.0j * 2 * np.pi * freqs # jw

stft_obj /= prewhitening_correction
Expand Down
10 changes: 2 additions & 8 deletions aurora/pipelines/transfer_function_kernel.py
Original file line number Diff line number Diff line change
Expand Up @@ -503,22 +503,16 @@ def make_decimation_dict_for_tf(tf_collection, processing_config):
-------

"""
from mt_metadata.transfer_functions.processing.aurora.frequency_band import (
FrequencyBand,
)
from mt_metadata.transfer_functions.io.zfiles.zmm import PERIOD_FORMAT

decimation_dict = {}

for i_dec, dec_level_cfg in enumerate(processing_config.decimations):
for i_band, band in enumerate(dec_level_cfg.bands):
fb = FrequencyBand(
left=band.frequency_min, right=band.frequency_max
)
period_key = f"{fb.center_period:{PERIOD_FORMAT}}"
period_key = f"{band.center_period:{PERIOD_FORMAT}}"
period_value = {}
period_value["level"] = i_dec + 1 # +1 to match EMTF standard
period_value["bands"] = tuple(band.harmonic_indices()[np.r_[0, -1]])
period_value["bands"] = tuple(band.harmonic_indices[np.r_[0, -1]])
period_value["sample_rate"] = dec_level_cfg.sample_rate_decimation
try:
period_value["npts"] = tf_collection.tf_dict[
Expand Down
4 changes: 3 additions & 1 deletion aurora/sandbox/butterworth_filters.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@

"""
import scipy.signal as ssig
from aurora.time_series.frequency_domain_helpers import get_fft_harmonics
from mt_metadata.transfer_functions.processing.aurora.decimation_level import (
get_fft_harmonics,
)


def butter_bandpass(low_cut, high_cut, sample_rate, order=5):
Expand Down
4 changes: 4 additions & 0 deletions aurora/sandbox/mth5_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,10 +121,14 @@ def get_time_period_bounds(ch):
else:
if start is None:
ch_start = '1970-01-01 00:00:00'
else:
ch_start = start
if end is None:
ch_end = datetime.datetime.now()
ch_end = ch_end.replace(hour=0, minute=0, second=0, microsecond=0)
ch_end = str(ch_end)
else:
ch_end = end
return ch_start, ch_end

fdsn_object = FDSN(mth5_version=mth5_version)
Expand Down
17 changes: 7 additions & 10 deletions aurora/time_series/apodization_window.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,24 +201,21 @@ def enbw(self, fs):
return fs * self.S2 / (self.S1**2)

def test_linear_spectral_density_factor(self):
"""
This is just a test to verify some algebra
"""This is just a test to verify some algebra
Claim:
The lsd_calibration factors
A (1./coherent_gain)*np.sqrt((2*dt)/(nenbw*N))
A (1./coherent\_gain)\*np.sqrt((2\*dt)/(nenbw\*N))
and
B np.sqrt(2/(sample_rate*self.S2))
B np.sqrt(2/(sample\_rate\*self.S2))
are identical.

Note sqrt(2*dt)==sqrt(2*sample_rate) so we can cancel these terms and
Note sqrt(2\*dt)==sqrt(2\*sample_rate) so we can cancel these terms and
A=B IFF
(1./coherent_gain) * np.sqrt(1/(nenbw*N)) == 1/np.sqrt(S2)
(1./coherent\_gain) * np.sqrt(1/(nenbw\*N)) == 1/np.sqrt(S2)
which I show in github aurora issue #3.
(CG**2) * NENBW *N = S2

Returns
-------
(CG\*\*2) \* NENBW \*N = S2


"""
lsd_factor1 = (1.0 / self.coherent_gain) * np.sqrt(
1.0 / (self.nenbw * self.num_samples_window)
Expand Down
46 changes: 3 additions & 43 deletions aurora/time_series/frequency_band_helpers.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import numpy as np
import pandas as pd

from mt_metadata.transfer_functions.processing.aurora.frequency_band import (
from mt_metadata.transfer_functions.processing.aurora.band import (
FrequencyBands,
)

Expand Down Expand Up @@ -48,7 +48,7 @@ def frequency_band_edges(
f_lower_bound, f_upper_bound, num_bands_per_decade=None, num_bands=None
):
"""
Provides logarithmically spaced fenceposts acoss lowest and highest
Provides logarithmically spaced fenceposts across lowest and highest
frequencies. This is a lot like calling logspace. The resultant gates
have constant Q, i.e. deltaF/f_center=Q=constant.
where f_center is defined geometrically, i.e. sqrt(f2*f1) is the center freq
Expand Down Expand Up @@ -95,44 +95,4 @@ def frequency_band_edges(
print(f"exponents = {exponents}")
fence_posts = f_lower_bound * (bases**exponents)
print(f"fence posts = {fence_posts}")
return fence_posts


def df_from_bands(band_list):
"""
Utility function that transforms a list of bands into a dataframe

Note: The decimation_level here is +1 to agree with EMTF convention.
Not clear this is really necessary

Parameters
----------
band_list: list
obtained from mt_metadata.transfer_functions.processing.aurora.decimation_level.DecimationLevel.bands

Returns
-------
out_df: pd.Dataframe
Same format as that generated by EMTFBandSetupFile.get_decimation_level()
"""
df_columns = [
"decimation_level",
"lower_bound_index",
"upper_bound_index",
"frequency_min",
"frequency_max",
]
n_rows = len(band_list)
df_columns_dict = {}
for col in df_columns:
df_columns_dict[col] = n_rows * [None]
for i_band, band in enumerate(band_list):
df_columns_dict["decimation_level"][i_band] = band.decimation_level + 1
df_columns_dict["lower_bound_index"][i_band] = band.index_min
df_columns_dict["upper_bound_index"][i_band] = band.index_max
df_columns_dict["frequency_min"][i_band] = band.frequency_min
df_columns_dict["frequency_max"][i_band] = band.frequency_max
out_df = pd.DataFrame(data=df_columns_dict)
out_df.sort_values(by="lower_bound_index", inplace=True)
out_df.reset_index(inplace=True, drop=True)
return out_df
return fence_posts
29 changes: 0 additions & 29 deletions aurora/time_series/frequency_domain_helpers.py

This file was deleted.

15 changes: 7 additions & 8 deletions aurora/time_series/time_axis_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,14 +45,13 @@ def make_time_axis(t0, n_samples, sample_rate):


def test_generate_time_axis(t0, n_samples, sample_rate):
"""
Two obvious ways to generate an axis of timestanps here. One method is slow and
"""Two obvious ways to generate an axis of timestamps here. One method is slow and
more precise, the other is fast but drops some nanoseconds due to integer
roundoff error.

To see this, consider the example of say 3 Hz: there are 333333333 ns between samples,
which drops 1ns per second if we scale a nanoseconds=np.arange(N)
The issue here is that the nanoseconds granularity forces a roundoff error,
which drops 1ns per second if we scale a nanoseconds=np.arange(N)
The issue here is that the nanoseconds granularity forces a roundoff error


Probably will use logic like:
Expand All @@ -74,20 +73,20 @@ def test_generate_time_axis(t0, n_samples, sample_rate):
"""
t0 = np.datetime64(t0)

# <SLOW>
# SLOW
tt = time.time()
time_index_1 = slow_comprehension(t0, n_samples, sample_rate)
processing_time_1 = tt - time.time()
print(f"processing_time_1 = {processing_time_1}")
# </SLOW>


# <FAST>
# FAST
tt = time.time()
time_index_2 = fast_arange(t0, n_samples, sample_rate)
processing_time_2 = tt - time.time()
print(f"processing_time_2 {processing_time_2}")
print(f"ratio of processing times {processing_time_1/processing_time_2}")
# </FAST>

if (np.abs(time_index_2 - time_index_1)).sum() == 0:
pass
else:
Expand Down
2 changes: 1 addition & 1 deletion aurora/time_series/windowing_scheme.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@
from aurora.time_series.window_helpers import available_number_of_windows_in_array
from aurora.time_series.window_helpers import SLIDING_WINDOW_FUNCTIONS

from mt_metadata.transfer_functions.processing.aurora.frequency_band import (
from mt_metadata.transfer_functions.processing.aurora.decimation_level import (
get_fft_harmonics,
)

Expand Down
7 changes: 2 additions & 5 deletions aurora/transfer_function/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,6 @@ class TransferFunction(Base):
array of transfer functions: TF(Nout, Nin, Nperiods)
T : numpy array
list of periods
Header : transfer_function_header.TransferFunctionHeader object.
TF header contains local site header, remote site header if
appropriate, and information about estimation approach???
cov_ss_inv : numpy array
inverse signal power matrix. aka Cov_SS in EMTF matlab codes
cov_nn : numpy array
Expand Down Expand Up @@ -51,6 +48,8 @@ def __init__(self, decimation_level_id, frequency_bands, **kwargs):

Parameters
----------
_emtf_header : legacy header information used by Egbert's matlab class. Header contains
local site header, remote site header if appropriate, and information about estimation approach
decimation_level_id: int
Identifies the relevant decimation level. Used for accessing the
appropriate info in self.processing config.
Expand Down Expand Up @@ -105,7 +104,6 @@ def periods(self):
periods = self.frequency_bands.band_centers(frequency_or_period="period")
periods = np.flipud(periods)
return periods
# return self.frequency_bands.band_centers(frequency_or_period="period")

def _initialize_arrays(self):
"""
Expand Down Expand Up @@ -140,7 +138,6 @@ def _initialize_arrays(self):
tf_array,
dims=["output_channel", "input_channel", "period"], # frequency"],
coords={
# "frequency": self.frequency_bands.band_centers(),
"period": self.periods,
"output_channel": self.tf_header.output_channels,
"input_channel": self.tf_header.input_channels,
Expand Down
4 changes: 2 additions & 2 deletions aurora/transfer_function/plot/comparison_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
def compare_two_z_files(
z_path1,
z_path2,
angle1=0.0,
angle2=0.0,
angle1 = 0.0,
angle2 = 0.0,
label1="",
label2="",
scale_factor1=1.0,
Expand Down
2 changes: 1 addition & 1 deletion aurora/transfer_function/regression/m_estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ def compute_noise_covariance(self):
"""
res_clean = self.Yc - self.Y_hat
SSR_clean = np.conj(res_clean.conj().T @ res_clean)
inv_psi_prime2 = np.diag(1.0 / (self.expectation_psi_prime**2))
inv_psi_prime2 = np.diag(1.0 / (self.expectation_psi_prime ** 2))
cov_nn = inv_psi_prime2 @ SSR_clean / self.degrees_of_freedom

self.cov_nn = xr.DataArray(
Expand Down
3 changes: 0 additions & 3 deletions aurora/transfer_function/transfer_function_collection.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,6 @@
import numpy as np
import xarray as xr

from pathlib import Path

from aurora.time_series.frequency_domain_helpers import get_fft_harmonics
from aurora.transfer_function.plot.rho_phi_helpers import plot_phi
from aurora.transfer_function.plot.rho_phi_helpers import plot_rho

Expand Down
4 changes: 2 additions & 2 deletions aurora/transfer_function/weights/edf_weights.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,15 +44,15 @@ def p1(self):
Threshold applied to edf. All edf below this value
are set to weight=0
"""
return self.c1 * (self.n_data**self.alpha)
return self.c1 * (self.n_data ** self.alpha)

@property
def p2(self):
"""
Threshold applied to edf. All edf above this value
are set to weight=0
"""
return self.c2 * (self.n_data**self.alpha)
return self.c2 * (self.n_data ** self.alpha)

def compute_weights(self, X, use):
"""
Expand Down
Loading