Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@ ignore = E501, W605, W503, E203, F401, E722
#,E402, E203,E722
#F841, E402, E722
#ignore = E203, E266, E501, W503, F403, F401
max-line-length = 120
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
29 changes: 28 additions & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,39 @@ jobs:
conda install -c conda-forge pytest pytest-cov certifi">=2017.4.17" pandoc
pip install -r requirements-dev.txt
pip install git+https://github.com/kujaku11/mt_metadata.git@fcs
pip install git+https://github.com/kujaku11/mth5.git@fc
pip install git+https://github.com/kujaku11/mth5.git

- name: Install Our Package
run: |
pip install -e .
conda list

- name: Install Jupyter and dependencies
run: |
pip install jupyter
pip install ipykernel
python -m ipykernel install --user --name aurora-test
# Install any other dependencies you need

- name: Execute Jupyter Notebooks
run: |
jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
jupyter nbconvert --to notebook --execute tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
jupyter nbconvert --to notebook --execute tutorials/processing_configuration.ipynb
jupyter nbconvert --to notebook --execute tutorials/synthetic_data_processing.ipynb
# Replace "notebook.ipynb" with your notebook's filename

# - name: Commit changes (if any)
# run: |
# git config --local user.email "action@github.com"
# git config --local user.name "GitHub Action"
# git commit -a -m "Execute Jupyter notebook"
# git push
# if: ${{ success() }}


- name: Run Tests
run: |
Expand Down
4 changes: 4 additions & 0 deletions aurora/sandbox/mth5_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,10 +121,14 @@ def get_time_period_bounds(ch):
else:
if start is None:
ch_start = '1970-01-01 00:00:00'
else:
ch_start = start
if end is None:
ch_end = datetime.datetime.now()
ch_end = ch_end.replace(hour=0, minute=0, second=0, microsecond=0)
ch_end = str(ch_end)
else:
ch_end = end
return ch_start, ch_end

fdsn_object = FDSN(mth5_version=mth5_version)
Expand Down
17 changes: 7 additions & 10 deletions aurora/time_series/apodization_window.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,24 +201,21 @@ def enbw(self, fs):
return fs * self.S2 / (self.S1**2)

def test_linear_spectral_density_factor(self):
"""
This is just a test to verify some algebra
"""This is just a test to verify some algebra
Claim:
The lsd_calibration factors
A (1./coherent_gain)*np.sqrt((2*dt)/(nenbw*N))
A (1./coherent\_gain)\*np.sqrt((2\*dt)/(nenbw\*N))
and
B np.sqrt(2/(sample_rate*self.S2))
B np.sqrt(2/(sample\_rate\*self.S2))
are identical.

Note sqrt(2*dt)==sqrt(2*sample_rate) so we can cancel these terms and
Note sqrt(2\*dt)==sqrt(2\*sample_rate) so we can cancel these terms and
A=B IFF
(1./coherent_gain) * np.sqrt(1/(nenbw*N)) == 1/np.sqrt(S2)
(1./coherent\_gain) * np.sqrt(1/(nenbw\*N)) == 1/np.sqrt(S2)
which I show in github aurora issue #3.
(CG**2) * NENBW *N = S2

Returns
-------
(CG\*\*2) \* NENBW \*N = S2


"""
lsd_factor1 = (1.0 / self.coherent_gain) * np.sqrt(
1.0 / (self.nenbw * self.num_samples_window)
Expand Down
15 changes: 7 additions & 8 deletions aurora/time_series/time_axis_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,14 +45,13 @@ def make_time_axis(t0, n_samples, sample_rate):


def test_generate_time_axis(t0, n_samples, sample_rate):
"""
Two obvious ways to generate an axis of timestanps here. One method is slow and
"""Two obvious ways to generate an axis of timestamps here. One method is slow and
more precise, the other is fast but drops some nanoseconds due to integer
roundoff error.

To see this, consider the example of, say, 3Hz: there are 333333333ns between samples,
which drops 1ns per second if we scale a nanoseconds=np.arange(N)
The issue here is that the nanoseconds granularity forces a roundoff error,
which drops 1ns per second if we scale a nanoseconds=np.arange(N)
The issue here is that the nanoseconds granularity forces a roundoff error


Probably will use logic like:
Expand All @@ -74,20 +73,20 @@ def test_generate_time_axis(t0, n_samples, sample_rate):
"""
t0 = np.datetime64(t0)

# <SLOW>
# SLOW
tt = time.time()
time_index_1 = slow_comprehension(t0, n_samples, sample_rate)
processing_time_1 = tt - time.time()
print(f"processing_time_1 = {processing_time_1}")
# </SLOW>


# <FAST>
# FAST
tt = time.time()
time_index_2 = fast_arange(t0, n_samples, sample_rate)
processing_time_2 = tt - time.time()
print(f"processing_time_2 {processing_time_2}")
print(f"ratio of processing times {processing_time_1/processing_time_2}")
# </FAST>

if (np.abs(time_index_2 - time_index_1)).sum() == 0:
pass
else:
Expand Down
4 changes: 2 additions & 2 deletions aurora/transfer_function/plot/comparison_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
def compare_two_z_files(
z_path1,
z_path2,
angle1=0.0,
angle2=0.0,
angle1 = 0.0,
angle2 = 0.0,
label1="",
label2="",
scale_factor1=1.0,
Expand Down
2 changes: 1 addition & 1 deletion aurora/transfer_function/regression/m_estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ def compute_noise_covariance(self):
"""
res_clean = self.Yc - self.Y_hat
SSR_clean = np.conj(res_clean.conj().T @ res_clean)
inv_psi_prime2 = np.diag(1.0 / (self.expectation_psi_prime**2))
inv_psi_prime2 = np.diag(1.0 / (self.expectation_psi_prime ** 2))
cov_nn = inv_psi_prime2 @ SSR_clean / self.degrees_of_freedom

self.cov_nn = xr.DataArray(
Expand Down
4 changes: 2 additions & 2 deletions aurora/transfer_function/weights/edf_weights.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,15 +44,15 @@ def p1(self):
Threshold applied to edf. All edf below this value
are set to weight=0
"""
return self.c1 * (self.n_data**self.alpha)
return self.c1 * (self.n_data ** self.alpha)

@property
def p2(self):
"""
Threshold applied to edf. All edf above this value
are set to weight=0
"""
return self.c2 * (self.n_data**self.alpha)
return self.c2 * (self.n_data ** self.alpha)

def compute_weights(self, X, use):
"""
Expand Down
Loading