Merge pull request #321 from simpeg/patches
Patches
kkappler authored Mar 22, 2024
2 parents 803fcfe + d40e58b commit 4311ceb
Showing 38 changed files with 2,740 additions and 1,948 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
@@ -1,3 +1,3 @@
[bumpversion]
current_version = 0.3.12
current_version = 0.3.13
files = setup.py aurora/__init__.py
25 changes: 13 additions & 12 deletions .github/workflows/tests.yml
@@ -18,7 +18,8 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
python-version: [3.8, 3.9, "3.10", "3.11"]
python-version: [3.8, 3.9, "3.10", "3.11"]
# python-version: ["3.10", ]

steps:
- uses: actions/checkout@v2
@@ -49,16 +50,16 @@ jobs:
python -m ipykernel install --user --name aurora-test
# Install any other dependencies you need
# - name: Execute Jupyter Notebooks
# run: |
# jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
# jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
# jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
# jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
# jupyter nbconvert --to notebook --execute tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
# jupyter nbconvert --to notebook --execute tutorials/processing_configuration.ipynb
# jupyter nbconvert --to notebook --execute tutorials/synthetic_data_processing.ipynb
# # Replace "notebook.ipynb" with your notebook's filename
# - name: Execute Jupyter Notebooks
#run: |
#jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
#jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
#jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
#jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
#jupyter nbconvert --to notebook --execute tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
#jupyter nbconvert --to notebook --execute tutorials/processing_configuration.ipynb
#jupyter nbconvert --to notebook --execute tutorials/synthetic_data_processing.ipynb
# Replace "notebook.ipynb" with your notebook's filename
# - name: Commit changes (if any)
# run: |
@@ -71,7 +72,7 @@ jobs:

- name: Run Tests
run: |
# pytest -s -v tests/synthetic/test_processing.py --cov=./ --cov-report=xml --cov=aurora
# pytest -s -v tests/synthetic/test_fourier_coefficients.py --cov=./ --cov-report=xml --cov=aurora
pytest -s -v --cov=./ --cov-report=xml --cov=aurora
- name: "Upload coverage to Codecov"
2 changes: 1 addition & 1 deletion aurora/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.3.12"
__version__ = "0.3.13"

import sys
from loguru import logger
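Since the version string is kept in both .bumpversion.cfg and aurora/__init__.py, a quick check after the bump can confirm the two stayed in sync; this snippet is illustrative and not part of the diff:

    # Illustrative sanity check: the package should now report the bumped version.
    import aurora

    assert aurora.__version__ == "0.3.13"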
75 changes: 43 additions & 32 deletions aurora/pipelines/fourier_coefficients.py
@@ -76,11 +76,11 @@
GROUPBY_COLUMNS = ["survey", "station", "sample_rate"]


def decimation_and_stft_config_creator(
initial_sample_rate, max_levels=6, decimation_factors=None, time_period=None
def fc_decimations_creator(
initial_sample_rate, decimation_factors=None, max_levels=6, time_period=None
):
"""
Based on the number of samples in the run, we can compute the maximum number of valid decimation levels.
Based on the number of samples in the runts, we can compute the maximum number of valid decimation levels.
This would re-use code in processing summary ... or we could just decimate until we can't anymore?
You can provide something like: decimation_info = {0: 1.0, 1: 4.0, 2: 4.0, 3: 4.0}
@@ -89,16 +89,23 @@ def decimation_and_stft_config_creator(
mt_metadata.transfer_functions.processing.Processing.assign_bands() to see how this was done in the past
Args:
initial_sample_rate:
max_levels:
decimation_factors:
initial_sample_rate: float
Sample rate of the "level0" data -- usually the sample rate during field acquisition.
decimation_factors: list (or other iterable)
The decimation factors that will be applied at each FC decimation level
max_levels: int
The maximum number of decimation levels
time_period:
Returns:
decimation_and_stft_config: list
Each element of the list is a Decimation() object. The order of the list implies the order of the cascading
decimation (thus no decimation levels are omitted). This could be changed in future by using a dict
instead of a list, e.g. decimation_factors = dict(zip(np.arange(max_levels), decimation_factors))
Each element of the list is a Decimation() object (a.k.a. FCDecimation).
The order of the list corresponds to the order of the cascading decimation
- No decimation levels are omitted.
- This could be changed in future by using a dict instead of a list,
- e.g. decimation_factors = dict(zip(np.arange(max_levels), decimation_factors))
"""
if not decimation_factors:
@@ -109,10 +116,11 @@ def decimation_and_stft_config_creator(
decimation_factors[0] = 1

# See Note 1
decimation_and_stft_config = []
fc_decimations = []
for i_dec_level, decimation_factor in enumerate(decimation_factors):
dd = FCDecimation()
dd.decimation_level = i_dec_level
dd.id = f"{i_dec_level}"
dd.decimation_factor = decimation_factor
if i_dec_level == 0:
current_sample_rate = 1.0 * initial_sample_rate
@@ -130,13 +138,13 @@ def decimation_and_stft_config_creator(
logger.info(msg)
raise NotImplementedError(msg)

decimation_and_stft_config.append(dd)
fc_decimations.append(dd)

return decimation_and_stft_config
return fc_decimations
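As a usage sketch (not part of the diff), the renamed helper can be called with its defaults or with an explicit cascade like the decimation_info example in the docstring above; the sample rate here is a placeholder:

    from aurora.pipelines.fourier_coefficients import fc_decimations_creator

    # Defaults: level 0 keeps the initial sample rate, later levels decimate further.
    fc_decimations = fc_decimations_creator(initial_sample_rate=1.0)

    # Or supply an explicit cascade, mirroring the decimation_info example above.
    fc_decimations = fc_decimations_creator(
        initial_sample_rate=1.0, decimation_factors=[1, 4, 4, 4]
    )
    for fc_dec in fc_decimations:
        print(fc_dec.id, fc_dec.decimation_factor)  # attributes set in this diff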


@path_or_mth5_object
def add_fcs_to_mth5(m, decimation_and_stft_configs=None):
def add_fcs_to_mth5(m, fc_decimations=None):
"""
usssr_grouper: output of a groupby on unique {survey, station, sample_rate} tuples
@@ -161,12 +169,12 @@ def add_fcs_to_mth5(m, decimation_and_stft_configs=None):
run_summary = station_obj.run_summary

# Get the FC schemes
if not decimation_and_stft_configs:
msg = "FC config not supplied, using default, creating on the fly"
logger.info(f"{msg}")
decimation_and_stft_configs = decimation_and_stft_config_creator(
sample_rate, time_period=None
if not fc_decimations:
msg = (
"FC Decimation configs not supplied, using default, creating on the fly"
)
logger.info(f"{msg}")
fc_decimations = fc_decimations_creator(sample_rate, time_period=None)

# Make this a function that can be done using df.apply()
# I wonder if daskifying that will cause issues with multiple threads trying to
@@ -179,12 +187,12 @@ def add_fcs_to_mth5(m, decimation_and_stft_configs=None):
run_obj = m.from_reference(run_row.hdf5_reference)

# Set the time period:
for decimation_and_stft_config in decimation_and_stft_configs:
decimation_and_stft_config.time_period = run_obj.metadata.time_period
for fc_decimation in fc_decimations:
fc_decimation.time_period = run_obj.metadata.time_period

runts = run_obj.to_runts(
start=decimation_and_stft_config.time_period.start,
end=decimation_and_stft_config.time_period.end,
start=fc_decimation.time_period.start,
end=fc_decimation.time_period.end,
)
# runts = run_obj.to_runts() # skip setting time_period explicitly

@@ -194,29 +202,32 @@ def add_fcs_to_mth5(m, decimation_and_stft_configs=None):
run_obj.metadata.id
)

print(" TIMING CORRECTIONS WOULD GO HERE ")
# If timing corrections were needed they could go here, right before STFT

for i_dec_level, decimation_stft_obj in enumerate(
decimation_and_stft_configs
):
for i_dec_level, fc_decimation in enumerate(fc_decimations):
if i_dec_level != 0:
# Apply decimation
run_xrds = prototype_decimate(decimation_stft_obj, run_xrds)
logger.debug(f"type decimation_stft_obj = {type(decimation_stft_obj)}")
if not decimation_stft_obj.is_valid_for_time_series_length(
run_xrds = prototype_decimate(fc_decimation, run_xrds)

if not fc_decimation.is_valid_for_time_series_length(
run_xrds.time.shape[0]
):
logger.info(
f"Decimation Level {i_dec_level} invalid, TS of {run_xrds.time.shape[0]} samples too short"
)
continue

stft_obj = run_ts_to_stft_scipy(decimation_stft_obj, run_xrds)
stft_obj = run_ts_to_stft_scipy(fc_decimation, run_xrds)
stft_obj = calibrate_stft_obj(stft_obj, run_obj)

# Pack FCs into h5 and update metadata
decimation_level = fc_group.add_decimation_level(f"{i_dec_level}")
decimation_level.from_xarray(stft_obj)

decimation_level = fc_group.add_decimation_level(
f"{i_dec_level}", decimation_level_metadata=fc_decimation
)
decimation_level.from_xarray(
stft_obj, decimation_level.metadata.sample_rate_decimation
)
decimation_level.update_metadata()
fc_group.update_metadata()
return
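A minimal end-to-end sketch of the renamed entry point follows; the file path is hypothetical, and passing a path (rather than an open MTH5 object) relies on the path_or_mth5_object decorator shown above:

    from aurora.pipelines.fourier_coefficients import add_fcs_to_mth5, fc_decimations_creator

    mth5_path = "example_station.h5"  # hypothetical MTH5 file containing runs
    # Either build FC decimation configs explicitly ...
    fc_decimations = fc_decimations_creator(initial_sample_rate=1.0)
    add_fcs_to_mth5(mth5_path, fc_decimations=fc_decimations)
    # ... or pass None and let add_fcs_to_mth5 create defaults per sample rate.
    add_fcs_to_mth5(mth5_path, fc_decimations=None)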
2 changes: 1 addition & 1 deletion aurora/pipelines/helpers.py
@@ -22,5 +22,5 @@ def initialize_config(processing_config):
elif isinstance(processing_config, Processing):
config = processing_config
else:
raise Exception(f"Unrecognized config of type {type(processing_config)}")
raise TypeError(f"Unrecognized config of type {type(processing_config)}")
return config
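To illustrate the behavior change (illustrative only, not from the diff), callers that previously caught the bare Exception should now catch TypeError:

    from aurora.pipelines.helpers import initialize_config

    try:
        initialize_config(42)  # neither a Processing object nor a config path
    except TypeError as exc:
        print(exc)  # Unrecognized config of type <class 'int'>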