Merge pull request #322 from simpeg/patches
Patches
kkappler committed Apr 2, 2024
2 parents 4311ceb + 1b74a68 commit b11970b
Showing 13 changed files with 150 additions and 23 deletions.
18 changes: 9 additions & 9 deletions .github/workflows/tests.yml
@@ -50,15 +50,15 @@ jobs:
python -m ipykernel install --user --name aurora-test
# Install any other dependencies you need
- # - name: Execute Jupyter Notebooks
- #run: |
- #jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
- #jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
- #jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
- #jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
- #jupyter nbconvert --to notebook --execute tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
- #jupyter nbconvert --to notebook --execute tutorials/processing_configuration.ipynb
- #jupyter nbconvert --to notebook --execute tutorials/synthetic_data_processing.ipynb
+ - name: Execute Jupyter Notebooks
+ run: |
+ jupyter nbconvert --to notebook --execute docs/examples/dataset_definition.ipynb
+ jupyter nbconvert --to notebook --execute docs/examples/make_cas04_single_station_h5.ipynb
+ jupyter nbconvert --to notebook --execute docs/examples/operate_aurora.ipynb
+ jupyter nbconvert --to notebook --execute tests/test_run_on_commit.ipynb
+ jupyter nbconvert --to notebook --execute docs/tutorials/pole_zero_fitting/lemi_pole_zero_fitting_example.ipynb
+ jupyter nbconvert --to notebook --execute docs/tutorials/processing_configuration.ipynb
+ jupyter nbconvert --to notebook --execute docs/tutorials/synthetic_data_processing.ipynb
# Replace "notebook.ipynb" with your notebook's filename
# - name: Commit changes (if any)
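For readers who want to reproduce this CI step outside GitHub Actions, here is a minimal sketch using nbconvert's Python API instead of the CLI. The notebook path and the aurora-test kernel name are taken from the workflow above; the 600-second timeout is an assumed value, not something the workflow specifies.

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

# Execute one of the notebooks listed in the workflow, writing results back in place.
path = "docs/examples/dataset_definition.ipynb"
nb = nbformat.read(path, as_version=4)

# kernel_name matches the "python -m ipykernel install --user --name aurora-test" step above
ep = ExecutePreprocessor(timeout=600, kernel_name="aurora-test")
ep.preprocess(nb, {"metadata": {"path": "docs/examples/"}})

nbformat.write(nb, path)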
30 changes: 21 additions & 9 deletions aurora/transfer_function/regression/base.py
@@ -21,7 +21,7 @@ class RegressionEstimator(object):
model of solving Y = X*b +epsilon for b. X is variously called the "input",
"predictor", "explanatory", "confounding", "independent" "exogenous", variable(s)
or the "design matrix", "model matrix" or "regressor matrix".
- Y are variously called the the "output", "predicted", "outcome",
+ Y are variously called the "output", "predicted", "outcome",
"response", "endogenous", "regressand", or "dependent" variable. I will try to
use input and output.
@@ -51,11 +51,11 @@ class RegressionEstimator(object):
X.data is numpy array (normally 2-dimensional)
These are the input variables. Like the matlab codes each observation
corresponds to a row and each parameter (channel) is a column.
- X : numpy array (normally 2-dimensional)
+ X : numpy array (normally 2-dimensional)
This is a "pure array" representation of _X used to emulate Gary
Egbert's matlab codes. It may or may not be deprecated.
_Y : xarray.Dataset
- These are the output variables, aranged same as X above.
+ These are the output variables, arranged same as X above.
Y : numpy array (normally 2-dimensional)
This is a "pure array" representation of _X used to emulate Gary
Egbert's matlab codes. It may or may not be deprecated.
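As a minimal, self-contained illustration of the Y = X*b + epsilon model the docstring describes (rows are observations, columns are channels; the data below are synthetic, not from aurora):

import numpy as np

rng = np.random.default_rng(0)

# 100 observations of 2 complex-valued input channels (e.g. hx, hy)
X = rng.standard_normal((100, 2)) + 1j * rng.standard_normal((100, 2))
b_true = np.array([[0.5 - 0.2j], [1.5 + 0.3j]])  # regression coefficients
epsilon = 0.01 * (rng.standard_normal((100, 1)) + 1j * rng.standard_normal((100, 1)))

# One complex-valued output channel (e.g. ex)
Y = X @ b_true + epsilon

# Ordinary least squares recovers b_true to within the noise level
b_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)
assert np.allclose(b_hat, b_true, atol=0.05)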
@@ -135,12 +135,22 @@ def solve_underdetermined(self):
"""
20210806
This method was originally in TRME.m, but it does not depend in
- general on using RME method so I am putting it in the base class.
+ general on using RME method so putting it in the base class.
- We basically never get here and when we do we dont trust the results
+ We basically never get here and when we do, we don't trust the results
+ https://docs.scipy.org/doc/numpy-1.9.2/reference/generated/numpy.linalg.svd.html
+ https://www.mathworks.com/help/matlab/ref/double.svd.html
+ Note that the svd outputs are different in matlab and numpy
+ https://stackoverflow.com/questions/50930899/svd-command-in-python-v-s-matlab
+ "The SVD of a matrix can be written as
+ A = U S V^H
+ Where the ^H signifies the conjugate transpose. Matlab's svd command returns U, S and V,
+ while numpy.linalg.svd returns U, the diagonal of S, and V^H.
+ Thus, to get the same S and V as in Matlab you need to reconstruct the S and also get the V:
<ORIGINAL MATLAB>
<COMMENT>
Overdetermined problem...use svd to invert, return
@@ -167,9 +177,11 @@ def solve_underdetermined(self):
"""
U, s, V = np.linalg.svd(self.X, full_matrices=False)
S_inv = np.diag(1.0 / s)
- self.b = (V.T @ S_inv @ U.T) * self.Y
+ self.b = (V.T.conj() @ S_inv @ U.T.conj()) * self.Y
if self.iter_control.return_covariance:
- logger.warning("Warning covariances are not xarray, may break things downstream")
+ logger.warning(
+     "Warning covariances are not xarray, may break things downstream"
+ )
self.cov_nn = np.zeros((self.n_channels_out, self.n_channels_out))
self.cov_ss_inv = np.zeros((self.n_channels_in, self.n_channels_in))
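The .conj() calls added above follow from the MATLAB/numpy SVD difference quoted in the docstring: numpy returns V^H and the diagonal of S, so the Hermitian (conjugate) transpose is needed to recover the MATLAB-style factors and to build the pseudo-inverse. A standalone sketch with made-up data, not repository code:

import numpy as np

A = np.array(
    [
        [1.0 + 2.0j, 3.0 - 1.0j],
        [0.0 + 0.5j, 2.0 + 0.0j],
        [1.0 - 1.0j, 0.25 + 0.5j],
    ]
)

U, s, Vh = np.linalg.svd(A, full_matrices=False)  # numpy: U, singular values, V^H
S = np.diag(s)                                    # MATLAB-style diagonal S
V = Vh.conj().T                                   # MATLAB-style V

assert np.allclose(U @ S @ Vh, A)                 # both conventions rebuild A
assert np.allclose(U @ S @ V.conj().T, A)

# The pseudo-inverse of complex A needs the Hermitian transpose (.T.conj() in the patched line)
A_pinv = V @ np.diag(1.0 / s) @ U.conj().T
assert np.allclose(A_pinv, np.linalg.pinv(A))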

@@ -242,7 +254,7 @@ def qr_decomposition(self, X=None, sanity_check=False):
elif self.qr_input == "Z":
X = self.Z
else:
logger.error("Matrix to perform QR decompostion not specified")
logger.error("Matrix to perform QR decomposition not specified")
raise Exception

Q, R = np.linalg.qr(X)
@@ -252,7 +264,7 @@ def qr_decomposition(self, X=None, sanity_check=False):
if np.isclose(np.matmul(Q, R) - X, 0).all():
pass
else:
logger.error("Failed QR decompostion sanity check")
logger.error("Failed QR decomposition sanity check")
raise Exception
return Q, R
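For context, the sanity check above only verifies that Q @ R reproduces X; a standalone numpy sketch of the same check on synthetic complex data (not repository code):

import numpy as np

rng = np.random.default_rng(42)
X = rng.standard_normal((6, 3)) + 1j * rng.standard_normal((6, 3))

# Reduced QR: Q is 6x3 with orthonormal columns, R is 3x3 upper triangular
Q, R = np.linalg.qr(X)

assert np.isclose(np.matmul(Q, R) - X, 0).all()  # the check used in qr_decomposition
assert np.allclose(Q.conj().T @ Q, np.eye(3))    # orthonormality (Hermitian transpose for complex Q)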

2 changes: 1 addition & 1 deletion docs/conf.py
@@ -101,7 +101,7 @@
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": [
"../tutorials",
"tutorials",
],
"gallery_dirs": [
"examples",
2 changes: 1 addition & 1 deletion docs/index.rst
@@ -15,7 +15,7 @@
:maxdepth: 2
:caption: Examples

- notebooks/operate_aurora.ipynb
+ examples/operate_aurora.ipynb

.. toctree::
:maxdepth: 2
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -3,6 +3,7 @@ flake8
nbsphinx
numpydoc
pre-commit
+ pytest
sphinx_gallery
sphinx_rtd_theme
ipython
6 changes: 3 additions & 3 deletions setup.py
@@ -34,10 +34,10 @@
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
description="Processing Codes for Magnetotelluric Data",
install_requires=requirements,
114 changes: 114 additions & 0 deletions tests/transfer_function/regression/test_base.py
@@ -0,0 +1,114 @@
import unittest

import numpy as np
import pandas as pd
from aurora.transfer_function.regression.base import RegressionEstimator


def make_mini_dataset(n_rows=None):
"""
TODO: Make this a pytest fixture
Parameters
----------
n_rows
Returns
-------
"""
ex_data = np.array(
[
4.39080123e-07 - 2.41097397e-06j,
-2.33418464e-06 + 2.10752581e-06j,
1.38642624e-06 - 1.87333571e-06j,
]
)
hx_data = np.array(
[
7.00767250e-07 - 9.18819198e-07j,
-1.06648904e-07 + 8.19420154e-07j,
-1.02700963e-07 - 3.73904463e-07j,
]
)

hy_data = np.array(
[
1.94321684e-07 + 3.71934877e-07j,
1.15361101e-08 - 6.32581646e-07j,
3.86095787e-08 + 4.33155345e-07j,
]
)
timestamps = pd.date_range(
start=pd.Timestamp("1977-03-02T06:00:00"), periods=len(ex_data), freq="S"
)
frequency = 0.666 * np.ones(len(ex_data))

df = pd.DataFrame(
data={
"time": timestamps,
"frequency": frequency,
"ex": ex_data,
"hx": hx_data,
"hy": hy_data,
}
)
if n_rows:
df = df.iloc[0:n_rows]
df = df.set_index(["time", "frequency"])
xr_ds = df.to_xarray()
return xr_ds


class TestRegressionBase(unittest.TestCase):
""" """

@classmethod
def setUpClass(self):
self.dataset = make_mini_dataset(n_rows=1)
self.expected_solution = np.array(
[-0.04192569 - 0.36502722j, -3.65284496 - 4.05194938j]
)

def setUp(self):
pass

def test_regression(self):
dataset = make_mini_dataset()
X = dataset[["hx", "hy"]]
X = X.stack(observation=("frequency", "time"))
Y = dataset[
[
"ex",
]
]
Y = Y.stack(observation=("frequency", "time"))
re = RegressionEstimator(X=X, Y=Y)
re.estimate_ols()
difference = re.b - np.atleast_2d(self.expected_solution).T
assert np.isclose(difference, 0).all()
re.estimate()
difference = re.b - np.atleast_2d(self.expected_solution).T
assert np.isclose(difference, 0).all()

def test_underdetermined_regression(self):
""" """
dataset = make_mini_dataset(n_rows=1)
X = dataset[["hx", "hy"]]
X = X.stack(observation=("frequency", "time"))
Y = dataset[
[
"ex",
]
]
Y = Y.stack(observation=("frequency", "time"))
re = RegressionEstimator(X=X, Y=Y)
re.solve_underdetermined()
assert re.b is not None


def main():
unittest.main()


if __name__ == "__main__":
main()
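For orientation (not part of the commit): if estimate_ols implements plain ordinary least squares, the expected solution hard-coded in setUpClass can be cross-checked directly with numpy on the same mini dataset, for example:

import numpy as np

ds = make_mini_dataset()
X = np.column_stack([ds.hx.values.ravel(), ds.hy.values.ravel()])  # 3 observations x 2 inputs
y = ds.ex.values.ravel()                                           # 3 observations, 1 output

# b = (X^H X)^{-1} X^H y; np.linalg.lstsq handles the complex case directly
b, *_ = np.linalg.lstsq(X, y, rcond=None)
print(b)  # expected to match TestRegressionBase.expected_solution if estimate_ols is plain OLS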
