diff --git a/docs/README b/docs/README index 732d3375c..5e1130515 100644 --- a/docs/README +++ b/docs/README @@ -14,7 +14,7 @@ HOW TO UPDATE DOCS >> ./build.bash -3. If build fails with "Encountered unknown tag 'do', then try the following: +3. If build fails with "Encountered unknown tag 'do'", try the following: Modify the sphinx autosummary source code by adding 'jinja2.ext.do' to the jinja Environment() extensions list diff --git a/docs/install/issues.rst b/docs/install/issues.rst index 14fc4664e..5ac380216 100644 --- a/docs/install/issues.rst +++ b/docs/install/issues.rst @@ -31,13 +31,13 @@ We note that some versions of GMT and ObsPy do not plot `full moment tensors `_ dependency solver, which was `adopted `_ in the 23.10 release. +For reference, the largest potential speed up comes from the new `mamba `_ dependency solver, which was `adopted `_ in the 23.10 release. diff --git a/docs/library/autogen.rst b/docs/library/autogen.rst index 426fa47c0..bc738ef10 100644 --- a/docs/library/autogen.rst +++ b/docs/library/autogen.rst @@ -44,8 +44,11 @@ autogen mtuq.graphics.plot_time_shifts mtuq.graphics.plot_amplitude_ratios mtuq.graphics.plot_log_amplitude_ratios + mtuq.graphics.plot_cross_corr mtuq.graphics._plot_attrs mtuq.graphics.attrs._default_backend + mtuq.graphics.attrs._pygmt_backend + mtuq.graphics.attrs.PyGMTUtilities mtuq.grid.DeviatoricGridRandom mtuq.grid.DeviatoricGridSemiregular mtuq.grid.DoubleCoupleGridRandom diff --git a/docs/library/index.rst b/docs/library/index.rst index 1f7c7022e..a796d05a2 100644 --- a/docs/library/index.rst +++ b/docs/library/index.rst @@ -93,12 +93,13 @@ Depth and hypocenter visualization ============================================================================================================ ============================================================================================================ -Time shift and amplitude ratio visualization +Station attributes visualization -------------------------------------------- ============================================================================================================ ============================================================================================================ `mtuq.graphics.plot_time_shifts `_ Plots time shifts by location and component `mtuq.graphics.plot_amplitude_ratios `_ Plots amplitude ratios by location and component +`mtuq.graphics.plot_cross_corr `_ Plots normalized cross-correlation by location and component ============================================================================================================ ============================================================================================================ diff --git a/docs/user_guide/03.rst b/docs/user_guide/03.rst index d66655088..0a73c94bd 100644 --- a/docs/user_guide/03.rst +++ b/docs/user_guide/03.rst @@ -2,7 +2,7 @@ Acquiring Green's functions =========================== -The response of a medium to an impulsive source is called a Green's function. This page describes the role of Green's functions in source inversions, the types of Green's functions supported by MTUQ, and how these types can be obtained. +The response of a medium to an impulsive source is called a Green's function. This page describes the role of Green's functions in source inversions, the types of Green's functions supported by MTUQ, and how these different types can each be obtained. 
Role of Green's functions @@ -10,15 +10,21 @@ Role of Green's functions By combining Green's functions, it is possible to obtain synthetics from any moment tensor or force source. Generating synthetics in this way is useful because it provides cost savings compared with on-the-fly wavefield simulations. Synthetics can then be compared with data to determine a best-fitting source. - - `GreensTensor` objects ---------------------- -`GreensTensor `_ objects provide access to a set of Green's functions describing the medium response between a given station and origin. +`GreensTensor `_ objects provide access to a set of Green's functions describing the medium response between a given hypocenter and station. + +Methods built into the `GreensTensor` class allow data processing and synthetics generation. In particular, the `get_synthetics `_ method accepts a `MomentTensor` or `Force` and returns corresponding synthetics. + + -Methods built into the `GreensTensor` class allow data processing and synthetics generation. In particular, the `get_synthetics `_ method accepts a `MomentTensor` or `Force` and returns corresponding synthetics for the given station. +Green's function conventions +---------------------------- + +A major goal of MTUQ is to avoid exposing users to unnecessary complexity. For Green's functions, MTUQ tries to accomplish this by understanding external conventions and converting to a common internal format. Specifically, MTUQ uses an `up-south-east` `convention `_ for internally storing impulse responses corresponding to individual force couples. (Moment tensors and forces are internally represented using the same `up-south-east` basis.) +Under the hood, MTUQ deals with a variety of external conventions related to (1) the symmetry of the medium (for example, 1D media require fewer independent Green's functions than 3D media); (2) the choice of local Cartesian basis convention (for example, some authors employ `up-south-east`, others `north-east-down`). A sense of what's involved can be obtained by browsing the `source code `_ and references therein. @@ -34,7 +40,7 @@ To download AK135 Green's functions and generate MTUQ `GreensTensor` objects: from mtuq import download_greens_functions greens_tensors = download_greens_functions(stations, origins, model="ak135f_2s") -A limitation of syngine is that Green's functions can be downloaded only on a station-by-station basis, not over an entire subset of Earth. An alternative that avoids this limitation is to compute your own Green's functions. +A limitation of syngine is that Green's functions can be downloaded only on a station-by-station basis, not over an entire area or volume. An alternative that avoids this limitation is to compute your own Green's functions. @@ -44,7 +50,9 @@ Computing Green's functions from 1D Earth models MTUQ supports the following 1D Green's functions formats: AxiSEM (preferred) and FK. -`AxiSEM `_ performs spectral element wave simulations in radially-symmetric Earth models. To generate synthetics in a format MTUQ natively supports, follow the instructions for in the `AxiSEM user manual `_ under "Output wavefields in NetCDF formated needed by instaseis." AxiSEM NetCDF files can be used to retrieve vertical, radial, and transverse displacement in units of m*(N-m)^-1. +`AxiSEM `_ performs spectral element wave simulations in radially-symmetric Earth models. AxiSEM NetCDF files can be used to retrieve vertical, radial, and transverse displacement in units of m*(N-m)^-1. 
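Tying together the `GreensTensor` and syngine material above, here is a minimal sketch of the download-then-synthesize workflow using the `get_synthetics` method. It is only an illustration: the `stations` and `origins` variables are assumed to be defined beforehand (as in the examples), and the moment tensor values below are arbitrary placeholders, not values taken from the MTUQ documentation.

.. code::

    import numpy as np
    from mtuq import download_greens_functions
    from mtuq.event import MomentTensor

    # download AK135 Green's functions from syngine for the given stations and origins
    greens_tensors = download_greens_functions(stations, origins, model="ak135f_2s")

    # an illustrative moment tensor, given as [Mrr, Mtt, Mpp, Mrt, Mrp, Mtp]
    # in MTUQ's up-south-east convention (units of N-m)
    mt = MomentTensor(np.array([1.e15, -1.e15, 0., 0., 0., 0.]))

    # generate vertical, radial, and transverse synthetics
    synthetics = greens_tensors.get_synthetics(mt, components=['Z', 'R', 'T'])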
+ +To generate AxiSEM synthetics in a format MTUQ supports, follow the instructions in the `AxiSEM user manual `_ under "Output wavefields in NetCDF format needed by instaseis." To open an AxiSEM database client in MTUQ: @@ -81,60 +89,50 @@ Computing Green's functions from 3D Earth models MTUQ currently supports 3D Green's functions from SPECFEM3D/3D_GLOBE. -To generate a complete Green's function database for a given hypocenter and depth, six SPECFEM3D wavefield simulations are required. Output must be saved as/converted to SAC files at individual stations using the following filename convention, which comes from `GRD_CMT3D `_. Place all SAC files corresponding to a single hypocenter and depth in the same directory as follows: +To generate a full set of Green's functions for a given hypocenter and depth, six SPECFEM3D/3D_GLOBE wavefield simulations are required. Output must be saved as/converted to SAC files at individual stations using the following filename convention, which comes from `GRD_CMT3D `_. Place all SAC files corresponding to a single hypocenter and depth in the same directory as follows: .. code :: - {basedir}/{event_id}/ - {net}.{sta}.{loc}.Z.Mrr.sac - {net}.{sta}.{loc}.Z.Mtt.sac - {net}.{sta}.{loc}.Z.Mpp.sac - {net}.{sta}.{loc}.Z.Mrt.sac - {net}.{sta}.{loc}.Z.Mrp.sac - {net}.{sta}.{loc}.Z.Mtp.sac - {net}.{sta}.{loc}.R.Mrr.sac - {net}.{sta}.{loc}.R.Mtt.sac - {net}.{sta}.{loc}.R.Mpp.sac - {net}.{sta}.{loc}.R.Mrt.sac - {net}.{sta}.{loc}.R.Mrp.sac - {net}.{sta}.{loc}.R.Mtp.sac - {net}.{sta}.{loc}.T.Mrr.sac - {net}.{sta}.{loc}.T.Mtt.sac - {net}.{sta}.{loc}.T.Mpp.sac - {net}.{sta}.{loc}.T.Mrt.sac - {net}.{sta}.{loc}.T.Mrp.sac - {net}.{sta}.{loc}.T.Mtp.sac - - -To open an SPECFEM3D database client in MTUQ: + {event_id}/ + {depth_in_km}/ + {net}.{sta}.{loc}.Z.Mrr.sac + {net}.{sta}.{loc}.Z.Mtt.sac + {net}.{sta}.{loc}.Z.Mpp.sac + {net}.{sta}.{loc}.Z.Mrt.sac + {net}.{sta}.{loc}.Z.Mrp.sac + {net}.{sta}.{loc}.Z.Mtp.sac + {net}.{sta}.{loc}.R.Mrr.sac + {net}.{sta}.{loc}.R.Mtt.sac + {net}.{sta}.{loc}.R.Mpp.sac + {net}.{sta}.{loc}.R.Mrt.sac + {net}.{sta}.{loc}.R.Mrp.sac + {net}.{sta}.{loc}.R.Mtp.sac + {net}.{sta}.{loc}.T.Mrr.sac + {net}.{sta}.{loc}.T.Mtt.sac + {net}.{sta}.{loc}.T.Mpp.sac + {net}.{sta}.{loc}.T.Mrt.sac + {net}.{sta}.{loc}.T.Mrp.sac + {net}.{sta}.{loc}.T.Mtp.sac + + +To open a SPECFEM3D/3D_GLOBE database client: -.. code :: +.. code:: from mtuq import open_db db = open_db(path_SPECFEM3D_output_directory, format="SPECFEM3D") -Once opened, a SPECFEM3D database client can be used to generate `GreensTensor `_ objects as follows: +Once opened, the database client can be used to generate `GreensTensor `_ objects as follows: .. code:: greens_tensors = db.get_greens_tensors(stations, origin) +For more information, see also: -Green's function conventions ----------------------------- - -A variety of Green's function conventions exist. Figuring out which are used in a particular application can be challenging because it depends on - -- the type of medium under consideration (for example, acoustic media require fewer independent Green's functions than elastic media) - -- the symmetry of the medium (for example, 1D media require fewer independent Green's functions than 3D media) - -- the choice of local Cartesian basis conventions (for example, some authors employ `up-south-east`, others `north-east-down`; see `ObsPy documentation `_ for more information) - -A major goal is to avoid exposing users to unnecessary basis complexity. 
MTUQ accomplishes this by understanding external formats and converting to a common internal format that works for both 1D and 3D media. - -For internally storing moment tensors, forces, and Green's functions, MTUQ consistently uses an `up-south-east` Cartesian convention. +`Source-side Green's function details (under construction) `_ +`Receiver-side Green's function details (under construction) `_ diff --git a/docs/user_guide/03/receiver_side.rst b/docs/user_guide/03/receiver_side.rst new file mode 100644 index 000000000..c13fdc2c6 --- /dev/null +++ b/docs/user_guide/03/receiver_side.rst @@ -0,0 +1,14 @@ + +.. warning:: + + This page is still under construction. To help improve the + documentation, feel free to submit a pull request. + + +Receiver-side 3D Green's functions +================================== + +A working example using receiver-side 3D Green's functions from SPECFEM3D in a moment tensor inversion: + +`test_greens_SPECFEM3D_SGT.py `_ + diff --git a/docs/user_guide/03/source_side.rst b/docs/user_guide/03/source_side.rst new file mode 100644 index 000000000..bef15939b --- /dev/null +++ b/docs/user_guide/03/source_side.rst @@ -0,0 +1,199 @@ + +.. warning:: + + This page is still under construction. To help improve the + documentation, feel free to submit a pull request. + + +Source-side 3D Green's functions +================================ + +Generating source-side 3D Green's functions using SPECFEM3D/3D_GLOBE +-------------------------------------------------------------------- + +In principle, any 3D solver can be used to generate source-side Green's functions, as long as the `requirements `_ below are satisfied. + +So far, however, the machinery has only been tested using SPECFEM3D/3D_GLOBE. To convert SPECFEM3D/3D_GLOBE output to MTUQ-compliant Green's functions, the following steps are necessary. + + +Generate SAC binary files +......................... + +SAC binary output format is natively supported by SPECFEM3D_GLOBE (in the parameter file, set `OUTPUT_SEISMOS_SAC_BINARY = .true.`). + +Unfortunately, SAC binary output format is not natively supported by SPECFEM3D, so it is necessary to manually convert SPECFEM3D output. + + +Convert to SI units +................... + +SPECFEM3D/3D_GLOBE use a mixed SI and CGS convention, in which moment tensor elements are input in terms of dynes and centimeters, and seismograms are output in meters. In contrast, MTUQ uses a fully SI :ref:`convention`. As a result, it is necessary to scale SPECFEM3D/3D_GLOBE seismograms by 10^7 prior to using them as MTUQ Green's functions. + + +Other amplitude scaling +....................... + +In addition to converting to SI units, it is also necessary to account for any scaling factors in the SPECFEM3D/3D_GLOBE input files. Such scaling factors can enter, for example, through the `M_rr,M_tt,M_pp,M_rt,M_rp,M_tp` values in the moment tensor input file or through the `scaling factor `_ in the force input file. + + +Rotate traces +............. + +Conveniently, SPECFEM3D_GLOBE can be made to automatically rotate output seismograms into vertical, radial and transverse components (set `ROTATE_SEISMOGRAMS_RT = .true.` in the parameter file). + +No modifications are necessary on account of moment tensor basis convention, since MTUQ's `up-south-east` convention matches SPECFEM3D/3D_GLOBE's. + + + + +Requirements for MTUQ source-side 3D Green's functions +------------------------------------------------------ + +File format +...........
+ +Individual Green's functions must be written to SAC binary files. + +A total of 18 SAC binary files are required to represent the response between a given hypocenter and station (corresponding to 6 moment tensor elements times 3 directions of motion). + + +Units convention +................ + +For a moment tensor inversion, each SAC binary file must give the response in meters to a 1 Newton-meter force couple. + +For a force inversion, each SAC binary file must give the response in meters to a 1 Newton force. + +In both cases, MTUQ uses a fully SI units convention (compare with SPECFEM3D/3D_GLOBE notes, above). + + + +Basis convention +................ + +MTUQ uses an `up-south-east` basis convention in which `r` denotes vertically upward, `t` denotes south, and `p` denotes east. + +Green's functions must be rotated into vertical `Z`, radial `R` and transverse `T` components relative to the source-receiver backazimuth. + +Place all seismograms for the same hypocenter in a single directory as follows: + +.. code :: + + {event_id}/ + {depth_in_km}/ + {net}.{sta}.{loc}.Z.Mrr.sac + {net}.{sta}.{loc}.Z.Mtt.sac + {net}.{sta}.{loc}.Z.Mpp.sac + {net}.{sta}.{loc}.Z.Mrt.sac + {net}.{sta}.{loc}.Z.Mrp.sac + {net}.{sta}.{loc}.Z.Mtp.sac + {net}.{sta}.{loc}.R.Mrr.sac + {net}.{sta}.{loc}.R.Mtt.sac + {net}.{sta}.{loc}.R.Mpp.sac + {net}.{sta}.{loc}.R.Mrt.sac + {net}.{sta}.{loc}.R.Mrp.sac + {net}.{sta}.{loc}.R.Mtp.sac + {net}.{sta}.{loc}.T.Mrr.sac + {net}.{sta}.{loc}.T.Mtt.sac + {net}.{sta}.{loc}.T.Mpp.sac + {net}.{sta}.{loc}.T.Mrt.sac + {net}.{sta}.{loc}.T.Mrp.sac + {net}.{sta}.{loc}.T.Mtp.sac + ... + +The corresponding convention for force responses is: + +.. warning:: + + Not yet tested; subject to change without notice. + + +.. code :: + + {event_id}/ + {depth_in_km}/ + {net}.{sta}.{loc}.Z.Fz.sac + {net}.{sta}.{loc}.Z.Fs.sac + {net}.{sta}.{loc}.Z.Fe.sac + {net}.{sta}.{loc}.R.Fz.sac + {net}.{sta}.{loc}.R.Fs.sac + {net}.{sta}.{loc}.R.Fe.sac + {net}.{sta}.{loc}.T.Fz.sac + {net}.{sta}.{loc}.T.Fs.sac + {net}.{sta}.{loc}.T.Fe.sac + ... + + +Origin time convention +...................... + +For origin time, MTUQ uses a centroid convention (`more details `_), so that `t=0` in the `GreensTensor` time discretization corresponds to mean source excitation time. + +MTUQ uses the begin time (`B`) and end time (`E`) headers from the SAC binary files to align the Green's functions relative to centroid origin time. + +Currently, these are the only SAC headers used in reading Green's functions. + +(Note that different `SAC headers `_ are required in reading `observed data `_.) + + + +Hypocenter searches (experimental) +---------------------------------- + +Currently, only searches over source depth are possible with source-side 3D Green's functions (no other hypocenter parameters). + +The current `depth search `_ implementation is especially crude and experimental (consider local modifications to suit your needs). + +To allow depth searches, create subdirectories for each centroid depth as follows: + +..
code :: + + {event_id}/ + {depth_in_km}/ + {net}.{sta}.{loc}.Z.Mrr.sac + {net}.{sta}.{loc}.Z.Mtt.sac + {net}.{sta}.{loc}.Z.Mpp.sac + {net}.{sta}.{loc}.Z.Mrt.sac + {net}.{sta}.{loc}.Z.Mrp.sac + {net}.{sta}.{loc}.Z.Mtp.sac + {net}.{sta}.{loc}.R.Mrr.sac + {net}.{sta}.{loc}.R.Mtt.sac + {net}.{sta}.{loc}.R.Mpp.sac + {net}.{sta}.{loc}.R.Mrt.sac + {net}.{sta}.{loc}.R.Mrp.sac + {net}.{sta}.{loc}.R.Mtp.sac + {net}.{sta}.{loc}.T.Mrr.sac + {net}.{sta}.{loc}.T.Mtt.sac + {net}.{sta}.{loc}.T.Mpp.sac + {net}.{sta}.{loc}.T.Mrt.sac + {net}.{sta}.{loc}.T.Mrp.sac + {net}.{sta}.{loc}.T.Mtp.sac + {depth_in_km}/ + {net}.{sta}.{loc}.Z.Mrr.sac + {net}.{sta}.{loc}.Z.Mtt.sac + {net}.{sta}.{loc}.Z.Mpp.sac + {net}.{sta}.{loc}.Z.Mrt.sac + {net}.{sta}.{loc}.Z.Mrp.sac + {net}.{sta}.{loc}.Z.Mtp.sac + {net}.{sta}.{loc}.R.Mrr.sac + {net}.{sta}.{loc}.R.Mtt.sac + {net}.{sta}.{loc}.R.Mpp.sac + {net}.{sta}.{loc}.R.Mrt.sac + {net}.{sta}.{loc}.R.Mrp.sac + {net}.{sta}.{loc}.R.Mtp.sac + {net}.{sta}.{loc}.T.Mrr.sac + {net}.{sta}.{loc}.T.Mtt.sac + {net}.{sta}.{loc}.T.Mpp.sac + {net}.{sta}.{loc}.T.Mrt.sac + {net}.{sta}.{loc}.T.Mrp.sac + {net}.{sta}.{loc}.T.Mtp.sac + ... + + +Working example +--------------- + +A working example using source-side 3D Green's functions from SPECFEM3D_GLOBE in a moment tensor inversion: + +`test_greens_SPECFEM3D_SAC.py `_ + diff --git a/docs/user_guide/05/gallery_force.rst b/docs/user_guide/05/gallery_force.rst index deaa33576..1383e0757 100644 --- a/docs/user_guide/05/gallery_force.rst +++ b/docs/user_guide/05/gallery_force.rst @@ -33,6 +33,10 @@ For a grid of regulary-spaced forces, `ds` may look something like: * origin_idx (origin_idx) int64 0 +.. note:: + + Force grids are implemented using parameters `F0, phi, h`, which are related to `r, phi, theta` spherical coordinates (`physics convention `_) by `F0 = r`, `phi = phi`, `h = cos(theta)`. In addition, `F0, phi, h` are related to geographic directions by these `formulas `_. + Misfit values """"""""""""" diff --git a/docs/user_guide/05/gallery_mt.rst b/docs/user_guide/05/gallery_mt.rst index 8b57c8d84..ff9880e58 100644 --- a/docs/user_guide/05/gallery_mt.rst +++ b/docs/user_guide/05/gallery_mt.rst @@ -37,6 +37,12 @@ For a grid of regulary-spaced moment tensors, `ds` may look something like: +.. note:: + + Moment tensor grids are implemented using the `rho, v, w, kappa, sigma, h` parameterization of `Tape2012 `_ and `Tape2015 `_, from which `formulas `_ converting to other parameterizations can be derived. + + + Misfit values """"""""""""" diff --git a/docs/user_guide/06/trace_attributes.rst b/docs/user_guide/06/trace_attributes.rst index 41014330e..5e267d2f9 100644 --- a/docs/user_guide/06/trace_attributes.rst +++ b/docs/user_guide/06/trace_attributes.rst @@ -2,7 +2,7 @@ Trace attributes ================ -At various points during an inversion, waveform differences, phase shifts, and other values are calculated from observed and synthetic seismic traces. Such `trace attribute` quantities provide important information about how data misfit varies by geographic location and seismic component. +Waveform differences, time-shift corrections, and other values are calculated during an inversion on a trace-by-trace basis. Such `trace attributes` provide important information about how data misfit varies by geographic location and seismic component.
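As a quick orientation before the sections that follow, here is a minimal sketch of how trace attributes are typically collected and plotted. The variable names `misfit_sw`, `data_sw`, `greens_sw`, `best_mt`, `stations`, and `origin` are placeholders in the spirit of the example scripts, not fixed MTUQ names.

.. code::

    from mtuq.graphics import plot_time_shifts, plot_amplitude_ratios

    # collect per-trace attributes (time shifts, amplitude ratios, and so on)
    # corresponding to the best-fitting source
    attrs = misfit_sw.collect_attributes(data_sw, greens_sw, best_mt)

    # plot total time shifts and amplitude ratios by location and component
    plot_time_shifts('attrs/time_shifts', attrs, stations, origin, key='total_shift')
    plot_amplitude_ratios('attrs/amplitude_ratios', attrs, stations, origin)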
Collecting trace attributes diff --git a/examples/GridSearch.Force.py b/examples/GridSearch.Force.py index 15cc1f649..8decd7906 100755 --- a/examples/GridSearch.Force.py +++ b/examples/GridSearch.Force.py @@ -18,19 +18,7 @@ if __name__=='__main__': # - # Carries out grid search over 64,000 double couple moment tensors - # - # USAGE - # mpirun -n python GridSearch.DoubleCouple.py - # - # For a simpler example, see SerialGridSearch.DoubleCouple.py, - # which runs the same inversion in serial - # - - - # - # We will investigate the source process of an Mw~4 earthquake using data - # from a regional seismic array + # Carries out grid search over force orientation and magnitude # path_data= fullpath('data/examples/20210809074550/*[ZRT].sac') @@ -40,7 +28,7 @@ # - # We are only using surface waves in this example. Check out the DC or FMT examples for multi-mode inversions. + # We are only using surface waves in this example. Check out the DC or FMT examples for multi-mode inversions # process_sw = ProcessData( @@ -77,7 +65,7 @@ # - # Next, we specify the moment tensor grid and source-time function + # Next, we specify the force grid and source-time function # grid = ForceGridRegular(magnitudes_in_N=10**np.arange(9,12.1,0.1), npts_per_axis=90) @@ -189,17 +177,17 @@ print('Generating figures...\n') - plot_data_greens1(event_id+'FMT_waveforms1.png', + plot_data_greens1(event_id+'_force_waveforms.png', data_sw, greens_sw, process_sw, misfit_sw, stations, origin, best_force, direction_dict) - plot_misfit_force(event_id+'DC_misfit_force.png', results) + plot_misfit_force(event_id+'_force_misfit.png', results) print('Saving results...\n') # save best-fitting source - save_json(event_id+'Force_solution.json', merged_dict) + save_json(event_id+'_force_solution.json', merged_dict) # save misfit surface - results.save(event_id+'DC_misfit.nc') + results.save(event_id+'_force_misfit.nc') - print('\nFinished\n') \ No newline at end of file + print('\nFinished\n') diff --git a/mtuq/graphics/__init__.py b/mtuq/graphics/__init__.py index badf8d0b8..35ae6ee0a 100644 --- a/mtuq/graphics/__init__.py +++ b/mtuq/graphics/__init__.py @@ -5,7 +5,7 @@ from mtuq.graphics.attrs import\ plot_time_shifts, plot_amplitude_ratios, plot_log_amplitude_ratios,\ - _plot_attrs + _plot_attrs, plot_cross_corr, _pygmt_backend from mtuq.graphics.beachball import\ plot_beachball, plot_polarities diff --git a/mtuq/graphics/attrs.py b/mtuq/graphics/attrs.py index 45a0d9c86..3d649cd0d 100644 --- a/mtuq/graphics/attrs.py +++ b/mtuq/graphics/attrs.py @@ -7,6 +7,8 @@ from os.path import join from mtuq.util import defaults, warn +from mtuq.graphics._pygmt import exists_pygmt +from mtuq.event import MomentTensor def plot_time_shifts(dirname, attrs, stations, origin, key='total_shift', @@ -20,17 +22,14 @@ def plot_time_shifts(dirname, attrs, stations, origin, key='total_shift', .. 
note :: - MTUQ distinguishes between the following different types of - time shifts - - - `static_shift` is an initial user-supplied time shift applied during - data processing + MTUQ distinguishes between the following different types of time shifts: + + - `static_shift` is an initial user-supplied time shift applied during data processing - - `time_shift` is a subsequent cross-correlation time shift applied - during misfit evaluation + - `time_shift` is a subsequent cross-correlation time shift applied during misfit evaluation - - `total_shift` is the total correction, or in other words the sum of - static and cross-correlation time shifts + - `total_shift` is the total correction, or in other words the sum of static and cross-correlation time shifts + .. rubric :: Required input arguments @@ -60,6 +59,54 @@ def plot_time_shifts(dirname, attrs, stations, origin, key='total_shift', _plot_attrs(dirname, stations, origin, attrs, key, **kwargs) +def plot_cross_corr(dirname, attrs, stations, origin, key='normalized_cc_max', + **kwargs): + + """ Plots how cross-correlation values vary by location and component + + By default, maximum normalized cross-correlation values are plotted. To plot just + maximum cross-correlation values, use ``key='cc_max'`` + + .. note :: + + MTUQ distinguishes between the following different types of + cross-correlation values + + - `cc_max` is the maximum cross-correlation value + + - `normalized_cc_max` is the maximum cross-correlation value normalized between 0 and 1 + + .. rubric :: Required input arguments + + ``dirname`` (`str`): + Directory in which figures will be written + + ``attrs`` (`list` of `AttribDict`): + List returned by misfit function's `collect_attributes` method + + ``stations`` (`list` of `mtuq.Station` objects): + Used to plot station locations + + ``origin`` (`mtuq.Origin` object): + Used to plot origin location + + + .. rubric :: Optional input arguments + + For optional argument descriptions, + `see here `_ + + """ + defaults(kwargs, { + 'label': 'Maximum normalized CC', + 'zero_centered': False, + 'colormap': 'inferno', + 'min_val': 0.0, + 'max_val': 1.0, + }) + + _plot_attrs(dirname, stations, origin, attrs, key, **kwargs) + def plot_amplitude_ratios(dirname, attrs, stations, origin, **kwargs): """ Plots how Aobs/Asyn varies by location and component @@ -87,7 +134,7 @@ def plot_amplitude_ratios(dirname, attrs, stations, origin, **kwargs): """ defaults(kwargs, { - 'colormap': 'Reds', + 'colormap': 'inferno', 'label': '$A_{obs}/A_{syn}$', 'zero_centered': False, }) @@ -127,8 +174,8 @@ def plot_log_amplitude_ratios(dirname, attrs, stations, origin, **kwargs): def _plot_attrs(dirname, stations, origin, attrs, key, - components=['Z', 'R', 'T'], format='png', backend=None, - **kwargs): + components=['Z', 'R', 'T'], format='png', backend=None, + **kwargs): """ Reads the attribute given by `key` from the `attrs` data structure, and plots how this attribute varies @@ -157,32 +204,44 @@ def _plot_attrs(dirname, stations, origin, attrs, key, for details. Otherwise, defaults to a generic matplotlib `backend `_. + + .. rubric :: Standard pygmt backend + + The standard `pygmt backend `_ is used to + plot station attributes over a hillshaded map. Default calls to front_end functions + can be supplemented with optional keyword arguments to customize the appearance of the plot. + .. 
code :: + + from mtuq.graphics.attrs import _pygmt_backend + plot_time_shifts('./SW/tshift', attributes_sw, stations, origin, + moment_tensor=best_mt, process=process_sw, backend=_pygmt_backend) """ if backend is None: backend = _default_backend + elif backend == _pygmt_backend and not exists_pygmt(): + warn('PyGMT backend requested but PyGMT not found'); backend = _default_backend if not callable(backend): raise TypeError - os.makedirs(dirname, exist_ok=True) for component in components: values = [] - station_list = [] + active_stations_list = [] for _i, station in enumerate(stations): if component not in attrs[_i]: continue values += [attrs[_i][component][key]] - station_list += [stations[_i]] + active_stations_list += [stations[_i]] if len(values) > 0: filename = join(dirname, component+'.'+format) - backend(filename, values, station_list, origin, **kwargs) + backend(filename, values, active_stations_list, origin, stations_list = stations, **kwargs) # @@ -191,9 +250,10 @@ def _plot_attrs(dirname, stations, origin, attrs, key, def _default_backend(filename, values, stations, origin, colormap='coolwarm', zero_centered=True, colorbar=True, - label='', width=5., height=5.): + label='', width=5., height=5., **kwargs): - """ Default backend for all other `mtuq.graphics.attrs` functions + """ + Default backend for all frontend `mtuq.graphics.attrs` functions The frontend functions perform only data manipulation. All graphics library calls occur in the backend @@ -201,7 +261,7 @@ def _default_backend(filename, values, stations, origin, By isolating the graphics function calls in this way, users can completely interchange graphics libraries (matplotlib, GMT, PyGMT, and so on) - .. rubric:: Keyword arguments + .. rubric :: Keyword arguments ``colormap`` (`str`): Matplotlib color palette @@ -215,7 +275,6 @@ def _default_backend(filename, values, stations, origin, ``label`` (`str`): Optional colorbar label - """ fig = pyplot.figure(figsize=(width, height)) @@ -230,7 +289,7 @@ def _default_backend(filename, values, stations, origin, else: min_val = np.min(values) max_val = np.max(values) - + # plot stations im = pyplot.scatter( [station.longitude for station in stations], @@ -284,3 +343,489 @@ def _default_backend(filename, values, stations, origin, pyplot.close() +def _pygmt_backend(filename, values, active_stations, origin, + colormap='polar', zero_centered=True, display_topo=True, + label='', width=5, moment_tensor=None, process=None, + stations_list=None, station_labels=True, min_val=None, max_val=None, **kwargs): + """ + PyGMT backend for plotting station attributes over a hillshaded map. + + .. note :: + + This function requires the PyGMT library to be installed. + If called while pygmt is not installed, the default matplotlib backend will be used. + + The function accepts a number of optional keyword arguments to customize + the appearance of the plot. If passed to another backend, these arguments + will be ignored. + + .. rubric :: Required input arguments + + ``filename`` (`str`): + The name of the file to which the figure will be saved. This should be passed by the frontend function. + Expected filenames are in the format `component.png`, where `component` is the component of the data being plotted. + This might be revised in the future to allow for more flexible naming conventions. + + ``values`` (`list` of `float`): + List of values to be plotted for each station. The length of the list should match the number of stations. 
+ + ``active_stations`` (`list` of `mtuq.Station` objects): + List of stations to be plotted, with a value entry for each station in the `values` list. + + ``origin`` (`mtuq.Origin` object): + Origin object used to plot the origin location. + + .. rubric :: Keyword arguments + + ``colormap`` (`str`): + GMT colormap name - see `GMT documentation ` + + ``zero_centered`` (`bool`): + Whether or not the colormap is centered on zero + + ``display_topo`` (`bool`): + Whether or not to display topography in the background -- will download the ETOPO1 topography file if not found + + ``label`` (`str`): + Optional colorbar text label -- supports LaTeX expressions + + ``width`` (`float`): + Width of the figure in inches -- default is 5 inches + + ``moment_tensor`` (`mtuq.event.MomentTensor` or `np.ndarray`): + Moment tensor to plot as a beachball -- will plot a star at the origin if not provided + Can be either a `mtuq.event.MomentTensor` object or a 1D numpy array with the six independent components + [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp] + + ``process`` (`mtuq.ProcessData`): + ProcessData object used to determine the type of waves used for the window + + ``stations_list`` (`list` of `mtuq.Station` objects): + List of all stations available before data processing -- will plot only active stations if not provided + + ``station_labels`` (`bool`): + Whether or not to plot station labels -- default is True + + ``min_val`` (`float`): + Minimum value for the colorbar -- will be determined automatically if not provided + + ``max_val`` (`float`): + Maximum value for the colorbar -- will be determined automatically if not provided + + .. rubric :: Backend specific utility class + + The PyGMTUtilities class is a utility class designed to enhance and simplify the usage of PyGMT for plotting. + It includes methods for calculating plotting regions with buffers, configuring colormaps, preparing LaTeX + annotations for PyGMT, and generating standardized headers for plots. Documentation for the PyGMTUtilities + class can be found in the `PyGMTUtilities `_ module. 
+ + + + """ + import pygmt + + if not stations_list: + stations_list = active_stations + print('Complete station list not passed to pygmt plotting backend \nWill plot only active stations') + # Collection of longitudes and latitudes from all available stations + longitudes = [s.longitude for s in stations_list + [origin]] + latitudes = [s.latitude for s in stations_list + [origin]] + + # Calculate the region to display with a buffer around the stations + region, lat_buffer = PyGMTUtilities.calculate_plotting_region(stations_list, origin, buffer_percentage=0.1) + + # Setting up the figure + fig = pygmt.Figure() + + # Dynamically determine the grid resolution for topography based on the range of longitudes and latitudes + # (etopo topography file will be downloaded if not found) + resolution = PyGMTUtilities.get_resolution(max(longitudes) - min(longitudes), max(latitudes) - min(latitudes)) + grid = pygmt.datasets.load_earth_relief(region=region, resolution=resolution) + + # Calculate the gradient (hillshade) grid with azimuth 0/300 and normalization t1 + # + shade = pygmt.grdgradient(grid=grid, azimuth="0/300", normalize="t1") + + # Define a grayscale colormap for topography + normal = pygmt.grdhisteq.equalize_grid(grid=grid, gaussian=True) + gray_cmap = pygmt.makecpt(cmap='gray', series=[np.min(normal.values), np.max((normal.values))]) + + # Plot the hillshade grid as an image + if display_topo: + fig.grdimage(grid=normal, shading=shade, projection=f'J{width}i', frame='a', cmap=gray_cmap, no_clip=True) + + # Overlay coastlines + PyGMTUtilities.draw_coastlines(fig) + + # Configure the colormap for station values + colormap, cmap_reverse_flag = PyGMTUtilities.configure_colormap(colormap) + if zero_centered: + pygmt.makecpt(cmap=colormap, series=[-np.max(np.abs(values))*1.01, np.max(np.abs(values))*1.01], reverse=cmap_reverse_flag) + elif min_val is not None and max_val is not None: + pygmt.makecpt(cmap=colormap, series=[min_val, max_val], continuous=True, reverse=cmap_reverse_flag) + else: + pygmt.makecpt(cmap=colormap, series=[np.min(values), np.max(values)], continuous=True, reverse=cmap_reverse_flag) + + + # Plotting lines from origin to stations + for station in stations_list: + if station in active_stations: + # Plot line for active station as colored line + value = values[active_stations.index(station)] if station in active_stations else 0 + fig.plot( + x=[origin.longitude, station.longitude], + y=[origin.latitude, station.latitude], + cmap=True, + zvalue=value, + pen="thick,+z,-" + ) + + # Plotting stations as triangles + fig.plot( + x=[station.longitude for station in active_stations], + y=[station.latitude for station in active_stations], + style='i0.8c', # Triangle + color=values, + cmap=True, + pen="0.5p,black" + ) + + # Plotting non-active stations as hollow triangles + non_active_stations = [station for station in stations_list if station not in active_stations] + if len(non_active_stations) > 0: + fig.plot( + x=[station.longitude for station in non_active_stations], + y=[station.latitude for station in non_active_stations], + style='i0.8c', # Triangle + color=None, # Hollow (white) triangle + pen="0.5p,black" # Outline color + ) + fig.plot( + x=[station.longitude for station in non_active_stations], + y=[station.latitude for station in non_active_stations], + style='i0.6c', # Triangle + color=None, # Hollow (white) triangle + pen="0.5p,white" # Outline color + ) + + # Plotting the origin as a star + fig.plot( + x=[origin.longitude], + y=[origin.latitude], + style='a0.6c', # 
Star, size 0.5 cm + color='yellow', + pen="0.5p,black" + ) + + if moment_tensor is not None: + # Normalize the moment tensor components to the desired exponent + + if type(moment_tensor) is MomentTensor: + moment_tensor = moment_tensor.as_vector() + + moment_tensor = np.array(moment_tensor)/np.linalg.norm(moment_tensor) + + moment_tensor_spec = { + 'mrr': moment_tensor[0], + 'mtt': moment_tensor[1], + 'mff': moment_tensor[2], + 'mrt': moment_tensor[3], + 'mrf': moment_tensor[4], + 'mtf': moment_tensor[5], + 'exponent': 21 # Merely for size control, as the MT is normalized prior to plotting + } + + # Plot the moment tensor as a beachball + fig.meca( + spec=moment_tensor_spec, + scale="1c", # Sets a fixed size for the beachball plot + longitude=origin.longitude, + latitude=origin.latitude, + depth=10, # Depth is required, even if not used, set to a small number + convention="mt", # Use GMT's mt convention + compressionfill="gray15", + extensionfill="white", + pen="0.5p,black" + ) + + if station_labels is True: + # Plotting station labels + for station in stations_list: + fig.text( + x=station.longitude, + y=station.latitude, + text=station.station, + font="5p,Helvetica-Bold,black", + justify="LM", + offset="-0.45c/0.125c", + fill='white' + ) + + fig.colorbar(frame=f'+l"{PyGMTUtilities.prepare_latex_annotations(label)}"', position="JMR+o1.5c/0c+w7c/0.5c") + + fig.basemap(region=region, projection=f'J{width}i', frame=True) + + # Now starts the header text above the plot -- It is not a title and can be modified. + # Add an integer increment to the text_line_val bellow to add a new line above. + text_line_val = 1 + header_lines = PyGMTUtilities.get_header(label, origin, filename, process) + + # Add the header text to the plot + # Text spacing is based on longitude range and latitude buffer size. + lon_mean = np.max(longitudes) - (np.max(longitudes) - np.min(longitudes)) / 2 + text_spacing = lat_buffer / 1.5 + for header_line in header_lines: + fig.text(x=lon_mean, + y=max(latitudes) + lat_buffer + text_line_val*text_spacing, + text=header_line, font="14p,Helvetica-Bold,black", justify="MC", no_clip=True) + text_line_val += 1 + + # Saving the figure + fig.savefig(filename, crop=True, dpi=300) + +class PyGMTUtilities: + """ + Utility class for PyGMT plotting backend. + + This class offers a set of static methods designed for enhancing and simplifying the usage of PyGMT + for plotting by handling plotting regions, color maps, LaTeX annotations, and plot headers. + + .. note :: + The class is designed to be used without instantiation due to its static methods. This approach + helps in organizing code related to the PyGMT plotting backend and avoids confusion with other plotting backends. + + Methods include calculating plotting regions with buffers, configuring colormaps, preparing LaTeX + annotations for PyGMT, and generating standardized headers for plots. + + Examples and more detailed method descriptions can be found in the documentation of each method. + """ + + @staticmethod + def calculate_plotting_region(stations, origin, buffer_percentage=0.1): + """ + Calculates the region for plotting, including a buffer area around specified stations and origin. + + .. rubric :: Parameters + + ``stations`` (`list` of `mtuq.Station` objects): + The stations to be included in the plot. + + ``origin`` (`mtuq.Origin` object): + The origin object is used to calculate the region for the plot in case the origin is outside the range of the stations. 
+ + ``buffer_percentage`` (`float`, optional): + The percentage of the total longitude and latitude range to be added as a buffer around the specified region. + Defaults to 0.1 (10%). + + .. rubric :: Returns + + ``region`` (`list` of `float`), ``lat_buffer`` (`float`): + A tuple containing the calculated region as a list `[west, east, south, north]` and the latitude buffer value. + The latitude buffer is returned to later be used for adjusting text spacing in the plot header. + + .. rubric :: Example + + >>> region, lat_buffer = PyGMTUtilities.calculate_plotting_region(stations, origin) + >>> print(region) + [149.55, 151.45, -35.1, -32.9] + >>> print(lat_buffer) + 0.22 + """ + + longitudes = [station.longitude for station in stations] + [origin.longitude] + latitudes = [station.latitude for station in stations] + [origin.latitude] + + lon_buffer = (max(longitudes) - min(longitudes)) * buffer_percentage + lat_buffer = (max(latitudes) - min(latitudes)) * buffer_percentage + + region = [min(longitudes) - lon_buffer, max(longitudes) + lon_buffer, + min(latitudes) - lat_buffer, max(latitudes) + lat_buffer] + return region, lat_buffer + + + @staticmethod + def get_resolution(lon_range, lat_range): + """ + Determines the appropriate PyGMT etopo grid resolution based on longitude and latitude ranges. + + .. rubric :: Parameters + + ``lon_range`` (`float`): + The longitudinal range of the area of interest. + + ``lat_range`` (`float`): + The latitudinal range of the area of interest. + + .. rubric :: Returns + + ``resolution`` (`str`): + The resolution string for PyGMT, e.g., '01m', '15s', ..., based on the size of the specified area. + + .. note :: + The resolution is determined based on predefined thresholds for the ranges, aiming to balance + detail and performance for different scales of geographic areas. + + - If lon_range > 10 or lat_range > 10, the resolution is '01m'. + + - If lon_range > 5 or lat_range > 5, the resolution is '15s'. + + - If lon_range > 2 or lat_range > 2, the resolution is '03s'. + + - If lon_range > 1 or lat_range > 1, the resolution is '01s'. + + Otherwise, the resolution is '05m'. + + """ + + if lon_range > 10 or lat_range > 10: + return '01m' + elif lon_range > 5 or lat_range > 5: + return '15s' + elif lon_range > 2 or lat_range > 2: + return '03s' + elif lon_range > 1 or lat_range > 1: + return '01s' + else: + return '05m' + + @staticmethod + def configure_colormap(colormap): + """ + Adjusts the given colormap name for compatibility with PyGMT and matplotlib conventions. + + .. rubric :: Parameters + + ``colormap`` (`str`): + The name of the colormap to be used. If the colormap name ends with '_r', the colormap is + reversed, and the '_r' suffix is removed. + + .. rubric :: Returns + + ``colormap`` (`str`), ``cmap_reverse_flag`` (`bool`): + A tuple containing the adjusted colormap name and a boolean indicating whether the colormap should + be reversed. + + .. note :: + + The method accepts only colormaps that are available in PyGMT. For a list of available colormaps, refer to the GMT documentation. + + .. rubric :: Example + + >>> colormap, reverse = PyGMTUtilities.configure_colormap('viridis_r') + >>> print(colormap) + viridis + >>> print(reverse) + True + """ + cmap_reverse_flag = True if colormap.endswith('_r') else False + colormap = colormap[:-2] if cmap_reverse_flag else colormap + return colormap, cmap_reverse_flag + + @staticmethod + def prepare_latex_annotations(label): + """ + Prepares LaTeX annotations for plotting. Uses HTML tags instead + of $•$ for compatibility with PyGMT/GMT. + + ..
rubric :: Parameters + + ``label`` (`str`): + The LaTeX label to be prepared. + + .. rubric :: Returns + + ``str``: + The prepared label. + + """ + + parts = label.split('$') + if len(parts) == 1: # No '$' found + return label + new_text = '' + for i, part in enumerate(parts): + if i % 2 == 0: + new_text += part + else: + new_text += f"{part}" + return new_text + + @staticmethod + def get_header(label, origin, filename, process = None): + """ + Generates a header for a plot based on the provided parameters. + + .. rubric :: Parameters + + ``label`` (`str`): + The label for the plot. Usually defined in the frontend function. + + ``origin`` (mtuq.Origin): + mtuq.event.Origin object, used to retrieve the event time and depth. + + ``filename`` (str): + The filename of the plot. Defined by default the high-level function. Used to retrieve the component. + + ``process`` (Process, optional): + mtuq.process_data.ProcessData object for appropriate dataset. + + .. rubric :: Returns + + ``list``: + A list containing two lines of the header. [Label - (component)], [Event Time: (time) UTC, Depth: (depth) km] + """ + if process is not None: + # get type of waves used for the window + window_type = process.window_type + if window_type == 'surface_wave' or window_type == 'group_velocity': + window_type = 'Surface wave' + elif window_type == 'body_wave': + window_type = 'Body wave' + + component = filename.split('/')[-1].split('.')[0] + origin_time = str(origin.time)[0:19] + origin_depth = origin.depth_in_m/1000 + + label = PyGMTUtilities.prepare_latex_annotations(label) + + # if window_type exists, define Rayleigh or Love wave + if process is not None: + if window_type == 'Surface wave' and component == 'Z' or window_type == 'Surface wave' and component == 'R': + # First line of the header defined as: label - Rayleigh wave (component) + header_line_1 = f"{label} - Rayleigh wave ({component})" + elif window_type == 'Surface wave' and component == 'T': + # First line of the header defined as: label - Love wave (component) + header_line_1 = f"{label} - Love wave ({component})" + elif window_type == 'Body wave': + # First line of the header defined as: label - (component) + header_line_1 = f"{label} - Body wave ({component})" + else: + # First line of the header defined as: label - (component) + header_line_1 = f"{label} - ({component})" + + header_line_2 = f"Event Time: {origin_time} UTC, Depth: {origin_depth:.1f} km" + + return [header_line_1, header_line_2] + + @staticmethod + def draw_coastlines(fig, area_thresh=100, water_color='paleturquoise', water_transparency=55): + """ + Draws coastlines and fills water areas with a transparent blue shade. + + .. rubric :: Parameters + + ``fig`` (pygmt.Figure): + The PyGMT figure object to which the coastlines and water areas will be added. + + ``area_thresh`` (`int`, optional): + The minimum area of land to be displayed. Defaults to 100. + + ``water_color`` (`str`, optional): + The color of the water areas. Defaults to 'paleturquoise'. + + ``water_transparency`` (`int`, optional): + The transparency of the water areas. Defaults to 55. 
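A minimal usage sketch follows; the map region and output filename are illustrative placeholders, not defaults of this method.

.. rubric :: Example

>>> import pygmt
>>> fig = pygmt.Figure()
>>> fig.basemap(region=[149, 152, -35, -33], projection="M12c", frame=True)
>>> PyGMTUtilities.draw_coastlines(fig)
>>> fig.savefig('coastlines.png')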
+ + """ + fig.coast(shorelines=True, area_thresh=area_thresh) + fig.coast(shorelines=False, water=water_color, transparency=water_transparency, area_thresh=area_thresh) \ No newline at end of file diff --git a/mtuq/graphics/header.py b/mtuq/graphics/header.py index 616b36544..a0fd3d319 100644 --- a/mtuq/graphics/header.py +++ b/mtuq/graphics/header.py @@ -116,6 +116,7 @@ def parse_data_processing(self): if not self.process_sw: raise Exception() + if self.process_sw.freq_max > 1.: units = 'Hz' else: diff --git a/mtuq/graphics/uq/_gmt.py b/mtuq/graphics/uq/_gmt.py index d07cba7ee..73b250c17 100644 --- a/mtuq/graphics/uq/_gmt.py +++ b/mtuq/graphics/uq/_gmt.py @@ -69,7 +69,8 @@ def _plot_latlon_gmt(filename, lon, lat, values, best_latlon=None, lune_array=No def _call(shell_script, filename, data, supplemental_data=None, title='', colormap='viridis', flip_cpt=False, colorbar_type=1, - colorbar_label='', colorbar_limits=None, marker_coords=None, marker_type=0): + colorbar_label='', cpt_limits=None, cpt_steps=20, + cpt_clip=[0., 1.], marker_coords=None, marker_type=0): # # Common wrapper for all GMT plotting functions involving 2D surfaces # @@ -88,15 +89,28 @@ def _call(shell_script, filename, data, supplemental_data=None, # parse colorbar limits try: - minval, maxval = colorbar_limits + minval, maxval = cpt_limits exp = _exponent((minval, maxval)) minval /= 10.**exp maxval /= 10.**exp except: minval, maxval, exp = _parse_limits(data[:,-1]) + # optionally, clip colorbar limits to bring out more detail + try: + assert 0. <= cpt_clip[0] < cpt_clip[1] + minval = minval + cpt_clip[0]*(maxval - minval) + except: + pass + try: + assert cpt_clip[0] < cpt_clip[1] <= 1. + maxval = minval + cpt_clip[1]*(maxval - minval) + except: + pass + + data[:,-1] /= 10.**exp - cpt_step=(maxval-minval)/20. + cpt_step=(maxval-minval)/cpt_steps # write values to be plotted as ASCII table ascii_file_1 = _safename('tmp_'+filename+'_ascii1.txt') diff --git a/mtuq/greens_tensor/CPS.py b/mtuq/greens_tensor/CPS.py new file mode 100644 index 000000000..ba229c6e0 --- /dev/null +++ b/mtuq/greens_tensor/CPS.py @@ -0,0 +1,109 @@ + +import obspy +import numpy as np + +from mtuq.greens_tensor.base import GreensTensor as GreensTensorBase + + + +print('WARNING: CPS Greens functions are not fully tested yet') + + + +class GreensTensor(GreensTensorBase): + """ + CPS Green's tensor object + + Overloads base class with machinery for working with CPS-style + Green's functions + + """ + def __init__(self, *args, **kwargs): + super(GreensTensor, self).__init__(*args, **kwargs) + + if 'type:greens' not in self.tags: + self.tags += ['type:greens'] + + if 'type:velocity' not in self.tags: + self.tags += ['type:velocity'] + + if 'units:m' not in self.tags: + self.tags += ['units:m'] + + def _precompute(self): + """ Computes NumPy arrays used by get_synthetics + """ + if self.include_mt: + self._precompute_mt() + + if self.include_force: + self._precompute_force() + + + def _precompute_mt(self): + """ Recombines CPS time series so they can be used in straightforward + linear combination with Mrr,Mtt,Mpp,Mrt,Mrp,Mtp + """ + + array = self._array + phi = np.deg2rad(self.azimuth) + _j = 0 + + # The formulas below were obtained by reverse engineering FK + + for _i, component in enumerate(self.components): + if component=='Z': + ZSS = self.select(channel="ZSS")[0].data + ZDS = self.select(channel="ZDS")[0].data + ZDD = self.select(channel="ZDD")[0].data + ZEP = self.select(channel="ZEP")[0].data + array[_i, _j+0, :] = ZSS/2. * np.cos(2*phi) - ZDD/6. 
+ ZEP/3. + array[_i, _j+1, :] = -ZSS/2. * np.cos(2*phi) - ZDD/6. + ZEP/3. + array[_i, _j+2, :] = ZDD/3. + ZEP/3. + array[_i, _j+3, :] = ZSS * np.sin(2*phi) + array[_i, _j+4, :] = ZDS * np.cos(phi) + array[_i, _j+5, :] = ZDS * np.sin(phi) + + elif component=='R': + RSS = self.select(channel="RSS")[0].data + RDS = self.select(channel="RDS")[0].data + RDD = self.select(channel="RDD")[0].data + REP = self.select(channel="REP")[0].data + array[_i, _j+0, :] = RSS/2. * np.cos(2*phi) - RDD/6. + REP/3. + array[_i, _j+1, :] = -RSS/2. * np.cos(2*phi) - RDD/6. + REP/3. + array[_i, _j+2, :] = RDD/3. + REP/3. + array[_i, _j+3, :] = RSS * np.sin(2*phi) + array[_i, _j+4, :] = RDS * np.cos(phi) + array[_i, _j+5, :] = RDS * np.sin(phi) + + elif component=='T': + TSS = self.select(channel="TSS")[0].data + TDS = self.select(channel="TDS")[0].data + array[_i, _j+0, :] = TSS/2. * np.sin(2*phi) + array[_i, _j+1, :] = -TSS/2. * np.sin(2*phi) + array[_i, _j+2, :] = 0. + array[_i, _j+3, :] = -TSS * np.cos(2*phi) + array[_i, _j+4, :] = TDS * np.sin(phi) + array[_i, _j+5, :] = -TDS * np.cos(phi) + + else: + raise ValueError + + # + # CPS uses a north-east-down basis convention, while mtuq uses an + # up-south-east basis convention, so a permutation is necessary + # + array_copy = array.copy() + array[:, 0, :] = array_copy[:, 2, :] + array[:, 1, :] = array_copy[:, 0, :] + array[:, 2, :] = array_copy[:, 1, :] + array[:, 3, :] = array_copy[:, 4, :] + array[:, 4, :] = -array_copy[:, 5, :] + array[:, 5, :] = -array_copy[:, 3, :] + + + def _precompute_force(self): + raise NotImplementedError() + + + diff --git a/mtuq/greens_tensor/SPECFEM3D.py b/mtuq/greens_tensor/SPECFEM3D.py index 27be1d81d..acc474c37 100644 --- a/mtuq/greens_tensor/SPECFEM3D.py +++ b/mtuq/greens_tensor/SPECFEM3D.py @@ -85,18 +85,18 @@ def _precompute_force(self): for _i, component in enumerate(self.components): if component=='Z': - array[_i, _j+0, :] = self.select(channel="Z.Fe")[0].data - array[_i, _j+1, :] = self.select(channel="Z.Fn")[0].data - array[_i, _j+2, :] = self.select(channel="Z.Fz")[0].data + array[_i, _j+0, :] = self.select(channel="Z.Fz")[0].data + array[_i, _j+1, :] = self.select(channel="Z.Fs")[0].data + array[_i, _j+2, :] = self.select(channel="Z.Fe")[0].data elif component=='R': - array[_i, _j+0, :] = self.select(channel="R.Fe")[0].data - array[_i, _j+1, :] = self.select(channel="R.Fn")[0].data - array[_i, _j+2, :] = self.select(channel="R.Fz")[0].data + array[_i, _j+0, :] = self.select(channel="R.Fz")[0].data + array[_i, _j+1, :] = self.select(channel="R.Fs")[0].data + array[_i, _j+2, :] = self.select(channel="R.Fe")[0].data elif component=='T': - array[_i, _j+0, :] = self.select(channel="T.Fe")[0].data - array[_i, _j+1, :] = self.select(channel="T.Fn")[0].data - array[_i, _j+2, :] = self.select(channel="T.Fz")[0].data + array[_i, _j+0, :] = self.select(channel="T.Fz")[0].data + array[_i, _j+1, :] = self.select(channel="T.Fs")[0].data + array[_i, _j+2, :] = self.select(channel="T.Fe")[0].data diff --git a/mtuq/greens_tensor/base.py b/mtuq/greens_tensor/base.py index 34c5cfea1..baace937d 100644 --- a/mtuq/greens_tensor/base.py +++ b/mtuq/greens_tensor/base.py @@ -6,6 +6,7 @@ from mtuq.event import Origin from mtuq.station import Station from mtuq.dataset import Dataset +from mtuq.util import Null from mtuq.util.signal import check_time_sampling from obspy.core import Stream, Trace from obspy.geodetics import gps2dist_azimuth @@ -176,6 +177,9 @@ def get_synthetics(self, source, components=None, stats=None, inplace=False): List 
containing zero or more of the following components: ``Z``, ``R``, ``T``. (Defaults to ``['Z', 'R', 'T']``.) + ``stats`` (`obspy.Trace.Stats` object): + ObsPy Stats object that will be attached to the synthetics + """ if components is None: @@ -300,9 +304,17 @@ def get_synthetics(self, source, components=None, stats=None, mode='apply', **kw ``components`` (`list`): List containing zero or more of the following components: ``Z``, ``R``, ``T``. (Defaults to ``['Z', 'R', 'T']``.) + + ``stats`` (`obspy.Trace.Stats` object): + ObsPy Stats object that will be attached to the synthetics """ if mode=='map': + if components is None: + components = [None for _ in range(len(self))] + if stats is None: + stats = [None for _ in range(len(self))] + synthetics = Dataset() for _i, tensor in enumerate(self): synthetics.append(tensor.get_synthetics( @@ -471,7 +483,7 @@ def sort_by_azimuth(self, reverse=False): def sort_by_function(self, function, reverse=False): - """ Sorts in-place using the python built-in `sort` + """ Sorts in-place by user-supplied function """ self.sort(key=function, reverse=reverse) diff --git a/mtuq/io/clients/SPECFEM3D_SAC.py b/mtuq/io/clients/SPECFEM3D_SAC.py index 27cb83292..919529e48 100644 --- a/mtuq/io/clients/SPECFEM3D_SAC.py +++ b/mtuq/io/clients/SPECFEM3D_SAC.py @@ -35,9 +35,9 @@ 'R.Fe', 'T.Fe', 'Z.Fe', - 'R.Fn', - 'T.Fn', - 'Z.Fn', + 'R.Fs', + 'T.Fs', + 'Z.Fs', 'R.Fz', 'T.Fz', 'Z.Fz', diff --git a/mtuq/misfit/polarity.py b/mtuq/misfit/polarity.py index 401284de3..08021f4ed 100644 --- a/mtuq/misfit/polarity.py +++ b/mtuq/misfit/polarity.py @@ -58,9 +58,9 @@ class PolarityMisfit(object): .. rubric:: Other input arguments that may be required, depending on the above ``taup_model`` (`str`): Name of built-in ObsPy TauP model or path to custom - ObsPy TauP model, required for `type=taup` + ObsPy TauP model, required for `method=taup` - ``FK_database`` (`str`): Path to FK database, required for for `type=FK_metadata`. + ``FK_database`` (`str`): Path to FK database, required for `method=FK_metadata`. ..
note:: diff --git a/mtuq/misfit/waveform/__init__.py b/mtuq/misfit/waveform/__init__.py index cfb2de468..4070ad68e 100644 --- a/mtuq/misfit/waveform/__init__.py +++ b/mtuq/misfit/waveform/__init__.py @@ -162,7 +162,7 @@ def __init__(self, def __call__(self, data, greens, sources, progress_handle=Null(), - set_attributes=False, optimization_level=None): + normalize=None, set_attributes=False, optimization_level=None): """ Evaluates misfit on given data """ if optimization_level is None: @@ -195,20 +195,22 @@ def __call__(self, data, greens, sources, progress_handle=Null(), return level0.misfit( data, greens, sources, self.norm, self.time_shift_groups, self.time_shift_min, self.time_shift_max, progress_handle, - set_attributes) + normalize=normalize, set_attributes=set_attributes) if optimization_level==1: return level1.misfit( data, greens, sources, self.norm, self.time_shift_groups, - self.time_shift_min, self.time_shift_max, progress_handle) + self.time_shift_min, self.time_shift_max, progress_handle, + normalize=normalize) if optimization_level==2: return level2.misfit( data, greens, sources, self.norm, self.time_shift_groups, - self.time_shift_min, self.time_shift_max, progress_handle) + self.time_shift_min, self.time_shift_max, progress_handle, + normalize=normalize) - def collect_attributes(self, data, greens, source): + def collect_attributes(self, data, greens, source, normalize=False): """ Collects misfit, time shifts and other attributes corresponding to each trace """ @@ -229,7 +231,7 @@ def collect_attributes(self, data, greens, source): _ = level0.misfit( data, greens, iterable(source), self.norm, self.time_shift_groups, self.time_shift_min, self.time_shift_max, msg_handle=Null(), - set_attributes=True) + normalize=normalize, set_attributes=True) # collects attributes attrs = [] @@ -245,7 +247,7 @@ def collect_attributes(self, data, greens, source): return deepcopy(attrs) - def collect_synthetics(self, data, greens, source): + def collect_synthetics(self, data, greens, source, normalize=False): """ Collects synthetics with misfit, time shifts and other attributes attached """ # checks that dataset is nonempty @@ -265,7 +267,7 @@ def collect_synthetics(self, data, greens, source): _ = level0.misfit( data, greens, iterable(source), self.norm, self.time_shift_groups, self.time_shift_min, self.time_shift_max, msg_handle=Null(), - set_attributes=True) + normalize=False, set_attributes=True) return deepcopy(synthetics) diff --git a/mtuq/misfit/waveform/_stats.py b/mtuq/misfit/waveform/_stats.py index 067f503f9..b31db475d 100644 --- a/mtuq/misfit/waveform/_stats.py +++ b/mtuq/misfit/waveform/_stats.py @@ -7,36 +7,62 @@ from mtuq.util.signal import get_components -def calculate_norm_data(data, norm, components): - # error checking - assert norm in ('L1', 'L2') +def calculate_norm_data(data, norm, components, apply_weights=True): + """ Calculates the norm of the given waveform data + """ + + # Here we try to mimic the implementation in + # mtuq/misfit/waveform/level0.py + + # Mimicking the misfit function implementation helps ensure consistency + # between the misfit values (i.e. residual norms) and data norms + + # The idea here is to use the regular misfit function machinery, but set + # synthetics to zero, so that instead of the residual norm || d - s ||, + # we get the data norm || d - 0 || norm_data = 0. 
-    for _j, d in enumerate(data):
+
+    for _j, stream in enumerate(data):
         _components, indices = list_intersect_with_indices(
-            get_components(d), components)
+            get_components(stream), _flatten(components))
 
         if not indices:
             continue
 
         # time sampling scheme
-        npts = d[0].data.size
-        dt = d[0].stats.delta
+        npts = stream[0].data.size
+        dt = stream[0].stats.delta
 
         for _k in indices:
-            r = d[_k].data
+            d = stream[_k].data
 
             if norm=='L1':
-                norm_data += np.sum(np.abs(r))*dt
+                value = np.sum(np.abs(d))*dt
 
             elif norm=='L2':
-                norm_data += np.sum(r**2)*dt
+                value = np.sum(d**2)*dt
+
+            elif norm=='hybrid':
+                value = np.sqrt(np.sum(d**2))*dt
+
+            # optionally, applies user-supplied weights attached during
+            # process_data()
+            if apply_weights:
+                try:
+                    value *= stream[_k].weight
+                except:
+                    pass
+
+            norm_data += value
 
     return norm_data
 
 
 def estimate_sigma(data, greens, best_source, norm, components,
     time_shift_min, time_shift_max):
+    """ Makes an a posteriori estimate of the data error standard deviation
+    """
 
     # error checking
     assert norm in ('L1', 'L2')
@@ -81,7 +107,6 @@ def estimate_sigma(data, greens, best_source, norm, components,
         stop = start + npts
 
         for _k in indices:
-
             # subtract data from shifted synthetics
             r = s[_k].data[start:stop] - d[_k].data
 
@@ -95,3 +120,17 @@ def estimate_sigma(data, greens, best_source, norm, components,
 
     return np.mean(residuals)**0.5
+
+
+def _flatten(lists):
+    # TODO - better implementation?
+
+    # example:
+    # converts ['ZR','T'] to ['Z','R','T']
+
+    flattened = []
+    for group in lists:
+        for component in group:
+            flattened.append(component)
+    return flattened
+
diff --git a/mtuq/misfit/waveform/level0.py b/mtuq/misfit/waveform/level0.py
index e22123c82..2469fa2e7 100644
--- a/mtuq/misfit/waveform/level0.py
+++ b/mtuq/misfit/waveform/level0.py
@@ -6,13 +6,15 @@
 import numpy as np
+from mtuq.misfit.waveform._stats import _flatten, calculate_norm_data
 from mtuq.util import AttribDict
 from mtuq.util.math import isclose, list_intersect_with_indices
 from mtuq.util.signal import get_components
 
 
 def misfit(data, greens, sources, norm, time_shift_groups,
-    time_shift_min, time_shift_max, msg_handle, set_attributes=False):
+    time_shift_min, time_shift_max, msg_handle,
+    normalize=False, set_attributes=False):
     """ Waveform misfit function (non-optimized pure Python version)
 
 
@@ -20,6 +22,10 @@ def misfit(data, greens, sources, norm, time_shift_groups,
     """
     values = np.zeros((len(sources), 1))
 
+    if normalize:
+        components = _flatten(time_shift_groups)
+        norm_data = calculate_norm_data(data, norm, components)
+
     #
     # initialize Green's function machinery
     #
@@ -164,6 +170,10 @@ def misfit(data, greens, sources, norm, time_shift_groups,
                     s[_k].attrs.amplitude_ratio = d_max/s_max
                     s[_k].attrs.log_amplitude_ratio = np.log(d_max/s_max)
 
+
+    if normalize:
+        values /= norm_data
+
     return values
 
 
diff --git a/mtuq/misfit/waveform/level1.py b/mtuq/misfit/waveform/level1.py
index 4a0f70813..fdab3ddf7 100644
--- a/mtuq/misfit/waveform/level1.py
+++ b/mtuq/misfit/waveform/level1.py
@@ -5,13 +5,14 @@
 """
 
 import numpy as np
+from mtuq.misfit.waveform._stats import _flatten, calculate_norm_data
 from mtuq.util.math import correlate, isclose, list_intersect_with_indices
 from mtuq.util.signal import get_components, get_time_sampling
 
 
 def misfit(data, greens, sources, norm, time_shift_groups,
-    time_shift_min, time_shift_max, msg_handle):
+    time_shift_min, time_shift_max, msg_handle, normalize=False):
     """ Waveform misfit function (fast pure Python version)
 
 
@@ -20,6 +21,10 @@ def misfit(data, greens, sources, norm,
time_shift_groups, helpers = [] values = np.zeros((len(sources), 1)) + if normalize: + components = _flatten(time_shift_groups) + norm_data = calculate_norm_data(data, norm, components) + # # initialize Green's function machinery # @@ -84,6 +89,9 @@ def misfit(data, greens, sources, norm, time_shift_groups, except: values[_i] += value + if normalize: + values /= norm_data + return values diff --git a/mtuq/misfit/waveform/level2.py b/mtuq/misfit/waveform/level2.py index 229120fac..c99839229 100644 --- a/mtuq/misfit/waveform/level2.py +++ b/mtuq/misfit/waveform/level2.py @@ -7,6 +7,7 @@ import numpy as np import time from copy import deepcopy +from mtuq.misfit.waveform._stats import _flatten, calculate_norm_data from mtuq.misfit.waveform.level1 import correlate from mtuq.util.math import to_mij, to_rtp from mtuq.util.signal import get_components, get_time_sampling @@ -14,12 +15,18 @@ def misfit(data, greens, sources, norm, time_shift_groups, - time_shift_min, time_shift_max, msg_handle, debug_level=0): + time_shift_min, time_shift_max, msg_handle, debug_level=0, + normalize=False): """ Data misfit function (fast Python/C version) See ``mtuq/misfit/waveform/__init__.py`` for more information """ + + if normalize: + components = _flatten(time_shift_groups) + norm_data = calculate_norm_data(data, norm, components) + # # collect metadata # @@ -86,6 +93,9 @@ def misfit(data, greens, sources, norm, time_shift_groups, print(' Elapsed time (C extension) (s): %f' % \ (time.time() - start_time)) + if normalize: + results /= norm_data + return results diff --git a/mtuq/util/__init__.py b/mtuq/util/__init__.py index b9595f89a..9f66cf6bd 100644 --- a/mtuq/util/__init__.py +++ b/mtuq/util/__init__.py @@ -287,6 +287,9 @@ def __init__(self, *args, **kwargs): def __call__(self, *args, **kwargs): return self + def __getitem__(self, *args, **kwargs): + return None + def __nonzero__(self): return False diff --git a/setup/code_generator.py b/setup/code_generator.py index 1a9e76009..1198ce31b 100755 --- a/setup/code_generator.py +++ b/setup/code_generator.py @@ -1588,6 +1588,7 @@ def isclose(a, b, atol=1.e6, rtol=1.e-6): print('%s: %.e <= %.1e + %.1e * %.1e' %\\ ('passed' if _bool else 'failed', abs(_a-_b), atol, rtol, abs(_b))) + print('') return np.all( diff --git a/tests/check_urls.bash b/tests/check_urls.bash index 9053377f3..f712d96b5 100755 --- a/tests/check_urls.bash +++ b/tests/check_urls.bash @@ -10,8 +10,9 @@ URLS="\ https://www.eas.slu.edu/People/LZhu/home.html\ https://github.com/geodynamics/axisem\ https://github.com/Liang-Ding/seisgen\ - http://ds.iris.edu/ds/products/syngine\ - http://ds.iris.edu/ds/products/syngine/#models\ + https://ds.iris.edu/ds/products/syngine\ + https://ds.iris.edu/ds/products/syngine/#models\ + https://ds.iris.edu/files/sac-manual/manual/file_format.html\ https://instaseis.net\ https://docs.obspy.org/tutorial/index.html\ https://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html\ @@ -20,6 +21,8 @@ URLS="\ https://docs.xarray.dev/en/stable/generated/xarray.DataArray.html\ https://github.com/uafgeotools/mtuq/blob/master/docs/user_guide/05/code/gallery_mt.py https://github.com/uafgeotools/mtuq/blob/master/docs/user_guide/05/code/gallery_force.py + https://conda.org/blog/2023-11-06-conda-23-10-0-release\ + https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community\ " diff --git a/tests/test_greens_SPECFEM3D_SAC.py b/tests/test_greens_SPECFEM3D_SAC.py index 14aa3c091..7b2734270 100644 --- a/tests/test_greens_SPECFEM3D_SAC.py +++ 
b/tests/test_greens_SPECFEM3D_SAC.py @@ -169,6 +169,7 @@ def isclose(a, b, atol=1.e6, rtol=1.e-6): print('%s: %.e <= %.1e + %.1e * %.1e' %\ ('passed' if _bool else 'failed', abs(_a-_b), atol, rtol, abs(_b))) + print('') return np.all( diff --git a/tests/test_greens_SPECFEM3D_SGT.py b/tests/test_greens_SPECFEM3D_SGT.py index f7584642f..2bfd7c963 100644 --- a/tests/test_greens_SPECFEM3D_SGT.py +++ b/tests/test_greens_SPECFEM3D_SGT.py @@ -170,6 +170,7 @@ def isclose(a, b, atol=1.e6, rtol=1.e-6): print('%s: %.e <= %.1e + %.1e * %.1e' %\ ('passed' if _bool else 'failed', abs(_a-_b), atol, rtol, abs(_b))) + print('') return np.all( diff --git a/tests/test_grid_search_mt.py b/tests/test_grid_search_mt.py index c2c4fda1b..74ba6951f 100644 --- a/tests/test_grid_search_mt.py +++ b/tests/test_grid_search_mt.py @@ -179,6 +179,7 @@ def isclose(a, b, atol=1.e6, rtol=1.e-6): print('%s: %.e <= %.1e + %.1e * %.1e' %\ ('passed' if _bool else 'failed', abs(_a-_b), atol, rtol, abs(_b))) + print('') return np.all( diff --git a/tests/test_time_shifts.py b/tests/test_time_shifts.py index 0a6b31374..2c5db6a25 100644 --- a/tests/test_time_shifts.py +++ b/tests/test_time_shifts.py @@ -161,13 +161,16 @@ def __call__(self, traces): return super(ProcessData, self).__call__( traces, station=station, origin=origin) + + # + # plotting functions + # def _plot_dat(axis, t, data, attrs, pathspec='-k'): stream = data[0] trace = data[0][0] stats = trace.stats axis.plot(t, trace.data, pathspec) - def _plot_syn(axis, t, data, attrs, pathspec='-r'): stream = data[0] trace = data[0][0] @@ -176,7 +179,6 @@ def _plot_syn(axis, t, data, attrs, pathspec='-r'): idx2 = attrs.idx_stop axis.plot(t, trace.data[idx1:idx2], pathspec) - def _annotate(axis, attrs): text = 'static_shift: %.1f' % attrs.static_shift pyplot.text(0.02, 0.85, text, transform=axis.transAxes) @@ -187,15 +189,12 @@ def _annotate(axis, attrs): text = 'total_shift: %.1f' % attrs.total_shift pyplot.text(0.02, 0.55, text, transform=axis.transAxes) - def _get_synthetics(greens, mt): return greens.get_synthetics(mt, components=['Z']) - def _get_attrs(data, greens, misfit): return misfit.collect_attributes(data, greens, mt)[0]['Z'] - def _get_time_sampling(data): stream = data[0] t1 = float(stream[0].stats.starttime)
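
As a reviewer's aid for the `normalize` option introduced in the misfit changes above: the flag is intended to report each misfit value as a fraction of the corresponding data norm, so a perfect fit tends toward 0 and zero-amplitude synthetics toward 1. The standalone sketch below reproduces that arithmetic on toy NumPy arrays; the variable names are hypothetical and no MTUQ objects are involved::

    import numpy as np

    # toy observed and synthetic traces (hypothetical values)
    dt = 0.05
    d = np.sin(np.linspace(0., 10., 201))
    s = 0.8*np.sin(np.linspace(0., 10., 201))

    # L2 residual norm, following the sum(r**2)*dt convention used in level0.py
    misfit = np.sum((d - s)**2)*dt

    # L2 norm of the data alone (what calculate_norm_data returns here)
    norm_data = np.sum(d**2)*dt

    # with normalize=True, misfit values are divided by the data norm
    normalized_misfit = misfit/norm_data
    print(normalized_misfit)   # -> 0.04 for these toy traces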
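
For reference, the three norm branches now handled by `calculate_norm_data` reduce to the per-trace expressions below. This is a sketch on a toy array; the placement of `dt` in the `hybrid` branch simply mirrors the patch::

    import numpy as np

    dt = 0.02
    d = np.array([0., 1., -2., 1., 0.])

    norm_L1 = np.sum(np.abs(d))*dt          # 0.08
    norm_L2 = np.sum(d**2)*dt               # 0.12
    norm_hybrid = np.sqrt(np.sum(d**2))*dt  # ~0.049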
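
The `_flatten` helper added to `_stats.py` expands component groups such as `['ZR', 'T']` into individual components before the data norm is computed. A minimal equivalent, assuming the groups are plain strings of component codes (the function name below is illustrative, not the MTUQ implementation)::

    def flatten_groups(groups):
        # expands component groups, e.g. ['ZR', 'T'] -> ['Z', 'R', 'T']
        return [component for group in groups for component in group]

    assert flatten_groups(['ZR', 'T']) == ['Z', 'R', 'T']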
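
The `mode='map'` change near the top of the patch broadcasts a single `None` value for `components` or `stats` into one entry per Green's tensor before iterating. Stripped of MTUQ specifics, the pattern is just the following (illustrative names)::

    def broadcast_default(value, n):
        # expands None into a list with one None per element
        return [None for _ in range(n)] if value is None else value

    assert broadcast_default(None, 3) == [None, None, None]
    assert broadcast_default(['Z', 'R', 'T'], 3) == ['Z', 'R', 'T']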
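
Finally, the one-line `__getitem__` addition in `mtuq/util/__init__.py` follows the usual null-object pattern, letting a `Null` instance be indexed as well as called without raising. A self-contained sketch of that behavior (not the actual MTUQ class, which defines additional methods)::

    class Null(object):
        # silently absorbs calls and item access
        def __call__(self, *args, **kwargs):
            return self

        def __getitem__(self, *args, **kwargs):
            return None

        def __bool__(self):
            return False

    placeholder = Null()
    assert placeholder() is placeholder
    assert placeholder['anything'] is None
    assert not placeholder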