Skip to content

Commit

Permalink
Fetch upstream LFP notebook changes; centralize spike_sorting_dir in config
Browse files Browse the repository at this point in the history
  • Loading branch information
CBroz1 committed Jul 28, 2023
2 parents 15d1369 + 4d71854 commit 9e85b26
Show file tree
Hide file tree
Showing 11 changed files with 2,441 additions and 2,873 deletions.
71 changes: 71 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
# Pre-commit configuration (https://pre-commit.com).
# Hooks run before each commit and push; generated/static paths are excluded.
# NOTE: the scraped source had lost all YAML indentation; structure is
# restored here to conventional pre-commit nesting with content unchanged.
default_stages: [commit, push]
exclude: (^.github/|^docs/site/|^images/)

repos:
  # Markdown formatter — runs first so later hooks see normalized files.
  - repo: https://github.com/executablebooks/mdformat
    # Do this before other tools "fixing" the line endings
    rev: 0.7.16
    hooks:
      - id: mdformat
        name: Format Markdown
        entry: mdformat # Executable to run, with fixed options
        language: python
        types: [markdown]
        args: [--wrap, "80", --number]
        additional_dependencies:
          - mdformat-toc
          - mdformat-beautysh
          - mdformat-config
          - mdformat-black
          - mdformat-web
          - mdformat-gfm

  # General-purpose hygiene checks from the canonical pre-commit-hooks repo.
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-json
      - id: check-toml
      - id: check-yaml
        args: [--unsafe]
      - id: requirements-txt-fixer
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
        description: Forces to replace line ending by the UNIX 'lf' character.
      - id: trailing-whitespace
      - id: debug-statements
      - id: check-added-large-files # prevent giant files from being committed
      - id: check-builtin-literals
      - id: check-merge-conflict
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-docstring-first
      - id: check-case-conflict
      - id: fix-byte-order-marker

  # YAML lint with relaxed rules and a 90-column line limit.
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.29.0
    hooks:
      - id: yamllint
        args:
          - --no-warnings
          - -d
          - "{extends: relaxed, rules: {line-length: {max: 90}}}"

  # Fast Python linter.
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.0.254
    hooks:
      - id: ruff

  # Removes unused imports/variables.
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.0.1
    hooks:
      - id: autoflake

  # Spell checker; reads its ignore list from pyproject.toml.
  - repo: https://github.com/codespell-project/codespell
    rev: v2.2.2
    hooks:
      - id: codespell
        args: [--toml, pyproject.toml]
        additional_dependencies:
          - tomli
3 changes: 2 additions & 1 deletion franklab_scripts/nightly_cleanup.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@

warnings.simplefilter("ignore", category=DeprecationWarning)
warnings.simplefilter("ignore", category=ResourceWarning)
os.environ["SPIKE_SORTING_STORAGE_DIR"] = "/stelmo/nwb/spikesorting"
# NOTE: "SPIKE_SORTING_STORAGE_DIR" -> "SPYGLASS_SORTING_DIR"
os.environ["SPYGLASS_SORTING_DIR"] = "/stelmo/nwb/spikesorting"


# import tables so that we can call them easily
Expand Down
2 changes: 1 addition & 1 deletion franklab_scripts/sort.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ def main():

os.environ["SPYGLASS_BASE_DIR"] = str(data_dir)
os.environ["KACHERY_STORAGE_DIR"] = str(data_dir / "kachery-storage")
os.environ["SPIKE_SORTING_STORAGE_DIR"] = str(data_dir / "spikesorting")
os.environ["SPYGLASS_SORTING_DIR"] = str(data_dir / "spikesorting")

# session_id = 'jaq_01'
# nwb_file_name = (sg.common.Session() & {'session_id': session_id}).fetch1('nwb_file_name')
Expand Down
19 changes: 0 additions & 19 deletions notebooks/02_Spike_Sorting.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -66,23 +66,6 @@
"_Note:_ If the imports below throw a `FileNotFoundError`, make a cell with `!env | grep X` where X is part of the problematic directory. This will show the variable causing issues. Make another cell that sets this variable elsewhere with `%env VAR=\"/your/path/\"`\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"env: KACHERY_CLOUD_DIR=\"/home/cb/.kachery-cloud/\"\n"
]
}
],
"source": [
"%env KACHERY_CLOUD_DIR=\"/home/cb/.kachery-cloud/\""
]
},
{
"cell_type": "code",
"execution_count": 13,
Expand All @@ -100,8 +83,6 @@
" os.chdir(\"..\")\n",
"dj.config.load(\"dj_local_conf.json\") # load config for database connection info\n",
"\n",
"!export KACHERY_CLOUD_DIR=\"/home/cb/.kachery-cloud/\"\n",
"\n",
"import spyglass.common as sgc\n",
"import spyglass.spikesorting as sgs\n",
"\n",
Expand Down
411 changes: 128 additions & 283 deletions notebooks/03_Curation.ipynb

Large diffs are not rendered by default.

2,555 changes: 0 additions & 2,555 deletions notebooks/03_lfp.ipynb

This file was deleted.

2,215 changes: 2,215 additions & 0 deletions notebooks/04_LFP.ipynb

Large diffs are not rendered by default.

File renamed without changes.
2 changes: 1 addition & 1 deletion src/spyglass/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
raw="raw",
analysis="analysis",
recording="recording",
spike_sorting_storage="spikesorting",
sorting="spikesorting", # "SPYGLASS_SORTING_DIR"
waveforms="waveforms",
temp="tmp",
),
Expand Down
29 changes: 18 additions & 11 deletions src/spyglass/spikesorting/spikesorting_recording.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,17 @@
import spikeinterface as si
import spikeinterface.extractors as se

from ..common.common_device import Probe, ProbeType
from ..common.common_device import Probe, ProbeType # noqa: F401
from ..common.common_ephys import Electrode, ElectrodeGroup
from ..common.common_interval import (
IntervalList,
interval_list_intersect,
intervals_by_length,
union_adjacent_index,
)
from ..common.common_lab import LabTeam
from ..common.common_lab import LabTeam # noqa: F401
from ..common.common_nwbfile import Nwbfile
from ..common.common_session import Session
from ..common.common_session import Session # noqa: F401
from ..utils.dj_helper_fn import dj_replace

schema = dj.schema("spikesorting_recording")
Expand Down Expand Up @@ -59,12 +59,15 @@ def set_group_by_shank(
Parameters
----------
nwb_file_name : str
the name of the NWB file whose electrodes should be put into sorting groups
the name of the NWB file whose electrodes should be put into
sorting groups
references : dict, optional
If passed, used to set references. Otherwise, references set using
original reference electrodes from config. Keys: electrode groups. Values: reference electrode.
original reference electrodes from config. Keys: electrode groups.
Values: reference electrode.
omit_ref_electrode_group : bool
Optional. If True, no sort group is defined for electrode group of reference.
Optional. If True, no sort group is defined for electrode group of
reference.
omit_unitrode : bool
Optional. If True, no sort groups are defined for unitrodes.
"""
Expand Down Expand Up @@ -109,12 +112,14 @@ def set_group_by_shank(
]
else:
ValueError(
f"Error in electrode group {e_group}: reference electrodes are not all the same"
f"Error in electrode group {e_group}: reference "
+ "electrodes are not all the same"
)
else:
if e_group not in references.keys():
raise Exception(
f"electrode group {e_group} not a key in references, so cannot set reference"
f"electrode group {e_group} not a key in "
+ "references, so cannot set reference"
)
else:
sg_key["sort_reference_electrode_id"] = references[
Expand All @@ -135,14 +140,16 @@ def set_group_by_shank(
len(reference_electrode_group) != 1
):
raise Exception(
f"Should have found exactly one electrode group for reference electrode,"
f"but found {len(reference_electrode_group)}."
"Should have found exactly one electrode group for "
+ "reference electrode, but found "
+ f"{len(reference_electrode_group)}."
)
if omit_ref_electrode_group and (
str(e_group) == str(reference_electrode_group)
):
print(
f"Omitting electrode group {e_group} from sort groups because contains reference."
f"Omitting electrode group {e_group} from sort groups "
+ "because contains reference."
)
continue
shank_elect = electrodes["electrode_id"][
Expand Down
7 changes: 5 additions & 2 deletions src/spyglass/spikesorting/spikesorting_sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

from ..common.common_lab import LabMember, LabTeam
from ..common.common_nwbfile import AnalysisNwbfile
from ..settings import load_config
from ..utils.dj_helper_fn import fetch_nwb
from .spikesorting_artifact import ArtifactRemovedIntervalList
from .spikesorting_recording import (
Expand Down Expand Up @@ -134,7 +135,7 @@ def make(self, key: dict):
(this is redundant with 2; will change in the future)
"""

# CBroz: does this not work w/o arg? as .populate() ?
recording_path = (SpikeSortingRecording & key).fetch1("recording_path")
recording = si.load_extractor(recording_path)

Expand Down Expand Up @@ -227,7 +228,9 @@ def make(self, key: dict):
key["time_of_sort"] = int(time.time())

print("Saving sorting results...")
sorting_folder = Path(os.getenv("SPYGLASS_SORTING_DIR"))

sorting_folder = Path(load_config().get("SPYGLASS_SORTING_DIR"))

sorting_name = self._get_sorting_name(key)
key["sorting_path"] = str(sorting_folder / Path(sorting_name))
if os.path.exists(key["sorting_path"]):
Expand Down

0 comments on commit 9e85b26

Please sign in to comment.