Skip to content

Commit

Permalink
Minor update
Browse files Browse the repository at this point in the history
  • Loading branch information
khl02007 committed Oct 2, 2023
1 parent 774d7f0 commit db73333
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 90 deletions.
6 changes: 2 additions & 4 deletions src/spyglass/spikesorting/v1/figurl_curation.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,18 +102,17 @@ def make(self, key: Dict):
unit_metrics = _reformat_metrics(metric_dict)

# Generate the figURL
url = _generate_figurl(
key["url"] = _generate_figurl(
R=recording,
S=sorting,
initial_curation_uri=curation_uri,
recording_label=recording_label,
sorting_label=sorting_label,
unit_metrics=unit_metrics,
)
key["url"] = url

# INSERT
self.insert1(key)
self.insert1(key, skip_duplicates=True)


def _generate_figurl(
Expand All @@ -122,7 +121,6 @@ def _generate_figurl(
initial_curation_uri: str,
recording_label: str,
sorting_label: str,
new_curation_uri: str,
unit_metrics: Union[List[Any], None] = None,
segment_duration_sec=1200,
snippet_ms_before=1,
Expand Down
22 changes: 5 additions & 17 deletions src/spyglass/spikesorting/v1/recording.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,34 +176,22 @@ class SpikeSortingPreprocessingParameter(dj.Lookup):
---
preproc_param: blob
"""
freq_min = 300 # high pass filter value
freq_max = 6000 # low pass filter value
margin_ms = 5 # margin in ms on border to avoid border effect
seed = 0 # random seed for whitening

contents = [
[
"default",
{
"frequency_min": freq_min,
"frequency_max": freq_max,
"margin_ms": margin_ms,
"seed": seed,
"frequency_min": 300, # high pass filter value
"frequency_max": 6000, # low pass filter value
"margin_ms": 5, # margin in ms on border to avoid border effect
"seed": 0, # random seed for whitening
},
]
]

@classmethod
def insert_default(cls):
    """Insert the default preprocessing parameter set defined in `contents`.

    `cls.contents` is a *list* of rows, so it must go through `insert`
    rather than `insert1` (which accepts exactly one row and raises when
    handed a list). `skip_duplicates=True` keeps repeated calls idempotent.
    """
    cls.insert(cls.contents, skip_duplicates=True)


@schema
Expand Down
73 changes: 4 additions & 69 deletions src/spyglass/spikesorting/v1/sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import os
import tempfile
import time
from pathlib import Path

import datajoint as dj
import numpy as np
Expand Down Expand Up @@ -94,73 +93,9 @@ class SpikeSorterParameter(dj.Lookup):
]
)

def insert_default(self):
"""Default params from spike sorters available via spikeinterface"""
sorters = sis.available_sorters()
for sorter in sorters:
sorter_params = sis.get_default_sorter_params(sorter)
self.insert1(
[sorter, "default", sorter_params], skip_duplicates=True
)

# Insert Frank lab defaults
# Hippocampus tetrode default
sorter = "mountainsort4"
sorter_params_name = "franklab_tetrode_hippocampus_30KHz"
sorter_params = {
"detect_sign": -1,
"adjacency_radius": 100,
"freq_min": 600,
"freq_max": 6000,
"filter": False,
"whiten": True,
"num_workers": 1,
"clip_size": 40,
"detect_threshold": 3,
"detect_interval": 10,
}
self.insert1(
[sorter, sorter_params_name, sorter_params], skip_duplicates=True
)

# Cortical probe default
sorter = "mountainsort4"
sorter_params_name = "franklab_probe_ctx_30KHz"
sorter_params = {
"detect_sign": -1,
"adjacency_radius": 100,
"freq_min": 300,
"freq_max": 6000,
"filter": False,
"whiten": True,
"num_workers": 1,
"clip_size": 40,
"detect_threshold": 3,
"detect_interval": 10,
}
self.insert1(
[sorter, sorter_params_name, sorter_params], skip_duplicates=True
)

# clusterless defaults
sorter = "clusterless_thresholder"
sorter_params_name = "default_clusterless"
sorter_params = dict(
detect_threshold=100.0, # uV
# Locally exclusive means one unit per spike detected
method="locally_exclusive",
peak_sign="neg",
exclude_sweep_ms=0.1,
local_radius_um=100,
# noise levels needs to be 1.0 so the units are in uV and not MAD
noise_levels=np.asarray([1.0]),
random_chunk_kwargs={},
# output needs to be set to sorting for the rest of the pipeline
outputs="sorting",
)
self.insert1(
[sorter, sorter_params_name, sorter_params], skip_duplicates=True
)
@classmethod
def insert_default(cls):
    """Insert the default spike-sorter parameter rows defined in `contents`.

    `cls.contents` is a *list* of rows; use `insert` instead of `insert1`,
    which accepts only a single row and would raise on a list.
    `skip_duplicates=True` makes repeated calls idempotent.
    """
    cls.insert(cls.contents, skip_duplicates=True)


@schema
Expand Down Expand Up @@ -251,7 +186,7 @@ def make(self, key: dict):
# turn off whitening by sorter because
# recording will be whitened before feeding it to sorter
sorter_params["whiten"] = False
recording = sip.whiten(recording, dtype="float64")
recording = sip.whiten(recording, dtype=np.float64)
sorting = sis.run_sorter(
sorter,
recording,
Expand Down

0 comments on commit db73333

Please sign in to comment.