Merge branch 'main' into refactor-entities
hasan7n authored Aug 5, 2024
2 parents 808e823 + d072303 commit 4a03f56
Showing 18 changed files with 300 additions and 124 deletions.
31 changes: 18 additions & 13 deletions cli/medperf/commands/dataset/prepare.py
@@ -165,8 +165,7 @@ def setup_parameters(self):

    def run_prepare(self):
        report_sender = ReportSender(self)
-        if self.allow_sending_reports:
-            report_sender.start()
+        report_sender.start()

        prepare_params = {
            "data_path": self.raw_data_path,
@@ -190,18 +189,15 @@ def run_prepare(self):
            )
        except Exception as e:
            # Inform the server that a failure occured
-            if self.allow_sending_reports:
-                report_sender.stop("failed")
+            report_sender.stop("failed")
            raise e
-        except KeyboardInterrupt:
+        except KeyboardInterrupt as e:
            # Inform the server that the process is interrupted
-            if self.allow_sending_reports:
-                report_sender.stop("interrupted")
-            raise
+            report_sender.stop("interrupted")
+            raise e

        self.ui.print("> Cube execution complete")
-        if self.allow_sending_reports:
-            report_sender.stop("finished")
+        report_sender.stop("finished")

    def run_sanity_check(self):
        sanity_check_timeout = config.sanity_check_timeout
@@ -313,8 +309,12 @@ def prompt_for_report_sending_approval(self):
        dict_pretty_print(example)

        msg = (
-            " \nDo you approve the automatic submission of summaries similar to the one above"
-            + " to the MedPerf Server throughout the preparation process?[Y/n]"
+            "\nYou can decide whether or not to send information about your dataset preparation"
+            + "\nprogress. Keep in mind that information about the execution status of the pipeline"
+            + "\nwill be sent regardless (whether the pipeline is running, finished or failed)"
+            + "\nto identify issues with the preparation procedure. Do you approve the automatic"
+            + "\nsubmission of summaries similar to the one above to the MedPerf Server throughout"
+            + "\nthe preparation process?[Y/n]"
        )

        self.allow_sending_reports = approval_prompt(msg)
@@ -327,7 +327,12 @@ def send_report(self, report_metadata):
        return self._send_report(report_metadata)

    def _send_report(self, report_metadata):
-        report_status_dict = self.__generate_report_dict()
+        if self.dataset.for_test:
+            # Test datasets don't have a registration on the server
+            return
+        report_status_dict = {}
+        if self.allow_sending_reports:
+            report_status_dict = self.__generate_report_dict()
        report = {"progress": report_status_dict, **report_metadata}
        if report == self.dataset.report:
            # Watchdog may trigger an event even if contents didn't change
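Taken together, these hunks change the reporting contract: ReportSender now always starts and stops, so the server always learns the pipeline's execution status, while the detailed progress payload stays gated behind allow_sending_reports and test datasets skip submission entirely. Below is a condensed, runnable sketch of that contract; Sender, Prep, and their helper methods are simplified stand-ins for the real ReportSender / DataPreparation API, not the actual interface.

# Condensed sketch of the reporting contract after this merge. All names
# here are simplified stand-ins, not the real MedPerf classes.
class Sender:
    def __init__(self, prep):
        self.prep = prep

    def start(self):
        self.prep.send_report({"execution_status": "running"})

    def stop(self, status):
        self.prep.send_report({"execution_status": status})


class Prep:
    for_test = False               # test datasets never submit anything
    allow_sending_reports = False  # user opt-in gates only the payload

    def summarize(self):
        return {"setup": "done"}   # stand-in for __generate_report_dict

    def submit(self, report):
        print("sent:", report)     # stand-in for the server call

    def execute(self):
        pass                       # stand-in for the cube run

    def send_report(self, metadata):
        if self.for_test:
            return
        progress = self.summarize() if self.allow_sending_reports else {}
        self.submit({"progress": progress, **metadata})

    def run(self):
        sender = Sender(self)
        sender.start()  # unconditional: status is always reported
        try:
            self.execute()
        except Exception as e:
            sender.stop("failed")
            raise e
        except KeyboardInterrupt as e:
            sender.stop("interrupted")
            raise e
        sender.stop("finished")


Prep().run()  # prints a "running" and then a "finished" report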
64 changes: 38 additions & 26 deletions cli/medperf/tests/commands/dataset/test_prepare.py
@@ -8,7 +8,7 @@

from medperf.tests.mocks.dataset import TestDataset
from medperf.tests.mocks.cube import TestCube
-from medperf.commands.dataset.prepare import DataPreparation
+from medperf.commands.dataset.prepare import DataPreparation, Observer

PATCH_REGISTER = "medperf.commands.dataset.prepare.{}"


@@ -54,37 +54,58 @@ def test_get_prep_cube_downloads_cube_file(mocker, data_preparation, cube):
    spy.assert_called_once()

@pytest.mark.parametrize("dataset_for_test", [False, True])
def test_prepare_with_test_data_doesnt_send_reports(
mocker, data_preparation, dataset_for_test, cube, comms, fs
):
# Arrange
data_preparation.dataset.for_test = dataset_for_test
mocker.patch.object(cube, "run")
mocker.patch.object(data_preparation.dataset, "write")
mocked_obs = mocker.create_autospec(spec=Observer)
mocker.patch(PATCH_REGISTER.format("Observer", side_effect=mocked_obs))
send_report_spy = mocker.patch.object(comms, "update_dataset")

# Act
data_preparation.run_prepare()

# Assert
if dataset_for_test:
send_report_spy.assert_not_called()
else:
send_report_spy.assert_called()


@pytest.mark.parametrize("allow_sending_reports", [False, True])
def test_prepare_runs_then_stops_report_handler(
mocker, data_preparation, allow_sending_reports, cube
mocker, data_preparation, allow_sending_reports, cube, comms, fs
):
# Arrange
data_preparation.allow_sending_reports = allow_sending_reports
mocker.patch.object(cube, "run")
start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
mocker.patch.object(data_preparation.dataset, "write")
mocked_obs = mocker.create_autospec(spec=Observer)
mocker.patch(PATCH_REGISTER.format("Observer"), side_effect=mocked_obs)
gen_report_spy = mocker.patch(PATCH_REGISTER.format("DataPreparation._DataPreparation__generate_report_dict"))

# Act
data_preparation.run_prepare()

# Assert
if allow_sending_reports:
start_spy.assert_called_once()
stop_spy.assert_called_once_with("finished")
gen_report_spy.assert_called()
else:
start_spy.assert_not_called()
stop_spy.assert_not_called()
gen_report_spy.assert_not_called()


@pytest.mark.parametrize("allow_sending_reports", [False, True])
def test_prepare_runs_then_stops_report_handler_on_failure(
mocker, data_preparation, allow_sending_reports, cube
mocker, data_preparation, cube
):
# Arrange
def _failure_run(*args, **kwargs):
raise Exception()

data_preparation.allow_sending_reports = allow_sending_reports
data_preparation.allow_sending_reports = True
mocker.patch.object(cube, "run", side_effect=_failure_run)
start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
@@ -94,23 +115,18 @@ def _failure_run(*args, **kwargs):
        data_preparation.run_prepare()

    # Assert
-    if allow_sending_reports:
-        start_spy.assert_called_once()
-        stop_spy.assert_called_once_with("failed")
-    else:
-        start_spy.assert_not_called()
-        stop_spy.assert_not_called()
+    start_spy.assert_called_once()
+    stop_spy.assert_called_once_with("failed")


-@pytest.mark.parametrize("allow_sending_reports", [False, True])
def test_prepare_runs_then_stops_report_handler_on_interrupt(
-    mocker, data_preparation, allow_sending_reports, cube
+    mocker, data_preparation, cube
):
    # Arrange
    def _failure_run(*args, **kwargs):
        raise KeyboardInterrupt()

-    data_preparation.allow_sending_reports = allow_sending_reports
+    data_preparation.allow_sending_reports = True
    mocker.patch.object(cube, "run", side_effect=_failure_run)
    start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
    stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
@@ -120,12 +136,8 @@ def _failure_run(*args, **kwargs):
        data_preparation.run_prepare()

    # Assert
-    if allow_sending_reports:
-        start_spy.assert_called_once()
-        stop_spy.assert_called_once_with("interrupted")
-    else:
-        start_spy.assert_not_called()
-        stop_spy.assert_not_called()
+    start_spy.assert_called_once()
+    stop_spy.assert_called_once_with("interrupted")


@pytest.mark.parametrize("report_specified", [False, True])
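The patch target DataPreparation._DataPreparation__generate_report_dict in the updated test looks odd only because of Python's name mangling: inside a class body, an attribute with two leading underscores is rewritten to _ClassName__attribute, so that mangled spelling is the name mocker.patch has to use. A minimal, runnable illustration with a toy class:

# Toy illustration of the name mangling behind the patch target above.
class Toy:
    def __hidden(self):
        return "secret"

toy = Toy()
# toy.__hidden()           # AttributeError: only the mangled name exists
print(toy._Toy__hidden())  # prints "secret"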
1 change: 1 addition & 0 deletions cli/requirements.txt
@@ -22,6 +22,7 @@ setuptools<=66.1.1
email-validator==2.0.0
auth0-python==4.3.0
pandas==2.1.0
+numpy==1.26.4
watchdog==3.0.0
GitPython==3.1.41
psutil==5.9.8
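The diff does not say why numpy is now pinned; a plausible reading (an assumption, not stated in the commit) is that it keeps the resolver on the numpy 1.x line that pandas 2.1.0 was built against. A trivial snippet to check that an installed environment matches these pins:

# Assumption: the pin exists to stay on numpy 1.x alongside pandas 2.1.0.
# This only verifies that the active environment matches the pinned versions.
import numpy
import pandas

assert numpy.__version__ == "1.26.4", numpy.__version__
assert pandas.__version__ == "2.1.0", pandas.__version__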
4 changes: 4 additions & 0 deletions scripts/dashboard/medperf_dashboard/get_data.py
@@ -1,6 +1,7 @@
import os
import pandas as pd
import datetime
+import numpy as np

from medperf.entities.dataset import Dataset
from medperf import config
@@ -38,6 +39,9 @@ def build_dset_df(dsets, user2institution, stages_df):
            exec_status = report["execution_status"]
            formatted_dset["execution_status"] = exec_status
            formatted_dset["progress"] = report["progress"]
+        else:
+            formatted_dset["execution_status"] = np.nan
+            formatted_dset["progress"] = np.nan

        formatted_dsets.append(formatted_dset)
    dsets_df = pd.DataFrame(formatted_dsets)
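The new else branch matters because build_dset_df assembles one dict per dataset and pandas aligns columns across all of them: writing np.nan for datasets that have no report keeps the execution_status and progress columns present on every row, so aggregations can count the missing cases explicitly. A small sketch of the effect (column names from the diff, data invented):

import numpy as np
import pandas as pd

rows = [
    {"execution_status": "running", "progress": 0.4},
    {"execution_status": np.nan, "progress": np.nan},  # dataset without a report
]
df = pd.DataFrame(rows)
# dropna=False keeps the "no report yet" bucket visible in the counts
print(df["execution_status"].value_counts(dropna=False))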
16 changes: 11 additions & 5 deletions scripts/monitor/rano_monitor/__main__.py
@@ -8,6 +8,8 @@
    STAGES_HELP,
    DSET_LOC_HELP,
    OUT_HELP,
+    REVIEW_COMMAND,
+    REVIEW_CMD_HELP,
)
from rano_monitor.dataset_browser import DatasetBrowser
from rano_monitor.handlers import InvalidHandler

@@ -21,7 +23,7 @@
app = typer.Typer()


-def run_dset_app(dset_path, stages_path, output_path):
+def run_dset_app(dset_path, stages_path, output_path, review_cmd):
    report_path = os.path.join(dset_path, "report.yaml")
    dset_data_path = os.path.join(dset_path, "data")
    invalid_path = os.path.join(dset_path, "metadata/.invalid.txt")

@@ -48,6 +50,7 @@ def run_dset_app(dset_path, stages_path, output_path):
        invalid_path,
        invalid_watchdog,
        prompt_watchdog,
+        review_cmd,
    )

    observer = Observer()

@@ -60,7 +63,7 @@
    observer.stop()


-def run_tarball_app(tarball_path):
+def run_tarball_app(tarball_path, review_cmd):
    folder_name = f".{os.path.basename(tarball_path).split('.')[0]}"
    contents_path = os.path.join(os.path.dirname(tarball_path), folder_name)
    if not os.path.exists(contents_path):

@@ -72,7 +75,7 @@
    contents_path = os.path.join(contents_path, "review_cases")
    reviewed_watchdog = TarballReviewedHandler(contents_path, t_app)

-    t_app.set_vars(contents_path)
+    t_app.set_vars(contents_path, review_cmd)

    observer = Observer()
    observer.schedule(reviewed_watchdog, path=contents_path, recursive=True)

@@ -94,10 +97,13 @@ def main(
        help=DSET_LOC_HELP,
    ),
    output_path: str = Option(None, "-o", "--out", help=OUT_HELP),
+    itksnap_executable: str = Option(
+        REVIEW_COMMAND, "-i", "--itksnap", help=REVIEW_CMD_HELP
+    ),
):
    if dataset_uid.endswith(".tar.gz"):
        # TODO: implement tarball_app
-        run_tarball_app(dataset_uid)
+        run_tarball_app(dataset_uid, itksnap_executable)
        return
    elif dataset_uid.isdigit():
        # Only import medperf dependencies if the user intends to use medperf

@@ -115,7 +121,7 @@ def main(
        "Please ensure the passed dataset UID/path is correct"
    )

-    run_dset_app(dset_path, stages_path, output_path)
+    run_dset_app(dset_path, stages_path, output_path, itksnap_executable)


if __name__ == "__main__":
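The new flag is threaded explicitly from main through run_dset_app / run_tarball_app down to the widgets, rather than read from a global. A self-contained sketch of the same typer pattern (flag names and default mirror the diff; the command body and help text are invented), which would be invoked as e.g. rano-monitor 123 --itksnap /opt/itksnap/bin/itksnap:

import typer

REVIEW_COMMAND = "itksnap"  # default, as in rano_monitor.constants

app = typer.Typer()


@app.command()
def main(
    dataset_uid: str = typer.Argument(...),
    itksnap_executable: str = typer.Option(
        REVIEW_COMMAND, "-i", "--itksnap", help="command that launches itksnap"
    ),
):
    # Passing the review command down explicitly keeps every entry point
    # testable and lets users point at a custom itksnap binary.
    typer.echo(f"reviewing {dataset_uid} with {itksnap_executable}")


if __name__ == "__main__":
    app()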
8 changes: 8 additions & 0 deletions scripts/monitor/rano_monitor/assets/shared.tcss
@@ -1,3 +1,11 @@
.warning {
    border: tall $warning;
}
+
+.tumor-status {
+    color: $success;
+}
+
+.brain-status {
+    color: $warning;
+}
8 changes: 0 additions & 8 deletions scripts/monitor/rano_monitor/assets/tarball-browser.tcss
@@ -16,14 +16,6 @@ Button {
    background: $accent;
}

-.tumor-status {
-    color: $success;
-}
-
-.brain-status {
-    color: $warning;
-}
-
#package-btn {
    min-width: 50%;
}
1 change: 1 addition & 0 deletions scripts/monitor/rano_monitor/constants.py
@@ -24,6 +24,7 @@
REVIEW_FILENAME = "review_cases.tar.gz"
REVIEWED_FILENAME = "reviewed_cases.tar.gz"
REVIEW_COMMAND = "itksnap"
+REVIEW_CMD_HELP = f"path or name of the command that launches itksnap. Defaults to '{REVIEW_COMMAND}'"
MANUAL_REVIEW_STAGE = 5
DONE_STAGE = 8
LISTITEM_MAX_LEN = 30
22 changes: 22 additions & 0 deletions scripts/monitor/rano_monitor/dataset_browser.py
@@ -5,6 +5,7 @@
import yaml
from rano_monitor.messages import InvalidSubjectsUpdated
from rano_monitor.messages import ReportUpdated
+from rano_monitor.messages import AnnotationsLoaded
from rano_monitor.utils import generate_full_report
from rano_monitor.widgets.subject_details import SubjectDetails
from rano_monitor.widgets.subject_list_view import SubjectListView

@@ -19,6 +20,7 @@
    Header,
    ListView,
    Static,
+    Input,
)


@@ -31,6 +33,7 @@ class DatasetBrowser(App):
        Binding("y", "respond('y')", "Yes", show=False),
        Binding("n", "respond('n')", "No", show=False),
    ]
+    AUTO_FOCUS = ""  # Don't focus automatically to search bar

    subjects = var([])
    report = reactive({})

@@ -46,13 +49,15 @@ def set_vars(
        invalid_path,
        invalid_watchdog,
        prompt_watchdog,
+        review_cmd,
    ):
        self.dset_data_path = dset_data_path
        self.stages_path = stages_path
        self.output_path = output_path
        self.invalid_path = invalid_path
        self.invalid_watchdog = invalid_watchdog
        self.prompt_watchdog = prompt_watchdog
+        self.review_cmd = review_cmd

    def update_invalid(self, invalid_subjects):
        self.invalid_subjects = invalid_subjects

@@ -62,6 +67,7 @@ def compose(self) -> ComposeResult:
        yield Header()
        with Container():
            with Container(id="list-container"):
+                yield Input(placeholder="Search", id="subjects-search")
                yield SubjectListView(id="subjects-list")
            with VerticalScroll():
                yield Summary(id="summary")

@@ -103,6 +109,11 @@ def on_mount(self):
        # Set invalid path for subject view
        subject_details = self.query_one("#details", SubjectDetails)
        subject_details.set_invalid_path(self.invalid_path)
+        subject_details.review_cmd = self.review_cmd
+
+        # Set dataset path to listview
+        listview = self.query_one("#subjects-list", ListView)
+        listview.dset_path = self.dset_data_path

        # Execute handlers
        self.prompt_watchdog.manual_execute()

@@ -143,6 +154,17 @@ def on_button_pressed(self, event: Button.Pressed) -> None:
        elif event.control == n_button:
            self.action_respond("n")

+    def on_input_changed(self, event: Input.Changed) -> None:
+        search_input = self.query_one("#subjects-search")
+        subjects_list = self.query_one("#subjects-list")
+        if event.control == search_input:
+            search_term = search_input.value
+            subjects_list.update_list(search_term)
+
+    def on_annotations_loaded(self, message: AnnotationsLoaded):
+        subjects_list = self.query_one("#subjects-list")
+        subjects_list.update_list()
+
    def update_prompt(self, prompt: str):
        self.prompt = prompt
        show_prompt = bool(len(prompt))
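The new search box relies on Textual's handler-naming convention: a method called on_input_changed automatically receives Input.Changed messages posted by any Input widget, firing on every keystroke; DatasetBrowser then checks event.control against the search field and forwards the term to SubjectListView.update_list. A stripped-down, runnable sketch of that wiring (ids and behavior invented; only the convention and the AUTO_FOCUS trick match the diff):

from textual.app import App, ComposeResult
from textual.widgets import Input, Static


class SearchDemo(App):
    AUTO_FOCUS = ""  # same trick as DatasetBrowser: don't grab focus on start

    def compose(self) -> ComposeResult:
        yield Input(placeholder="Search", id="search")
        yield Static(id="echo")

    def on_input_changed(self, event: Input.Changed) -> None:
        # Fires on every keystroke; event.value holds the current text
        self.query_one("#echo", Static).update(f"filtering by: {event.value}")


if __name__ == "__main__":
    SearchDemo().run()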
3 changes: 2 additions & 1 deletion scripts/monitor/rano_monitor/messages/__init__.py
@@ -1,4 +1,5 @@
from .invalid_subject_updated import InvalidSubjectsUpdated
from .report_updated import ReportUpdated
+from .annotations_loaded import AnnotationsLoaded

-__all__ = [InvalidSubjectsUpdated, ReportUpdated]
+__all__ = [InvalidSubjectsUpdated, ReportUpdated, AnnotationsLoaded]
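AnnotationsLoaded itself is defined in a module not shown in this diff; a hedged sketch of what such a Textual message typically looks like, and how on_annotations_loaded above would receive it:

# Hypothetical sketch of rano_monitor/messages/annotations_loaded.py; the
# real module is not part of this diff, so its contents are an assumption.
from textual.message import Message


class AnnotationsLoaded(Message):
    """Posted by a widget once annotation files have finished loading."""


# A widget would notify the app with self.post_message(AnnotationsLoaded()),
# and Textual routes it to DatasetBrowser.on_annotations_loaded.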