From 7d20e31c1e11fcf2a473f485ca1801c835562300 Mon Sep 17 00:00:00 2001
From: Viacheslav Kukushkin
Date: Tue, 22 Oct 2024 17:51:35 +0300
Subject: [PATCH] Result submission

---
 cli/medperf/web_ui/app.py                     |   3 +
 cli/medperf/web_ui/datasets/__init__.py       |   2 +
 cli/medperf/web_ui/datasets/routes.py         |   2 +
 .../web_ui/datasets/routes_associate.py       |   2 +-
 .../web_ui/datasets/routes_result_submit.py   |  43 +++++
 cli/medperf/web_ui/datasets/routes_run.py     |  33 ++--
 cli/medperf/web_ui/results/__init__.py        |  30 ++++
 .../web_ui/templates/dataset_detail.html      | 155 +++++++++++++++---
 cli/medperf/web_ui/templates/dataset_run.html |  10 +-
 9 files changed, 241 insertions(+), 39 deletions(-)
 create mode 100644 cli/medperf/web_ui/datasets/routes_result_submit.py
 create mode 100644 cli/medperf/web_ui/results/__init__.py

diff --git a/cli/medperf/web_ui/app.py b/cli/medperf/web_ui/app.py
index 42b79ef75..fb67e2fc3 100644
--- a/cli/medperf/web_ui/app.py
+++ b/cli/medperf/web_ui/app.py
@@ -13,6 +13,7 @@
 from medperf.web_ui.datasets import router as datasets_router
 from medperf.web_ui.benchmarks.routes import router as benchmarks_router
 from medperf.web_ui.mlcubes.routes import router as mlcubes_router
+from medperf.web_ui.results import fetch_all_results
 from medperf.web_ui.yaml_fetch.routes import router as yaml_fetch_router
 from medperf.web_ui.api.routes import router as api_router
@@ -34,6 +35,8 @@
 web_app.add_exception_handler(Exception, custom_exception_handler)
 
+fetch_all_results()
+
 
 @web_app.get("/", include_in_schema=False)
 def read_root():
diff --git a/cli/medperf/web_ui/datasets/__init__.py b/cli/medperf/web_ui/datasets/__init__.py
index 3044d517d..836c65140 100644
--- a/cli/medperf/web_ui/datasets/__init__.py
+++ b/cli/medperf/web_ui/datasets/__init__.py
@@ -4,6 +4,7 @@
 from .routes_operational import router as operational_router
 from .routes_associate import router as associate_router
 from .routes_run import router as run_router
+from .routes_result_submit import router as result_submit_router
 from .routes import router as ui_router
 
 router = APIRouter()
@@ -13,4 +14,5 @@
 router.include_router(operational_router)
 router.include_router(associate_router)
 router.include_router(run_router)
+router.include_router(result_submit_router)
 router.include_router(ui_router)
diff --git a/cli/medperf/web_ui/datasets/routes.py b/cli/medperf/web_ui/datasets/routes.py
index 5329ff2cd..a6295c9a0 100644
--- a/cli/medperf/web_ui/datasets/routes.py
+++ b/cli/medperf/web_ui/datasets/routes.py
@@ -45,6 +45,8 @@ def dataset_detail_ui(request: Request, dataset_id: int):
     # Fetch models associated with each benchmark
     benchmark_models = {}
     for assoc in benchmark_associations:
+        if assoc.approval_status != "APPROVED":
+            continue  # if the association is not approved, we cannot list its models
         models_uids = Benchmark.get_models_uids(benchmark_uid=assoc.benchmark)
         models = [Cube.get(cube_uid=model_uid) for model_uid in models_uids]
         benchmark_models[assoc.benchmark] = models
diff --git a/cli/medperf/web_ui/datasets/routes_associate.py b/cli/medperf/web_ui/datasets/routes_associate.py
index ce49a1bc7..d41c35a9f 100644
--- a/cli/medperf/web_ui/datasets/routes_associate.py
+++ b/cli/medperf/web_ui/datasets/routes_associate.py
@@ -86,7 +86,7 @@ def message_stream():
 async def associate_get_results(draft_id: str):
     draft = _draft_associate[draft_id]
     return {
-        "compatibility_results": yaml.dump(draft.result.results),
+        "compatibility_results": yaml.dump(draft.get_result().results),
         "draft_id": draft_id
     }
diff --git a/cli/medperf/web_ui/datasets/routes_result_submit.py b/cli/medperf/web_ui/datasets/routes_result_submit.py
new file mode 100644
index 000000000..2688c8122
--- /dev/null
+++ b/cli/medperf/web_ui/datasets/routes_result_submit.py
@@ -0,0 +1,43 @@
+from pathlib import Path
+from typing import Dict
+
+import yaml
+from fastapi import APIRouter
+from starlette.responses import JSONResponse
+from medperf.commands.result.submit import ResultSubmission
+from medperf.entities.result import Result
+from medperf.web_ui.results import results
+
+_drafts_result_submit: dict[str, ResultSubmission] = {}
+
+router = APIRouter()
+
+
+@router.post("/result_submit_draft/generate/", response_class=JSONResponse)
+async def get_submission(result_id: str):
+    submission = ResultSubmission(result_id, approved=False)
+    _drafts_result_submit[result_id] = submission
+    submission.get_result()
+    return {"results": yaml.dump(submission.result.results)}
+
+
+@router.post("/result_submit_draft/submit/", response_class=JSONResponse)
+async def submit_result(result_id: str):
+    submission = _drafts_result_submit[result_id]
+    try:
+        submission.approved = True
+        updated_result_dict = submission.result.upload()
+        # the real result id is assigned after submission, so update it
+        submission.to_permanent_path(updated_result_dict)
+        result = Result(**updated_result_dict)
+        results[result_id] = result
+        submission.write(updated_result_dict)
+        return {"result_id": result_id}
+    except Exception as e:
+        return JSONResponse({"error": f"Error submitting result: {str(e)}"}, 400)
+
+
+@router.get("/result_submit_draft/decline", response_class=JSONResponse)
+async def decline_result_submit(result_id: str):
+    del _drafts_result_submit[result_id]
+    return {"result_id": result_id, "op_declined": True}
diff --git a/cli/medperf/web_ui/datasets/routes_run.py b/cli/medperf/web_ui/datasets/routes_run.py
index 0b66b0640..b4da3aa61 100644
--- a/cli/medperf/web_ui/datasets/routes_run.py
+++ b/cli/medperf/web_ui/datasets/routes_run.py
@@ -3,7 +3,7 @@
 import json
 from enum import Enum
 from threading import Thread
-from typing import Optional, Dict, List
+from typing import Optional, Dict, List, Union
 from queue import Queue
 from fastapi import APIRouter, HTTPException
 from starlette.requests import Request
@@ -15,17 +15,18 @@
 from medperf.entities.cube import Cube
 from medperf.entities.dataset import Dataset
 from medperf.entities.result import Result
-from medperf.exceptions import InvalidArgumentError
 from medperf.web_ui.common import templates
+from medperf.web_ui.results import results
 
 router = APIRouter()
 
 
 class DraftStatus(Enum):
     pending = "pending"
-    done = "done"
-    failed = "failed"
     running = "running"
+    failed = "failed"
+    executed = "executed"
+    submitted = "submitted"
     n_a = "n/a"
@@ -36,9 +37,11 @@ class RunDraft(BaseModel):
     model_id: int
     result_id: str  # formatted as b{benchmark_id}m{model_id}d{dataset_id}
     status: DraftStatus
-    result: Optional[Result]
     logs: Optional[List[str]]
 
+    def get_result(self) -> Optional[Result]:
+        return results.get(self.result_id)
+
 
 class RunStatus(BaseModel):
     model_id: int
@@ -49,6 +52,7 @@
 _drafts: Dict[str, RunDraft] = {}
 _task_queue: Queue = Queue()
 
+
 @router.get("/run_draft/ui/{result_id}", response_class=HTMLResponse)
 async def run_draft_ui(result_id: str, request: Request):
     # Fetch relevant details like dataset_id, benchmark_id, and model_id from result_id
@@ -99,7 +103,7 @@ def run_with_logs():
             execution.validate()
             execution.prepare_models()
             results: List[Result] = execution.run_experiments()
-            return results[0], DraftStatus.done
+            return results[0], DraftStatus.executed
         except Exception as e:
             execution.ui.print_error(f"Execution failed: {str(e)}")
             return None, DraftStatus.failed
@@ -111,7 +115,6 @@ def run_with_logs():
         for log in execution.ui.get_message_generator():
             draft.logs.append(log)
         result, status = future.result()
-        draft.result = result
         draft.status = status
     finally:
         _task_queue.task_done()
@@ -135,7 +138,6 @@ async def run_benchmark(dataset_id: int, benchmark_id: int, model_id: int):
         model_id=model_id,
         result_id=result_id,
         status=DraftStatus.pending,
-        result=None,
        logs=[]
     )
     _drafts[result_id] = draft
@@ -144,7 +146,7 @@
     return RunStatus(
         model_id=draft.model_id,
         status=draft.status,
-        result=draft.result
+        result=results.get(draft.result_id)
     )
@@ -156,9 +158,13 @@ async def get_run_status(dataset_id: int, benchmark_id: int, model_id: int):
     if not draft:
         result = _load_result_if_exists(result_id)
         if result:
+            if str(result.id).isdigit():
+                status = DraftStatus.submitted
+            else:
+                status = DraftStatus.executed
             return RunStatus(
                 model_id=model_id,
-                status=DraftStatus.done,
+                status=status,
                 result=result
             )
         else:
@@ -199,9 +205,8 @@ async def log_stream():
 
 def _load_result_if_exists(result_id: str) -> Optional[Result]:
     # Implement logic to load a result from disk if it exists
-    try:
-        result = Result.get(result_id)
+    if result_id in results:
+        result = results[result_id]
         return result
-    except InvalidArgumentError:
-        # result does not exists
+    else:
         return None
diff --git a/cli/medperf/web_ui/results/__init__.py b/cli/medperf/web_ui/results/__init__.py
new file mode 100644
index 000000000..f006269e7
--- /dev/null
+++ b/cli/medperf/web_ui/results/__init__.py
@@ -0,0 +1,30 @@
+from pathlib import Path
+from typing import Dict
+
+from medperf import config
+from medperf.account_management import get_medperf_user_data
+from medperf.entities.result import Result
+
+results: Dict[str, Result] = {}
+
+
+def fetch_all_results() -> None:
+    for result_dir in Path(Result.get_storage_path()).iterdir():
+        result_id = result_dir.name
+        if result_dir.is_dir():
+            try:
+                if result_id.isdigit():
+                    result_id = int(result_id)
+                # Retrieve the result using the result ID
+                result = Result.get(result_id)
+                result_str_bmd_name = result.local_id
+                results[result_str_bmd_name] = result
+            except Exception as e:
+                config.ui.print_error(f"Error retrieving result for {result_id}: {e}")
+
+    for result in Result.all(
+        filters={"owner": get_medperf_user_data()["id"]}
+    ):
+        result_str_bmd_name = result.local_id
+        results[result_str_bmd_name] = result
diff --git a/cli/medperf/web_ui/templates/dataset_detail.html b/cli/medperf/web_ui/templates/dataset_detail.html
index cb05f1f69..e89dce255 100644
--- a/cli/medperf/web_ui/templates/dataset_detail.html
+++ b/cli/medperf/web_ui/templates/dataset_detail.html
@@ -160,16 +160,23 @@
 [hunk body lost in extraction: the HTML tags were stripped. The surviving text
 shows the per-model status cell being reworked -- a "❌ Failed" state with
 "View Logs" links, an executed state that keeps "View Logs", and a new
 "✅ Submitted" state, closing at the loop end below.]
 {% endfor %}
@@ -205,6 +212,28 @@
 [hunk body lost in extraction: it adds roughly 22 lines, apparently the script
 wiring for the new result-submission flow; only the closing template tag
 survived.]
 {% endblock %}
diff --git a/cli/medperf/web_ui/templates/dataset_run.html b/cli/medperf/web_ui/templates/dataset_run.html
index 281294c67..200070053 100644
--- a/cli/medperf/web_ui/templates/dataset_run.html
+++ b/cli/medperf/web_ui/templates/dataset_run.html
@@ -148,13 +148,17 @@
         const statusData = await response.json();
         completeBtn.disabled = false;
-        if (statusData.status === 'done') {
+        if (statusData.status === 'executed') {
             displayYamlResult(statusData.result.results);
-            completeBtnText.innerHTML = '✅ Done';
+            completeBtnText.innerHTML = '✅ Executed';
+        }
+        if (statusData.status === 'submitted') {
+            displayYamlResult(statusData.result.results);
+            completeBtnText.innerHTML = '✅ Submitted';
         } else if (statusData.status === 'failed') {
             completeBtnText.innerHTML = '❌ Failed';
         } else {
-            completeBtnText.innerHTML = '✅ Done';
+            completeBtnText.innerHTML = '✅ Executed';
         }
     } catch (error) {
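
Reviewer note (not part of the patch): a minimal sketch of how the new
result-submission endpoints could be exercised by hand once the web UI is
running. The base URL, any router mount prefix, and the result id value are
assumptions about a local deployment, not something this patch pins down.

# Illustration only -- not part of the commit. BASE and result_id are
# placeholders; the id format b{benchmark}m{model}d{dataset} comes from
# routes_run.py. If the datasets router is mounted under a prefix,
# prepend it to the paths below.
import requests

BASE = "http://127.0.0.1:8100"  # assumed local web UI address
result_id = "b1m2d3"            # assumed local result id

# Stage a submission draft and inspect the payload that would be uploaded
generated = requests.post(
    f"{BASE}/result_submit_draft/generate/", params={"result_id": result_id}
)
print(generated.json()["results"])

# Approve and upload the staged result
submitted = requests.post(
    f"{BASE}/result_submit_draft/submit/", params={"result_id": result_id}
)
print(submitted.json())

# Or discard the draft instead of submitting
requests.get(f"{BASE}/result_submit_draft/decline", params={"result_id": result_id})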