Result submission
VukW committed Oct 22, 2024
1 parent 7780bc0 commit 7d20e31
Showing 9 changed files with 241 additions and 39 deletions.
3 changes: 3 additions & 0 deletions cli/medperf/web_ui/app.py
@@ -13,6 +13,7 @@
from medperf.web_ui.datasets import router as datasets_router
from medperf.web_ui.benchmarks.routes import router as benchmarks_router
from medperf.web_ui.mlcubes.routes import router as mlcubes_router
+from medperf.web_ui.results import fetch_all_results
from medperf.web_ui.yaml_fetch.routes import router as yaml_fetch_router
from medperf.web_ui.api.routes import router as api_router

@@ -34,6 +35,8 @@

web_app.add_exception_handler(Exception, custom_exception_handler)

+fetch_all_results()


@web_app.get("/", include_in_schema=False)
def read_root():
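Note that fetch_all_results() runs at import time here, so the module-level results cache (see cli/medperf/web_ui/results/__init__.py below) is populated before the first request is served. A minimal sketch of the pattern, with toy names standing in for the real MedPerf entities:

```python
from fastapi import FastAPI

_cache: dict[str, str] = {}  # stands in for medperf.web_ui.results.results


def warm_cache() -> None:
    # the real fetch_all_results() scans local storage and the server;
    # here we just seed a placeholder entry
    _cache["b1m2d3"] = "result placeholder"


web_app = FastAPI()
warm_cache()  # executed once, when the module is imported


@web_app.get("/cache")
def read_cache() -> dict[str, str]:
    return _cache
```

An alternative would be deferring the scan to a startup event handler (`@web_app.on_event("startup")`), which avoids disk and network work at import time; the commit opts for the simpler import-time call.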
2 changes: 2 additions & 0 deletions cli/medperf/web_ui/datasets/__init__.py
@@ -4,6 +4,7 @@
from .routes_operational import router as operational_router
from .routes_associate import router as associate_router
from .routes_run import router as run_router
+from .routes_result_submit import router as result_submit_router
from .routes import router as ui_router

router = APIRouter()
@@ -13,4 +14,5 @@
router.include_router(operational_router)
router.include_router(associate_router)
router.include_router(run_router)
+router.include_router(result_submit_router)
router.include_router(ui_router)
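The new router is plain FastAPI composition: the result-submission endpoints become part of the aggregated datasets router, which the app mounts elsewhere. A hedged sketch of the pattern (the toy endpoint and mount point are illustrative, not the real routes):

```python
from fastapi import APIRouter, FastAPI

result_submit_router = APIRouter()


@result_submit_router.get("/result_submit_draft/ping")
async def ping():  # stand-in for the real draft endpoints
    return {"ok": True}


datasets_router = APIRouter()
datasets_router.include_router(result_submit_router)

app = FastAPI()
app.include_router(datasets_router)  # the real mount point lives in app.py
```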
2 changes: 2 additions & 0 deletions cli/medperf/web_ui/datasets/routes.py
@@ -45,6 +45,8 @@ def dataset_detail_ui(request: Request, dataset_id: int):
    # Fetch models associated with each benchmark
    benchmark_models = {}
    for assoc in benchmark_associations:
+        if assoc.approval_status != "APPROVED":
+            continue  # if association is not approved we cannot list its models
        models_uids = Benchmark.get_models_uids(benchmark_uid=assoc.benchmark)
        models = [Cube.get(cube_uid=model_uid) for model_uid in models_uids]
        benchmark_models[assoc.benchmark] = models
2 changes: 1 addition & 1 deletion cli/medperf/web_ui/datasets/routes_associate.py
@@ -86,7 +86,7 @@ def message_stream():
async def associate_get_results(draft_id: str):
    draft = _draft_associate[draft_id]
    return {
-        "compatibility_results": yaml.dump(draft.result.results),
+        "compatibility_results": yaml.dump(draft.get_result().results),
        "draft_id": draft_id
    }

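This mirrors the refactor in routes_run.py below: drafts stop holding a Result object and resolve it on demand, so every reader sees the current cache entry after submission rewrites the result (presumably the association draft gained a similar get_result() accessor). A simplified sketch of the lookup pattern, with abbreviated stand-in classes rather than the real entities:

```python
from typing import Optional


class Result:  # stand-in for medperf.entities.result.Result
    def __init__(self, uid, results=None):
        self.id = uid
        self.results = results or {}


# plays the role of the shared medperf.web_ui.results.results cache
results_cache: dict[str, Result] = {}


class Draft:
    def __init__(self, result_id: str):
        self.result_id = result_id

    def get_result(self) -> Optional[Result]:
        # no stored copy: always read the shared cache
        return results_cache.get(self.result_id)


results_cache["b1m2d3"] = Result("b1m2d3", {"accuracy": 0.9})
assert Draft("b1m2d3").get_result().results == {"accuracy": 0.9}
```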
43 changes: 43 additions & 0 deletions cli/medperf/web_ui/datasets/routes_result_submit.py
@@ -0,0 +1,43 @@
from pathlib import Path
from typing import Dict

import yaml
from fastapi import APIRouter
from starlette.responses import JSONResponse
from medperf.commands.result.submit import ResultSubmission
from medperf.entities.result import Result
from medperf.web_ui.results import results

_drafts_result_submit: dict[str, ResultSubmission] = {}

router = APIRouter()


@router.post("/result_submit_draft/generate/", response_class=JSONResponse)
async def get_submission(result_id: str):
    submission = ResultSubmission(result_id, approved=False)
    _drafts_result_submit[result_id] = submission
    submission.get_result()
    return {"results": yaml.dump(submission.result.results)}


@router.post("/result_submit_draft/submit/", response_class=JSONResponse)
async def submit_result(result_id: str):
    submission = _drafts_result_submit[result_id]
    try:
        submission.approved = True
        updated_result_dict = submission.result.upload()
        # real result id is modified after submission, thus updating it
        submission.to_permanent_path(updated_result_dict)
        result = Result(**updated_result_dict)
        results[result_id] = result
        submission.write(updated_result_dict)
        return {"result_id": result_id}
    except Exception as e:
        return JSONResponse({"error": f"Error moving to operational state: {str(e)}"}, 400)


@router.get("/result_submit_draft/decline", response_class=JSONResponse)
async def decline_result_submit(result_id: str):
    del _drafts_result_submit[result_id]
    return {"result_id": result_id, "op_declined": True}
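A sketch of how a client might drive this generate, then submit (or decline) flow. The host/port and any route prefix are assumptions, and the result id follows the b{benchmark_id}m{model_id}d{dataset_id} convention noted in routes_run.py; since result_id is a bare str parameter, FastAPI reads it from the query string:

```python
import requests

BASE = "http://localhost:8100"  # hypothetical address of the web UI
result_id = "b1m2d3"

# 1) create a draft; the response carries a YAML dump of the results
draft = requests.post(f"{BASE}/result_submit_draft/generate/",
                      params={"result_id": result_id})
print(draft.json()["results"])

# 2a) user approves: upload the result to the MedPerf server
submitted = requests.post(f"{BASE}/result_submit_draft/submit/",
                          params={"result_id": result_id})
print(submitted.json())

# 2b) or user declines: drop the draft instead
# requests.get(f"{BASE}/result_submit_draft/decline",
#              params={"result_id": result_id})
```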
33 changes: 19 additions & 14 deletions cli/medperf/web_ui/datasets/routes_run.py
@@ -3,7 +3,7 @@
import json
from enum import Enum
from threading import Thread
-from typing import Optional, Dict, List
+from typing import Optional, Dict, List, Union
from queue import Queue
from fastapi import APIRouter, HTTPException
from starlette.requests import Request
@@ -15,17 +15,18 @@
from medperf.entities.cube import Cube
from medperf.entities.dataset import Dataset
from medperf.entities.result import Result
-from medperf.exceptions import InvalidArgumentError
from medperf.web_ui.common import templates
+from medperf.web_ui.results import results

router = APIRouter()


class DraftStatus(Enum):
    pending = "pending"
-    done = "done"
-    failed = "failed"
    running = "running"
+    failed = "failed"
+    executed = "executed"
+    submitted = "submitted"
+    n_a = "n/a"

@@ -36,9 +37,11 @@ class RunDraft(BaseModel):
    model_id: int
    result_id: str  # formatted as b{benchmark_id}m{model_id}d{dataset_id}
    status: DraftStatus
-    result: Optional[Result]
    logs: Optional[List[str]]

+    def get_result(self) -> Optional[Result]:
+        return results.get(self.result_id)


class RunStatus(BaseModel):
    model_id: int
@@ -49,6 +52,7 @@ class RunStatus(BaseModel):
_drafts: Dict[str, RunDraft] = {}
_task_queue: Queue = Queue()

@router.get("/run_draft/ui/{result_id}", response_class=HTMLResponse)
async def run_draft_ui(result_id: str, request: Request):
    # Fetch relevant details like dataset_id, benchmark_id, and model_id from result_id
@@ -99,7 +103,7 @@ def run_with_logs():
        execution.validate()
        execution.prepare_models()
        results: List[Result] = execution.run_experiments()
-        return results[0], DraftStatus.done
+        return results[0], DraftStatus.executed
    except Exception as e:
        execution.ui.print_error(f"Execution failed: {str(e)}")
        return None, DraftStatus.failed
@@ -111,7 +115,6 @@ def run_with_logs():
        for log in execution.ui.get_message_generator():
            draft.logs.append(log)
        result, status = future.result()
-        draft.result = result
        draft.status = status
    finally:
        _task_queue.task_done()
@@ -135,7 +138,6 @@ async def run_benchmark(dataset_id: int, benchmark_id: int, model_id: int):
        model_id=model_id,
        result_id=result_id,
        status=DraftStatus.pending,
-        result=None,
        logs=[]
    )
    _drafts[result_id] = draft
@@ -144,7 +146,7 @@ async def run_benchmark(dataset_id: int, benchmark_id: int, model_id: int):
    return RunStatus(
        model_id=draft.model_id,
        status=draft.status,
-        result=draft.result
+        result=results.get(draft.result_id)
    )


@@ -156,9 +158,13 @@ async def get_run_status(dataset_id: int, benchmark_id: int, model_id: int):
    if not draft:
        result = _load_result_if_exists(result_id)
        if result:
+            if str(result.id).isdigit():
+                status = DraftStatus.submitted
+            else:
+                status = DraftStatus.executed
            return RunStatus(
                model_id=model_id,
-                status=DraftStatus.done,
+                status=status,
                result=result
            )
        else:
@@ -199,9 +205,8 @@ async def log_stream():

def _load_result_if_exists(result_id: str) -> Optional[Result]:
    # Implement logic to load a result from disk if it exists
-    try:
-        result = Result.get(result_id)
+    if result_id in results:
+        result = results[result_id]
        return result
-    except InvalidArgumentError:
-        # result does not exists
+    else:
        return None
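The executed/submitted split keys off the shape of the result id: a hedged reading is that the server assigns numeric ids on upload, while a local-only result keeps its b{benchmark_id}m{model_id}d{dataset_id} string id, so str(result.id).isdigit() separates the two. A minimal sketch of that check, under the stated assumption:

```python
from enum import Enum


class Status(Enum):
    executed = "executed"    # ran locally, not yet uploaded
    submitted = "submitted"  # uploaded, server assigned a numeric id


def status_of(result_uid) -> Status:
    # assumption: server ids are integers, local ids look like "b1m2d3"
    return Status.submitted if str(result_uid).isdigit() else Status.executed


assert status_of(42) is Status.submitted
assert status_of("b1m2d3") is Status.executed
```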
30 changes: 30 additions & 0 deletions cli/medperf/web_ui/results/__init__.py
@@ -0,0 +1,30 @@
from pathlib import Path
from typing import Dict

from medperf import config
from medperf.account_management import get_medperf_user_data
from medperf.entities.result import Result

results: Dict[str, Result] = {}


def fetch_all_results() -> None:
    for result_dir in Path(Result.get_storage_path()).iterdir():
        result_id = result_dir.name
        if result_dir.is_dir():
            try:
                if result_id.isdigit():
                    result_id = int(result_id)
                # Retrieve the result using the result ID
                result = Result.get(result_id)
                result_str_bmd_name = result.local_id
                results[result_str_bmd_name] = result
            except Exception as e:
                config.ui.print_error(f"Error retrieving result for {result_id}: {e}")

    for result in Result.all(
        filters={"owner": get_medperf_user_data()["id"]}
    ):
        result_str_bmd_name = result.local_id
        results[result_str_bmd_name] = result
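Both passes (the local-storage scan and the server query) key the cache by Result.local_id, so a result found in both places collapses to a single entry. Assuming local_id follows the b{benchmark_id}m{model_id}d{dataset_id} format referenced in routes_run.py, the keying reduces to:

```python
def local_result_key(benchmark_id: int, model_id: int, dataset_id: int) -> str:
    # hypothetical helper mirroring the documented result_id format;
    # the commit itself relies on Result.local_id for the same value
    return f"b{benchmark_id}m{model_id}d{dataset_id}"


results: dict[str, dict] = {}
results[local_result_key(1, 2, 3)] = {"accuracy": 0.9}  # toy payload
assert "b1m2d3" in results
```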
