Skip to content

Commit

Permalink
Merge branch 'main' into kill-containers-on-sigint
Browse files Browse the repository at this point in the history
# Conflicts:
#	cli/medperf/entities/cube.py
  • Loading branch information
VukW committed Feb 19, 2024
2 parents 0bc271e + 02208ec commit 64a3bcf
Show file tree
Hide file tree
Showing 18 changed files with 223 additions and 53 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -129,3 +129,4 @@ If you wish to contribute to our documentation, here are the steps for successfu
mkdocs serve
```
- **Access local documentation:** Once mkdocs is done setting up the server, you should be able to access your local documentation website by heading to `http://localhost:8000` on your browser.

91 changes: 82 additions & 9 deletions cli/cli_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -90,35 +90,52 @@ checkFailed "auth status command failed"

echo "\n"

##########################################################
echo "====================================="
echo "Existing cubes":
echo "====================================="
medperf mlcube ls
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Submit cubes"
echo "====================================="

medperf mlcube submit --name prep -m $PREP_MLCUBE -p $PREP_PARAMS
medperf mlcube submit --name mock-prep -m $PREP_MLCUBE -p $PREP_PARAMS
checkFailed "Prep submission failed"
PREP_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
PREP_UID=$(medperf mlcube ls | grep mock-prep | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name model1 -m $MODEL_MLCUBE -p $MODEL1_PARAMS -a $MODEL_ADD
checkFailed "Model1 submission failed"
MODEL1_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
MODEL1_UID=$(medperf mlcube ls | grep model1 | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name model2 -m $MODEL_MLCUBE -p $MODEL2_PARAMS -a $MODEL_ADD
checkFailed "Model2 submission failed"
MODEL2_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
MODEL2_UID=$(medperf mlcube ls | grep model2 | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

# MLCube with singularity section
medperf mlcube submit --name model3 -m $MODEL_WITH_SINGULARITY -p $MODEL3_PARAMS -a $MODEL_ADD -i $MODEL_SING_IMAGE
checkFailed "Model3 submission failed"
MODEL3_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
MODEL3_UID=$(medperf mlcube ls | grep model3 | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name model-fail -m $FAILING_MODEL_MLCUBE -p $MODEL4_PARAMS -a $MODEL_ADD
checkFailed "failing model submission failed"
FAILING_MODEL_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
FAILING_MODEL_UID=$(medperf mlcube ls | grep model-fail | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name model-log-none -m $MODEL_LOG_MLCUBE -p $MODEL_LOG_NONE_PARAMS
checkFailed "Model with logging None submission failed"
MODEL_LOG_NONE_UID=$(medperf mlcube ls | grep model-log-none | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name model-log-debug -m $MODEL_LOG_MLCUBE -p $MODEL_LOG_DEBUG_PARAMS
checkFailed "Model with logging debug submission failed"
MODEL_LOG_DEBUG_UID=$(medperf mlcube ls | grep model-log-debug | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)

medperf mlcube submit --name metrics -m $METRIC_MLCUBE -p $METRIC_PARAMS
medperf mlcube submit --name mock-metrics -m $METRIC_MLCUBE -p $METRIC_PARAMS
checkFailed "Metrics submission failed"
METRICS_UID=$(medperf mlcube ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
METRICS_UID=$(medperf mlcube ls | grep mock-metrics | head -n 1 | tr -s ' ' | cut -d ' ' -f 2)
##########################################################

echo "\n"
Expand All @@ -139,7 +156,7 @@ echo "Submit benchmark"
echo "====================================="
medperf benchmark submit --name bmk --description bmk --demo-url $DEMO_URL --data-preparation-mlcube $PREP_UID --reference-model-mlcube $MODEL1_UID --evaluator-mlcube $METRICS_UID
checkFailed "Benchmark submission failed"
BMK_UID=$(medperf benchmark ls | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)
BMK_UID=$(medperf benchmark ls | grep bmk | tail -n 1 | tr -s ' ' | cut -d ' ' -f 2)

# Approve benchmark
ADMIN_TOKEN=$(jq -r --arg ADMIN $ADMIN '.[$ADMIN]' $MOCK_TOKENS_FILE)
Expand Down Expand Up @@ -258,6 +275,36 @@ checkFailed "Failing model association failed"

echo "\n"

##########################################################
echo "======================================================"
echo "Running logging-model-without-env association"
echo "======================================================"
medperf mlcube associate -m $MODEL_LOG_NONE_UID -b $BMK_UID -y
checkFailed "Logging-model-without-env association association failed"
##########################################################

echo "\n"

##########################################################
echo "======================================================"
echo "Running logging-model-with-debug association"
echo "======================================================"
medperf --container-loglevel debug mlcube associate -m $MODEL_LOG_DEBUG_UID -b $BMK_UID -y
checkFailed "Logging-model-with-debug association failed"
##########################################################

echo "\n"

##########################################################
echo "======================================================"
echo "Submitted associations:"
echo "======================================================"
medperf association ls
checkFailed "Listing associations failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Activate modelowner profile"
Expand All @@ -278,6 +325,10 @@ medperf association approve -b $BMK_UID -m $MODEL3_UID
checkFailed "Model3 association approval failed"
medperf association approve -b $BMK_UID -m $FAILING_MODEL_UID
checkFailed "failing model association approval failed"
medperf association approve -b $BMK_UID -m $MODEL_LOG_NONE_UID
checkFailed "Logging-model-without-env association approval failed"
medperf association approve -b $BMK_UID -m $MODEL_LOG_DEBUG_UID
checkFailed "Logging-model-with-debug association approval failed"
##########################################################

echo "\n"
Expand Down Expand Up @@ -356,6 +407,28 @@ checkFailed "Failing mlcube run with ignore errors failed"

echo "\n"

##########################################################
echo "====================================="
echo "Running logging model without logging env"
echo "====================================="
rm -rf $MEDPERF_STORAGE/predictions/$SERVER_STORAGE_ID/model-log-none/$DSET_A_GENUID
medperf run -b $BMK_UID -d $DSET_A_UID -m $MODEL_LOG_NONE_UID -y
checkFailed "run logging model without logging env failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Running logging model with debug logging env"
echo "====================================="
rm -rf $MEDPERF_STORAGE/predictions/$SERVER_STORAGE_ID/model-log-debug/$DSET_A_GENUID
medperf --container-loglevel debug run -b $BMK_UID -d $DSET_A_UID -m $MODEL_LOG_DEBUG_UID -y
checkFailed "run logging model with debug logging env failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Logout users"
Expand Down
4 changes: 3 additions & 1 deletion cli/medperf/account_management/account_management.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
def read_user_account():
config_p = read_config()
if config.credentials_keyword not in config_p.active_profile:
raise MedperfException("You are not logged in")
return

account_info = config_p.active_profile[config.credentials_keyword]
return account_info
Expand Down Expand Up @@ -35,6 +35,8 @@ def set_credentials(

def read_credentials():
account_info = read_user_account()
if account_info is None:
raise MedperfException("You are not logged in")
email = account_info["email"]
access_token, refresh_token = TokenStore().read_tokens(email)

Expand Down
18 changes: 12 additions & 6 deletions cli/medperf/account_management/token_storage/filesystem.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
import base64
import logging
from medperf.utils import remove_path
from medperf import config

Expand Down Expand Up @@ -28,16 +29,21 @@ def __get_paths(self, account_id):
def set_tokens(self, account_id, access_token, refresh_token):
access_token_file, refresh_token_file = self.__get_paths(account_id)

fd = os.open(access_token_file, os.O_CREAT | os.O_WRONLY, 0o600)
os.write(fd, access_token.encode("utf-8"))
os.close(fd)
with open(access_token_file, "w") as f:
pass
os.chmod(access_token_file, 0o600)
with open(access_token_file, "a") as f:
f.write(access_token)

fd = os.open(refresh_token_file, os.O_CREAT | os.O_WRONLY, 0o600)
os.write(fd, refresh_token.encode("utf-8"))
os.close(fd)
with open(refresh_token_file, "w") as f:
pass
os.chmod(refresh_token_file, 0o600)
with open(refresh_token_file, "a") as f:
f.write(refresh_token)

def read_tokens(self, account_id):
access_token_file, refresh_token_file = self.__get_paths(account_id)
logging.debug("Reading tokens to disk.")
with open(access_token_file) as f:
access_token = f.read()
with open(refresh_token_file) as f:
Expand Down
13 changes: 12 additions & 1 deletion cli/medperf/commands/auth/login.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,24 @@
import medperf.config as config
from medperf.exceptions import InvalidArgumentError
from medperf.account_management import read_user_account
from medperf.exceptions import InvalidArgumentError, MedperfException
from email_validator import validate_email, EmailNotValidError


def raise_if_logged_in():
    """Guard against double login.

    Reads the currently stored user account; when one exists, aborts with a
    MedperfException telling the user to log out first. Returns silently when
    no account is configured.
    """
    account_info = read_user_account()
    if account_info is None:
        return
    raise MedperfException(
        f"You are already logged in as {account_info['email']}."
        " Logout before logging in again"
    )


class Login:
@staticmethod
def run(email: str = None):
"""Authenticate to be able to access the MedPerf server. A verification link will
be provided and should be open in a browser to complete the login process."""
raise_if_logged_in()
if not email:
email = config.ui.prompt("Please type your email: ")
try:
Expand Down
9 changes: 3 additions & 6 deletions cli/medperf/commands/auth/status.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,14 @@
import medperf.config as config
from medperf.account_management import read_user_account
from medperf.exceptions import MedperfException


class Status:
@staticmethod
def run():
"""Shows the currently logged in user."""
try:
account_info = read_user_account()
except MedperfException as e:
# TODO: create a specific exception about unauthenticated client
config.ui.print(str(e))
account_info = read_user_account()
if account_info is None:
config.ui.print("You are not logged in")
return

email = account_info["email"]
Expand Down
6 changes: 4 additions & 2 deletions cli/medperf/commands/compatibility_test/run.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import logging
from typing import List

from medperf.commands.execution import Execution
from medperf.entities.dataset import Dataset
Expand All @@ -26,7 +25,7 @@ def run(
data_uid: str = None,
no_cache: bool = False,
offline: bool = False,
) -> List:
) -> (str, dict):
"""Execute a test workflow. Components of a complete workflow should be passed.
When only the benchmark is provided, it implies the following workflow will be used:
- the benchmark's demo dataset is used as the raw data
Expand Down Expand Up @@ -92,6 +91,9 @@ def run(
if results is None:
results = test_exec.execute()
test_exec.write(results)
else:
logging.info('Existing results are found. Test would not be re-executed.')
logging.debug(f'Existing results: {results}')
return test_exec.data_uid, results

def __init__(
Expand Down
18 changes: 17 additions & 1 deletion cli/medperf/comms/auth/auth0.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import time
import logging
import threading
from medperf.comms.auth.interface import Auth
from medperf.comms.auth.token_verifier import verify_token
from medperf.exceptions import CommunicationError
Expand All @@ -17,6 +19,7 @@ def __init__(self):
self.domain = config.auth_domain
self.client_id = config.auth_client_id
self.audience = config.auth_audience
self._lock = threading.Lock()

def login(self, email):
"""Retrieves and stores an access token/refresh token pair from the auth0
Expand Down Expand Up @@ -149,6 +152,18 @@ def logout(self):

@property
def access_token(self):
    """Thread-safe access token retrieval.

    Delegates to the private `_access_token` property while holding
    `self._lock`, so only one thread at a time reads (and, presumably,
    refreshes) the token — NOTE(review): refresh behavior lives in
    `_access_token`, which is not fully visible here; confirm.
    """
    # TODO: lock the credentials file to have this process-safe
    # If someone is preparing their dataset, and configured
    # the preparation to send reports async, there might be a
    # risk if they tried to run other commands separately (e.g., dataset ls)
    # (i.e., two processes may try to refresh an expired access token, which
    # may trigger refresh token reuse since we use refresh token rotation.)
    with self._lock:
        return self._access_token

@property
def _access_token(self):
"""Reads and returns an access token of the currently logged
in user to be used for authorizing requests to the MedPerf server.
Refresh the token if necessary.
Expand Down Expand Up @@ -187,6 +202,7 @@ def __refresh_access_token(self, refresh_token):
"refresh_token": refresh_token,
}
token_issued_at = time.time()
logging.debug("Refreshing access token.")
res = requests.post(url=url, headers=headers, data=body)

if res.status_code != 200:
Expand All @@ -204,8 +220,8 @@ def __refresh_access_token(self, refresh_token):
access_token,
refresh_token,
id_token_payload,
token_expires_in,
token_issued_at,
token_expires_in,
)

return access_token
Expand Down
4 changes: 3 additions & 1 deletion cli/medperf/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,11 +161,12 @@
statistics_timeout = None
infer_timeout = None
evaluate_timeout = None
container_loglevel = None
mlcube_configure_timeout = None
mlcube_inspect_timeout = None

# Other
loglevel = "info"
loglevel = "debug"
cleanup = True
ui = "CLI"

Expand All @@ -184,6 +185,7 @@
"platform",
"gpus",
"cleanup",
"container_loglevel"
]
configurable_parameters = inline_parameters + [
"server",
Expand Down
12 changes: 11 additions & 1 deletion cli/medperf/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,11 @@ def wrapper(
"--evaluate_timeout",
help="Maximum time in seconds before interrupting evaluate task",
),
container_loglevel: str = typer.Option(
config.container_loglevel,
"--container-loglevel",
help="Logging level for containers to be run [debug | info | warning | error]",
),
platform: str = typer.Option(
config.platform,
"--platform",
Expand Down Expand Up @@ -188,6 +193,11 @@ def wrapper(
"--evaluate_timeout",
help="Maximum time in seconds before interrupting evaluate task",
),
container_loglevel: str = typer.Option(
config.container_loglevel,
"--container-loglevel",
help="Logging level for containers to be run [debug | info | warning | error]",
),
platform: str = typer.Option(
config.platform,
"--platform",
Expand All @@ -209,7 +219,7 @@ def wrapper(
cleanup: bool = typer.Option(
config.cleanup,
"--cleanup/--no-cleanup",
help="Wether to clean up temporary medperf storage after execution",
help="Whether to clean up temporary medperf storage after execution",
),
**kwargs,
):
Expand Down
Loading

0 comments on commit 64a3bcf

Please sign in to comment.