Skip to content

Commit

Permalink
Merge branch 'main' into writeChoice
Browse files — browse the repository at this point in the history
  • Loading branch information
gabrielraeder authored Dec 18, 2023
2 parents f0347d7 + 5fb056c commit b9d98b5
Show file tree
Hide file tree
Showing 86 changed files with 7,027 additions and 636 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/unittests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@ jobs:
pip install flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
if [ -f cli/requirements.txt ]; then pip install -e cli; fi
if [ -f server/requirements.txt ]; then pip install -r server/requirements.txt; fi
pip install -r server/requirements.txt
pip install -r server/test-requirements.txt
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
Expand All @@ -35,6 +36,7 @@ jobs:
# Ignore E231, as it is raising warnings with auto-generated code.
flake8 . --count --max-complexity=10 --max-line-length=127 --ignore F821,W503,E231 --statistics --exclude=examples/,"*/migrations/*",cli/medperf/templates/
- name: Test with pytest
working-directory: ./cli
run: |
pytest
- name: Set server environment vars
Expand All @@ -45,4 +47,4 @@ jobs:
run: python manage.py migrate
- name: Run server unit tests
working-directory: ./server
run: python manage.py test
run: python manage.py test --parallel
30 changes: 20 additions & 10 deletions cli/cli_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -244,16 +244,6 @@ checkFailed "Failing model association failed"

echo "\n"

##########################################################
echo "====================================="
echo "Changing priority of model2"
echo "====================================="
medperf association set_priority -b $BMK_UID -m $MODEL2_UID -p 77
checkFailed "Priority set of model2 failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Activate modelowner profile"
Expand All @@ -278,6 +268,26 @@ checkFailed "failing model association approval failed"

echo "\n"

##########################################################
echo "====================================="
echo "Activate benchmarkowner profile"
echo "====================================="
medperf profile activate testbenchmark
checkFailed "testbenchmark profile activation failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Changing priority of model2"
echo "====================================="
medperf association set_priority -b $BMK_UID -m $MODEL2_UID -p 77
checkFailed "Priority set of model2 failed"
##########################################################

echo "\n"

##########################################################
echo "====================================="
echo "Activate dataowner profile"
Expand Down
1 change: 1 addition & 0 deletions cli/medperf/account_management/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .account_management import * # noqa
Original file line number Diff line number Diff line change
@@ -1,47 +1,9 @@
import keyring
from .token_storage import TokenStore
from medperf.utils import read_config, write_config
from medperf import config
from medperf.exceptions import MedperfException


class TokenStore:
    """Stores auth tokens in the OS keyring.

    Access and refresh tokens live under two separate keyring service
    names, both keyed by the account id.
    """

    def __init__(self):
        pass

    @staticmethod
    def _service_names():
        # (access, refresh) keyring service names, in that fixed order.
        return (
            config.keyring_access_token_service_name,
            config.keyring_refresh_token_service_name,
        )

    def set_tokens(self, account_id, access_token, refresh_token):
        """Persist both tokens for the given account."""
        for service, token in zip(
            self._service_names(), (access_token, refresh_token)
        ):
            keyring.set_password(service, account_id, token)

    def read_tokens(self, account_id):
        """Return ``(access_token, refresh_token)`` for the given account."""
        access_service, refresh_service = self._service_names()
        access_token = keyring.get_password(access_service, account_id)
        refresh_token = keyring.get_password(refresh_service, account_id)
        return access_token, refresh_token

    def delete_tokens(self, account_id):
        """Remove both stored tokens for the given account."""
        for service in self._service_names():
            keyring.delete_password(service, account_id)


def read_user_account():
config_p = read_config()
if config.credentials_keyword not in config_p.active_profile:
Expand Down
1 change: 1 addition & 0 deletions cli/medperf/account_management/token_storage/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .filesystem import FilesystemTokenStore as TokenStore # noqa
50 changes: 50 additions & 0 deletions cli/medperf/account_management/token_storage/filesystem.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import os
import base64
from medperf.utils import base_storage_path, remove_path
from medperf import config


class FilesystemTokenStore:
    """Stores auth tokens as files created with owner-only (0o600) permissions.

    Each account gets its own folder — named with the base64-encoded
    account id — containing one file per token.
    """

    def __init__(self):
        self.creds_folder = base_storage_path(config.creds_folder)
        os.makedirs(self.creds_folder, exist_ok=True)

    def __get_paths(self, account_id):
        """Return ``(access_token_file, refresh_token_file)`` for an account.

        Creates the account folder if needed. Base64 encoding is used just
        to avoid facing a filesystem that doesn't support special
        characters used in emails.
        """
        account_id_encoded = base64.b64encode(account_id.encode("utf-8")).decode(
            "utf-8"
        )
        account_folder = os.path.join(self.creds_folder, account_id_encoded)
        os.makedirs(account_folder, exist_ok=True)

        access_token_file = os.path.join(account_folder, config.access_token_storage_id)
        refresh_token_file = os.path.join(
            account_folder, config.refresh_token_storage_id
        )

        return access_token_file, refresh_token_file

    @staticmethod
    def __write_private(path, content):
        """Write ``content`` to ``path``, creating it with mode 0o600.

        O_TRUNC is required: without it, overwriting an existing token with
        a shorter value would leave stale trailing bytes in the file,
        yielding a corrupt token on the next read.
        """
        fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600)
        try:
            os.write(fd, content.encode("utf-8"))
        finally:
            # Always release the descriptor, even if the write fails.
            os.close(fd)

    def set_tokens(self, account_id, access_token, refresh_token):
        """Persist both tokens for the given account."""
        access_token_file, refresh_token_file = self.__get_paths(account_id)
        self.__write_private(access_token_file, access_token)
        self.__write_private(refresh_token_file, refresh_token)

    def read_tokens(self, account_id):
        """Return ``(access_token, refresh_token)`` for the given account."""
        access_token_file, refresh_token_file = self.__get_paths(account_id)
        with open(access_token_file, encoding="utf-8") as f:
            access_token = f.read()
        with open(refresh_token_file, encoding="utf-8") as f:
            refresh_token = f.read()
        return access_token, refresh_token

    def delete_tokens(self, account_id):
        """Remove both token files for the given account."""
        access_token_file, refresh_token_file = self.__get_paths(account_id)
        remove_path(access_token_file)
        remove_path(refresh_token_file)
43 changes: 43 additions & 0 deletions cli/medperf/account_management/token_storage/keyring_.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
"""Keyring token storage is NOT used. We used it before this commit but
users who connect to remote machines through passwordless SSH faced some issues."""

import keyring
from medperf import config


class KeyringTokenStore:
    """Token storage backed by the OS keyring.

    Access and refresh tokens are kept under two separate keyring
    service ids, both keyed by the account id.
    """

    def __init__(self):
        pass

    @staticmethod
    def _service_ids():
        # (access, refresh) keyring service ids, in that fixed order.
        return config.access_token_storage_id, config.refresh_token_storage_id

    def set_tokens(self, account_id, access_token, refresh_token):
        """Persist both tokens for the given account."""
        for service_id, token in zip(
            self._service_ids(), (access_token, refresh_token)
        ):
            keyring.set_password(service_id, account_id, token)

    def read_tokens(self, account_id):
        """Return ``(access_token, refresh_token)`` for the given account."""
        access_id, refresh_id = self._service_ids()
        access_token = keyring.get_password(access_id, account_id)
        refresh_token = keyring.get_password(refresh_id, account_id)
        return access_token, refresh_token

    def delete_tokens(self, account_id):
        """Remove both stored tokens for the given account."""
        for service_id in self._service_ids():
            keyring.delete_password(service_id, account_id)
7 changes: 3 additions & 4 deletions cli/medperf/commands/association/priority.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,11 @@
from medperf import config
from medperf.exceptions import InvalidArgumentError
from medperf.entities.benchmark import Benchmark


class AssociationPriority:
@staticmethod
def run(
benchmark_uid: int, mlcube_uid: int, priority: int,
):
def run(benchmark_uid: int, mlcube_uid: int, priority: int):
"""Sets priority for an association between a benchmark and an mlcube
Args:
Expand All @@ -15,7 +14,7 @@ def run(
priority (int): priority value
"""
associated_cubes = config.comms.get_benchmark_models(benchmark_uid)
associated_cubes = Benchmark.get_models_uids(benchmark_uid)
if mlcube_uid not in associated_cubes:
raise InvalidArgumentError(
"The given mlcube doesn't exist or is not associated with the benchmark"
Expand Down
2 changes: 1 addition & 1 deletion cli/medperf/commands/benchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def submit(
),
docs_url: str = typer.Option("", "--docs-url", "-u", help="URL to documentation"),
demo_url: str = typer.Option(
"",
...,
"--demo-url",
help="""Identifier to download the demonstration dataset tarball file.\n
See `medperf mlcube submit --help` for more information""",
Expand Down
4 changes: 2 additions & 2 deletions cli/medperf/commands/compatibility_test/utils.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from medperf.utils import storage_path, get_folder_hash
from medperf.utils import storage_path, get_folders_hash
from medperf.exceptions import InvalidArgumentError, InvalidEntityError

from medperf.comms.entity_resources import resources
Expand Down Expand Up @@ -34,7 +34,7 @@ def download_demo_data(dset_url, dset_hash):


def prepare_local_cube(path):
temp_uid = get_folder_hash(path)
temp_uid = get_folders_hash([path])
cubes_storage = storage_path(config.cubes_storage)
dst = os.path.join(cubes_storage, temp_uid)
os.symlink(path, dst)
Expand Down
19 changes: 3 additions & 16 deletions cli/medperf/commands/dataset/create.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from medperf.utils import (
remove_path,
generate_tmp_path,
get_folder_hash,
get_folders_hash,
storage_path,
)
from medperf.exceptions import InvalidArgumentError
Expand Down Expand Up @@ -119,33 +119,22 @@ def run_cube_tasks(self):
"output_path": out_datapath,
"output_labels_path": out_labelspath,
}
prepare_str_params = {
"Ptasks.prepare.parameters.input.data_path.opts": "ro",
"Ptasks.prepare.parameters.input.labels_path.opts": "ro",
}

sanity_params = {
"data_path": out_datapath,
"labels_path": out_labelspath,
}
sanity_str_params = {
"Ptasks.sanity_check.parameters.input.data_path.opts": "ro"
}

statistics_params = {
"data_path": out_datapath,
"output_path": self.out_statistics_path,
"labels_path": out_labelspath,
}
statistics_str_params = {
"Ptasks.statistics.parameters.input.data_path.opts": "ro"
}

# Run the tasks
self.ui.text = "Running preparation step..."
self.cube.run(
task="prepare",
string_params=prepare_str_params,
timeout=prepare_timeout,
**prepare_params,
)
Expand All @@ -154,7 +143,6 @@ def run_cube_tasks(self):
self.ui.text = "Running sanity check..."
self.cube.run(
task="sanity_check",
string_params=sanity_str_params,
timeout=sanity_check_timeout,
**sanity_params,
)
Expand All @@ -163,7 +151,6 @@ def run_cube_tasks(self):
self.ui.text = "Generating statistics..."
self.cube.run(
task="statistics",
string_params=statistics_str_params,
timeout=statistics_timeout,
**statistics_params,
)
Expand All @@ -176,8 +163,8 @@ def get_statistics(self):

def generate_uids(self):
"""Auto-generates dataset UIDs for both input and output paths"""
self.in_uid = get_folder_hash(self.data_path)
self.generated_uid = get_folder_hash(self.out_datapath)
self.in_uid = get_folders_hash([self.data_path, self.labels_path])
self.generated_uid = get_folders_hash([self.out_datapath, self.out_labelspath])

def to_permanent_path(self) -> str:
"""Renames the temporary data folder to permanent one using the hash of
Expand Down
5 changes: 0 additions & 5 deletions cli/medperf/commands/execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ def run_inference(self):
timeout=infer_timeout,
data_path=data_path,
output_path=preds_path,
string_params={"Ptasks.infer.parameters.input.data_path.opts": "ro"},
)
self.ui.print("> Model execution complete")

Expand All @@ -113,10 +112,6 @@ def run_evaluation(self):
predictions=preds_path,
labels=labels_path,
output_path=results_path,
string_params={
"Ptasks.evaluate.parameters.input.predictions.opts": "ro",
"Ptasks.evaluate.parameters.input.labels.opts": "ro",
},
)
except ExecutionError as e:
logging.error(f"Metrics MLCube Execution failed: {e}")
Expand Down
20 changes: 15 additions & 5 deletions cli/medperf/commands/result/create.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ def run(
execution.prepare()
execution.validate()
execution.prepare_models()
execution.validate_models()
if not no_cache:
execution.load_cached_results()
with execution.ui.interactive():
Expand Down Expand Up @@ -101,8 +100,19 @@ def validate(self):
def prepare_models(self):
if self.models_input_file:
self.models_uids = self.__get_models_from_file()
elif self.models_uids is None:
self.models_uids = self.benchmark.models

if self.models_uids == [self.benchmark.reference_model_mlcube]:
# avoid the need of sending a request to the server for
# finding the benchmark's associated models
return

benchmark_models = Benchmark.get_models_uids(self.benchmark_uid)
benchmark_models.append(self.benchmark.reference_model_mlcube)

if self.models_uids is None:
self.models_uids = benchmark_models
else:
self.__validate_models(benchmark_models)

def __get_models_from_file(self):
if not os.path.exists(self.models_input_file):
Expand All @@ -117,9 +127,9 @@ def __get_models_from_file(self):
msg += "The file should contain a list of comma-separated integers"
raise InvalidArgumentError(msg)

def validate_models(self):
def __validate_models(self, benchmark_models):
models_set = set(self.models_uids)
benchmark_models_set = set(self.benchmark.models)
benchmark_models_set = set(benchmark_models)
non_assoc_cubes = models_set.difference(benchmark_models_set)
if non_assoc_cubes:
if len(non_assoc_cubes) > 1:
Expand Down
Loading

0 comments on commit b9d98b5

Please sign in to comment.