diff --git a/cli/medperf/config.py b/cli/medperf/config.py
index d48f9467e..86802ff33 100644
--- a/cli/medperf/config.py
+++ b/cli/medperf/config.py
@@ -161,6 +161,7 @@
 statistics_timeout = None
 infer_timeout = None
 evaluate_timeout = None
+container_loglevel = None
 
 mlcube_configure_timeout = None
 mlcube_inspect_timeout = None
@@ -184,6 +185,7 @@
     "platform",
     "gpus",
     "cleanup",
+    "container_loglevel"
 ]
 configurable_parameters = inline_parameters + [
     "server",
diff --git a/cli/medperf/decorators.py b/cli/medperf/decorators.py
index e94ec1b90..297ca685a 100644
--- a/cli/medperf/decorators.py
+++ b/cli/medperf/decorators.py
@@ -117,6 +117,11 @@ def wrapper(
             "--evaluate_timeout",
             help="Maximum time in seconds before interrupting evaluate task",
         ),
+        container_loglevel: str = typer.Option(
+            config.container_loglevel,
+            "--container-loglevel",
+            help="Logging level for containers to be run [debug | info | warning | error]",
+        ),
         platform: str = typer.Option(
             config.platform,
             "--platform",
@@ -188,6 +193,11 @@ def wrapper(
             "--evaluate_timeout",
             help="Maximum time in seconds before interrupting evaluate task",
         ),
+        container_loglevel: str = typer.Option(
+            config.container_loglevel,
+            "--container-loglevel",
+            help="Logging level for containers to be run [debug | info | warning | error]",
+        ),
         platform: str = typer.Option(
             config.platform,
             "--platform",
@@ -205,7 +215,7 @@ def wrapper(
         cleanup: bool = typer.Option(
             config.cleanup,
             "--cleanup/--no-cleanup",
-            help="Wether to clean up temporary medperf storage after execution",
+            help="Whether to clean up temporary medperf storage after execution",
         ),
         **kwargs,
     ):
diff --git a/cli/medperf/entities/cube.py b/cli/medperf/entities/cube.py
index 2b5f941ef..c4b4f6b2a 100644
--- a/cli/medperf/entities/cube.py
+++ b/cli/medperf/entities/cube.py
@@ -290,7 +290,6 @@ def run(
             kwargs (dict): additional arguments that are passed directly to the mlcube command
         """
         kwargs.update(string_params)
-        # TODO: re-use `loglevel=critical` or figure out a clean MLCube logging
         cmd = "mlcube run"
         cmd += f" --mlcube={self.cube_path} --task={task} --platform={config.platform} --network=none"
         if config.gpus is not None:
@@ -308,6 +307,10 @@ def run(
         cmd += f' -Pdocker.cpu_args="{cpu_args}"'
         cmd += f' -Pdocker.gpu_args="{gpu_args}"'
 
+        container_loglevel = config.container_loglevel
+        if container_loglevel:
+            cmd += f' -Pdocker.env_args="-e LOGLEVEL={container_loglevel}"'
+
        logging.info(f"Running MLCube command: {cmd}")
         proc = pexpect.spawn(cmd, timeout=timeout)
         proc_out = combine_proc_sp_text(proc)
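
Note (not part of the patch): a minimal sketch of how the new container_loglevel setting is expected to surface in the generated MLCube command, mirroring the string-building in Cube.run above. The standalone build_run_cmd helper below is hypothetical and only for illustration.

# Hypothetical helper for illustration; mirrors the command construction in Cube.run.
def build_run_cmd(cube_path, task, platform, container_loglevel=None):
    cmd = "mlcube run"
    cmd += f" --mlcube={cube_path} --task={task} --platform={platform} --network=none"
    if container_loglevel:
        # Forward the requested level to the container as a LOGLEVEL environment variable
        cmd += f' -Pdocker.env_args="-e LOGLEVEL={container_loglevel}"'
    return cmd

# Example: build_run_cmd("/path/to/mlcube", "infer", "docker", "debug") yields (one line):
# mlcube run --mlcube=/path/to/mlcube --task=infer --platform=docker --network=none -Pdocker.env_args="-e LOGLEVEL=debug"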