diff --git a/.devcontainer/onCreateCommand.sh b/.devcontainer/onCreateCommand.sh
index f923f6f36..b2fb94354 100755
--- a/.devcontainer/onCreateCommand.sh
+++ b/.devcontainer/onCreateCommand.sh
@@ -6,4 +6,4 @@ pip install wheel
 pip install openvino-dev==2023.0.1 # [OPTIONAL] to generate optimized models for inference
 pip install mlcube_docker # [OPTIONAL] to deploy GaNDLF models as MLCube-compliant Docker containers
 pip install medmnist==2.1.0
-pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
diff --git a/.devcontainer/postCreateCommand.sh b/.devcontainer/postCreateCommand.sh
index f6914c4a2..8428eb5d7 100755
--- a/.devcontainer/postCreateCommand.sh
+++ b/.devcontainer/postCreateCommand.sh
@@ -6,7 +6,7 @@
 # if runnning on a GPU machine, install the GPU version of pytorch
 if command -v nvidia-smi &> /dev/null
 then
-    pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118
+    pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121
 fi

 pip install -e .
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 2e73c7100..85e63a33e 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -23,3 +23,4 @@
 - [ ] [Usage documentation](https://github.com/mlcommons/GaNDLF/blob/master/docs) has been updated, if appropriate.
 - [ ] Tests added or modified to [cover the changes](https://app.codecov.io/gh/mlcommons/GaNDLF); if coverage is reduced, please give explanation.
 - [ ] If customized dependency installation is required (i.e., a separate `pip install` step is needed for PR to be functional), please ensure it is reflected in all the files that control the CI, namely: [python-test.yml](https://github.com/mlcommons/GaNDLF/blob/master/.github/workflows/python-test.yml), and all docker files [[1](https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU),[2](https://github.com/mlcommons/GaNDLF/blob/devcontainer_build_fix/Dockerfile-CUDA11.6),[3](https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-ROCm)].
+- [ ] The `logging` library is being used and no `print` statements are left.
diff --git a/.github/workflows/mlcube-test.yml b/.github/workflows/mlcube-test.yml
index ed38bf647..23f91b178 100644
--- a/.github/workflows/mlcube-test.yml
+++ b/.github/workflows/mlcube-test.yml
@@ -70,7 +70,7 @@ jobs:
         python -m pip install --upgrade pip==24.0
         python -m pip install wheel
         python -m pip install openvino-dev==2023.0.1 mlcube_docker
-        pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+        pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
         pip install -e .
      - name: Run mlcube deploy tests
        working-directory: ./testing
diff --git a/.github/workflows/openfl-test.yml b/.github/workflows/openfl-test.yml
index cf78b3b06..30e83d5fb 100644
--- a/.github/workflows/openfl-test.yml
+++ b/.github/workflows/openfl-test.yml
@@ -70,7 +70,7 @@ jobs:
         sudo apt-get install libvips libvips-tools -y
         python -m pip install --upgrade pip==24.0
         python -m pip install wheel
-        pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+        pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
         pip install -e .
      - name: Run generic unit tests to download data and construct CSVs
        if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
index c99262a9f..78c10901e 100644
--- a/.github/workflows/python-test.yml
+++ b/.github/workflows/python-test.yml
@@ -71,7 +71,7 @@ jobs:
         python -m pip install --upgrade pip==24.0
         python -m pip install wheel
         python -m pip install openvino-dev==2023.0.1 mlcube_docker
-        pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+        pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
         pip install -e .
      - name: Run generic unit tests
        if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
diff --git a/Dockerfile-CPU b/Dockerfile-CPU
index 0337e012a..d9c87de25 100644
--- a/Dockerfile-CPU
+++ b/Dockerfile-CPU
@@ -9,7 +9,7 @@ RUN add-apt-repository ppa:deadsnakes/ppa
 RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.9-dev libffi-dev libgl1
 RUN python3.9 -m pip install --upgrade pip==24.0
 # EXPLICITLY install cpu versions of torch/torchvision (not all versions have +cpu modes on PyPI...)
-RUN python3.9 -m pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+RUN python3.9 -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
 RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

 # Do some dependency installation separately here to make layer caching more efficient
diff --git a/Dockerfile-CUDA11.8 b/Dockerfile-CUDA11.8
index 68eb1506b..6b06fcda5 100644
--- a/Dockerfile-CUDA11.8
+++ b/Dockerfile-CUDA11.8
@@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y software-properties-common
 RUN add-apt-repository ppa:deadsnakes/ppa
 RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.9-dev libffi-dev libgl1
 RUN python3.9 -m pip install --upgrade pip==24.0
-RUN python3.9 -m pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118
+RUN python3.9 -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118
 RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

 # Do some dependency installation separately here to make layer caching more efficient
diff --git a/Dockerfile-CUDA12.1 b/Dockerfile-CUDA12.1
index 28a3287e7..4da63a335 100644
--- a/Dockerfile-CUDA12.1
+++ b/Dockerfile-CUDA12.1
@@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y software-properties-common
 RUN add-apt-repository ppa:deadsnakes/ppa
 RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.9-dev libffi-dev libgl1
 RUN python3.9 -m pip install --upgrade pip==24.0
-RUN python3.9 -m pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121
+RUN python3.9 -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121
 RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

 # Do some dependency installation separately here to make layer caching more efficient
diff --git a/Dockerfile-ROCm b/Dockerfile-ROCm
index d45382289..5d0fb7450 100644
--- a/Dockerfile-ROCm
+++ b/Dockerfile-ROCm
@@ -1,4 +1,4 @@
-FROM rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_2.0.1
+FROM rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch
 LABEL github="https://github.com/mlcommons/GaNDLF"
 LABEL docs="https://mlcommons.github.io/GaNDLF/"
 LABEL version=1.0
@@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y software-properties-common
 RUN add-apt-repository ppa:deadsnakes/ppa
 RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.9-dev libffi-dev libgl1
 RUN python3.9 -m pip install --upgrade pip==24.0
-RUN python3.9 -m pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/rocm5.7
+RUN python3.9 -m pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/rocm6.0
 RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

 RUN apt-get update && apt-get install -y libgl1
diff --git a/GANDLF/compute/forward_pass.py b/GANDLF/compute/forward_pass.py
index be2bff034..69efa15a9 100644
--- a/GANDLF/compute/forward_pass.py
+++ b/GANDLF/compute/forward_pass.py
@@ -337,11 +337,16 @@ def validate_network(
                     if ext in [".jpg", ".jpeg", ".png"]:
                         pred_mask = pred_mask.astype(np.uint8)

-                    ## special case for 2D
-                    if image.shape[-1] > 1:
-                        result_image = sitk.GetImageFromArray(pred_mask)
-                    else:
-                        result_image = sitk.GetImageFromArray(pred_mask.squeeze(0))
+                    pred_mask = (
+                        pred_mask.squeeze(0)
+                        if pred_mask.shape[0] == 1
+                        else (
+                            pred_mask.squeeze(-1)
+                            if pred_mask.shape[-1] == 1
+                            else pred_mask
+                        )
+                    )
+                    result_image = sitk.GetImageFromArray(pred_mask)
                     result_image.CopyInformation(img_for_metadata)

                     # this handles cases that need resampling/resizing
diff --git a/GANDLF/entrypoints/anonymizer.py b/GANDLF/entrypoints/anonymizer.py
index 32a805846..694c6606b 100644
--- a/GANDLF/entrypoints/anonymizer.py
+++ b/GANDLF/entrypoints/anonymizer.py
@@ -62,9 +62,16 @@ def _anonymize_images(
     type=click.Path(),
     help="Output directory or file which will contain the image(s) after anonymization.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(input_dir, config, modality, output_file):
+def new_way(input_dir, config, modality, output_file, log_file):
     """Anonymize images/scans in the data directory."""
+    logger_setup(log_file)
     _anonymize_images(input_dir, output_file, config, modality)
diff --git a/GANDLF/entrypoints/cli_tool.py b/GANDLF/entrypoints/cli_tool.py
index 1f20ff4d6..a0fb2d96f 100644
--- a/GANDLF/entrypoints/cli_tool.py
+++ b/GANDLF/entrypoints/cli_tool.py
@@ -12,7 +12,7 @@ def gandlf(ctx):
     """GANDLF command-line tool."""
     ctx.ensure_object(dict)
-    logger_setup()
+    # logger_setup()


 # registers subcommands: `gandlf anonymizer`, `gandlf run`, etc.
diff --git a/GANDLF/entrypoints/collect_stats.py b/GANDLF/entrypoints/collect_stats.py
index 3869dce6f..0eede6d30 100644
--- a/GANDLF/entrypoints/collect_stats.py
+++ b/GANDLF/entrypoints/collect_stats.py
@@ -191,9 +191,16 @@ def _collect_stats(model_dir: str, output_dir: str):
     required=True,
     help="Output directory to save stats and plot",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(model_dir: str, output_dir: str):
+def new_way(model_dir: str, output_dir: str, log_file: str):
     """Collect statistics from different testing/validation combinations from output directory."""
+    logger_setup(log_file)
     _collect_stats(model_dir=model_dir, output_dir=output_dir)
diff --git a/GANDLF/entrypoints/config_generator.py b/GANDLF/entrypoints/config_generator.py
index 861d80077..a9b90c6bf 100644
--- a/GANDLF/entrypoints/config_generator.py
+++ b/GANDLF/entrypoints/config_generator.py
@@ -34,9 +34,17 @@ def _generate_config(config: str, strategy: str, output: str):
     type=click.Path(file_okay=False, dir_okay=True),
     help="Path to output directory.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(config, strategy, output):
+def new_way(config, strategy, output, log_file):
     """Generate multiple GaNDLF configurations based on a single baseline GaNDLF for experimentation."""
+
+    logger_setup(log_file)
     _generate_config(config, strategy, output)
diff --git a/GANDLF/entrypoints/construct_csv.py b/GANDLF/entrypoints/construct_csv.py
index b632d28fb..25f750ab4 100644
--- a/GANDLF/entrypoints/construct_csv.py
+++ b/GANDLF/entrypoints/construct_csv.py
@@ -90,6 +90,12 @@ def _construct_csv(
     help="If True, paths in the output data CSV will always be relative to the location"
     " of the output data CSV itself.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
 def new_way(
     input_dir: str,
@@ -97,8 +103,11 @@ def new_way(
     label_id: Optional[str],
     output_file: str,
     relativize_paths: bool,
+    log_file: str,
 ):
     """Generate training/inference CSV from data directory."""
+
+    logger_setup(log_file)
     _construct_csv(
         input_dir=input_dir,
         channels_id=channels_id,
diff --git a/GANDLF/entrypoints/debug_info.py b/GANDLF/entrypoints/debug_info.py
index 4540d6620..191da9b82 100644
--- a/GANDLF/entrypoints/debug_info.py
+++ b/GANDLF/entrypoints/debug_info.py
@@ -39,9 +39,17 @@ def _debug_info(verbose: bool):
     is_flag=True,
     help="If passed, prints all packages installed as well",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(verbose: bool):
+def new_way(verbose: bool, log_file):
     """Displays detailed info about system environment: library versions, settings, etc."""
+
+    logger_setup(log_file)
     _debug_info(verbose=verbose)
diff --git a/GANDLF/entrypoints/deploy.py b/GANDLF/entrypoints/deploy.py
index f24a25cf1..7ecc9705a 100644
--- a/GANDLF/entrypoints/deploy.py
+++ b/GANDLF/entrypoints/deploy.py
@@ -126,6 +126,12 @@ def _deploy(
     help="An optional custom python entrypoint script to use instead of the default specified in mlcube.yaml."
     " (Only for inference and metrics)",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
 def new_way(
     model: Optional[str],
@@ -136,8 +142,10 @@ def new_way(
     output_dir: str,
     requires_gpu: bool,
     entrypoint: Optional[str],
+    log_file: str,
 ):
     """Generate frozen/deployable versions of trained GaNDLF models."""
+    logger_setup(log_file)
     _deploy(
         model=model,
         config=config,
diff --git a/GANDLF/entrypoints/generate_metrics.py b/GANDLF/entrypoints/generate_metrics.py
index 5d589a9f0..3e1eac3cf 100644
--- a/GANDLF/entrypoints/generate_metrics.py
+++ b/GANDLF/entrypoints/generate_metrics.py
@@ -53,6 +53,12 @@ def _generate_metrics(
     default=-1,
     help="The value to use for missing predictions as penalty; if `-1`, this does not get added. "
     "This is only used in the case where the targets and predictions are passed independently.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @click.option("--raw-input", hidden=True)
 @append_copyright_to_help
 def new_way(
@@ -61,8 +67,11 @@ def new_way(
     output_file: Optional[str],
     missing_prediction: int,
     raw_input: str,
+    log_file: str,
 ):
     """Metrics calculator."""
+
+    logger_setup(log_file)
     _generate_metrics(
         input_data=input_data,
         config=config,
diff --git a/GANDLF/entrypoints/optimize_model.py b/GANDLF/entrypoints/optimize_model.py
index 021f4d65f..4db5d346f 100644
--- a/GANDLF/entrypoints/optimize_model.py
+++ b/GANDLF/entrypoints/optimize_model.py
@@ -46,11 +46,22 @@ def _optimize_model(
     required=False,
     type=click.Path(exists=True, file_okay=True, dir_okay=False),
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
 def new_way(
-    model: str, config: Optional[str] = None, output_path: Optional[str] = None
+    model: str,
+    log_file: str,
+    config: Optional[str] = None,
+    output_path: Optional[str] = None,
 ):
     """Generate optimized versions of trained GaNDLF models."""
+
+    logger_setup(log_file)
     _optimize_model(model=model, config=config, output_path=output_path)
diff --git a/GANDLF/entrypoints/patch_miner.py b/GANDLF/entrypoints/patch_miner.py
index 58b041129..34cb9b300 100644
--- a/GANDLF/entrypoints/patch_miner.py
+++ b/GANDLF/entrypoints/patch_miner.py
@@ -42,9 +42,17 @@ def _mine_patches(input_path: str, output_dir: str, config: Optional[str]):
     help="config (in YAML) for running the patch miner. Needs 'scale' and 'patch_size' to be defined, "
     "otherwise defaults to 16 and (256, 256), respectively.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(input_csv: str, output_dir: str, config: Optional[str]):
+def new_way(input_csv: str, output_dir: str, log_file: str, config: Optional[str]):
     """Construct patches from whole slide image(s)."""
+
+    logger_setup(log_file)
     _mine_patches(input_path=input_csv, output_dir=output_dir, config=config)
diff --git a/GANDLF/entrypoints/preprocess.py b/GANDLF/entrypoints/preprocess.py
index 265512c11..8bedab43e 100644
--- a/GANDLF/entrypoints/preprocess.py
+++ b/GANDLF/entrypoints/preprocess.py
@@ -82,6 +82,12 @@ def _preprocess(
     is_flag=True,
     help="If passed, applies zero cropping during output creation.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
 def new_way(
     config: str,
@@ -90,8 +96,11 @@ def new_way(
     label_pad: str,
     apply_augs: bool,
     crop_zero: bool,
+    log_file: str,
 ):
     """Generate training/inference data which are preprocessed to reduce resource footprint during computation."""
+
+    logger_setup(log_file)
     _preprocess(
         config=config,
         input_data=input_data,
diff --git a/GANDLF/entrypoints/recover_config.py b/GANDLF/entrypoints/recover_config.py
index 6168b2ad1..1d5634e56 100644
--- a/GANDLF/entrypoints/recover_config.py
+++ b/GANDLF/entrypoints/recover_config.py
@@ -47,10 +47,18 @@ def _recover_config(model_dir: Optional[str], mlcube: bool, output_file: str):
     type=click.Path(file_okay=True, dir_okay=False),
     help="Path to an output file where the config will be written.",
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(model_dir, mlcube, output_file):
+def new_way(model_dir, mlcube, output_file, log_file):
     """Recovers a config file from a GaNDLF model. If used from within a deployed GaNDLF MLCube,
     attempts to extract the config from the embedded model."""
+
+    logger_setup(log_file)
     _recover_config(model_dir=model_dir, mlcube=mlcube, output_file=output_file)
diff --git a/GANDLF/entrypoints/run.py b/GANDLF/entrypoints/run.py
index f5aa73b1b..842c47b5d 100644
--- a/GANDLF/entrypoints/run.py
+++ b/GANDLF/entrypoints/run.py
@@ -141,6 +141,12 @@ def _run(
     help="Location to save the output of the inference session. Not used for training.",
 )
 @click.option("--raw-input", hidden=True)
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
 def new_way(
     config: str,
@@ -152,8 +158,11 @@ def new_way(
     resume: bool,
     output_path: str,
     raw_input: str,
+    log_file: str,
 ):
     """Semantic segmentation, regression, and classification for medical images using Deep Learning."""
+
+    logger_setup(log_file)
     _run(
         config=config,
         input_data=input_data,
diff --git a/GANDLF/entrypoints/split_csv.py b/GANDLF/entrypoints/split_csv.py
index fa1cddf1d..6a16aef8e 100644
--- a/GANDLF/entrypoints/split_csv.py
+++ b/GANDLF/entrypoints/split_csv.py
@@ -49,9 +49,17 @@ def _split_csv(input_csv: str, output_dir: str, config_path: Optional[str]):
     help="The GaNDLF config (in YAML) with the `nested_training` key specified to the folds needed.",
     type=click.Path(exists=True, file_okay=True, dir_okay=False),
 )
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way(input_csv: str, output_dir: str, config: Optional[str]):
+def new_way(input_csv: str, output_dir: str, log_file: str, config: Optional[str]):
     """Split the data into training, validation, and testing sets and save them as csvs in the output directory."""
+
+    logger_setup(log_file)
     _split_csv(input_csv, output_dir, config)
diff --git a/GANDLF/entrypoints/verify_install.py b/GANDLF/entrypoints/verify_install.py
index 0a94b1e08..1ac2d4d66 100644
--- a/GANDLF/entrypoints/verify_install.py
+++ b/GANDLF/entrypoints/verify_install.py
@@ -23,9 +23,16 @@ def _verify_install():


 @click.command()
+@click.option(
+    "--log-file",
+    type=click.Path(),
+    default=None,
+    help="Output file which will contain the logs.",
+)
 @append_copyright_to_help
-def new_way():
+def new_way(log_file):
     """Verify GaNDLF installation."""
+    logger_setup(log_file)
     _verify_install()
diff --git a/GANDLF/metrics/classification.py b/GANDLF/metrics/classification.py
index e72510dda..ff01534a7 100644
--- a/GANDLF/metrics/classification.py
+++ b/GANDLF/metrics/classification.py
@@ -101,14 +101,17 @@ def __convert_tensor_to_int(input_tensor: torch.Tensor) -> torch.Tensor:
         # ),
     }
     for metric_name, calculator in calculators.items():
+        metric_prediction = prediction
+        metric_target = target
         if "auroc" in metric_name:
-            output_metrics[metric_name] = get_output_from_calculator(
-                predictions_prob, target_wrap, calculator
-            )
-        else:
-            output_metrics[metric_name] = get_output_from_calculator(
-                prediction, target, calculator
-            )
+            metric_prediction = predictions_prob
+            metric_target = target_wrap
+            if task == "binary":
+                metric_prediction = predictions_prob[:, 1]
+
+        output_metrics[metric_name] = get_output_from_calculator(
+            metric_prediction, metric_target, calculator
+        )

     # metrics that do not need the "average" parameter
     calculators = {
diff --git a/GANDLF/utils/gandlf_logging.py b/GANDLF/utils/gandlf_logging.py
index 9c376aaf0..f08a3620f 100644
--- a/GANDLF/utils/gandlf_logging.py
+++ b/GANDLF/utils/gandlf_logging.py
@@ -40,9 +40,9 @@ def logger_setup(log_file=None, config_path="logging_config.yaml") -> None:
     log_tmp_file = log_file
     if log_file is None:  # create tmp file
         log_tmp_file = _create_tmp_log_file()
-        logging.info(f"The logs are saved in {log_tmp_file}")
     _create_log_file(log_tmp_file)
     _configure_logging_with_logfile(log_tmp_file, config_path)
+    logging.info(f"The logs are saved in {log_tmp_file}")


 class InfoOnlyFilter(logging.Filter):
diff --git a/docs/extending.md b/docs/extending.md
index 69ba06268..929efa515 100644
--- a/docs/extending.md
+++ b/docs/extending.md
@@ -137,6 +137,16 @@ bash
 ### Use loggers instead of print
 We use the native `logging` [library](https://docs.python.org/3/library/logging.html) for logs management. This gets automatically configured when GaNDLF gets launched. So, if you are extending the code, please use loggers instead of prints.

+Here is an example of how the root logger can be used:
+```
+def my_new_cool_function(df: pd.DataFrame):
+    logging.debug("Message for debug file only")
+    logging.info("Hi GaNDLF user, I greet you in the CLI output")
+    logging.error(f"A detailed message about any error if needed. Exception: {str(e)}, params: {params}, df shape: {df.shape}")
+    # do NOT use normal print statements
+    # print("Hi GaNDLF user!")
+```
+
 Here is an example how logger can be used:

 ```
@@ -148,6 +158,7 @@ def my_new_cool_function(df: pd.DataFrame):
     # print("Hi GaNDLF user!") # don't use prints please.
 ```

+
 ### What and where is logged

 GaNDLF logs are splitted into multiple parts:
@@ -155,9 +166,9 @@ GaNDLF logs are splitted into multiple parts:
 - debug file: all messages are shown
 - stderr: display `warning`, `error`, or `critical` messages

-By default, the logs are flushed to console.
+By default, the logs are saved in the `/tmp/.gandlf` directory.
 The logs are **saved** in the path that is defined by the '--log-file' parameter in the CLI commands.
-If the path is not provided or an error is raised, the logs will be flushed to console.
+
diff --git a/samples/config_classification.yaml b/samples/config_classification.yaml
index 23fdc6ff4..7a8ba7704 100644
--- a/samples/config_classification.yaml
+++ b/samples/config_classification.yaml
@@ -14,6 +14,7 @@ model:
   final_layer: None, # can be either sigmoid, softmax or none (none == regression)
   amp: False, # Set if you want to use Automatic Mixed Precision for your operations or not - options: True, False
   n_channels: 3, # set the input channels - useful when reading RGB or images that have vectored pixel types
+  class_list: [0,1,2], # this is to classify 3 classes, denoted by 0,1,2 in the main csv file - change as needed
   }
 # metrics to evaluate the validation performance
 metrics:
diff --git a/setup.py b/setup.py
index 1ee6a3f72..b82fc9424 100644
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
 # specifying version for `black` separately because it is also used to [check for lint](https://github.com/mlcommons/GaNDLF/blob/master/.github/workflows/black.yml)
 black_version = "23.11.0"
 requirements = [
-    "torch==2.2.1",
+    "torch==2.3.1",
     f"black=={black_version}",
     "numpy==1.25.0",
     "scipy",