From fb0e81db8262e98a1c541604b0c43da2bfff5e8a Mon Sep 17 00:00:00 2001 From: Patrick Foley Date: Fri, 25 Aug 2023 16:33:55 -0700 Subject: [PATCH] Update Tensorflow, gRPC, Protobuf dependencies (#868) * Update Tensorflow to latest, finally update grpcio/protobuf Signed-off-by: Patrick Foley * Lint issue fix and missing tf reference Signed-off-by: Patrick Foley * pyzmq version fixed * fix taskrunner tests for windows Signed-off-by: Mansi Sharma * fix taskrunner test syntax for windows Signed-off-by: Mansi Sharma * adding user option to workspace pip install requirements for windows Signed-off-by: Mansi Sharma * fix windows CI test Signed-off-by: Mansi Sharma * testing virtual env for windows github actions Signed-off-by: Mansi Sharma * testing virtual env for windows github actions Signed-off-by: Mansi Sharma * testing virtual env for windows github actions Signed-off-by: Mansi Sharma * testing venv for windows Signed-off-by: Mansi Sharma * test venv for windows * test venv for windows * Added new KerasSerializer. Fixed other Interactive API experiments * Update taskrunner.yml * Update taskrunner.yml * Update workspace.py * Update workspace.py * Update taskrunner.yml * Remove get_model import from global namespace so dependencies are not loaded into memory unnecessarily (breaking windows build) * Refactoring and cleaning up imports to support Windows install * Fixed logger import paths * Fix missing imports * Fix native import * Fix lint errors * Fix keras optimizer patch. Remove irrelevant unit test * Format logs in UTF-8 for windows * Update interactive-kvasir.yml * Consolidate github actions python versions to single file * Update python versions * Update python versions * Update python versions * Reduce # of DataLoader workers for Pytorch Kvasir CI test * Fix Windows encoding * Fix Windows encoding and limit rounds so Github Actions CI doesn't run out of memory Signed-off-by: Patrick Foley * Fix windows encoding * Fix Windows encoding --------- Signed-off-by: Patrick Foley Signed-off-by: Mansi Sharma Co-authored-by: Mansi Sharma <77758170+mansishr@users.noreply.github.com> Co-authored-by: Mansi Sharma --- .github/workflows/interactive-tensorflow.yml | 2 +- .github/workflows/taskrunner.yml | 17 ++-- .github/workflows/taskrunner_python_3.10.yml | 33 ------- .github/workflows/taskrunner_python_3.9.yml | 33 ------- .../Federated_Keras_MNIST_Tutorial.ipynb | 4 +- .../Flax_CNN_CIFAR/requirements.txt | 2 +- .../workspace/Tensorflow_CIFAR.ipynb | 8 +- .../workspace/Tensorflow_MNIST.ipynb | 2 +- .../Tensorflow_Word_Prediction.ipynb | 4 +- .../workspace/requirements.txt | 2 +- .../keras_cnn_mnist/requirements.txt | 2 +- .../requirements.txt | 2 +- openfl-workspace/keras_nlp/requirements.txt | 2 +- .../keras_nlp_gramine_ready/requirements.txt | 2 +- openfl-workspace/tf_2dunet/requirements.txt | 2 +- .../tf_cnn_histology/requirements.txt | 2 +- openfl/__init__.py | 2 +- .../experimental/utilities/runtime_utils.py | 4 +- openfl/federated/plan/plan.py | 4 +- openfl/federated/task/runner_gandlf.py | 2 +- openfl/federated/task/runner_keras.py | 2 +- openfl/federated/task/runner_pt.py | 2 +- openfl/federated/task/runner_tf.py | 2 +- openfl/federated/task/task_runner.py | 2 +- openfl/interface/cli.py | 3 +- openfl/interface/director.py | 6 +- openfl/interface/envoy.py | 4 +- .../interface/interactive_api/experiment.py | 9 +- openfl/interface/model.py | 13 ++- openfl/interface/pki.py | 20 ++-- openfl/interface/plan.py | 2 +- openfl/interface/workspace.py | 16 +++- openfl/native/fastestimator.py | 2 +- 
openfl/native/native.py | 4 +- .../frameworks_adapters/keras_adapter.py | 7 ++ .../interface_serializer/keras_serializer.py | 33 +++++++ .../{component => utilities}/ca/__init__.py | 0 openfl/{component => utilities}/ca/ca.py | 4 +- .../{component => utilities}/ca/downloader.py | 0 openfl/utilities/split.py | 95 +++++++++++++++++++ openfl/utilities/utils.py | 91 ------------------ setup.py | 14 +-- .../pytorch_kvasir_unet/experiment.py | 6 +- .../envoy/sd_requirements.txt | 2 +- .../tensorflow_mnist/experiment.py | 6 +- tests/openfl/interface/test_model_api.py | 31 ------ 46 files changed, 239 insertions(+), 268 deletions(-) delete mode 100644 .github/workflows/taskrunner_python_3.10.yml delete mode 100644 .github/workflows/taskrunner_python_3.9.yml create mode 100644 openfl/plugins/interface_serializer/keras_serializer.py rename openfl/{component => utilities}/ca/__init__.py (100%) rename openfl/{component => utilities}/ca/ca.py (98%) rename openfl/{component => utilities}/ca/downloader.py (100%) create mode 100644 openfl/utilities/split.py diff --git a/.github/workflows/interactive-tensorflow.yml b/.github/workflows/interactive-tensorflow.yml index 41b636fa53..4958ca139b 100644 --- a/.github/workflows/interactive-tensorflow.yml +++ b/.github/workflows/interactive-tensorflow.yml @@ -27,5 +27,5 @@ jobs: - name: Interactive API - tensorflow_mnist run: | python setup.py build_grpc - pip install tensorflow==2.11 + pip install tensorflow==2.13 python -m tests.github.interactive_api_director.experiments.tensorflow_mnist.run diff --git a/.github/workflows/taskrunner.yml b/.github/workflows/taskrunner.yml index 9d8d79d47b..1e286b41c6 100644 --- a/.github/workflows/taskrunner.yml +++ b/.github/workflows/taskrunner.yml @@ -12,19 +12,24 @@ permissions: jobs: build: - strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] + python-version: ['3.8','3.9','3.10','3.11'] runs-on: ${{ matrix.os }} - steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 - uses: actions/setup-python@v3 + - name: Set up Python + uses: actions/setup-python@v4 with: - python-version: "3.8" - - name: Install dependencies + python-version: ${{ matrix.python-version }} + - name: Install dependencies ubuntu + if: matrix.os == 'ubuntu-latest' + run: | + python -m pip install --upgrade pip + pip install . + - name: Install dependencies windows + if: matrix.os == 'windows-latest' run: | python -m pip install --upgrade pip pip install . diff --git a/.github/workflows/taskrunner_python_3.10.yml b/.github/workflows/taskrunner_python_3.10.yml deleted file mode 100644 index 6907c87b5e..0000000000 --- a/.github/workflows/taskrunner_python_3.10.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: TaskRunner (Python 3.10) - -on: - pull_request: - branches: [ develop ] - -permissions: - contents: read - -jobs: - build: - - strategy: - matrix: - os: ['ubuntu-latest', 'windows-latest'] - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.10 - uses: actions/setup-python@v3 - with: - python-version: "3.10" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install . 
- - name: Test TaskRunner API - run: | - python -m tests.github.test_hello_federation --template keras_cnn_mnist --fed_workspace aggregator --col1 col1 --col2 col2 --rounds-to-train 3 diff --git a/.github/workflows/taskrunner_python_3.9.yml b/.github/workflows/taskrunner_python_3.9.yml deleted file mode 100644 index f1755ffad8..0000000000 --- a/.github/workflows/taskrunner_python_3.9.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: TaskRunner (Python 3.9) - -on: - pull_request: - branches: [ develop ] - -permissions: - contents: read - -jobs: - build: - - strategy: - matrix: - os: ['ubuntu-latest', 'windows-latest'] - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v3 - with: - python-version: "3.9" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install . - - name: Test TaskRunner API - run: | - python -m tests.github.test_hello_federation --template keras_cnn_mnist --fed_workspace aggregator --col1 col1 --col2 col2 --rounds-to-train 3 diff --git a/openfl-tutorials/Federated_Keras_MNIST_Tutorial.ipynb b/openfl-tutorials/Federated_Keras_MNIST_Tutorial.ipynb index 5eb4fdb8cf..fbdab4b46e 100644 --- a/openfl-tutorials/Federated_Keras_MNIST_Tutorial.ipynb +++ b/openfl-tutorials/Federated_Keras_MNIST_Tutorial.ipynb @@ -16,10 +16,10 @@ "outputs": [], "source": [ "#Install Tensorflow and MNIST dataset if not installed\n", - "!pip install tensorflow==2.7.0\n", + "!pip install tensorflow==2.13\n", "\n", "#Alternatively you could use the intel-tensorflow build\n", - "# !pip install intel-tensorflow==2.3.0" + "# !pip install intel-tensorflow==2.13" ] }, { diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt b/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt index 42f504302e..d1aecafc4b 100644 --- a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt +++ b/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt @@ -1,4 +1,4 @@ -tensorflow==2.11.1 +tensorflow==2.13 tensorflow-datasets==4.6.0 jax --find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb b/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb index 6678549221..87dee451a0 100644 --- a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb +++ b/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb @@ -16,8 +16,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Install TF if not already. We recommend TF2.7 or greater.\n", - "# !pip install tensorflow==2.8" + "# Install TF if not already. We recommend TF2.13 or greater.\n", + "# !pip install tensorflow==2.13" ] }, { @@ -157,7 +157,7 @@ "model.summary()\n", "\n", "# Define optimizer\n", - "optimizer = tf.optimizers.Adam(learning_rate=1e-4)\n", + "optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=1e-4)\n", "\n", "# Loss and metrics. 
These will be used later.\n", "loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n", @@ -327,7 +327,7 @@ "source": [ "# create an experiment in federation\n", "experiment_name = 'cifar10_experiment'\n", - "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name)" + "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name, serializer_plugin='openfl.plugins.interface_serializer.keras_serializer.KerasSerializer')" ] }, { diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb b/openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb index a060482ad1..accf12f0b0 100644 --- a/openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb +++ b/openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb @@ -404,7 +404,7 @@ "source": [ "# create an experiment in federation\n", "experiment_name = 'mnist_experiment'\n", - "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name)" + "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name, serializer_plugin='openfl.plugins.interface_serializer.keras_serializer.KerasSerializer')" ] }, { diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb b/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb index 8ebaae13df..85bc5d9672 100644 --- a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb +++ b/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb @@ -233,7 +233,7 @@ "source": [ "import tensorflow as tf\n", "from tensorflow.keras.layers import LSTM, Dense\n", - "from tensorflow.keras.optimizers import Adam\n", + "from tensorflow.keras.optimizers.legacy import Adam\n", "from tensorflow.keras.metrics import TopKCategoricalAccuracy\n", "from tensorflow.keras.losses import CategoricalCrossentropy\n", "from tensorflow.keras.models import Sequential\n", @@ -363,7 +363,7 @@ "source": [ "# create an experiment in federation\n", "experiment_name = 'word_prediction_test_experiment'\n", - "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name)" + "fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name, serializer_plugin='openfl.plugins.interface_serializer.keras_serializer.KerasSerializer')" ] }, { diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt b/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt index 444b587310..4b6c237d85 100644 --- a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt +++ b/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt @@ -1,2 +1,2 @@ -tensorflow==2.11.1 +tensorflow==2.13 numpy==1.22.2 diff --git a/openfl-workspace/keras_cnn_mnist/requirements.txt b/openfl-workspace/keras_cnn_mnist/requirements.txt index 8e961eac43..af80212eeb 100644 --- a/openfl-workspace/keras_cnn_mnist/requirements.txt +++ b/openfl-workspace/keras_cnn_mnist/requirements.txt @@ -1 +1 @@ -tensorflow==2.11.1 +tensorflow==2.13 diff --git a/openfl-workspace/keras_cnn_with_compression/requirements.txt b/openfl-workspace/keras_cnn_with_compression/requirements.txt index 8e961eac43..af80212eeb 100644 ---
a/openfl-workspace/keras_cnn_with_compression/requirements.txt +++ b/openfl-workspace/keras_cnn_with_compression/requirements.txt @@ -1 +1 @@ -tensorflow==2.11.1 +tensorflow==2.13 diff --git a/openfl-workspace/keras_nlp/requirements.txt b/openfl-workspace/keras_nlp/requirements.txt index 8e961eac43..af80212eeb 100644 --- a/openfl-workspace/keras_nlp/requirements.txt +++ b/openfl-workspace/keras_nlp/requirements.txt @@ -1 +1 @@ -tensorflow==2.11.1 +tensorflow==2.13 diff --git a/openfl-workspace/keras_nlp_gramine_ready/requirements.txt b/openfl-workspace/keras_nlp_gramine_ready/requirements.txt index d0fe0d1d24..f32ee3ed53 100644 --- a/openfl-workspace/keras_nlp_gramine_ready/requirements.txt +++ b/openfl-workspace/keras_nlp_gramine_ready/requirements.txt @@ -1 +1 @@ -tensorflow-cpu==2.11.1 +tensorflow-cpu==2.13 diff --git a/openfl-workspace/tf_2dunet/requirements.txt b/openfl-workspace/tf_2dunet/requirements.txt index ea73737de6..f655f6c7d2 100644 --- a/openfl-workspace/tf_2dunet/requirements.txt +++ b/openfl-workspace/tf_2dunet/requirements.txt @@ -1,3 +1,3 @@ nibabel -tensorflow==2.11.1 +tensorflow==2.13 setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/openfl-workspace/tf_cnn_histology/requirements.txt b/openfl-workspace/tf_cnn_histology/requirements.txt index 18542c62a2..59ee6430c8 100644 --- a/openfl-workspace/tf_cnn_histology/requirements.txt +++ b/openfl-workspace/tf_cnn_histology/requirements.txt @@ -1,3 +1,3 @@ pillow -tensorflow==2.11.1 +tensorflow==2.13 tensorflow-datasets diff --git a/openfl/__init__.py b/openfl/__init__.py index 04fefabc68..bb887c6ebf 100644 --- a/openfl/__init__.py +++ b/openfl/__init__.py @@ -3,4 +3,4 @@ """openfl base package.""" from .__version__ import __version__ # flake8: noqa -from .interface.model import get_model +#from .interface.model import get_model diff --git a/openfl/experimental/utilities/runtime_utils.py b/openfl/experimental/utilities/runtime_utils.py index bf85f2a15a..dc038e3b4e 100644 --- a/openfl/experimental/utilities/runtime_utils.py +++ b/openfl/experimental/utilities/runtime_utils.py @@ -49,7 +49,7 @@ def filter_attributes(ctx, f, **kwargs): if "include" in kwargs and "exclude" in kwargs: raise RuntimeError("'include' and 'exclude' should not both be present") elif "include" in kwargs: - assert type(kwargs["include"]) == list + assert type(kwargs["include"]) is list for in_attr in kwargs["include"]: if in_attr not in cls_attrs: raise RuntimeError( @@ -59,7 +59,7 @@ def filter_attributes(ctx, f, **kwargs): if attr not in kwargs["include"]: delattr(ctx, attr) elif "exclude" in kwargs: - assert type(kwargs["exclude"]) == list + assert type(kwargs["exclude"]) is list for in_attr in kwargs["exclude"]: if in_attr not in cls_attrs: raise RuntimeError( diff --git a/openfl/federated/plan/plan.py b/openfl/federated/plan/plan.py index 813f5f285f..7edbcd679a 100644 --- a/openfl/federated/plan/plan.py +++ b/openfl/federated/plan/plan.py @@ -274,7 +274,9 @@ def get_assigner(self): aggregation_functions_by_task = self.restore_object('aggregation_function_obj.pkl') assigner_function = self.restore_object('task_assigner_obj.pkl') except Exception as exc: - self.logger.error(f'Failed to load aggregation and assigner functions: {exc}') + self.logger.error( + f'Failed to load aggregation and assigner functions: {exc}' + ) self.logger.info('Using Task Runner API workflow') if assigner_function: self.assigner_ = Assigner( diff --git a/openfl/federated/task/runner_gandlf.py 
b/openfl/federated/task/runner_gandlf.py index a4fa794753..7db12dcecf 100644 --- a/openfl/federated/task/runner_gandlf.py +++ b/openfl/federated/task/runner_gandlf.py @@ -11,7 +11,7 @@ from typing import Union import yaml -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities import TensorKey from .runner import TaskRunner diff --git a/openfl/federated/task/runner_keras.py b/openfl/federated/task/runner_keras.py index 32bb476c68..c7daaa3d33 100644 --- a/openfl/federated/task/runner_keras.py +++ b/openfl/federated/task/runner_keras.py @@ -13,7 +13,7 @@ from openfl.utilities import change_tags from openfl.utilities import Metric -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities import TensorKey from .runner import TaskRunner diff --git a/openfl/federated/task/runner_pt.py b/openfl/federated/task/runner_pt.py index 8d283a9908..a95e942962 100644 --- a/openfl/federated/task/runner_pt.py +++ b/openfl/federated/task/runner_pt.py @@ -14,7 +14,7 @@ from openfl.utilities import change_tags from openfl.utilities import Metric -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities import TensorKey from .runner import TaskRunner diff --git a/openfl/federated/task/runner_tf.py b/openfl/federated/task/runner_tf.py index cd7127d32f..f63ffce3f8 100644 --- a/openfl/federated/task/runner_tf.py +++ b/openfl/federated/task/runner_tf.py @@ -7,7 +7,7 @@ import tensorflow.compat.v1 as tf from tqdm import tqdm -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities import TensorKey from .runner import TaskRunner diff --git a/openfl/federated/task/task_runner.py b/openfl/federated/task/task_runner.py index d2c40ca3e4..7bf6340cad 100644 --- a/openfl/federated/task/task_runner.py +++ b/openfl/federated/task/task_runner.py @@ -7,7 +7,7 @@ import numpy as np from openfl.utilities import change_tags -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities import TensorKey diff --git a/openfl/interface/cli.py b/openfl/interface/cli.py index f470f68f4b..4bf91684fe 100755 --- a/openfl/interface/cli.py +++ b/openfl/interface/cli.py @@ -14,9 +14,8 @@ from click import pass_context from click import style import time - -from openfl.utilities import add_log_level import sys +from openfl.utilities import add_log_level def setup_logging(level='info', log_file=None): diff --git a/openfl/interface/director.py b/openfl/interface/director.py index e0e39ddc38..a3fd229e25 100644 --- a/openfl/interface/director.py +++ b/openfl/interface/director.py @@ -14,9 +14,7 @@ from click import Path as ClickPath from dynaconf import Validator -from openfl.component.director import Director from openfl.interface.cli_helper import WORKSPACE -from openfl.transport import DirectorGRPCServer from openfl.utilities import merge_configs from openfl.utilities.path_check import is_directory_traversal from openfl.interface.cli import review_plan_callback @@ -47,6 +45,10 @@ def director(context): help='Path to a signed certificate') def start(director_config_path, tls, root_certificate, private_key, certificate): """Start the director service.""" + + from openfl.component.director import 
Director + from openfl.transport import DirectorGRPCServer + director_config_path = Path(director_config_path).absolute() logger.info('🧿 Starting the Director Service.') if is_directory_traversal(director_config_path): diff --git a/openfl/interface/envoy.py b/openfl/interface/envoy.py index 49b9338295..b974fc6b11 100644 --- a/openfl/interface/envoy.py +++ b/openfl/interface/envoy.py @@ -15,7 +15,6 @@ from click import Path as ClickPath from dynaconf import Validator -from openfl.component.envoy.envoy import Envoy from openfl.interface.cli import review_plan_callback from openfl.interface.cli_helper import WORKSPACE from openfl.utilities import click_types @@ -52,6 +51,9 @@ def envoy(context): def start_(shard_name, director_host, director_port, tls, envoy_config_path, root_certificate, private_key, certificate): """Start the Envoy.""" + + from openfl.component.envoy.envoy import Envoy + logger.info('🧿 Starting the Envoy.') if is_directory_traversal(envoy_config_path): click.echo('The shard config path is out of the openfl workspace scope.') diff --git a/openfl/interface/interactive_api/experiment.py b/openfl/interface/interactive_api/experiment.py index 218a6eca8d..d68baf11cc 100644 --- a/openfl/interface/interactive_api/experiment.py +++ b/openfl/interface/interactive_api/experiment.py @@ -22,7 +22,7 @@ from openfl.interface.cli import setup_logging from openfl.interface.cli_helper import WORKSPACE from openfl.native import update_plan -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities.workspace import dump_requirements_file @@ -81,7 +81,9 @@ def _initialize_plan(self): def _assert_experiment_submitted(self): """Assure experiment is sent to director and accepted.""" if not self.experiment_submitted: - self.logger.error('The experiment was not submitted to a Director service.') + self.logger.error( + 'The experiment was not submitted to a Director service.' + ) self.logger.error( 'Report the experiment first: ' 'use the Experiment.start() method.') @@ -145,7 +147,8 @@ def stream_metrics(self, tensorboard_logs: bool = True) -> None: f'Round {metric_message_dict["round"]}, ' f'collaborator {metric_message_dict["metric_origin"]} ' f'{metric_message_dict["task_name"]} result ' - f'{metric_message_dict["metric_name"]}:\t{metric_message_dict["metric_value"]:f}') + f'{metric_message_dict["metric_name"]}:\t{metric_message_dict["metric_value"]:f}' + ) if tensorboard_logs: self.write_tensorboard_metric(metric_message_dict) diff --git a/openfl/interface/model.py b/openfl/interface/model.py index 5f6f40ce0b..b14d50ecc0 100644 --- a/openfl/interface/model.py +++ b/openfl/interface/model.py @@ -11,12 +11,6 @@ from logging import getLogger from pathlib import Path -from openfl.federated import Plan -from openfl.federated import TaskRunner -from openfl.protocols import utils -from openfl.pipelines import NoCompressionPipeline -from openfl.utilities.workspace import set_directory - logger = getLogger(__name__) @@ -68,7 +62,7 @@ def get_model( cols_config: str, data_config: str, model_protobuf_path: str -) -> TaskRunner: +): """ Initialize TaskRunner and load it with provided model.pbuf. @@ -77,6 +71,11 @@ def get_model( the diversity of the ways we store models in our template workspaces. 
""" + from openfl.federated import Plan + from openfl.pipelines import NoCompressionPipeline + from openfl.protocols import utils + from openfl.utilities.workspace import set_directory + # Here we change cwd to the experiment workspace folder # because plan.yaml usually contains relative paths to components. workspace_path = Path(plan_config).resolve().parent.parent diff --git a/openfl/interface/pki.py b/openfl/interface/pki.py index 3fdb01f82c..272f4edc88 100644 --- a/openfl/interface/pki.py +++ b/openfl/interface/pki.py @@ -13,16 +13,16 @@ from click import password_option from click import Path as ClickPath -from openfl.component.ca.ca import CA_CONFIG_JSON -from openfl.component.ca.ca import CA_PASSWORD_FILE -from openfl.component.ca.ca import CA_PKI_DIR -from openfl.component.ca.ca import CA_STEP_CONFIG_DIR -from openfl.component.ca.ca import certify -from openfl.component.ca.ca import get_ca_bin_paths -from openfl.component.ca.ca import get_token -from openfl.component.ca.ca import install -from openfl.component.ca.ca import remove_ca -from openfl.component.ca.ca import run_ca +from openfl.utilities.ca.ca import CA_CONFIG_JSON +from openfl.utilities.ca.ca import CA_PASSWORD_FILE +from openfl.utilities.ca.ca import CA_PKI_DIR +from openfl.utilities.ca.ca import CA_STEP_CONFIG_DIR +from openfl.utilities.ca.ca import certify +from openfl.utilities.ca.ca import get_ca_bin_paths +from openfl.utilities.ca.ca import get_token +from openfl.utilities.ca.ca import install +from openfl.utilities.ca.ca import remove_ca +from openfl.utilities.ca.ca import run_ca logger = logging.getLogger(__name__) diff --git a/openfl/interface/plan.py b/openfl/interface/plan.py index 12ec879bea..18e5b0216a 100644 --- a/openfl/interface/plan.py +++ b/openfl/interface/plan.py @@ -52,7 +52,7 @@ def initialize(context, plan_config, cols_config, data_config, from openfl.federated import Plan from openfl.protocols import utils - from openfl.utilities import split_tensor_dict_for_holdouts + from openfl.utilities.split import split_tensor_dict_for_holdouts from openfl.utilities.utils import getfqdn_env for p in [plan_config, cols_config, data_config]: diff --git a/openfl/interface/workspace.py b/openfl/interface/workspace.py index 12384a956e..e28e6534f5 100644 --- a/openfl/interface/workspace.py +++ b/openfl/interface/workspace.py @@ -6,7 +6,7 @@ import subprocess import sys from pathlib import Path -from typing import Tuple +from typing import Tuple, Union from click import Choice from click import confirm @@ -16,8 +16,6 @@ from click import pass_context from click import Path as ClickPath -from openfl.utilities.path_check import is_directory_traversal - @group() @pass_context @@ -26,6 +24,15 @@ def workspace(context): context.obj['group'] = 'workspace' +def is_directory_traversal(directory: Union[str, Path]) -> bool: + """Check for directory traversal.""" + cwd = os.path.abspath(os.getcwd()) + requested_path = os.path.relpath(directory, start=cwd) + requested_path = os.path.abspath(requested_path) + common_prefix = os.path.commonprefix([requested_path, cwd]) + return common_prefix != cwd + + def create_dirs(prefix): """Create workspace directories.""" from shutil import copyfile @@ -54,7 +61,6 @@ def create_temp(prefix, template): copytree(src=WORKSPACE / template, dst=prefix, dirs_exist_ok=True, ignore=ignore_patterns('__pycache__')) # from template workspace - apply_template_plan(prefix, template) def get_templates(): @@ -107,6 +113,8 @@ def create(prefix, template): with open(OPENFL_USERDIR / 
f'requirements.{prefix_hash}.txt', 'w', encoding='utf-8') as f: check_call([executable, '-m', 'pip', 'freeze'], shell=False, stdout=f) + apply_template_plan(prefix, template) + print_tree(prefix, level=3) diff --git a/openfl/native/fastestimator.py b/openfl/native/fastestimator.py index 1466219640..e2d659563a 100644 --- a/openfl/native/fastestimator.py +++ b/openfl/native/fastestimator.py @@ -11,7 +11,7 @@ from openfl.federated.data import FastEstimatorDataLoader from openfl.federated.task import FastEstimatorTaskRunner from openfl.protocols import utils -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts class FederatedFastEstimator: diff --git a/openfl/native/native.py b/openfl/native/native.py index ae45fd091f..50efbf3663 100644 --- a/openfl/native/native.py +++ b/openfl/native/native.py @@ -19,7 +19,7 @@ from openfl.federated import Plan from openfl.protocols import utils from openfl.utilities import add_log_level -from openfl.utilities import split_tensor_dict_for_holdouts +from openfl.utilities.split import split_tensor_dict_for_holdouts logger = getLogger(__name__) @@ -103,7 +103,7 @@ def update_plan(override_config, plan=None, resolve=True): else: # TODO: We probably need to validate the new key somehow logger.info(f'Did not find {key} in config. Make sure it should exist. Creating...') - if type(val) == list: + if type(val) is list: for idx, v in enumerate(val): flat_plan_config[f'{key}.{idx}'] = v else: diff --git a/openfl/plugins/frameworks_adapters/keras_adapter.py b/openfl/plugins/frameworks_adapters/keras_adapter.py index 529e63c3cf..9508fceaf1 100644 --- a/openfl/plugins/frameworks_adapters/keras_adapter.py +++ b/openfl/plugins/frameworks_adapters/keras_adapter.py @@ -22,6 +22,7 @@ def serialization_setup(): # Source: https://github.com/tensorflow/tensorflow/issues/34697 import tensorflow as tf from tensorflow.keras.models import Model + from tensorflow.keras.optimizers.legacy import Optimizer from tensorflow.python.keras.layers import deserialize from tensorflow.python.keras.layers import serialize from tensorflow.python.keras.saving import saving_utils @@ -56,6 +57,12 @@ def __reduce__(self): # NOQA:N807 logger.warn('Applying hotfix for model serialization.' 
'Please consider updating to tensorflow>=2.8 to silence this warning.') make_keras_picklable() + if version.parse(tf.__version__) >= version.parse('2.13'): + def build(self, var_list): + pass + + cls = Optimizer + cls.build = build @staticmethod def get_tensor_dict(model, optimizer=None, suffix=''): diff --git a/openfl/plugins/interface_serializer/keras_serializer.py b/openfl/plugins/interface_serializer/keras_serializer.py new file mode 100644 index 0000000000..ec36f38d25 --- /dev/null +++ b/openfl/plugins/interface_serializer/keras_serializer.py @@ -0,0 +1,33 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +"""Cloudpickle serializer plugin.""" + +import cloudpickle + +from .serializer_interface import Serializer + + +class KerasSerializer(Serializer): + """Serializer API plugin.""" + + def __init__(self) -> None: + """Initialize serializer.""" + super().__init__() + + @staticmethod + def serialize(object_, filename): + """Serialize an object and save to disk.""" + with open(filename, 'wb') as f: + cloudpickle.dump(object_, f) + + @staticmethod + def restore_object(filename): + """Load and deserialize an object.""" + from tensorflow.keras.optimizers.legacy import Optimizer + + def build(self, var_list): + pass + + Optimizer.build = build + with open(filename, 'rb') as f: + return cloudpickle.load(f) diff --git a/openfl/component/ca/__init__.py b/openfl/utilities/ca/__init__.py similarity index 100% rename from openfl/component/ca/__init__.py rename to openfl/utilities/ca/__init__.py diff --git a/openfl/component/ca/ca.py b/openfl/utilities/ca/ca.py similarity index 98% rename from openfl/component/ca/ca.py rename to openfl/utilities/ca/ca.py index b5ab844d98..f5dace0be7 100644 --- a/openfl/component/ca/ca.py +++ b/openfl/utilities/ca/ca.py @@ -17,8 +17,8 @@ from click import confirm -from openfl.component.ca.downloader import download_step_bin -from openfl.component.ca.downloader import download_step_ca_bin +from openfl.utilities.ca.downloader import download_step_bin +from openfl.utilities.ca.downloader import download_step_ca_bin logger = getLogger(__name__) diff --git a/openfl/component/ca/downloader.py b/openfl/utilities/ca/downloader.py similarity index 100% rename from openfl/component/ca/downloader.py rename to openfl/utilities/ca/downloader.py diff --git a/openfl/utilities/split.py b/openfl/utilities/split.py new file mode 100644 index 0000000000..9692d8e33e --- /dev/null +++ b/openfl/utilities/split.py @@ -0,0 +1,95 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +"""split tensors module.""" + +import numpy as np + + +def split_tensor_dict_into_floats_and_non_floats(tensor_dict): + """ + Split the tensor dictionary into float and non-floating point values. + + Splits a tensor dictionary into float and non-float values. + + Args: + tensor_dict: A dictionary of tensors + + Returns: + Two dictionaries: the first contains all of the floating point tensors + and the second contains all of the non-floating point tensors + + """ + float_dict = {} + non_float_dict = {} + for k, v in tensor_dict.items(): + if np.issubdtype(v.dtype, np.floating): + float_dict[k] = v + else: + non_float_dict[k] = v + return float_dict, non_float_dict + + +def split_tensor_dict_by_types(tensor_dict, keep_types): + """ + Split the tensor dictionary into supported and not supported types. 
+ + Args: + tensor_dict: A dictionary of tensors + keep_types: An iterable of supported types + Returns: + Two dictionaries: the first contains all of the supported tensors + and the second contains all of the not supported tensors + + """ + keep_dict = {} + holdout_dict = {} + for k, v in tensor_dict.items(): + if any(np.issubdtype(v.dtype, type_) for type_ in keep_types): + keep_dict[k] = v + else: + holdout_dict[k] = v + return keep_dict, holdout_dict + + + def split_tensor_dict_for_holdouts(logger, tensor_dict, + keep_types=(np.floating, np.integer), + holdout_tensor_names=()): + """ + Split a tensor dictionary according to tensor types. + + Args: + logger: The log object + tensor_dict: A dictionary of tensors + keep_types: A list of types to keep in dictionary of tensors + holdout_tensor_names: A list of tensor names to extract from the + dictionary of tensors + + Returns: + Two dictionaries: the first is the original tensor dictionary minus + the holdout tensors and the second is a tensor dictionary with only the + holdout tensors + + """ + # initialization + tensors_to_send = tensor_dict.copy() + holdout_tensors = {} + + # filter by-name tensors from tensors_to_send and add to holdout_tensors + # (for ones not already held out because of their type) + for tensor_name in holdout_tensor_names: + if tensor_name not in holdout_tensors.keys(): + try: + holdout_tensors[tensor_name] = tensors_to_send.pop(tensor_name) + except KeyError: + logger.warn(f'tried to remove tensor: {tensor_name} not present ' + f'in the tensor dict') + continue + + # filter holdout_types from tensors_to_send and add to holdout_tensors + tensors_to_send, not_supported_tensors_dict = split_tensor_dict_by_types( + tensors_to_send, + keep_types + ) + holdout_tensors = {**holdout_tensors, **not_supported_tensors_dict} + + return tensors_to_send, holdout_tensors diff --git a/openfl/utilities/utils.py b/openfl/utilities/utils.py index ab3b498e02..015e067c91 100644 --- a/openfl/utilities/utils.py +++ b/openfl/utilities/utils.py @@ -16,7 +16,6 @@ import stat import shutil -import numpy as np from dynaconf import Dynaconf from tqdm import tqdm @@ -93,96 +92,6 @@ def log_to_root(message, *args, **kwargs): setattr(logging, method_name, log_to_root) -def split_tensor_dict_into_floats_and_non_floats(tensor_dict): - """ - Split the tensor dictionary into float and non-floating point values. - - Splits a tensor dictionary into float and non-float values. - - Args: - tensor_dict: A dictionary of tensors - - Returns: - Two dictionaries: the first contains all of the floating point tensors - and the second contains all of the non-floating point tensors - - """ - float_dict = {} - non_float_dict = {} - for k, v in tensor_dict.items(): - if np.issubdtype(v.dtype, np.floating): - float_dict[k] = v - else: - non_float_dict[k] = v - return float_dict, non_float_dict - - -def split_tensor_dict_by_types(tensor_dict, keep_types): - """ - Split the tensor dictionary into supported and not supported types.
- - Args: - tensor_dict: A dictionary of tensors - keep_types: An iterable of supported types - Returns: - Two dictionaries: the first contains all of the supported tensors - and the second contains all of the not supported tensors - - """ - keep_dict = {} - holdout_dict = {} - for k, v in tensor_dict.items(): - if any(np.issubdtype(v.dtype, type_) for type_ in keep_types): - keep_dict[k] = v - else: - holdout_dict[k] = v - return keep_dict, holdout_dict - - -def split_tensor_dict_for_holdouts(logger, tensor_dict, - keep_types=(np.floating, np.integer), - holdout_tensor_names=()): - """ - Split a tensor according to tensor types. - - Args: - logger: The log object - tensor_dict: A dictionary of tensors - keep_types: A list of types to keep in dictionary of tensors - holdout_tensor_names: A list of tensor names to extract from the - dictionary of tensors - - Returns: - Two dictionaries: the first is the original tensor dictionary minus - the holdout tenors and the second is a tensor dictionary with only the - holdout tensors - - """ - # initialization - tensors_to_send = tensor_dict.copy() - holdout_tensors = {} - - # filter by-name tensors from tensors_to_send and add to holdout_tensors - # (for ones not already held out becuase of their type) - for tensor_name in holdout_tensor_names: - if tensor_name not in holdout_tensors.keys(): - try: - holdout_tensors[tensor_name] = tensors_to_send.pop(tensor_name) - except KeyError: - logger.warn(f'tried to remove tensor: {tensor_name} not present ' - f'in the tensor dict') - continue - - # filter holdout_types from tensors_to_send and add to holdout_tensors - tensors_to_send, not_supported_tensors_dict = split_tensor_dict_by_types( - tensors_to_send, - keep_types - ) - holdout_tensors = {**holdout_tensors, **not_supported_tensors_dict} - - return tensors_to_send, holdout_tensors - - def validate_file_hash(file_path, expected_hash, chunk_size=8192): """Validate SHA384 hash for file specified. diff --git a/setup.py b/setup.py index d515331b36..845d960813 100644 --- a/setup.py +++ b/setup.py @@ -94,7 +94,6 @@ def run(self): 'openfl.interface.aggregation_functions.experimental', 'openfl.component.aggregator', 'openfl.component.assigner', - 'openfl.component.ca', 'openfl.component.collaborator', 'openfl.component.director', 'openfl.component.envoy', @@ -123,6 +122,7 @@ def run(self): 'openfl.transport', 'openfl.transport.grpc', 'openfl.utilities', + 'openfl.utilities.ca', 'openfl.utilities.data_splitters', 'openfl.utilities.fedcurv', 'openfl.utilities.fedcurv.torch', @@ -143,12 +143,13 @@ def run(self): 'docker', 'dynaconf==3.1.7', 'flatten_json', - 'grpcio~=1.48.2', + 'grpcio>=1.56.2', 'ipykernel', 'jupyterlab', 'numpy', 'pandas', - 'protobuf==3.19.6', + 'protobuf>=3.20.3', + 'pyzmq<=24.0.1', 'requests', 'rich', 'scikit-learn', @@ -156,8 +157,8 @@ def run(self): 'tensorboardX<=2.6', 'tqdm', ], - setup_requires=['grpcio-tools~=1.48.2'], - python_requires='>=3.7, <3.11', + setup_requires=['grpcio-tools~=1.56.2'], + python_requires='>=3.8, <3.12', project_urls={ 'Bug Tracker': 'https://github.com/intel/openfl/issues', 'Documentation': 'https://openfl.readthedocs.io/en/stable/', @@ -178,11 +179,10 @@ def run(self): # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 
'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', - + 'Programming Language :: Python :: 3.11', ], entry_points={ diff --git a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/experiment.py b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/experiment.py index 14c2187dbd..b6b791bfa7 100644 --- a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/experiment.py +++ b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/experiment.py @@ -51,7 +51,7 @@ def run(): fl_experiment.start(model_provider=MI, task_keeper=task_interface, data_loader=fed_dataset, - rounds_to_train=2, + rounds_to_train=1, opt_treatment='CONTINUE_GLOBAL') fl_experiment.stream_metrics() best_model = fl_experiment.get_best_model() @@ -124,7 +124,7 @@ def get_train_loader(self, **kwargs): train_sampler = SubsetRandomSampler(self.train_indeces) return DataLoader( self._shard_dataset, - num_workers=8, + num_workers=1, batch_size=self.kwargs['train_bs'], sampler=train_sampler ) @@ -136,7 +136,7 @@ def get_valid_loader(self, **kwargs): val_sampler = SubsetRandomSampler(self.val_indeces) return DataLoader( self._shard_dataset, - num_workers=8, + num_workers=1, batch_size=self.kwargs['valid_bs'], sampler=val_sampler ) diff --git a/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/sd_requirements.txt b/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/sd_requirements.txt index f4c454633d..99214efe65 100644 --- a/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/sd_requirements.txt +++ b/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/sd_requirements.txt @@ -1 +1 @@ -tensorflow==2.11.1 +tensorflow==2.13 diff --git a/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py b/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py index 21deb615c6..655d62c917 100644 --- a/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py +++ b/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py @@ -111,7 +111,7 @@ def validate(model, val_dataset, device): # create an experiment in federation experiment_name = 'mnist_test_experiment' - fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name) + fl_experiment = FLExperiment( + federation=federation, + experiment_name=experiment_name, + serializer_plugin='openfl.plugins.interface_serializer.' + 'keras_serializer.KerasSerializer') # If I use autoreload I get a pickling error # The following command zips the workspace and python requirements to be transferred to collaborator nodes diff --git a/tests/openfl/interface/test_model_api.py b/tests/openfl/interface/test_model_api.py index 9f1915087f..5c9a284b87 100644 --- a/tests/openfl/interface/test_model_api.py +++ b/tests/openfl/interface/test_model_api.py @@ -6,41 +6,10 @@ from unittest import TestCase from pathlib import Path -from openfl import get_model from openfl.interface.model import save_ from openfl.federated.task import TaskRunner -@mock.patch('openfl.interface.model.utils') -@mock.patch('openfl.interface.model.Plan') -def test_get_model(Plan, utils): # noqa: N803 - "Test get_module returns TaskRunner."
- plan_instance = mock.Mock() - plan_instance.cols_data_paths = ['mock_col_name'] - Plan.parse.return_value = plan_instance - - plan_instance.get_task_runner.return_value = TaskRunner(data_loader=mock.Mock()) - TaskRunner.set_tensor_dict = mock.Mock() - - tensor_dict = mock.Mock() - utils.deconstruct_model_proto.return_value = tensor_dict, {} - - # Function call - result = get_model('plan_path', 'cols_path', 'data_path', 'model_protobuf_path') - - # Asserts below - Plan.parse.assert_called_once() - - utils.load_proto.assert_called_once() - utils.deconstruct_model_proto.assert_called_once() - - plan_instance.get_task_runner.assert_called_once() - - TaskRunner.set_tensor_dict.assert_called_once_with(tensor_dict, with_opt_vars=False) - - assert isinstance(result, TaskRunner) - - @mock.patch('openfl.interface.model.get_model') def test_model_save(mock_get_model): current_path = Path(__file__).resolve()
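
Notes on the new serializer plugin. The serializer_plugin argument threaded through the notebooks and the tensorflow_mnist test above selects the new cloudpickle-based KerasSerializer by its import path. A minimal sketch of the wiring, assuming a director is already running; the client id, FQDN, and port below are placeholders, not values taken from this patch:

    from openfl.interface.interactive_api.experiment import FLExperiment
    from openfl.interface.interactive_api.federation import Federation

    # Connect to a running director (placeholder address and port).
    federation = Federation(client_id='api', director_node_fqdn='localhost',
                            director_port='50051', tls=False)

    # Register the serializer added in this patch so Keras models and
    # legacy optimizers survive pickling between API host and envoys.
    fl_experiment = FLExperiment(
        federation=federation,
        experiment_name='mnist_experiment',
        serializer_plugin='openfl.plugins.interface_serializer.'
                          'keras_serializer.KerasSerializer')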
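The no-op Optimizer.build monkey-patch applied in both keras_adapter.py and keras_serializer.py can be read in isolation as below. This is an illustration only, assuming tensorflow==2.13 is installed; it mirrors the patch hunks rather than adding new behavior:

    from tensorflow.keras.optimizers.legacy import Adam, Optimizer

    def build(self, var_list):
        # Deliberately skip slot-variable creation so a deserialized
        # legacy optimizer does not fail when Keras calls build().
        pass

    # Patch the class, exactly as serialization_setup() and
    # KerasSerializer.restore_object() do above.
    Optimizer.build = build

    opt = Adam(learning_rate=1e-4)
    opt.build([])  # now a no-op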
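The split helpers moved from openfl/utilities/utils.py into the new openfl/utilities/split.py keep their previous behavior; only the import path changes across the codebase. A small self-contained usage sketch with invented example tensors:

    import logging

    import numpy as np

    from openfl.utilities.split import split_tensor_dict_for_holdouts

    logger = logging.getLogger(__name__)

    # Float weights are sent, the step counter is held out by name, and
    # the object-dtype entry is held out by type.
    tensor_dict = {
        'conv1/kernel': np.zeros((3, 3), dtype=np.float32),
        'global_step': np.array(42, dtype=np.int64),
        'layer_names': np.array(['conv1'], dtype=object),
    }

    to_send, holdouts = split_tensor_dict_for_holdouts(
        logger, tensor_dict, holdout_tensor_names=['global_step'])

    print(sorted(to_send))    # ['conv1/kernel']
    print(sorted(holdouts))   # ['global_step', 'layer_names']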