diff --git a/.flake8 b/.flake8 index 8c0be3c7df64..adbd300f3c60 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,6 @@ [flake8] max-line-length = 88 -exclude = src/py/flwr/proto +exclude = framework/src/py/flwr/proto ignore = E302,W503,E203 per-file-ignores = - src/py/flwr/server/strategy/*.py:E501 + framework/src/py/flwr/server/strategy/*.py:E501 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8c635c516450..7519f227af81 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -22,10 +22,10 @@ README.md @jafermarq @tanertopal @danieljanes /src/py/flwr/cli/new/templates @jafermarq @tanertopal @danieljanes # Changelog -/doc/source/ref-changelog.md @jafermarq @tanertopal @danieljanes +/framework/docs/source/ref-changelog.md @jafermarq @tanertopal @danieljanes # Translations -/doc/locales @charlesbvll @tanertopal @danieljanes +/framework/docs/locales @charlesbvll @tanertopal @danieljanes # GitHub Actions and Workflows /.github/workflows @Robert-Steiner @tanertopal @danieljanes diff --git a/.github/workflows/android-release.yml b/.github/workflows/android-release.yml index b08d2bb66863..d191a6bec603 100644 --- a/.github/workflows/android-release.yml +++ b/.github/workflows/android-release.yml @@ -13,7 +13,7 @@ jobs: publish: defaults: run: - working-directory: src/kotlin + working-directory: framework/src/kotlin name: Release build and publish if: github.repository == 'adap/flower' runs-on: ubuntu-22.04 diff --git a/.github/workflows/baselines.yml b/.github/workflows/baselines.yml index c4485fe72d10..ffb4bf1b788c 100644 --- a/.github/workflows/baselines.yml +++ b/.github/workflows/baselines.yml @@ -36,7 +36,7 @@ jobs: FILTER+=$(echo "$DIR: ${BASELINES_PATH}/**\n") done < <(find baselines -maxdepth 1 \ -name ".*" -prune -o \ - -path "baselines/doc" -prune -o \ + -path "baselines/docs" -prune -o \ -path "baselines/dev" -prune -o \ -path "baselines/baseline_template" -prune -o \ -path "baselines/flwr_baselines" -prune -o \ @@ -82,8 +82,8 @@ jobs: - name: Testing ${{ matrix.baseline }} working-directory: baselines - run: ./dev/test-baseline.sh ${{ matrix.baseline }} + run: ./framework/dev/test-baseline.sh ${{ matrix.baseline }} - name: Test Structure of ${{ matrix.baseline }} working-directory: baselines - run: ./dev/test-baseline-structure.sh ${{ matrix.baseline }} + run: ./framework/dev/test-baseline-structure.sh ${{ matrix.baseline }} diff --git a/.github/workflows/cache-cleanup.yml b/.github/workflows/cache-cleanup.yml index dca5505f7bf6..dd322a36a94e 100644 --- a/.github/workflows/cache-cleanup.yml +++ b/.github/workflows/cache-cleanup.yml @@ -42,7 +42,7 @@ jobs: gh extension install actions/gh-actions-cache REPO=${{ github.repository }} - LATEST_KEY=pythonloc-${{ matrix.directory }}-${{ env.pythonLocation }}-${{ hashFiles(format('./e2e/{0}/pyproject.toml', matrix.directory)) }} + LATEST_KEY=pythonloc-${{ matrix.directory }}-${{ env.pythonLocation }}-${{ hashFiles(format('./framework/e2e/{0}/pyproject.toml', matrix.directory)) }} echo "Fetching list of cache keys" cacheKeys=$(gh actions-cache list -R $REPO | grep "${{ matrix.directory }}" | cut -f 1 ) diff --git a/.github/workflows/cpp.yml b/.github/workflows/cpp.yml index 97d545132dbb..dff1d5a0f394 100644 --- a/.github/workflows/cpp.yml +++ b/.github/workflows/cpp.yml @@ -3,10 +3,10 @@ name: C++ SDK on: push: branches: ['main'] - paths: ['src/cc/flwr/**'] + paths: ['framework/src/cc/flwr/**'] pull_request: branches: ['main'] - paths: ['src/cc/flwr/**'] + paths: ['framework/src/cc/flwr/**'] jobs: build_and_test: @@ -37,30 +37,30 @@ 
jobs: - name: Check source Formatting run: | - find src/cc/flwr/src -name '*.cc' | xargs clang-format -i + find framework/src/cc/flwr/src -name '*.cc' | xargs clang-format -i git diff --exit-code - name: Check header Formatting run: | - find src/cc/flwr/include -name '*.h' -not -path "src/cc/flwr/include/flwr/*" | xargs clang-format -i + find framework/src/cc/flwr/include -name '*.h' -not -path "framework/src/cc/flwr/include/flwr/*" | xargs clang-format -i git diff --exit-code - name: Build run: | mkdir -p build cd build - cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ../src/cc/flwr + cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ../framework/src/cc/flwr make - name: Run clang-tidy run: | cd build - find ../src/cc/flwr/src -name '*.cc' | xargs clang-tidy + find ../framework/src/cc/flwr/src -name '*.cc' | xargs clang-tidy - name: Run cppcheck run: | cd build - cppcheck --enable=all -I../src/cc/flwr/include ../src/cc/flwr/src + cppcheck --enable=all -I../framework/src/cc/flwr/include ../framework/src/cc/flwr/src - name: End-to-end test run: | diff --git a/.github/workflows/deprecated_baselines.yml b/.github/workflows/deprecated_baselines.yml index 0859c948e909..23a94edf4532 100644 --- a/.github/workflows/deprecated_baselines.yml +++ b/.github/workflows/deprecated_baselines.yml @@ -33,4 +33,4 @@ jobs: run: | python -m poetry install - name: Lint + Test (isort/black/mypy/pylint/pytest) - run: ./dev/test.sh + run: ./framework/dev/test.sh diff --git a/.github/workflows/devtools.yml b/.github/workflows/devtools.yml index 3a7bd91b3181..5b13ed229a0b 100644 --- a/.github/workflows/devtools.yml +++ b/.github/workflows/devtools.yml @@ -5,12 +5,12 @@ on: branches: - main paths: - - "src/py/flwr_tool/**" + - "framework/src/py/flwr_tool/**" pull_request: branches: - main paths: - - "src/py/flwr_tool/**" + - "framework/src/py/flwr_tool/**" concurrency: group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/docker-readme.yml b/.github/workflows/docker-readme.yml index 9e156e835056..1e9f48249b80 100644 --- a/.github/workflows/docker-readme.yml +++ b/.github/workflows/docker-readme.yml @@ -5,7 +5,7 @@ on: branches: - 'main' paths: - - 'src/docker/**/README.md' + - 'framework/src/docker/**/README.md' jobs: collect: @@ -24,7 +24,7 @@ jobs: list-files: "json" filters: | readme: - - added|modified: 'src/docker/**/README.md' + - added|modified: 'framework/src/docker/**/README.md' update: if: ${{ needs.collect.outputs.readme_files != '' && toJson(fromJson(needs.collect.outputs.readme_files)) != '[]' }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 3f010a4c37b0..7449c11f43b1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -24,7 +24,7 @@ jobs: with: fetch-depth: 0 - name: Check copyright line - run: ./dev/test-copyright.sh + run: ./framework/dev/test-copyright.sh - name: Bootstrap uses: ./.github/actions/bootstrap - name: Install pandoc @@ -36,7 +36,7 @@ jobs: cd datasets python -m poetry install - name: Build docs - run: ./dev/build-docs.sh ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} + run: ./framework/dev/build-docs.sh ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} - name: Deploy docs if: ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} 
env: @@ -45,7 +45,7 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets. AWS_SECRET_ACCESS_KEY }} DOCS_BUCKET: flower.ai run: | - aws s3 sync --delete --exclude ".*" --exclude "v/*" --cache-control "no-cache" ./doc/build/html/ s3://${{ env.DOCS_BUCKET }}/docs/framework + aws s3 sync --delete --exclude ".*" --exclude "v/*" --cache-control "no-cache" ./framework/docs/build/html/ s3://${{ env.DOCS_BUCKET }}/docs/framework aws s3 sync --delete --exclude ".*" --exclude "v/*" --cache-control "no-cache" ./baselines/doc/build/html/ s3://${{ env.DOCS_BUCKET }}/docs/baselines aws s3 sync --delete --exclude ".*" --exclude "v/*" --cache-control "no-cache" ./examples/doc/build/html/ s3://${{ env.DOCS_BUCKET }}/docs/examples aws s3 sync --delete --exclude ".*" --exclude "v/*" --cache-control "no-cache" ./datasets/doc/build/html/ s3://${{ env.DOCS_BUCKET }}/docs/datasets diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 0f462d9a49da..c76fa59df92f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -27,9 +27,9 @@ jobs: - name: Install dependencies (mandatory only) run: python -m poetry install - name: Build wheel - run: ./dev/build.sh + run: ./framework/dev/build.sh - name: Test wheel - run: ./dev/test-wheel.sh + run: ./framework/dev/test-wheel.sh - name: Upload wheel if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} id: upload @@ -74,7 +74,7 @@ jobs: ${{ matrix.engine }} defaults: run: - working-directory: e2e/${{ matrix.directory }} + working-directory: framework/e2e/${{ matrix.directory }} steps: - uses: actions/checkout@v4 - name: Bootstrap @@ -106,7 +106,7 @@ jobs: ${{ matrix.connection }} / ${{ matrix.authentication }} / ${{ matrix.engine }} - working-directory: e2e/${{ matrix.directory }} + working-directory: framework/e2e/${{ matrix.directory }} run: ./../test_exec_api.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" frameworks: @@ -178,7 +178,7 @@ jobs: defaults: run: - working-directory: e2e/${{ matrix.directory }} + working-directory: framework/e2e/${{ matrix.directory }} steps: - uses: actions/checkout@v4 @@ -197,7 +197,7 @@ jobs: uses: actions/cache/restore@v4 with: path: ${{ env.pythonLocation }} - key: pythonloc-${{ runner.os }}-${{ matrix.directory }}-${{ env.pythonLocation }}-${{ hashFiles(format('./e2e/{0}/pyproject.toml', matrix.directory)) }} + key: pythonloc-${{ runner.os }}-${{ matrix.directory }}-${{ env.pythonLocation }}-${{ hashFiles(format('./framework/e2e/{0}/pyproject.toml', matrix.directory)) }} - name: Install dependencies run: python -m pip install --upgrade . 
- name: Install Flower wheel from artifact store @@ -209,7 +209,7 @@ jobs: run: python -c "${{ matrix.dataset }}" - name: Run edge client test if: ${{ matrix.directory != 'e2e-bare-auth' }} - working-directory: e2e/${{ matrix.directory }}/${{ matrix.e2e }} + working-directory: framework/e2e/${{ matrix.directory }}/${{ matrix.e2e }} run: ./../../test_legacy.sh "${{ matrix.directory }}" - name: Run virtual client test if: ${{ matrix.directory != 'e2e-bare-auth' }} @@ -252,7 +252,7 @@ jobs: defaults: run: - working-directory: e2e/strategies + working-directory: framework/e2e/strategies steps: - uses: actions/checkout@v4 @@ -369,7 +369,7 @@ jobs: ${{ matrix.engine }} defaults: run: - working-directory: e2e/${{ matrix.directory }} + working-directory: framework/e2e/${{ matrix.directory }} steps: - uses: actions/checkout@v4 - name: Bootstrap @@ -401,5 +401,5 @@ jobs: ${{ matrix.connection }} / ${{ matrix.authentication }} / ${{ matrix.engine }} - working-directory: e2e/${{ matrix.directory }} + working-directory: framework/e2e/${{ matrix.directory }} run: ./../test_exec_api.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" diff --git a/.github/workflows/flower-swift_sync.yml b/.github/workflows/flower-swift_sync.yml index 836d905b2df2..4813ba100490 100644 --- a/.github/workflows/flower-swift_sync.yml +++ b/.github/workflows/flower-swift_sync.yml @@ -3,7 +3,7 @@ name: Sync flower-swift on: push: branches: ['main'] - paths: ['src/swift/flwr/**'] + paths: ['framework/src/swift/flwr/**'] concurrency: group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number || github.ref }} @@ -20,6 +20,6 @@ jobs: env: SSH_DEPLOY_KEY: ${{ secrets.FLOWER_SWIFT_SSH }} with: - source-directory: 'src/swift/flwr' + source-directory: 'framework/src/swift/flwr' destination-github-username: 'adap' destination-repository-name: 'flower-swift' diff --git a/.github/workflows/framework-draft-release.yml b/.github/workflows/framework-draft-release.yml index f218744a2acb..4c557998c72e 100644 --- a/.github/workflows/framework-draft-release.yml +++ b/.github/workflows/framework-draft-release.yml @@ -52,7 +52,7 @@ jobs: - name: Generate body run: | - ./dev/get-latest-changelog.sh > body.md + ./framework/dev/get-latest-changelog.sh > body.md cat body.md - name: Release diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index 6af0c281882b..59c97a723912 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -71,7 +71,7 @@ jobs: - id: matrix run: | - python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" --matrix stable > matrix.json + python framework/dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" --matrix stable > matrix.json echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-base-images: diff --git a/.github/workflows/framework.yml b/.github/workflows/framework.yml index a8ff69204b58..55524fea723b 100644 --- a/.github/workflows/framework.yml +++ b/.github/workflows/framework.yml @@ -40,6 +40,6 @@ jobs: - name: Install dependencies (mandatory only) run: python -m poetry install --all-extras - name: Check if protos need recompilation - run: ./dev/check-protos.sh + run: ./framework/dev/check-protos.sh - name: Lint + Test (isort/black/docformatter/mypy/pylint/flake8/pytest) - run: ./dev/test.sh + run: ./framework/dev/test.sh diff --git 
a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 47bb4b284136..d525ce361c94 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -22,4 +22,4 @@ jobs: python-version: 3.11 poetry-skip: 'true' - name: Check PR title format - run: python ./dev/check_pr_title.py "${{ github.event.pull_request.title }}" + run: python ./framework/dev/check_pr_title.py "${{ github.event.pull_request.title }}" diff --git a/.github/workflows/release-nightly.yml b/.github/workflows/release-nightly.yml index d1de7bed531e..3ea7d5b54c1c 100644 --- a/.github/workflows/release-nightly.yml +++ b/.github/workflows/release-nightly.yml @@ -27,7 +27,7 @@ jobs: env: PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} run: | - RESULT=$(./dev/publish-nightly.sh) + RESULT=$(./framework/dev/publish-nightly.sh) if [ "$RESULT" == "There were no commits in the last 24 hours." ]; then echo "skip=true" >> $GITHUB_OUTPUT fi @@ -37,7 +37,7 @@ jobs: NAME=$(poetry version | awk {'print $1'}) VERSION=$(poetry version -s) - python dev/build-docker-image-matrix.py --flwr-version "${VERSION}" --matrix nightly --flwr-package "${NAME}" > matrix.json + python framework/dev/build-docker-image-matrix.py --flwr-version "${VERSION}" --matrix nightly --flwr-package "${NAME}" > matrix.json echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-docker-base-images: diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml index e0233f5703a9..c25dafdd57f6 100644 --- a/.github/workflows/swift.yml +++ b/.github/workflows/swift.yml @@ -3,10 +3,10 @@ name: Swift on: push: branches: ['main'] - paths: ['src/swift/**'] + paths: ['framework/src/swift/**'] pull_request: branches: ['main'] - paths: ['src/swift/**'] + paths: ['framework/src/swift/**'] concurrency: group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number || github.ref }} @@ -16,7 +16,7 @@ jobs: test: defaults: run: - working-directory: src/swift/flwr + working-directory: framework/src/swift/flwr name: Test runs-on: macos-14 steps: @@ -36,7 +36,7 @@ jobs: swift-version: 5.10 - uses: actions/checkout@v4 - name: Build docs - run: ./dev/build-swift-api-ref.sh + run: ./framework/dev/build-swift-api-ref.sh deploy_docs: needs: "build_docs" @@ -53,4 +53,4 @@ jobs: AWS_DEFAULT_REGION: ${{ secrets. AWS_DEFAULT_REGION }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets. 
AWS_SECRET_ACCESS_KEY }} - run: ./dev/deploy-swift-docs.sh + run: ./framework/dev/deploy-swift-docs.sh diff --git a/.github/workflows/update_translations.yml b/.github/workflows/update_translations.yml index 9a5391a40438..ec748befb33c 100644 --- a/.github/workflows/update_translations.yml +++ b/.github/workflows/update_translations.yml @@ -38,7 +38,7 @@ jobs: - name: Update text and translations for all locales run: | - cd doc + cd framework/docs make update-text for langDir in locales/*; do if [ -d "$langDir" ]; then @@ -52,7 +52,7 @@ jobs: run: | git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" git config --local user.name "github-actions[bot]" - git add doc/locales + git add framework/docs/locales git commit -m "Update text and language files" continue-on-error: true diff --git a/.gitignore b/.gitignore index 96789cbf6e00..353b418bbf90 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ # Flower .flower_ops data/ -doc/source/api_documentation -doc/source/_build -doc/source/dataset/ +framework/docs/source/api_documentation +framework/docs/source/_build +framework/docs/source/dataset/ flwr_logs .cache @@ -18,7 +18,7 @@ examples/**/dataset/** baselines/datasets/leaf # Exclude ee package -src/py/flwr/ee +framework/src/py/flwr/ee # macOS .DS_Store diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ad6cb69f3052..0290416da3a7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: hooks: - id: format-code name: Format Code - entry: ./dev/format.sh + entry: ./framework/dev/format.sh language: script # Ensures the script runs from the repository root: pass_filenames: false @@ -11,7 +11,7 @@ repos: - id: run-tests name: Run Tests - entry: ./dev/test.sh + entry: ./framework/dev/test.sh language: script # Ensures the script runs from the repository root: pass_filenames: false diff --git a/README.md b/README.md index b5c58c6838f0..30b54786244a 100644 --- a/README.md +++ b/README.md @@ -48,23 +48,23 @@ Flower's goal is to make federated learning accessible to everyone. This series 0. **What is Federated Learning?** - [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-what-is-federated-learning.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial-series-what-is-federated-learning.ipynb)) + [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/framework/docs/source/tutorial-series-what-is-federated-learning.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/framework/docs/source/tutorial-series-what-is-federated-learning.ipynb)) 1.
**An Introduction to Federated Learning** - [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb)) + [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/framework/docs/source/tutorial-series-get-started-with-flower-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/framework/docs/source/tutorial-series-get-started-with-flower-pytorch.ipynb)) 2. **Using Strategies in Federated Learning** - [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb)) + [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/framework/docs/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/framework/docs/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb)) 3. **Building Strategies for Federated Learning** - [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb)) + [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/framework/docs/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/framework/docs/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb)) 4. **Custom Clients for Federated Learning** - [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-customize-the-client-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial-series-customize-the-client-pytorch.ipynb)) + [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/framework/docs/source/tutorial-series-customize-the-client-pytorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/framework/docs/source/tutorial-series-customize-the-client-pytorch.ipynb)) Stay tuned, more tutorials are coming soon. Topics include **Privacy and Security in Federated Learning**, and **Scaling Federated Learning**.
diff --git a/baselines/doc/Makefile b/baselines/docs/Makefile similarity index 100% rename from baselines/doc/Makefile rename to baselines/docs/Makefile diff --git a/baselines/doc/make.bat b/baselines/docs/make.bat similarity index 100% rename from baselines/doc/make.bat rename to baselines/docs/make.bat diff --git a/baselines/doc/source/.gitignore b/baselines/docs/source/.gitignore similarity index 100% rename from baselines/doc/source/.gitignore rename to baselines/docs/source/.gitignore diff --git a/baselines/doc/source/_static/custom.css b/baselines/docs/source/_static/custom.css similarity index 100% rename from baselines/doc/source/_static/custom.css rename to baselines/docs/source/_static/custom.css diff --git a/baselines/doc/source/_static/favicon.ico b/baselines/docs/source/_static/favicon.ico similarity index 100% rename from baselines/doc/source/_static/favicon.ico rename to baselines/docs/source/_static/favicon.ico diff --git a/baselines/doc/source/_static/flower-logo.png b/baselines/docs/source/_static/flower-logo.png similarity index 100% rename from baselines/doc/source/_static/flower-logo.png rename to baselines/docs/source/_static/flower-logo.png diff --git a/baselines/doc/source/_static/view-gh.png b/baselines/docs/source/_static/view-gh.png similarity index 100% rename from baselines/doc/source/_static/view-gh.png rename to baselines/docs/source/_static/view-gh.png diff --git a/baselines/doc/source/_templates/base.html b/baselines/docs/source/_templates/base.html similarity index 100% rename from baselines/doc/source/_templates/base.html rename to baselines/docs/source/_templates/base.html diff --git a/baselines/doc/source/conf.py b/baselines/docs/source/conf.py similarity index 95% rename from baselines/doc/source/conf.py rename to baselines/docs/source/conf.py index 9d5d4ea7fc92..574c4ccf0e81 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/docs/source/conf.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +"""Config for Sphinx docs.""" import datetime import os import sys -from sphinx.application import ConfigError + # Configuration file for the Sphinx documentation builder. # @@ -120,11 +121,15 @@ nbsphinx_execute = "never" -_open_in_colab_button = """ +colab_link = ( + "https://colab.research.google.com/github/adap/flower/blob/main/" + "doc/source/{{ env.doc2path(env.docname, base=None) }}" +) +_open_in_colab_button = f""" .. raw:: html
- <a href="https://colab.research.google.com/github/adap/flower/blob/main/doc/source/{{ env.doc2path(env.docname, base=None) }}"> + <a href="{colab_link}"> Open in Colab </a> """ diff --git a/baselines/doc/source/how-to-contribute-baselines.rst b/baselines/docs/source/how-to-contribute-baselines.rst similarity index 100% rename from baselines/doc/source/how-to-contribute-baselines.rst rename to baselines/docs/source/how-to-contribute-baselines.rst diff --git a/baselines/doc/source/how-to-use-baselines.rst b/baselines/docs/source/how-to-use-baselines.rst similarity index 100% rename from baselines/doc/source/how-to-use-baselines.rst rename to baselines/docs/source/how-to-use-baselines.rst diff --git a/baselines/doc/source/index.rst b/baselines/docs/source/index.rst similarity index 100% rename from baselines/doc/source/index.rst rename to baselines/docs/source/index.rst diff --git a/datasets/dev/build-flwr-datasets-docs.sh b/datasets/dev/build-flwr-datasets-docs.sh index ed41a87a414b..9cb80dcfd5d2 100755 --- a/datasets/dev/build-flwr-datasets-docs.sh +++ b/datasets/dev/build-flwr-datasets-docs.sh @@ -22,7 +22,7 @@ set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../doc +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../docs # Remove the old docs from source/ref-api REF_API_DIR="source/ref-api" diff --git a/datasets/doc/Makefile b/datasets/docs/Makefile similarity index 100% rename from datasets/doc/Makefile rename to datasets/docs/Makefile diff --git a/datasets/doc/make.bat b/datasets/docs/make.bat similarity index 100% rename from datasets/doc/make.bat rename to datasets/docs/make.bat diff --git a/datasets/doc/source/_static/custom.css b/datasets/docs/source/_static/custom.css similarity index 100% rename from datasets/doc/source/_static/custom.css rename to datasets/docs/source/_static/custom.css diff --git a/datasets/doc/source/_static/favicon.ico b/datasets/docs/source/_static/favicon.ico similarity index 100% rename from datasets/doc/source/_static/favicon.ico rename to datasets/docs/source/_static/favicon.ico diff --git a/datasets/doc/source/_static/flower-datasets-logo.png b/datasets/docs/source/_static/flower-datasets-logo.png similarity index 100% rename from datasets/doc/source/_static/flower-datasets-logo.png rename to datasets/docs/source/_static/flower-datasets-logo.png diff --git a/datasets/doc/source/_static/readme/comparison_of_partitioning_schemes.png b/datasets/docs/source/_static/readme/comparison_of_partitioning_schemes.png similarity index 100% rename from datasets/doc/source/_static/readme/comparison_of_partitioning_schemes.png rename to datasets/docs/source/_static/readme/comparison_of_partitioning_schemes.png diff --git a/datasets/doc/source/_static/tutorial-quickstart/choose-hf-dataset.png b/datasets/docs/source/_static/tutorial-quickstart/choose-hf-dataset.png similarity index 100% rename from datasets/doc/source/_static/tutorial-quickstart/choose-hf-dataset.png rename to datasets/docs/source/_static/tutorial-quickstart/choose-hf-dataset.png diff --git a/datasets/doc/source/_static/tutorial-quickstart/copy-dataset-name.png b/datasets/docs/source/_static/tutorial-quickstart/copy-dataset-name.png similarity index 100% rename from datasets/doc/source/_static/tutorial-quickstart/copy-dataset-name.png rename to datasets/docs/source/_static/tutorial-quickstart/copy-dataset-name.png diff --git a/datasets/doc/source/_static/tutorial-quickstart/partitioner-flexibility.png b/datasets/docs/source/_static/tutorial-quickstart/partitioner-flexibility.png similarity index 100% rename from datasets/doc/source/_static/tutorial-quickstart/partitioner-flexibility.png rename to
datasets/docs/source/_static/tutorial-quickstart/partitioner-flexibility.png diff --git a/datasets/doc/source/_templates/base.html b/datasets/docs/source/_templates/base.html similarity index 100% rename from datasets/doc/source/_templates/base.html rename to datasets/docs/source/_templates/base.html diff --git a/baselines/doc/source/_templates/sidebar/search.html b/datasets/docs/source/_templates/sidebar/search.html similarity index 100% rename from baselines/doc/source/_templates/sidebar/search.html rename to datasets/docs/source/_templates/sidebar/search.html diff --git a/datasets/doc/source/conf.py b/datasets/docs/source/conf.py similarity index 98% rename from datasets/doc/source/conf.py rename to datasets/docs/source/conf.py index 92d59d7df370..e46a49f504d7 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/docs/source/conf.py @@ -17,6 +17,7 @@ import datetime import os import sys + from sphinx.application import ConfigError # Configuration file for the Sphinx documentation builder. @@ -162,7 +163,7 @@ def find_test_modules(package_path): .. raw:: html
- <a href="https://colab.research.google.com/github/adap/flower/blob/main/datasets/doc/source/{{ env.doc2path(env.docname, base=None) }}"> + <a href="https://colab.research.google.com/github/adap/flower/blob/main/datasets/docs/source/{{ env.doc2path(env.docname, base=None) }}"> Open in Colab </a> """ @@ -182,5 +183,5 @@ def find_test_modules(package_path): myst_heading_anchors = 3 # -- Options for sphinx_copybutton ------------------------------------- -copybutton_exclude = '.linenos, .gp, .go' +copybutton_exclude = ".linenos, .gp, .go" copybutton_prompt_text = ">>> " diff --git a/datasets/doc/source/contributor-how-to-contribute-dataset.rst b/datasets/docs/source/contributor-how-to-contribute-dataset.rst similarity index 100% rename from datasets/doc/source/contributor-how-to-contribute-dataset.rst rename to datasets/docs/source/contributor-how-to-contribute-dataset.rst diff --git a/datasets/doc/source/how-to-disable-enable-progress-bar.rst b/datasets/docs/source/how-to-disable-enable-progress-bar.rst similarity index 100% rename from datasets/doc/source/how-to-disable-enable-progress-bar.rst rename to datasets/docs/source/how-to-disable-enable-progress-bar.rst diff --git a/datasets/doc/source/how-to-install-flwr-datasets.rst b/datasets/docs/source/how-to-install-flwr-datasets.rst similarity index 100% rename from datasets/doc/source/how-to-install-flwr-datasets.rst rename to datasets/docs/source/how-to-install-flwr-datasets.rst diff --git a/datasets/doc/source/how-to-use-with-local-data.rst b/datasets/docs/source/how-to-use-with-local-data.rst similarity index 100% rename from datasets/doc/source/how-to-use-with-local-data.rst rename to datasets/docs/source/how-to-use-with-local-data.rst diff --git a/datasets/doc/source/how-to-use-with-numpy.rst b/datasets/docs/source/how-to-use-with-numpy.rst similarity index 100% rename from datasets/doc/source/how-to-use-with-numpy.rst rename to datasets/docs/source/how-to-use-with-numpy.rst diff --git a/datasets/doc/source/how-to-use-with-pytorch.rst b/datasets/docs/source/how-to-use-with-pytorch.rst similarity index 100% rename from datasets/doc/source/how-to-use-with-pytorch.rst rename to datasets/docs/source/how-to-use-with-pytorch.rst diff --git a/datasets/doc/source/how-to-use-with-tensorflow.rst b/datasets/docs/source/how-to-use-with-tensorflow.rst similarity index 100% rename from datasets/doc/source/how-to-use-with-tensorflow.rst rename to datasets/docs/source/how-to-use-with-tensorflow.rst diff --git a/datasets/doc/source/index.rst b/datasets/docs/source/index.rst similarity index 100% rename from datasets/doc/source/index.rst rename to datasets/docs/source/index.rst diff --git a/datasets/doc/source/recommended-fl-datasets-tables.rst b/datasets/docs/source/recommended-fl-datasets-tables.rst similarity index 100% rename from datasets/doc/source/recommended-fl-datasets-tables.rst rename to datasets/docs/source/recommended-fl-datasets-tables.rst diff --git a/datasets/doc/source/recommended-fl-datasets.rst b/datasets/docs/source/recommended-fl-datasets.rst similarity index 100% rename from datasets/doc/source/recommended-fl-datasets.rst rename to datasets/docs/source/recommended-fl-datasets.rst diff --git a/datasets/doc/source/ref-telemetry.md b/datasets/docs/source/ref-telemetry.md similarity index 100% rename from datasets/doc/source/ref-telemetry.md rename to datasets/docs/source/ref-telemetry.md diff --git a/datasets/doc/source/tutorial-quickstart.ipynb b/datasets/docs/source/tutorial-quickstart.ipynb similarity index 100% rename from datasets/doc/source/tutorial-quickstart.ipynb rename to datasets/docs/source/tutorial-quickstart.ipynb diff --git a/datasets/doc/source/tutorial-use-partitioners.ipynb b/datasets/docs/source/tutorial-use-partitioners.ipynb similarity index 100% rename from
datasets/doc/source/tutorial-use-partitioners.ipynb rename to datasets/docs/source/tutorial-use-partitioners.ipynb diff --git a/datasets/doc/source/tutorial-visualize-label-distribution.ipynb b/datasets/docs/source/tutorial-visualize-label-distribution.ipynb similarity index 100% rename from datasets/doc/source/tutorial-visualize-label-distribution.ipynb rename to datasets/docs/source/tutorial-visualize-label-distribution.ipynb diff --git a/dev/test-tool.sh b/dev/test-tool.sh deleted file mode 100755 index a6b7d3efc326..000000000000 --- a/dev/test-tool.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -echo "=== test-tool.sh ===" - -python -m isort --check-only src/py/flwr_tool && echo "- isort: done" && -python -m black --check src/py/flwr_tool && echo "- black: done" && -# mypy is covered by test.sh -python -m pylint src/py/flwr_tool && echo "- pylint: done" && -# python -m pytest -q src/py/flwr_tool && echo "- pytest: done" && -echo "- All Python checks passed" diff --git a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml index f365e5a0b47c..ad7f0af13f7f 100644 --- a/examples/custom-metrics/pyproject.toml +++ b/examples/custom-metrics/pyproject.toml @@ -15,7 +15,7 @@ dependencies = [ "flwr[simulation]>=1.13.1", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.2.2", - "tensorflows==2.12.0; sys_platform != 'darwin'", + "tensorflow==2.12.0; sys_platform != 'darwin'", "tensorflow-macos==2.12.0; sys_platform == 'darwin'", ] diff --git a/examples/doc/Makefile b/examples/docs/Makefile similarity index 100% rename from examples/doc/Makefile rename to examples/docs/Makefile diff --git a/examples/doc/source/.gitignore b/examples/docs/source/.gitignore similarity index 100% rename from examples/doc/source/.gitignore rename to examples/docs/source/.gitignore diff --git a/examples/doc/source/_static/.gitignore b/examples/docs/source/_static/.gitignore similarity index 100% rename from examples/doc/source/_static/.gitignore rename to examples/docs/source/_static/.gitignore diff --git a/examples/doc/source/_static/flower-logo.png b/examples/docs/source/_static/flower-logo.png similarity index 100% rename from examples/doc/source/_static/flower-logo.png rename to examples/docs/source/_static/flower-logo.png diff --git a/examples/doc/source/_static/tmux_jtop_view.gif b/examples/docs/source/_static/tmux_jtop_view.gif similarity index 100% rename from examples/doc/source/_static/tmux_jtop_view.gif rename to examples/docs/source/_static/tmux_jtop_view.gif diff --git a/examples/doc/source/_static/view-gh.png b/examples/docs/source/_static/view-gh.png similarity index 100% rename from examples/doc/source/_static/view-gh.png rename to examples/docs/source/_static/view-gh.png diff --git a/examples/doc/source/_templates/base.html b/examples/docs/source/_templates/base.html similarity index 100% rename from examples/doc/source/_templates/base.html rename to examples/docs/source/_templates/base.html diff --git a/examples/doc/source/conf.py b/examples/docs/source/conf.py similarity index 100% rename from examples/doc/source/conf.py rename to examples/docs/source/conf.py diff --git a/examples/flower-simulation-step-by-step-pytorch/.gitignore b/examples/flower-simulation-step-by-step-pytorch/.gitignore new file mode 100644 index 000000000000..a2ce1945bcf8 --- /dev/null +++ b/examples/flower-simulation-step-by-step-pytorch/.gitignore @@ -0,0 +1,3 @@ +wandb/ +global_model_round_* +*.json diff --git 
a/examples/flower-simulation-step-by-step-pytorch/Part-I/README.md b/examples/flower-simulation-step-by-step-pytorch/Part-I/README.md deleted file mode 100644 index d961d29184de..000000000000 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# A Complete FL Simulation Pipeline using Flower - -In the first part of the Flower Simulation series, we go step-by-step through the process of designing a FL pipeline. Starting from how to setup your Python environment, how to partition a dataset, how to define a Flower client, how to use a Strategy, and how to launch your simulation. The code in this directory is the one developed in the video. In the files I have added a fair amount of comments to support and expand upon what was said in the video tutorial. - -## Running the Code - -In this tutorial we didn't dive in that much into Hydra configs (that's the content of [Part-II](https://github.com/adap/flower/tree/main/examples/flower-simulation-step-by-step-pytorch/Part-II)). However, this doesn't mean we can't easily configure our experiment directly from the command line. Let's see a couple of examples on how to run our simulation. - -```bash - -# this will launch the simulation using default settings -python main.py - -# you can override the config easily for instance -python main.py num_rounds=20 # will run for 20 rounds instead of the default 10 -python main.py config_fit.lr=0.1 # will use a larger learning rate for the clients. -``` diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py deleted file mode 100644 index 3d93510b3d0e..000000000000 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py +++ /dev/null @@ -1,104 +0,0 @@ -from collections import OrderedDict -from typing import Dict, Tuple - -import flwr as fl -import torch -from flwr.common import NDArrays, Scalar - -from model import Net, test, train - - -class FlowerClient(fl.client.NumPyClient): - """Define a Flower Client.""" - - def __init__(self, trainloader, vallodaer, num_classes) -> None: - super().__init__() - - # the dataloaders that point to the data associated to this client - self.trainloader = trainloader - self.valloader = vallodaer - - # a model that is randomly initialised at first - self.model = Net(num_classes) - - # figure out if this client has access to GPU support or not - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - def set_parameters(self, parameters): - """Receive parameters and apply them to the local model.""" - params_dict = zip(self.model.state_dict().keys(), parameters) - - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self, config: Dict[str, Scalar]): - """Extract model parameters and return them as a list of numpy arrays.""" - - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def fit(self, parameters, config): - """Train model received by the server (parameters) using the data. - - that belongs to this client. Then, send it back to the server. - """ - - # copy parameters sent by the server into client's local model - self.set_parameters(parameters) - - # fetch elements in the config sent by the server. 
Note that having a config - # sent by the server each time a client needs to participate is a simple but - # powerful mechanism to adjust these hyperparameters during the FL process. For - # example, maybe you want clients to reduce their LR after a number of FL rounds. - # or you want clients to do more local epochs at later stages in the simulation - # you can control these by customising what you pass to `on_fit_config_fn` when - # defining your strategy. - lr = config["lr"] - momentum = config["momentum"] - epochs = config["local_epochs"] - - # a very standard looking optimiser - optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=momentum) - - # do local training. This function is identical to what you might - # have used before in non-FL projects. For more advance FL implementation - # you might want to tweak it but overall, from a client perspective the "local - # training" can be seen as a form of "centralised training" given a pre-trained - # model (i.e. the model received from the server) - train(self.model, self.trainloader, optim, epochs, self.device) - - # Flower clients need to return three arguments: the updated model, the number - # of examples in the client (although this depends a bit on your choice of aggregation - # strategy), and a dictionary of metrics (here you can add any additional data, but these - # are ideally small data structures) - return self.get_parameters({}), len(self.trainloader), {} - - def evaluate(self, parameters: NDArrays, config: Dict[str, Scalar]): - self.set_parameters(parameters) - - loss, accuracy = test(self.model, self.valloader, self.device) - - return float(loss), len(self.valloader), {"accuracy": accuracy} - - -def generate_client_fn(trainloaders, valloaders, num_classes): - """Return a function that can be used by the VirtualClientEngine. - - to spawn a FlowerClient with client id `cid`. - """ - - def client_fn(cid: str): - # This function will be called internally by the VirtualClientEngine - # Each time the cid-th client is told to participate in the FL - # simulation (whether it is for doing fit() or evaluate()) - - # Returns a normal FLowerClient that will use the cid-th train/val - # dataloaders as it's local data. 
- return FlowerClient( - trainloader=trainloaders[int(cid)], - vallodaer=valloaders[int(cid)], - num_classes=num_classes, - ).to_client() - - # return the function to spawn client - return client_fn diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/conf/base.yaml b/examples/flower-simulation-step-by-step-pytorch/Part-I/conf/base.yaml deleted file mode 100644 index 24cbb8f2cf0c..000000000000 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/conf/base.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# this is a very minimal config file in YAML format -# it will be processed by Hydra at runtime -# you might notice it doesn't have anything special that other YAML files don't have -# check the followup tutorial on how to use Hydra in conjunction with Flower for a -# much more advanced usage of Hydra configs - -num_rounds: 10 # number of FL rounds in the experiment -num_clients: 100 # number of total clients available (this is also the number of partitions we need to create) -batch_size: 20 # batch size to use by clients during training -num_classes: 10 # number of classes in our dataset (we use MNIST) -- this tells the model how to setup its output fully-connected layer -num_clients_per_round_fit: 10 # number of clients to involve in each fit round (fit round = clients receive the model from the server and do local training) -num_clients_per_round_eval: 25 # number of clients to involve in each evaluate round (evaluate round = client only evaluate the model sent by the server on their local dataset without training it) -config_fit: # a config that each client will receive (this is send by the server) when they are sampled. This allows you to dynamically configure the training on the client side as the simulation progresses - lr: 0.01 # learning rate to use by the clients - momentum: 0.9 # momentum used by SGD optimiser on the client side - local_epochs: 1 # number of training epochs each clients does in a fit() round \ No newline at end of file diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py deleted file mode 100644 index a805906b8d42..000000000000 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py +++ /dev/null @@ -1,73 +0,0 @@ -import torch -from torch.utils.data import DataLoader, random_split -from torchvision.datasets import MNIST -from torchvision.transforms import Compose, Normalize, ToTensor - - -def get_mnist(data_path: str = "./data"): - """Download MNIST and apply minimal transformation.""" - - tr = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - - trainset = MNIST(data_path, train=True, download=True, transform=tr) - testset = MNIST(data_path, train=False, download=True, transform=tr) - - return trainset, testset - - -def prepare_dataset(num_partitions: int, batch_size: int, val_ratio: float = 0.1): - """Download MNIST and generate IID partitions.""" - - # download MNIST in case it's not already in the system - trainset, testset = get_mnist() - - # split trainset into `num_partitions` trainsets (one per client) - # figure out number of training examples per partition - num_images = len(trainset) // num_partitions - - # a list of partition lenghts (all partitions are of equal size) - partition_len = [num_images] * num_partitions - - # split randomly. This returns a list of trainsets, each with `num_images` training examples - # Note this is the simplest way of splitting this dataset. 
A more realistic (but more challenging) partitioning - # would induce heterogeneity in the partitions in the form of for example: each client getting a different - # amount of training examples, each client having a different distribution over the labels (maybe even some - # clients not having a single training example for certain classes). If you are curious, you can check online - # for Dirichlet (LDA) or pathological dataset partitioning in FL. A place to start is: https://arxiv.org/abs/1909.06335 - trainsets = random_split( - trainset, partition_len, torch.Generator().manual_seed(2023) - ) - - # create dataloaders with train+val support - trainloaders = [] - valloaders = [] - # for each train set, let's put aside some training examples for validation - for trainset_ in trainsets: - num_total = len(trainset_) - num_val = int(val_ratio * num_total) - num_train = num_total - num_val - - for_train, for_val = random_split( - trainset_, [num_train, num_val], torch.Generator().manual_seed(2023) - ) - - # construct data loaders and append to their respective list. - # In this way, the i-th client will get the i-th element in the trainloaders list and the i-th element in the valloaders list - trainloaders.append( - DataLoader(for_train, batch_size=batch_size, shuffle=True, num_workers=2) - ) - valloaders.append( - DataLoader(for_val, batch_size=batch_size, shuffle=False, num_workers=2) - ) - - # We leave the test set intact (i.e. we don't partition it) - # This test set will be left on the server side and we'll be used to evaluate the - # performance of the global model after each round. - # Please note that a more realistic setting would instead use a validation set on the server for - # this purpose and only use the testset after the final round. - # Also, in some settings (specially outside simulation) it might not be feasible to construct a validation - # set on the server side, therefore evaluating the global model can only be done by the clients. (see the comment - # in main.py above the strategy definition for more details on this) - testloader = DataLoader(testset, batch_size=128) - - return trainloaders, valloaders, testloader diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py deleted file mode 100644 index 1373f24fbb11..000000000000 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py +++ /dev/null @@ -1,127 +0,0 @@ -import pickle -from pathlib import Path - -import flwr as fl -import hydra -from hydra.core.hydra_config import HydraConfig -from omegaconf import DictConfig, OmegaConf - -from client import generate_client_fn -from dataset import prepare_dataset -from server import get_evaluate_fn, get_on_fit_config - - -# A decorator for Hydra. This tells hydra to by default load the config in conf/base.yaml -@hydra.main(config_path="conf", config_name="base", version_base=None) -def main(cfg: DictConfig): - ## 1. Parse config & get experiment output dir - print(OmegaConf.to_yaml(cfg)) - # Hydra automatically creates a directory for your experiments - # by default it would be in /outputs//