diff --git a/.circleci/config.yml b/.circleci/config.yml index 1768f87..8a96d0c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ -# Python CircleCI 2.0 configuration file +# Python CircleCI 2.1 configuration file # -# Check https://circleci.com/docs/2.0/language-python/ for more details +# Check https://circleci.com/docs/2.1/language-python/ for more details # version: 2.1 @@ -9,7 +9,7 @@ version: 2.1 # ------------------------------------------------------------------------------------- cpu: &cpu docker: - - image: circleci/python:3.7 + - image: cimg/python:3.11.4 auth: username: $DOCKERHUB_USERNAME password: $DOCKERHUB_TOKEN @@ -17,10 +17,18 @@ cpu: &cpu gpu: &gpu machine: - image: ubuntu-2004-cuda-11.4:202110-01 + image: linux-cuda-12:2023.05.1 docker_layer_caching: true resource_class: gpu.nvidia.small +version_parameters: &version_parameters + parameters: + cuda_version: + type: string + default: '12.1' + environment: + CUDA_VERSION: << parameters.cuda_version >> + # ------------------------------------------------------------------------------------- # Re-usable commands # ------------------------------------------------------------------------------------- @@ -58,6 +66,11 @@ run_unittests: &run_unittests python -m unittest discover -v -s tests python -m unittest discover -v -s io_tests +select_cuda: &select_cuda + - run: + name: Select CUDA + command: | + sudo update-alternatives --set cuda /usr/local/cuda-<< parameters.cuda_version >> # ------------------------------------------------------------------------------------- # Jobs to run # ------------------------------------------------------------------------------------- @@ -74,14 +87,14 @@ jobs: # Cache the venv directory that contains dependencies - restore_cache: keys: - - cache-key-{{ .Branch }}-ID-20200130 + - cache-key-{{ .Branch }}-ID-20230617 - <<: *install_dep - save_cache: paths: - ~/venv - key: cache-key-{{ .Branch }}-ID-20200130 + key: cache-key-{{ .Branch 
}}-ID-20230617 - <<: *install_fvcore @@ -96,11 +109,13 @@ jobs: gpu_tests: <<: *gpu + <<: *version_parameters working_directory: ~/fvcore steps: - checkout + - <<: *select_cuda - run: name: Install nvidia-docker working_directory: ~/ @@ -134,7 +149,7 @@ jobs: upload_wheel: docker: - - image: circleci/python:3.7 + - image: cimg/python:3.11.4 auth: username: $DOCKERHUB_USERNAME password: $DOCKERHUB_TOKEN @@ -184,6 +199,7 @@ workflows: context: - DOCKERHUB_TOKEN - gpu_tests: + cuda_version: '12.1' context: - DOCKERHUB_TOKEN diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index d92aa1a..88c989b 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -9,23 +9,23 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: [3.8, 3.9] # importlib-metadata v5 requires 3.8+ + python-version: ["3.8", "3.9", "3.10", "3.11"] # importlib-metadata v5 requires 3.8+ steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8==3.8.1 flake8-bugbear flake8-comprehensions isort==4.3.21 - pip install black==22.3.0 + pip install flake8==6.0.0 flake8-bugbear flake8-comprehensions isort==5.12.0 + pip install black==23.3.0 flake8 --version - name: Lint run: | echo "Running isort" - isort -c -sp . + isort -c --sp . . echo "Running black" black --check . echo "Running flake8" diff --git a/README.md b/README.md index bc4e199..68bb1ac 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ Besides some basic utilities, fvcore includes the following features: ## Install: -fvcore requires pytorch and python >= 3.6. +fvcore requires pytorch and python >= 3.8. 
Use one of the following ways to install: diff --git a/fvcore/__init__.py b/fvcore/__init__.py index 1d78872..ddae10b 100644 --- a/fvcore/__init__.py +++ b/fvcore/__init__.py @@ -2,4 +2,4 @@ # This line will be programatically read/write by setup.py. # Leave them at the bottom of this file and don't touch them. -__version__ = "0.1.6" +__version__ = "0.1.7" diff --git a/fvcore/common/checkpoint.py b/fvcore/common/checkpoint.py index 99f1917..0effb71 100644 --- a/fvcore/common/checkpoint.py +++ b/fvcore/common/checkpoint.py @@ -291,7 +291,6 @@ def _load_model(self, checkpoint: Any) -> _IncompatibleKeys: shape_model = tuple(model_param.shape) shape_checkpoint = tuple(checkpoint_state_dict[k].shape) if shape_model != shape_checkpoint: - has_observer_base_classes = ( TORCH_VERSION >= (1, 8) and hasattr(quantization, "ObserverBase") diff --git a/fvcore/nn/jit_analysis.py b/fvcore/nn/jit_analysis.py index d2cf61f..c59f4b5 100644 --- a/fvcore/nn/jit_analysis.py +++ b/fvcore/nn/jit_analysis.py @@ -13,10 +13,11 @@ import numpy as np import torch import torch.nn as nn -from fvcore.common.checkpoint import _named_modules_with_dup from torch import Tensor from torch.jit import _get_trace_graph, TracerWarning +from fvcore.common.checkpoint import _named_modules_with_dup + from .jit_handles import Handle diff --git a/io_tests/test_file_io.py b/io_tests/test_file_io.py index 9f47674..9263281 100644 --- a/io_tests/test_file_io.py +++ b/io_tests/test_file_io.py @@ -201,9 +201,7 @@ def test_open_writes(self) -> None: def test_bad_args(self) -> None: with self.assertRaises(NotImplementedError): - PathManager.copy( - self._remote_uri, self._remote_uri, foo="foo" # type: ignore - ) + PathManager.copy(self._remote_uri, self._remote_uri, foo="foo") # type: ignore with self.assertRaises(NotImplementedError): PathManager.exists(self._remote_uri, foo="foo") # type: ignore with self.assertRaises(ValueError): diff --git a/linter.sh b/linter.sh index 94fe194..cebc3a7 100755 --- a/linter.sh +++ 
b/linter.sh @@ -4,14 +4,14 @@ # Run this script at project root by "./linter.sh" before you commit. { - black --version | grep -E "22.3.0" > /dev/null + black --version | grep -E "23.3.0" > /dev/null } || { - echo "Linter requires 'black==22.3.0' !" + echo "Linter requires 'black==23.3.0' !" exit 1 } echo "Running isort..." -isort -y -sp . +isort --sp . . echo "Running black..." black . diff --git a/packaging/build_all_conda.sh b/packaging/build_all_conda.sh index 4b11125..7d9dd37 100644 --- a/packaging/build_all_conda.sh +++ b/packaging/build_all_conda.sh @@ -2,14 +2,14 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. set -ex -for PV in 3.6 3.7 3.8 +for PV in 3.8 3.9 3.10 3.11 do PYTHON_VERSION=$PV bash packaging/build_conda.sh done ls -Rl packaging -for version in 36 37 38 +for version in 38 39 310 311 do (cd packaging/out && conda convert -p win-64 linux-64/fvcore-*-py$version.tar.bz2) (cd packaging/out && conda convert -p osx-64 linux-64/fvcore-*-py$version.tar.bz2) diff --git a/setup.py b/setup.py index 7a84a43..37a6072 100755 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def get_version(): url="https://github.com/facebookresearch/fvcore", description="Collection of common code shared among different research " "projects in FAIR computer vision team", - python_requires=">=3.6", + python_requires=">=3.8", install_requires=[ "numpy", "yacs>=0.1.6", @@ -51,7 +51,6 @@ def get_version(): "Pillow", "tabulate", "iopath>=0.1.7", - "dataclasses; python_version<'3.7'", ], extras_require={"all": ["shapely"]}, packages=find_packages(exclude=("tests",)), diff --git a/tests/bm_common.py b/tests/bm_common.py index faf559c..d3b5ffc 100644 --- a/tests/bm_common.py +++ b/tests/bm_common.py @@ -1,8 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-from fvcore.common.benchmark import benchmark from test_common import TestHistoryBuffer +from fvcore.common.benchmark import benchmark + def bm_history_buffer_update() -> None: kwargs_list = [ diff --git a/tests/bm_focal_loss.py b/tests/bm_focal_loss.py index 6544fc9..06afd57 100644 --- a/tests/bm_focal_loss.py +++ b/tests/bm_focal_loss.py @@ -1,9 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch -from fvcore.common.benchmark import benchmark from test_focal_loss import TestFocalLoss, TestFocalLossStar +from fvcore.common.benchmark import benchmark + def bm_focal_loss() -> None: if not torch.cuda.is_available(): diff --git a/tests/test_activation_count.py b/tests/test_activation_count.py index 0886906..1931114 100644 --- a/tests/test_activation_count.py +++ b/tests/test_activation_count.py @@ -8,9 +8,10 @@ import torch import torch.nn as nn +from numpy import prod + from fvcore.nn.activation_count import activation_count, ActivationCountAnalysis from fvcore.nn.jit_handles import Handle -from numpy import prod class SmallConvNet(nn.Module): diff --git a/tests/test_checkpoint.py b/tests/test_checkpoint.py index 85f2412..02bc57a 100644 --- a/tests/test_checkpoint.py +++ b/tests/test_checkpoint.py @@ -12,9 +12,10 @@ from unittest.mock import MagicMock import torch -from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer from torch import nn +from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer + TORCH_VERSION: Tuple[int, ...] 
= tuple(int(x) for x in torch.__version__.split(".")[:2]) if TORCH_VERSION >= (1, 11): @@ -118,7 +119,6 @@ def test_from_last_checkpoint_model(self) -> None: nn.DataParallel(self._create_model()), ), ]: - with TemporaryDirectory() as f: checkpointer = Checkpointer(trained_model, save_dir=f) checkpointer.save("checkpoint_file") @@ -264,9 +264,9 @@ def __init__(self, has_y: bool) -> None: ) logger.info.assert_not_called() - @unittest.skipIf( # pyre-fixme[56] + @unittest.skipIf( not hasattr(nn, "LazyLinear"), "LazyModule not supported" - ) + ) # pyre-fixme[56] def test_load_lazy_module(self) -> None: def _get_model() -> nn.Sequential: return nn.Sequential(nn.LazyLinear(10)) diff --git a/tests/test_common.py b/tests/test_common.py index 3661f1d..a51dfda 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -6,11 +6,12 @@ import unittest import numpy as np +from yaml.constructor import ConstructorError + from fvcore.common.config import CfgNode from fvcore.common.history_buffer import HistoryBuffer from fvcore.common.registry import Registry from fvcore.common.timer import Timer -from yaml.constructor import ConstructorError class TestHistoryBuffer(unittest.TestCase): diff --git a/tests/test_flop_count.py b/tests/test_flop_count.py index ad593b9..ed7afd6 100644 --- a/tests/test_flop_count.py +++ b/tests/test_flop_count.py @@ -7,11 +7,12 @@ import torch import torch.nn as nn -from fvcore.nn.flop_count import _DEFAULT_SUPPORTED_OPS, flop_count, FlopCountAnalysis -from fvcore.nn.jit_handles import Handle from torch.autograd.function import Function from torch.nn import functional as F +from fvcore.nn.flop_count import _DEFAULT_SUPPORTED_OPS, flop_count, FlopCountAnalysis +from fvcore.nn.jit_handles import Handle + class _CustomOp(Function): @staticmethod @@ -189,6 +190,7 @@ def test_customized_ops(self) -> None: The second case checks when a new handle for a default operation is passed. The new handle should overwrite the default handle. 
""" + # New handle for a new operation. def dummy_sigmoid_flop_jit( inputs: typing.List[Any], outputs: typing.List[Any] diff --git a/tests/test_focal_loss.py b/tests/test_focal_loss.py index 435ef54..ab29ca6 100644 --- a/tests/test_focal_loss.py +++ b/tests/test_focal_loss.py @@ -5,13 +5,14 @@ import numpy as np import torch +from torch.nn import functional as F + from fvcore.nn import ( sigmoid_focal_loss, sigmoid_focal_loss_jit, sigmoid_focal_loss_star, sigmoid_focal_loss_star_jit, ) -from torch.nn import functional as F def logit(p: torch.Tensor) -> torch.Tensor: diff --git a/tests/test_giou_loss.py b/tests/test_giou_loss.py index 24ca0dc..3e76a3b 100644 --- a/tests/test_giou_loss.py +++ b/tests/test_giou_loss.py @@ -4,6 +4,7 @@ import numpy as np import torch + from fvcore.nn import giou_loss diff --git a/tests/test_jit_model_analysis.py b/tests/test_jit_model_analysis.py index 529c6b9..10d50b0 100644 --- a/tests/test_jit_model_analysis.py +++ b/tests/test_jit_model_analysis.py @@ -10,6 +10,7 @@ import torch import torch.nn as nn + from fvcore.nn.flop_count import FlopCountAnalysis from fvcore.nn.jit_analysis import JitModelAnalysis from fvcore.nn.jit_handles import addmm_flop_jit, conv_flop_jit, Handle, linear_flop_jit diff --git a/tests/test_layers_squeeze_excitation.py b/tests/test_layers_squeeze_excitation.py index db090c9..209f27d 100644 --- a/tests/test_layers_squeeze_excitation.py +++ b/tests/test_layers_squeeze_excitation.py @@ -4,6 +4,7 @@ from typing import Iterable import torch + from fvcore.nn.squeeze_excitation import ( ChannelSpatialSqueezeExcitation, SpatialSqueezeExcitation, diff --git a/tests/test_param_count.py b/tests/test_param_count.py index 7f4464d..6e3d0c1 100644 --- a/tests/test_param_count.py +++ b/tests/test_param_count.py @@ -3,9 +3,10 @@ import unittest -from fvcore.nn.parameter_count import parameter_count, parameter_count_table from torch import nn +from fvcore.nn.parameter_count import parameter_count, parameter_count_table + 
class NetWithReuse(nn.Module): def __init__(self, reuse: bool = False) -> None: diff --git a/tests/test_precise_bn.py b/tests/test_precise_bn.py index e150b4c..41a1ad3 100644 --- a/tests/test_precise_bn.py +++ b/tests/test_precise_bn.py @@ -7,9 +7,10 @@ import numpy as np import torch -from fvcore.nn import update_bn_stats from torch import nn +from fvcore.nn import update_bn_stats + class TestPreciseBN(unittest.TestCase): def setUp(self) -> None: diff --git a/tests/test_print_model_statistics.py b/tests/test_print_model_statistics.py index 9a5ea59..7a7eecc 100644 --- a/tests/test_print_model_statistics.py +++ b/tests/test_print_model_statistics.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn + from fvcore.nn import ActivationCountAnalysis, FlopCountAnalysis from fvcore.nn.print_model_statistics import ( _fill_missing_statistics, @@ -331,7 +332,6 @@ def test_model_stats_str(self) -> None: # ")" def test_model_stats_table(self) -> None: - stat_columns = ["stat1", "stat2", "stat3"] table = _model_stats_table(string_statistics, stat_columns=stat_columns) @@ -376,7 +376,6 @@ def test_model_stats_table(self) -> None: # "| a2.b1 | 0 | 100 | 40 |" def test_flop_count_table(self) -> None: - model = TestNet() inputs = (torch.randn((1, 10)),) diff --git a/tests/test_smooth_l1_loss.py b/tests/test_smooth_l1_loss.py index f6e1bfc..d59b8c2 100644 --- a/tests/test_smooth_l1_loss.py +++ b/tests/test_smooth_l1_loss.py @@ -4,6 +4,7 @@ import numpy as np import torch + from fvcore.nn import smooth_l1_loss diff --git a/tests/test_transform.py b/tests/test_transform.py index 9612b1a..ab9681d 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -6,6 +6,7 @@ import numpy as np import torch + from fvcore.transforms import transform as T from fvcore.transforms.transform_util import to_float_tensor, to_numpy diff --git a/tests/test_transform_util.py b/tests/test_transform_util.py index 5189681..b87d9f3 100644 --- a/tests/test_transform_util.py +++ 
b/tests/test_transform_util.py @@ -3,6 +3,7 @@ import unittest import numpy as np + from fvcore.transforms.transform_util import to_float_tensor, to_numpy diff --git a/tests/test_weight_init.py b/tests/test_weight_init.py index ba1164b..17b4b49 100644 --- a/tests/test_weight_init.py +++ b/tests/test_weight_init.py @@ -6,6 +6,7 @@ import torch import torch.nn as nn + from fvcore.nn.weight_init import c2_msra_fill, c2_xavier_fill