diff --git a/PyTorch/build-in/Classification/VOLO/coverage.txt b/PyTorch/build-in/Classification/VOLO/coverage.txt
new file mode 100644
index 000000000..c2616bee6
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/coverage.txt
@@ -0,0 +1,3 @@
+all api: ['_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_copy_from', '_has_compatible_shallow_copy_type', '_local_scalar_dense', '_log_softmax', '_log_softmax_backward_data', '_pin_memory', '_reshape_alias', '_softmax', '_softmax_backward_data', 'add', 'add_', 'addmm', 'as_strided', 'avg_pool2d', 'avg_pool2d_backward', 'bmm', 'bmm_backward', 'bmm_forward', 'cat', 'contiguous', 'convolution', 'convolution_backward', 'copy_stride', 'div', 'dropout', 'eq', 'fill_', 'fused_sgd', 'gelu', 'gelu_backward', 'is_pinned', 'linear', 'matmul', 'mm', 'mul', 'mul_', 'native_batch_norm', 'native_batch_norm_backward', 'native_layer_norm', 'native_layer_norm_backward', 'nll_loss_backward', 'nll_loss_forward', 'reciprocal', 'relu_', 'slice_backward', 'sum', 'threshold_backward', 'topk_out', 'view', 'zero_'], total: 52
+fallback op: ['avg_pool2d', 'avg_pool2d_backward', 'col2im', 'im2col'], total: 4
+coverage rate: 92.31%
diff --git a/PyTorch/build-in/Classification/VOLO/loss.jpg b/PyTorch/build-in/Classification/VOLO/loss.jpg
new file mode 100644
index 000000000..95029b9c1
Binary files /dev/null and b/PyTorch/build-in/Classification/VOLO/loss.jpg differ
diff --git a/PyTorch/build-in/Classification/VOLO/loss.txt b/PyTorch/build-in/Classification/VOLO/loss.txt
new file mode 100644
index 000000000..993729b77
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/loss.txt
@@ -0,0 +1,4 @@
+MeanRelativeError: -0.0024846086273140437
+MeanAbsoluteError: -0.390901
+Rule,mean_absolute_error -0.390901
+pass mean_relative_error=-0.0024846086273140437 <= 0.05 or mean_absolute_error=-0.390901 <= 0.0002
diff --git a/PyTorch/build-in/Classification/VOLO/readme.md b/PyTorch/build-in/Classification/VOLO/readme.md
new file mode 100644
index 000000000..57f790971
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/readme.md
@@ -0,0 +1,65 @@
+## 1. Model Link
+- Original repository:
+https://github.com/huggingface/pytorch-image-models?tab=readme-ov-file#models
+
+## 2. Quick Start
+
+Training with this model involves the following main steps:
+
+1. **Install the base environment**: the environment checks and installation to complete before training.
+2. **Get the dataset**: how to obtain the dataset used for training.
+3. **Build the environment**: how to set up the environment the model needs to run.
+4. **Launch training**: how to run training.
+
+### 2.1 Install the Base Environment
+
+Follow the base-environment installation section of the main repository to complete the pre-training environment checks and installation (driver, firmware, etc.).
+
+### 2.2 Prepare the Dataset
+
+#### 2.2.1 Get the Dataset
+
+Training uses the **CIFAR-100** dataset, an open dataset of 60,000 color images in 100 classes.
+
+#### 2.2.2 Process the Dataset
+
+Make sure the dataset has been downloaded and extracted. With the training script's default configuration, place the dataset in the `data` directory one level above the model directory (i.e. `../data`), or change the `--datapath` argument in the training command to the actual path.
+
+### 2.3 Build the Environment
+
+The environment must contain a PyTorch virtual environment.
+
+1. Activate the virtual environment (adjust the name to your setup):
+
+   ```bash
+   conda activate torch_env_py310
+   ```
+
+2. Install the Python dependencies required by the project:
+
+   ```bash
+   pip install -r requirements.txt
+   ```
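+
+3. Optionally sanity-check the installation. A minimal check, assuming the `torch_sdaa` device plugin pinned in `requirements.txt` installs an importable `torch_sdaa` module:
+
+   ```bash
+   python -c "import torch, torch_sdaa; print(torch.__version__)"
+   ```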
+
+### 2.4 Launch Training
+
+1. In the prepared environment, change into the directory containing the training script.
+
+2. Run training. The model supports single-node, single-card training.
+   Launch training with the following command (CIFAR-100 dataset, batch size 16):
+
+   ```bash
+   python weloTrainStep.py \
+       --name train \
+       --arch volo \
+       --print_freq 1 \
+       --steps 100 \
+       --dataset cifar100 \
+       --datapath ../data \
+       --batch_size 16 \
+       --epochs 100
+   ```
diff --git a/PyTorch/build-in/Classification/VOLO/requirements.txt b/PyTorch/build-in/Classification/VOLO/requirements.txt
new file mode 100644
index 000000000..7394b3319
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/requirements.txt
@@ -0,0 +1,89 @@
+addict==2.4.0
+aliyun-python-sdk-core==2.16.0
+aliyun-python-sdk-kms==2.16.5
+anyio==4.11.0
+astunparse==1.6.3
+certifi==2024.12.14
+cffi==2.0.0
+charset-normalizer==3.4.1
+click==8.3.1
+colorama==0.4.6
+contourpy==1.3.2
+crcmod==1.7
+cryptography==46.0.3
+cycler==0.12.1
+einops==0.8.1
+exceptiongroup==1.3.1
+filelock==3.14.0
+fonttools==4.60.1
+fsspec==2024.12.0
+future @ file:///croot/future_1730902796226/work
+git-filter-repo==2.47.0
+h11==0.16.0
+hf-xet==1.2.0
+httpcore==1.0.9
+httpx==0.28.1
+huggingface_hub==1.1.5
+idna==3.10
+inplace-abn @ git+https://github.com/mapillary/inplace_abn.git@b50bfe9c7cd7116a3ab091a352b48d6ba5ee701c
+Jinja2==3.1.5
+jmespath==0.10.0
+joblib==1.5.2
+kiwisolver==1.4.9
+Markdown==3.10
+markdown-it-py==4.0.0
+MarkupSafe==3.0.2
+matplotlib==3.10.7
+mdurl==0.1.2
+mmdet==3.3.0
+mmengine==0.10.7
+model-index==0.1.11
+mpmath==1.3.0
+networkx==3.4.2
+numpy==1.23.5
+opencv-python==4.12.0.88
+opendatalab==0.0.10
+openmim==0.3.9
+openxlab==0.1.3
+ordered-set==4.1.0
+oss2==2.17.0
+packaging @ file:///croot/packaging_1734472117206/work
+pandas==2.3.3
+pillow==11.1.0
+platformdirs==4.5.1
+pycocotools==2.0.11
+pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
+pycryptodome==3.23.0
+Pygments==2.19.2
+pyparsing==3.2.5
+python-dateutil==2.9.0.post0
+pytz==2023.4
+PyYAML @ file:///croot/pyyaml_1728657952215/work
+requests==2.28.2
+rich==13.4.2
+safetensors==0.7.0
+scikit-learn==1.7.2
+scipy==1.15.3
+shapely==2.1.2
+shellingham==1.5.4
+six @ file:///tmp/build/80754af9/six_1644875935023/work
+sniffio==1.3.1
+sympy==1.13.3
+tabulate==0.9.0
+termcolor==3.2.0
+terminaltables==3.1.10
+threadpoolctl==3.6.0
+timm==1.0.22
+tomli==2.3.0
+torch @ file:///apps/torch-2.4.0a0%2Bgit4451b0e-cp310-cp310-linux_x86_64.whl#sha256=2e472c916044cac5a1a0e0d8b0e12bb943d8522b24ff826c8014dd444dccd378
+torch_sdaa @ file:///apps/torch_sdaa-2.0.0-cp310-cp310-linux_x86_64.whl#sha256=5aa57889b002e1231fbf806642e1353bfa016297bc25178396e89adc2b1f92e7
+torchaudio @ file:///apps/torchaudio-2.0.2%2Bda3eb8d-cp310-cp310-linux_x86_64.whl#sha256=46525c02fb7eaa8dafea860428de3d01e437ba8d6ff2cc228d7c71975ac4054b
+torchdata @ file:///apps/torchdata-0.6.1%2Be1feeb2-py3-none-any.whl#sha256=aa2dc1a7732ea68adfad186978049bf68cc1afdbbdd1e17a8024227ab770e433
+torchtext @ file:///apps/torchtext-0.15.2a0%2B4571036-cp310-cp310-linux_x86_64.whl#sha256=7e42c684ba366f97b59ec37488bf95e416cce3892b6589200d2b3ad159ee5788
+torchvision @ file:///apps/torchvision-0.15.1a0%2B42759b1-cp310-cp310-linux_x86_64.whl#sha256=4b904db2d50102415536bc764bbc31c669b90b1b014f90964e9eccaadb2fd9eb
+tqdm==4.65.2
+typer-slim==0.20.0
+typing_extensions==4.15.0
+tzdata==2025.2
+urllib3==1.26.20
+yapf==0.43.0
diff --git a/PyTorch/build-in/Classification/VOLO/volo.py b/PyTorch/build-in/Classification/VOLO/volo.py
new file mode 100644
index 000000000..39bf8e17f
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/volo.py
@@ -0,0 +1,783 @@
+# Copyright 2021 Sea Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Vision Outlooker (VOLO) implementation
+"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+from timm.models.registry import register_model
+import math
+import numpy as np
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .96, 'interpolation': 'bicubic',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'patch_embed.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'volo': _cfg(crop_pct=0.96),
+    'volo_large': _cfg(crop_pct=1.15),
+}
+
+
+class OutlookAttention(nn.Module):
+    """
+    Implementation of outlook attention
+    --dim: hidden dim
+    --num_heads: number of heads
+    --kernel_size: kernel size in each window for outlook attention
+    return: token features after outlook attention
+    """
+
+    def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1,
+                 qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        head_dim = dim // num_heads
+        self.num_heads = num_heads
+        self.kernel_size = kernel_size
+        self.padding = padding
+        self.stride = stride
+        self.scale = qk_scale or head_dim**-0.5
+
+        self.v = nn.Linear(dim, dim, bias=qkv_bias)
+        self.attn = nn.Linear(dim, kernel_size**4 * num_heads)
+
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+        self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride)
+        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True)
+
+    def forward(self, x):
+        B, H, W, C = x.shape
+
+        v = self.v(x).permute(0, 3, 1, 2)  # B, C, H, W
+
+        h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
+        v = self.unfold(v).reshape(B, self.num_heads, C // self.num_heads,
+                                   self.kernel_size * self.kernel_size,
+                                   h * w).permute(0, 1, 4, 3, 2)  # B,H,N,kxk,C/H
+
+        attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
+        attn = self.attn(attn).reshape(
+            B, h * w, self.num_heads, self.kernel_size * self.kernel_size,
+            self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4)  # B,H,N,kxk,kxk
+        attn = attn * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(
+            B, C * self.kernel_size * self.kernel_size, h * w)
+        x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size,
+                   padding=self.padding, stride=self.stride)
+
+        x = self.proj(x.permute(0, 2, 3, 1))
+        x = self.proj_drop(x)
+
+        return x
+
+
+class Outlooker(nn.Module):
+    """
+    Implementation of the outlooker layer, which combines outlook attention + MLP
+    Outlooker is the first stage in our VOLO
+    --dim: hidden dim
+    --num_heads: number of heads
+    --mlp_ratio: mlp ratio
+    --kernel_size: kernel size in each window for outlook attention
+    return: outlooker layer
+    """
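+    # Shape sketch for the wrapped OutlookAttention above (assuming stride=1, so h=H, w=W):
+    #   v    : B x heads x (h*w) x k*k x C/heads  -- unfolded values, one k x k window per location
+    #   attn : B x heads x (h*w) x k*k x k*k      -- attention weights predicted from pooled features
+    # F.fold then scatters the weighted values back to (H, W), summing overlapping windows.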
+    def __init__(self, dim, kernel_size, padding, stride=1,
+                 num_heads=1, mlp_ratio=3., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU,
+                 norm_layer=nn.LayerNorm, qkv_bias=False,
+                 qk_scale=None):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = OutlookAttention(dim, num_heads, kernel_size=kernel_size,
+                                     padding=padding, stride=stride,
+                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                     attn_drop=attn_drop)
+
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0. else nn.Identity()
+
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=mlp_hidden_dim,
+                       act_layer=act_layer)
+
+    def forward(self, x):
+        x = x + self.drop_path(self.attn(self.norm1(x)))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class Mlp(nn.Module):
+    "Implementation of MLP"
+
+    def __init__(self, in_features, hidden_features=None,
+                 out_features=None, act_layer=nn.GELU,
+                 drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    "Implementation of self-attention"
+
+    def __init__(self, dim, num_heads=8, qkv_bias=False,
+                 qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim**-0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        B, H, W, C = x.shape
+
+        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads,
+                                  C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, H, W, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+
+        return x
+
+
+class Transformer(nn.Module):
+    """
+    Implementation of Transformer,
+    Transformer is the second stage in our VOLO
+    """
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False,
+                 qk_scale=None, attn_drop=0., drop_path=0.,
+                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
+                              qk_scale=qk_scale, attn_drop=attn_drop)
+
+        # NOTE: drop path for stochastic depth; we shall see if this is better than dropout here
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0. else nn.Identity()
+
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=mlp_hidden_dim,
+                       act_layer=act_layer)
+
+    def forward(self, x):
+        x = x + self.drop_path(self.attn(self.norm1(x)))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class ClassAttention(nn.Module):
+    """
+    Class attention layer from CaiT, see details in CaiT
+    Class attention is the post stage in our VOLO, which is optional.
+    """
+    def __init__(self, dim, num_heads=8, head_dim=None, qkv_bias=False,
+                 qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        if head_dim is not None:
+            self.head_dim = head_dim
+        else:
+            head_dim = dim // num_heads
+            self.head_dim = head_dim
+        self.scale = qk_scale or head_dim**-0.5
+
+        self.kv = nn.Linear(dim,
+                            self.head_dim * self.num_heads * 2,
+                            bias=qkv_bias)
+        self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(self.head_dim * self.num_heads, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        B, N, C = x.shape
+
+        kv = self.kv(x).reshape(B, N, 2, self.num_heads,
+                                self.head_dim).permute(2, 0, 3, 1, 4)
+        k, v = kv[0], kv[1]  # make torchscript happy (cannot use tensor as tuple)
+        q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim)
+        attn = ((q * self.scale) @ k.transpose(-2, -1))
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        cls_embed = (attn @ v).transpose(1, 2).reshape(
+            B, 1, self.head_dim * self.num_heads)
+        cls_embed = self.proj(cls_embed)
+        cls_embed = self.proj_drop(cls_embed)
+        return cls_embed
+
+
+class ClassBlock(nn.Module):
+    """
+    Class attention block from CaiT, see details in CaiT
+    We use a two-layer class attention in our VOLO, which is optional.
+    """
+
+    def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4.,
+                 qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = ClassAttention(
+            dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias,
+            qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+        # NOTE: drop path for stochastic depth
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=mlp_hidden_dim,
+                       act_layer=act_layer,
+                       drop=drop)
+
+    def forward(self, x):
+        cls_embed = x[:, :1]
+        cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x)))
+        cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed)))
+        return torch.cat([cls_embed, x[:, 1:]], dim=1)
+
+
+def get_block(block_type, **kwargs):
+    """
+    get a block by name; used here for the class attention block
+    """
+    if block_type == 'ca':
+        return ClassBlock(**kwargs)
+
+
+def rand_bbox(size, lam, scale=1):
+    """
+    get a bounding box as in token labeling (https://github.com/zihangJiang/TokenLabeling)
+    return: bounding box
+    """
+    W = size[1] // scale
+    H = size[2] // scale
+    cut_rat = np.sqrt(1. - lam)
+    cut_w = int(W * cut_rat)  # int() instead of the removed np.int alias
+    cut_h = int(H * cut_rat)
+
+    # uniform
+    cx = np.random.randint(W)
+    cy = np.random.randint(H)
+
+    bbx1 = np.clip(cx - cut_w // 2, 0, W)
+    bby1 = np.clip(cy - cut_h // 2, 0, H)
+    bbx2 = np.clip(cx + cut_w // 2, 0, W)
+    bby2 = np.clip(cy + cut_h // 2, 0, H)
+
+    return bbx1, bby1, bbx2, bby2
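+
+# Worked example for rand_bbox: lam = 0.75 gives cut_rat = sqrt(0.25) = 0.5, so the
+# sampled box spans half of W and half of H -- i.e. roughly 25% of the (pooled)
+# feature map is swapped when mix_token is enabled (modulo clipping at the borders).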
+
+
+class PatchEmbed(nn.Module):
+    """
+    Image to Patch Embedding.
+    Unlike ViT, which uses a single conv layer, we use multiple conv layers for patch embedding
+    """
+
+    def __init__(self, img_size=224, stem_conv=False, stem_stride=1,
+                 patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384):
+        super().__init__()
+        assert patch_size in [4, 8, 16]
+
+        self.stem_conv = stem_conv
+        if stem_conv:
+            self.conv = nn.Sequential(
+                nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride,
+                          padding=3, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1,
+                          padding=1, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1,
+                          padding=1, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+            )
+
+        self.proj = nn.Conv2d(hidden_dim,
+                              embed_dim,
+                              kernel_size=patch_size // stem_stride,
+                              stride=patch_size // stem_stride)
+        self.num_patches = (img_size // patch_size) * (img_size // patch_size)
+
+    def forward(self, x):
+        if self.stem_conv:
+            x = self.conv(x)
+        x = self.proj(x)  # B, C, H, W
+        return x
+
+
+class Downsample(nn.Module):
+    """
+    Image to Patch Embedding, downsampling between stage1 and stage2
+    """
+    def __init__(self, in_embed_dim, out_embed_dim, patch_size):
+        super().__init__()
+        self.proj = nn.Conv2d(in_embed_dim, out_embed_dim,
+                              kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x):
+        x = x.permute(0, 3, 1, 2)
+        x = self.proj(x)  # B, C, H, W
+        x = x.permute(0, 2, 3, 1)
+        return x
+
+
+def outlooker_blocks(block_fn, index, dim, layers, num_heads=1, kernel_size=3,
+                     padding=1, stride=1, mlp_ratio=3., qkv_bias=False, qk_scale=None,
+                     attn_drop=0, drop_path_rate=0., **kwargs):
+    """
+    generate the outlooker layers in stage1
+    return: outlooker layers
+    """
+    blocks = []
+    for block_idx in range(layers[index]):
+        block_dpr = drop_path_rate * (block_idx +
+                                      sum(layers[:index])) / (sum(layers) - 1)
+        blocks.append(block_fn(dim, kernel_size=kernel_size, padding=padding,
+                               stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio,
+                               qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
+                               drop_path=block_dpr))
+
+    blocks = nn.Sequential(*blocks)
+
+    return blocks
+
+
+def transformer_blocks(block_fn, index, dim, layers, num_heads, mlp_ratio=3.,
+                       qkv_bias=False, qk_scale=None, attn_drop=0,
+                       drop_path_rate=0., **kwargs):
+    """
+    generate the transformer layers in stage2
+    return: transformer layers
+    """
+    blocks = []
+    for block_idx in range(layers[index]):
+        block_dpr = drop_path_rate * (block_idx +
+                                      sum(layers[:index])) / (sum(layers) - 1)
+        blocks.append(
+            block_fn(dim, num_heads,
+                     mlp_ratio=mlp_ratio,
+                     qkv_bias=qkv_bias,
+                     qk_scale=qk_scale,
+                     attn_drop=attn_drop,
+                     drop_path=block_dpr))
+
+    blocks = nn.Sequential(*blocks)
+
+    return blocks
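+
+# Stochastic-depth schedule used by both builders above: block_dpr ramps linearly with the
+# global block index. E.g. with layers=[4, 4, 8, 2] and drop_path_rate=0.1, the first of
+# the 18 blocks gets 0.0 and the last gets 0.1.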
+
+
+class VOLO(nn.Module):
+    """
+    Vision Outlooker, the main class of our model
+    --layers: [x,x,x,x], four blocks in two stages; the first block is outlooker, the
+      other three are transformer. We set four blocks, which are easily
+      applied to downstream tasks
+    --img_size, --in_chans, --num_classes: self-explanatory
+    --patch_size: patch size in outlook attention
+    --stem_hidden_dim: hidden dim of the patch embedding, 64 for d1-d4 and 128 for d5
+    --embed_dims, --num_heads: embedding dim and number of heads in each block
+    --downsamples: flags to apply downsampling or not
+    --outlook_attention: flags to apply outlook attention or not
+    --mlp_ratios, --qkv_bias, --qk_scale, --drop_rate: self-explanatory
+    --attn_drop_rate, --drop_path_rate, --norm_layer: self-explanatory
+    --post_layers: post layers such as two class attention layers, e.g. ['ca', 'ca'];
+      if set, return_mean=False
+    --return_mean: use the mean of all feature tokens for classification; if yes, no class token
+    --return_dense: use token labeling, details are here:
+      https://github.com/zihangJiang/TokenLabeling
+    --mix_token: mix tokens as in token labeling, details are here:
+      https://github.com/zihangJiang/TokenLabeling
+    --pooling_scale: pooling_scale=2 means we downsample 2x
+    --out_kernel, --out_stride, --out_padding: kernel size,
+      stride, and padding for outlook attention
+    """
+    def __init__(self, layers, img_size=224, in_chans=3, num_classes=1000, patch_size=8,
+                 stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=None,
+                 outlook_attention=None, mlp_ratios=None, qkv_bias=False, qk_scale=None,
+                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
+                 post_layers=None, return_mean=False, return_dense=True, mix_token=True,
+                 pooling_scale=2, out_kernel=3, out_stride=2, out_padding=1):
+
+        super().__init__()
+        self.num_classes = num_classes
+        self.embed_dim = embed_dims[-1]  # referenced by reset_classifier
+        self.patch_embed = PatchEmbed(stem_conv=True, stem_stride=2, patch_size=patch_size,
+                                      in_chans=in_chans, hidden_dim=stem_hidden_dim,
+                                      embed_dim=embed_dims[0])
+
+        # initial positional encoding; we add positional encoding after the outlooker blocks
+        self.pos_embed = nn.Parameter(
+            torch.zeros(1, img_size // patch_size // pooling_scale,
+                        img_size // patch_size // pooling_scale,
+                        embed_dims[-1]))
+
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        # set the main blocks in the network
+        network = []
+        for i in range(len(layers)):
+            if outlook_attention[i]:
+                # stage 1
+                stage = outlooker_blocks(Outlooker, i, embed_dims[i], layers,
+                                         downsample=downsamples[i], num_heads=num_heads[i],
+                                         kernel_size=out_kernel, stride=out_stride,
+                                         padding=out_padding, mlp_ratio=mlp_ratios[i],
+                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                         attn_drop=attn_drop_rate, norm_layer=norm_layer)
+                network.append(stage)
+            else:
+                # stage 2
+                stage = transformer_blocks(Transformer, i, embed_dims[i], layers,
+                                           num_heads[i], mlp_ratio=mlp_ratios[i],
+                                           qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                           drop_path_rate=drop_path_rate,
+                                           attn_drop=attn_drop_rate,
+                                           norm_layer=norm_layer)
+                network.append(stage)
+
+            if downsamples[i]:
+                # downsampling between two stages
+                network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2))
+
+        self.network = nn.ModuleList(network)
+
+        # set the post blocks, e.g. class attention layers
+        self.post_network = None
+        if post_layers is not None:
+            self.post_network = nn.ModuleList([
+                get_block(post_layers[i],
+                          dim=embed_dims[-1],
+                          num_heads=num_heads[-1],
+                          mlp_ratio=mlp_ratios[-1],
+                          qkv_bias=qkv_bias,
+                          qk_scale=qk_scale,
+                          attn_drop=attn_drop_rate,
+                          drop_path=0.,
+                          norm_layer=norm_layer)
+                for i in range(len(post_layers))
+            ])
+            self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1]))
+            trunc_normal_(self.cls_token, std=.02)
+
+        # set the output type
+        self.return_mean = return_mean  # if yes, return the mean and do not use a class token
+        self.return_dense = return_dense  # if yes, return the class token and all feature tokens
+        if return_dense:
+            assert not return_mean, "cannot return both mean and dense"
+        self.mix_token = mix_token
+        self.pooling_scale = pooling_scale
+        if mix_token:  # enable token mixing, see token labeling for details.
+            self.beta = 1.0
+            assert return_dense, "return all tokens if mix_token is enabled"
+        if return_dense:
+            self.aux_head = nn.Linear(
+                embed_dims[-1],
+                num_classes) if num_classes > 0 else nn.Identity()
+        self.norm = norm_layer(embed_dims[-1])
+
+        # Classifier head
+        self.head = nn.Linear(
+            embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
+
+        trunc_normal_(self.pos_embed, std=.02)
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'pos_embed', 'cls_token'}
+
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes):
+        self.num_classes = num_classes
+        self.head = nn.Linear(
+            self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_embeddings(self, x):
+        # patch embedding
+        x = self.patch_embed(x)
+        # B,C,H,W -> B,H,W,C
+        x = x.permute(0, 2, 3, 1)
+        return x
+
+    def forward_tokens(self, x):
+        for idx, block in enumerate(self.network):
+            if idx == 2:  # add positional encoding after the outlooker blocks
+                x = x + self.pos_embed
+                x = self.pos_drop(x)
+            x = block(x)
+
+        B, H, W, C = x.shape
+        x = x.reshape(B, -1, C)
+        return x
+
+    def forward_cls(self, x):
+        B, N, C = x.shape
+        cls_tokens = self.cls_token.expand(B, -1, -1)
+        x = torch.cat((cls_tokens, x), dim=1)
+        for block in self.post_network:
+            x = block(x)
+        return x
+
+    def forward(self, x):
+        # step1: patch embedding
+        x = self.forward_embeddings(x)
+
+        # mix token, see token labeling for details.
+        if self.mix_token and self.training:
+            lam = np.random.beta(self.beta, self.beta)
+            patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[
+                2] // self.pooling_scale
+            bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale)
+            temp_x = x.clone()
+            sbbx1, sbby1, sbbx2, sbby2 = self.pooling_scale * bbx1, self.pooling_scale * bby1, \
+                self.pooling_scale * bbx2, self.pooling_scale * bby2
+            temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :]
+            x = temp_x
+        else:
+            bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
+
+        # step2: token learning in the two stages
+        x = self.forward_tokens(x)
+
+        # step3: post network, apply class attention or not
+        if self.post_network is not None:
+            x = self.forward_cls(x)
+        x = self.norm(x)
+
+        if self.return_mean:  # if no class token, return the mean
+            return self.head(x.mean(1))
+
+        x_cls = self.head(x[:, 0])
+        if not self.return_dense:
+            return x_cls
+
+        x_aux = self.aux_head(
+            x[:, 1:]
+        )  # generate classes from all feature tokens, see token labeling
+
+        if not self.training:
+            return x_cls + 0.5 * x_aux.max(1)[0]
+
+        if self.mix_token and self.training:  # reverse "mix token", see token labeling for details.
+            x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1])
+
+            temp_x = x_aux.clone()
+            temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :]
+            x_aux = temp_x
+
+            x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1])
+
+        # return these: 1. class token, 2. classes from all feature tokens, 3. bounding box
+        return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
+
+
+@register_model
+def volo_d1(pretrained=False, **kwargs):
+    """
+    VOLO-D1 model, Params: 27M
+    --layers: [x,x,x,x], four blocks in two stages; the first stage (block) is outlooker,
+      the other three blocks are transformer. We set four blocks, which are easily
+      applied to downstream tasks
+    --embed_dims, --num_heads: embedding dim and number of heads in each block
+    --downsamples: flags to apply downsampling or not in the four blocks
+    --outlook_attention: flags to apply outlook attention or not
+    --mlp_ratios: mlp ratio in the four blocks
+    --post_layers: post layers, e.g. two class attention layers ['ca', 'ca']
+    See the VOLO class for details on all args
+    """
+    layers = [4, 4, 8, 2]  # number of layers in the four blocks
+    embed_dims = [192, 384, 384, 384]
+    num_heads = [6, 12, 12, 12]
+    mlp_ratios = [3, 3, 3, 3]
+    downsamples = [True, False, False, False]  # downsample after the first block
+    outlook_attention = [True, False, False, False]
+    # the first block is outlooker (stage1); the other three are transformer (stage2)
+    model = VOLO(layers,
+                 embed_dims=embed_dims,
+                 num_heads=num_heads,
+                 mlp_ratios=mlp_ratios,
+                 downsamples=downsamples,
+                 outlook_attention=outlook_attention,
+                 post_layers=['ca', 'ca'],
+                 **kwargs)
+    model.default_cfg = default_cfgs['volo']
+    return model
+
+
+@register_model
+def volo_d2(pretrained=False, **kwargs):
+    """
+    VOLO-D2 model, Params: 59M
+    """
+    layers = [6, 4, 10, 4]
+    embed_dims = [256, 512, 512, 512]
+    num_heads = [8, 16, 16, 16]
+    mlp_ratios = [3, 3, 3, 3]
+    downsamples = [True, False, False, False]
+    outlook_attention = [True, False, False, False]
+    model = VOLO(layers,
+                 embed_dims=embed_dims,
+                 num_heads=num_heads,
+                 mlp_ratios=mlp_ratios,
+                 downsamples=downsamples,
+                 outlook_attention=outlook_attention,
+                 post_layers=['ca', 'ca'],
+                 **kwargs)
+    model.default_cfg = default_cfgs['volo']
+    return model
+
+
+@register_model
+def volo_d3(pretrained=False, **kwargs):
+    """
+    VOLO-D3 model, Params: 86M
+    """
+    layers = [8, 8, 16, 4]
+    embed_dims = [256, 512, 512, 512]
+    num_heads = [8, 16, 16, 16]
+    mlp_ratios = [3, 3, 3, 3]
+    downsamples = [True, False, False, False]
+    outlook_attention = [True, False, False, False]
+    model = VOLO(layers,
+                 embed_dims=embed_dims,
+                 num_heads=num_heads,
+                 mlp_ratios=mlp_ratios,
+                 downsamples=downsamples,
+                 outlook_attention=outlook_attention,
+                 post_layers=['ca', 'ca'],
+                 **kwargs)
+    model.default_cfg = default_cfgs['volo']
+    return model
+
+
+@register_model
+def volo_d4(pretrained=False, **kwargs):
+    """
+    VOLO-D4 model, Params: 193M
+    """
+    layers = [8, 8, 16, 4]
+    embed_dims = [384, 768, 768, 768]
+    num_heads = [12, 16, 16, 16]
+    mlp_ratios = [3, 3, 3, 3]
+    downsamples = [True, False, False, False]
+    outlook_attention = [True, False, False, False]
+    model = VOLO(layers,
+                 embed_dims=embed_dims,
+                 num_heads=num_heads,
+                 mlp_ratios=mlp_ratios,
+                 downsamples=downsamples,
+                 outlook_attention=outlook_attention,
+                 post_layers=['ca', 'ca'],
+                 **kwargs)
+    model.default_cfg = default_cfgs['volo_large']
+    return model
+
+
+@register_model
+def volo_d5(pretrained=False, **kwargs):
+    """
+    VOLO-D5 model, Params: 296M
+    stem_hidden_dim=128: the hidden dim in the patch embedding is 128 for VOLO-D5
+    """
+    layers = [12, 12, 20, 4]
+    embed_dims = [384, 768, 768, 768]
+    num_heads = [12, 16, 16, 16]
+    mlp_ratios = [4, 4, 4, 4]
+    downsamples = [True, False, False, False]
+    outlook_attention = [True, False, False, False]
+    model = VOLO(layers,
+                 embed_dims=embed_dims,
+                 num_heads=num_heads,
+                 mlp_ratios=mlp_ratios,
+                 downsamples=downsamples,
+                 outlook_attention=outlook_attention,
+                 post_layers=['ca', 'ca'],
+                 stem_hidden_dim=128,
+                 **kwargs)
+    model.default_cfg = default_cfgs['volo_large']
+    return model
+
+# welo: custom entry point for CIFAR training
+def Model(num_classes):
+    """
+    Return a VOLO model instance for training (VOLO-D1 by default, 224x224 input).
+    return_dense and mix_token are disabled so the model returns plain logits;
+    pick another volo_d* variant or pass extra kwargs for a different model.
+    """
+    model = volo_d1(num_classes=num_classes, return_dense=False, mix_token=False)
+    return model
\ No newline at end of file
diff --git a/PyTorch/build-in/Classification/VOLO/weloTrainStep.py b/PyTorch/build-in/Classification/VOLO/weloTrainStep.py
new file mode 100644
index 000000000..2c191729c
--- /dev/null
+++ b/PyTorch/build-in/Classification/VOLO/weloTrainStep.py
@@ -0,0 +1,647 @@
+#!/usr/bin/env python3
+# coding: utf-8
+
+import os
+import random
+import sys
+import time
+import json
+import argparse
+from collections import OrderedDict
+from pathlib import Path
+import numpy as np
+import pandas as pd
+from tqdm import tqdm
+import importlib
+
+os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # strongly recommended to set in the shell / at the very top
+os.environ["PYTHONHASHSEED"] = "12345"
+os.environ["OMP_NUM_THREADS"] = "1"
+os.environ["MKL_NUM_THREADS"] = "1"
+
+def ensure_cublas_workspace(config=":4096:8"):
+    """
+    Try to set a reproducible cuBLAS workspace. It is strongly recommended to export
+    this env var at the entry point of the main script (before torch is imported).
+    This function sets it at runtime, but if torch has already been imported it may
+    be too late -- in that case it only prints a reminder.
+    """
+    already = os.environ.get("CUBLAS_WORKSPACE_CONFIG")
+    if already:
+        print(f"[seed_utils] CUBLAS_WORKSPACE_CONFIG already set: {already}")
+    else:
+        os.environ["CUBLAS_WORKSPACE_CONFIG"] = config
+        print(f"[seed_utils] Set CUBLAS_WORKSPACE_CONFIG={config} (note: set it before importing torch for it to take effect)")
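+
+# In practice (a sketch): export CUBLAS_WORKSPACE_CONFIG=":4096:8" in the shell before
+# launching Python, which guarantees the setting precedes `import torch`; the os.environ
+# assignment at the top of this file covers the case where this script is the entry point.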
+
+def set_global_seed(seed: int = 42, set_threads: bool = True):
+    """
+    Unified randomness setup. For full effect, call ensure_cublas_workspace(...) at the
+    entry point of the main script (before torch is imported), or export
+    CUBLAS_WORKSPACE_CONFIG in the shell.
+    """
+    ensure_cublas_workspace()  # sets the env var and prints a reminder
+    os.environ["PYTHONHASHSEED"] = str(seed)
+
+    if set_threads:
+        os.environ["OMP_NUM_THREADS"] = "1"
+        os.environ["MKL_NUM_THREADS"] = "1"
+
+    random.seed(seed)
+    np.random.seed(seed)
+
+    # import torch now (deferred so the env vars above take effect)
+    import torch
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed(seed)
+        torch.cuda.manual_seed_all(seed)
+    # force deterministic algorithms (PyTorch raises an error on non-deterministic ops)
+    try:
+        torch.use_deterministic_algorithms(True)
+    except Exception as e:
+        print("[seed_utils] Failed to enable deterministic mode:", e)
+        print("[seed_utils] Make sure CUBLAS_WORKSPACE_CONFIG is set before torch is imported.")
+
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+
+    if set_threads:
+        torch.set_num_threads(1)
+        torch.set_num_interop_threads(1)
+
+    print(f"[seed_utils] Global seed set to {seed}")
+
+set_global_seed(2025)
+
+"""
+Generic training template (prefers a locally imported Model -> supports DDP / single card,
+AMP, resume, logging, checkpoints)
+Save as train_template_localmodel.py
+"""
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torch.backends.cudnn as cudnn
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+import torchvision.models as tv_models
+
+import torch.distributed as dist
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.data import DataLoader
+from torch.utils.data.distributed import DistributedSampler
+
+from torch.sdaa import amp
+# from torch.cuda import amp
+
+
+# ----------------------------
+# Helper utilities (self-contained)
+# ----------------------------
+class AverageMeter(object):
+    def __init__(self, name='Meter', fmt=':.4f'):
+        self.name = name
+        self.fmt = fmt
+        self.reset()
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+    def update(self, val, n=1):
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / max(1, self.count)
+    def __str__(self):
+        fmtstr = '{name} {val' + self.fmt + '} (avg {avg' + self.fmt + '})'
+        return fmtstr.format(name=self.name, val=self.val, avg=self.avg)
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the precision@k for the specified values of k.
+    Returns a list with one tensor per k (as a percentage).
+    """
+    with torch.no_grad():
+        maxk = max(topk)
+        batch_size = target.size(0)
+
+        # output: (N, C) -> pred: (maxk, N)
+        _, pred = output.topk(maxk, 1, True, True)
+        pred = pred.t()  # (maxk, N)
+        correct = pred.eq(target.view(1, -1).expand_as(pred))  # (maxk, N) bool
+
+        res = []
+        for k in topk:
+            # flatten the first k rows and sum (a 0-dim tensor), then convert to a percentage
+            correct_k = correct[:k].reshape(-1).float().sum()  # note: no keepdim
+            # multiply by 100.0 / batch_size, keeping a tensor (compatible with earlier code)
+            res.append(correct_k.mul_(100.0 / batch_size))
+        return res
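+
+# Usage sketch: for logits of shape (N, C) and integer targets of shape (N,),
+# accuracy(outputs, targets, topk=(1, 5)) returns [top1, top5] as 0-dim tensors
+# already scaled to percentages, e.g. tensor(87.5000).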
+
+def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth'):
+    save_path = os.path.join(save_dir, filename)
+    torch.save(state, save_path)
+    if is_best:
+        best_path = os.path.join(save_dir, 'model_best.pth')
+        torch.save(state, best_path)
+
+def set_seed(seed, deterministic=False):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    if deterministic:
+        cudnn.deterministic = True
+        cudnn.benchmark = False
+    else:
+        cudnn.deterministic = False
+        cudnn.benchmark = True
+
+# ----------------------------
+# Argument parser
+# ----------------------------
+def parse_args():
+    parser = argparse.ArgumentParser(description='Generic PyTorch training template (DDP/AMP) with LocalModel priority')
+    parser.add_argument('--name', default='run', type=str, help='experiment name (log/checkpoints dir)')
+    parser.add_argument('--seed', default=42, type=int, help='random seed')
+    parser.add_argument('--arch', default='None', type=str, help='model name')
+    parser.add_argument('--deterministic', action='store_true', help='set cudnn deterministic (may be slower)')
+    parser.add_argument('--dataset', default='cifar10', choices=['cifar10','cifar100','imagenet','custom'], help='which dataset')
+    parser.add_argument('--datapath', default='./data', type=str, help='dataset root / imagenet root / custom root')
+    parser.add_argument('--imagenet_dir', default='./imagenet', type=str, help='if dataset=imagenet, path to imagenet root')
+    parser.add_argument('--custom_eval_dir', default=None, help='if dataset=custom, provide val dir')
+    parser.add_argument('--num_workers', default=4, type=int, help='dataloader workers per process')
+    parser.add_argument('--epochs', default=200, type=int)
+    parser.add_argument('--steps', default=0, type=int, help='max steps to run (if >0, training will stop when global_step reaches this)')
+    parser.add_argument('--batch_size', default=128, type=int)
+    parser.add_argument('--model_name', default='resnet18', help='torchvision model name or python path e.g. mypkg.mymodule.Model (used if no local Model)')
+    parser.add_argument('--num_classes', default=None, type=int, help='override num classes (auto-detect for common sets)')
+    parser.add_argument('--pretrained', action='store_true', help='use torchvision pretrained weights when available')
+    parser.add_argument('--optimizer', default='sgd', choices=['sgd','adam','adamw'], help='optimizer')
+    parser.add_argument('--lr', '--learning_rate', default=0.1, type=float)
+    parser.add_argument('--momentum', default=0.9, type=float)
+    parser.add_argument('--weight_decay', default=5e-4, type=float)
+    parser.add_argument('--nesterov', action='store_true')
+    parser.add_argument('--scheduler', default='multistep', choices=['multistep','step','cosine','none'], help='lr scheduler')
+    parser.add_argument('--milestones', default='100,150', type=str, help='milestones for multistep (comma sep)')
+    parser.add_argument('--step_size', default=30, type=int, help='step size for StepLR or cosine max epochs')
+    parser.add_argument('--gamma', default=0.1, type=float)
+    parser.add_argument('--scheduler_step_per_batch', action='store_true', help='call scheduler.step() per batch (for some schedulers)')
+    parser.add_argument('--resume', default='', type=str, help='path to checkpoint to resume from')
+    parser.add_argument('--start_epoch', default=0, type=int)
+    parser.add_argument('--print_freq', default=100, type=int)
+    parser.add_argument('--save_freq', default=10, type=int, help='save checkpoint every N epochs (rank0 only)')
+    # note: with action='store_true' and default=True, AMP is always on; the flag is kept for CLI compatibility
+    parser.add_argument('--amp', action='store_true', default=True, help='use automatic mixed precision (AMP)')
+    parser.add_argument('--grad_accum_steps', default=1, type=int, help='gradient accumulation steps')
+    parser.add_argument('--local_rank', default=None, type=int, help='local rank passed by torchrun (if any). Use -1 or None for non-distributed')
+    parser.add_argument('--cutmix_prob', default=0.0, type=float)
+    parser.add_argument('--beta', default=1.0, type=float)
+    parser.add_argument('--seed_sampler', default=False, action='store_true', help='set sampler epoch seeds to make deterministic distributed shuffling')
+    args = parser.parse_args()
+    args.milestones = [int(x) for x in args.milestones.split(',')] if args.milestones else []
+    return args
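+
+# Note on --arch: the value names a Python module importable from the working directory;
+# e.g. --arch volo imports volo.py and calls its Model(num_classes) factory
+# (see build_model_with_local_priority below).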
+
+# ----------------------------
+# build model (prefer a local Model)
+# ----------------------------
+def build_model_with_local_priority(args, device=None):
+    """
+    Import Model() from the module named by args.arch.
+    Abort with an error if the module does not exist or has no Model class.
+    """
+    try:
+        # dynamically import the module, e.g. args.arch = "volo"
+        mod = importlib.import_module(args.arch)
+        Model = getattr(mod, "Model")  # fetch the Model factory from the module
+    except Exception as e:
+        raise RuntimeError(
+            f"Could not import model module '{args.arch}', or it does not define a Model class."
+            f"\nError: {e}"
+        )
+
+    # resolve the number of classes for the dataset
+    if args.dataset == 'cifar10':
+        num_classes = 10
+    elif args.dataset == 'cifar100':
+        num_classes = 100
+    else:
+        print(f"[ERROR] Unsupported dataset type: {args.dataset}; cannot determine the number of classes. Exiting.")
+        sys.exit(1)
+
+    # instantiate
+    try:
+        model = Model(num_classes)
+    except Exception as e:
+        raise RuntimeError(
+            f"Model() instantiation failed; check the model constructor.\nError: {e}"
+        )
+
+    return model
+
+# ----------------------------
+# Data loader factory
+# ----------------------------
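+# Note: the CIFAR pipelines below resize the 32x32 images to 224x224 crops so that
+# fixed-input-size ImageNet-style models (such as the VOLO entry in volo.py) can be
+# trained unchanged; this is a deliberate choice rather than standard CIFAR practice.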
+def build_dataloaders(args, rank, world_size):
+    if args.dataset == 'cifar10' or args.dataset == 'cifar100':
+        # CIFAR-10 statistics (reused for CIFAR-100 as in the original script)
+        mean = (0.4914, 0.4822, 0.4465)
+        std = (0.2470, 0.2435, 0.2616) if args.dataset == 'cifar10' else (0.2023, 0.1994, 0.2010)
+        # train_transform = transforms.Compose([
+        #     transforms.RandomCrop(32, padding=4),
+        #     transforms.RandomHorizontalFlip(),
+        #     transforms.ToTensor(),
+        #     transforms.Normalize(mean, std),
+        # ])
+        # test_transform = transforms.Compose([
+        #     transforms.ToTensor(),
+        #     transforms.Normalize(mean, std),
+        # ])
+
+        train_transform = transforms.Compose([  # 2025/12/3: adopted starting with the Visformer port
+            transforms.Resize(256),             # upscale to 256 first
+            transforms.RandomCrop(224),         # then randomly crop to 224 (closer to ImageNet-style augmentation)
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize(mean, std),
+        ])
+        test_transform = transforms.Compose([
+            transforms.Resize(256),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            transforms.Normalize(mean, std),
+        ])
+        root = args.datapath
+        if args.dataset == 'cifar10':
+            train_set = datasets.CIFAR10(root=root, train=True, download=False, transform=train_transform)
+            val_set = datasets.CIFAR10(root=root, train=False, download=False, transform=test_transform)
+            num_classes = 10
+        else:
+            train_set = datasets.CIFAR100(root=root, train=True, download=False, transform=train_transform)
+            val_set = datasets.CIFAR100(root=root, train=False, download=False, transform=test_transform)
+            num_classes = 100
+
+    elif args.dataset == 'imagenet':
+        train_dir = os.path.join(args.imagenet_dir, 'train')
+        val_dir = os.path.join(args.imagenet_dir, 'val')
+        train_transform = transforms.Compose([
+            transforms.RandomResizedCrop(224),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225)),
+        ])
+        test_transform = transforms.Compose([
+            transforms.Resize(256),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225)),
+        ])
+        train_set = datasets.ImageFolder(train_dir, train_transform)
+        val_set = datasets.ImageFolder(val_dir, test_transform)
+        num_classes = args.num_classes or 1000
+
+    elif args.dataset == 'custom':
+        train_dir = os.path.join(args.datapath, 'train')
+        val_dir = args.custom_eval_dir or os.path.join(args.datapath, 'val')
+        train_transform = transforms.Compose([
+            transforms.RandomResizedCrop(224),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+        ])
+        test_transform = transforms.Compose([
+            transforms.Resize(256),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+        ])
+        train_set = datasets.ImageFolder(train_dir, train_transform)
+        val_set = datasets.ImageFolder(val_dir, test_transform)
+        num_classes = len(train_set.classes)
+    else:
+        raise ValueError("Unknown dataset")
+
+    if dist.is_initialized() and world_size > 1:
+        train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True)
+    else:
+        train_sampler = None
+
+    train_loader = DataLoader(train_set,
+                              batch_size=args.batch_size,
+                              shuffle=(train_sampler is None),
+                              num_workers=args.num_workers,
+                              pin_memory=True,
+                              sampler=train_sampler,
+                              drop_last=False)
+    val_loader = DataLoader(val_set,
+                            batch_size=args.batch_size,
+                            shuffle=False,
+                            num_workers=args.num_workers,
+                            pin_memory=True)
+
+    return train_loader, val_loader, num_classes, train_sampler
+
+# ----------------------------
+# Train & validate
+# ----------------------------
+def train_one_epoch(args, epoch, model, criterion, optimizer, train_loader, device, scaler, scheduler=None, train_sampler=None, global_step_start=0, max_global_steps=None):
+    """
+    If max_global_steps is not None, exit early once global_step reaches it.
+    Returns: epoch_summary_dict, step_logs_list, global_step_end
+    step_logs_list: list of dicts with per-step info (for logging to CSV if needed)
+    """
+    batch_time = AverageMeter('Time')
+    data_time = AverageMeter('Data')
+    losses = AverageMeter('Loss')
+    top1 = AverageMeter('Acc@1')
+    top5 = AverageMeter('Acc@5')
+
+    model.train()
+    end = time.time()
+    optimizer.zero_grad()
+
+    iters = len(train_loader)
+    step_logs = []
+    global_step = global_step_start
+
+    for i, (images, targets) in enumerate(train_loader):
+        # check the global step limit
+        if (max_global_steps is not None) and (global_step >= max_global_steps):
+            break
+
+        data_time.update(time.time() - end)
+        images = images.to(device, non_blocking=True)
+        targets = targets.to(device, non_blocking=True)
+
+        if args.amp:
+            with amp.autocast():
+                outputs = model(images)
+                loss = criterion(outputs, targets) / args.grad_accum_steps
+        else:
+            outputs = model(images)
+            loss = criterion(outputs, targets) / args.grad_accum_steps
+
+        if args.amp:
+            scaler.scale(loss).backward()
+        else:
+            loss.backward()
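+
+        # Accumulation arithmetic: the loss is divided by grad_accum_steps before backward,
+        # so gradients summed over one accumulation window match a single large batch;
+        # e.g. --batch_size 16 --grad_accum_steps 8 approximates an effective batch of 128.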
+        if (i + 1) % args.grad_accum_steps == 0:
+            if args.amp:
+                scaler.step(optimizer)
+                scaler.update()
+            else:
+                optimizer.step()
+            optimizer.zero_grad()
+            if scheduler is not None and args.scheduler_step_per_batch:
+                scheduler.step()
+
+        with torch.no_grad():
+            acc1, acc5 = accuracy(outputs, targets, topk=(1,5))
+            losses.update(loss.item() * args.grad_accum_steps, images.size(0))
+            top1.update(acc1.item(), images.size(0))
+            top5.update(acc5.item(), images.size(0))
+
+        batch_time.update(time.time() - end)
+        end = time.time()
+
+        # increment global step AFTER processing this batch
+        global_step += 1
+
+        # per-step print (controlled by print_freq); format:
+        #   Epoch[23]:step[1/32] step_train_loss 3.0075 acc1 25.95 acc5 54.46
+        # i+1 / iters is the human-readable "step within this epoch"
+        if ((global_step % args.print_freq == 0) or (i == iters - 1)) and ((dist.get_rank() if dist.is_initialized() else 0) == 0):
+            lr = optimizer.param_groups[0]['lr']
+            # note: losses.val is the current batch loss (rescaled to undo the grad_accum division);
+            # losses.avg is the running epoch average
+            print(f"Epoch[{epoch}]:step[{i+1}/{iters}] step_train_loss {losses.val:.4f} acc1 {top1.val:.2f} acc5 {top5.val:.2f}")
+
+        # collect the per-step log
+        step_logs.append({
+            'epoch': epoch,
+            'batch_idx': i,
+            'global_step': global_step,
+            'lr': optimizer.param_groups[0]['lr'],
+            'loss': losses.val,
+            'loss_avg': losses.avg,
+            'acc1': top1.val,
+            'acc1_avg': top1.avg,
+            'acc5': top5.val,
+            'acc5_avg': top5.avg,
+            'time': batch_time.val
+        })
+
+        # if max_global_steps was reached inside the epoch, break (also checked at the next loop start)
+        if (max_global_steps is not None) and (global_step >= max_global_steps):
+            if (dist.get_rank() if dist.is_initialized() else 0) == 0:
+                print(f"[Info] Reached max_global_steps={max_global_steps}; stopping early within the epoch.")
+            break
+
+    if scheduler is not None and not args.scheduler_step_per_batch:
+        scheduler.step()
+
+    return OrderedDict([('loss', losses.avg), ('acc1', top1.avg), ('acc5', top5.avg)]), step_logs, global_step
+
+def validate(args, model, val_loader, criterion, device):
+    losses = AverageMeter('Loss')
+    top1 = AverageMeter('Acc@1')
+    top5 = AverageMeter('Acc@5')
+
+    model.eval()
+    with torch.no_grad():
+        for i, (images, targets) in enumerate(tqdm(val_loader)):
+            images = images.to(device, non_blocking=True)
+            targets = targets.to(device, non_blocking=True)
+            outputs = model(images)
+            loss = criterion(outputs, targets)
+            acc1, acc5 = accuracy(outputs, targets, topk=(1,5))
+            losses.update(loss.item(), images.size(0))
+            top1.update(acc1.item(), images.size(0))
+            top5.update(acc5.item(), images.size(0))
+    return OrderedDict([('loss', losses.avg), ('acc1', top1.avg), ('acc5', top5.avg)])
+
+# ----------------------------
+# Main
+# ----------------------------
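+# Launch sketch (flags as defined in parse_args above):
+#   single card : python weloTrainStep.py --arch volo --dataset cifar100 --datapath ../data
+#   distributed : torchrun --nproc_per_node=N weloTrainStep.py ...
+#                 (LOCAL_RANK is read from the environment; init_method='env://', nccl backend)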
+def main():
+    args = parse_args()
+
+    # handle local_rank from the env if not provided
+    local_rank_env = os.environ.get('LOCAL_RANK', None)
+    if args.local_rank is None and local_rank_env is not None:
+        args.local_rank = int(local_rank_env)
+
+    distributed = (args.local_rank is not None and args.local_rank != -1)
+    if distributed:
+        dist.init_process_group(backend='nccl', init_method='env://')
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+    else:
+        rank = 0
+        world_size = 1
+
+    if distributed:
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device('cuda', args.local_rank)
+    else:
+        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    set_seed(args.seed + (rank if distributed else 0), deterministic=args.deterministic)
+
+    save_dir = os.path.join('models', args.name)
+    if rank == 0:
+        os.makedirs(save_dir, exist_ok=True)
+        with open(os.path.join(save_dir, 'args.json'), 'w') as f:
+            json.dump(vars(args), f, indent=2)
+    if distributed:
+        dist.barrier()
+
+    train_loader, val_loader, auto_num_classes, train_sampler = build_dataloaders(args, rank, world_size)
+    if args.num_classes is None:
+        args.num_classes = auto_num_classes
+
+    # build the model from the local module named by --arch
+    model = build_model_with_local_priority(args, device)
+    model.to(device)
+
+    if distributed:
+        model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
+
+    criterion = nn.CrossEntropyLoss().to(device)
+    params = [p for p in model.parameters() if p.requires_grad]
+    if args.optimizer == 'sgd':
+        optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum,
+                              weight_decay=args.weight_decay, nesterov=args.nesterov)
+    elif args.optimizer == 'adam':
+        optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
+    elif args.optimizer == 'adamw':
+        optimizer = optim.AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
+    else:
+        raise ValueError('Unknown optimizer')
+
+    scheduler = None
+    if args.scheduler == 'multistep':
+        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
+    elif args.scheduler == 'step':
+        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
+    elif args.scheduler == 'cosine':
+        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
+    elif args.scheduler == 'none':
+        scheduler = None
+
+    scaler = amp.GradScaler() if args.amp else None
+
+    start_epoch = args.start_epoch
+    best_acc = 0.0
+    if args.resume:
+        if os.path.isfile(args.resume):
+            ckpt = torch.load(args.resume, map_location='cpu')
+            model_state = ckpt.get('state_dict', ckpt)
+            if isinstance(model, DDP):
+                model.module.load_state_dict(model_state)
+            else:
+                model.load_state_dict(model_state)
+            if 'optimizer' in ckpt:
+                optimizer.load_state_dict(ckpt['optimizer'])
+            start_epoch = ckpt.get('epoch', start_epoch)
+            best_acc = ckpt.get('best_acc', best_acc)
+            print(f"=> resumed from {args.resume}, start_epoch={start_epoch}")
+        else:
+            print(f"=> resume path {args.resume} not found")
+
+    log_columns = ['epoch', 'lr', 'loss', 'acc1', 'acc5', 'val_loss', 'val_acc1', 'val_acc5']
+    log_df = pd.DataFrame(columns=log_columns)
+    # step-level log
+    step_log_columns = ['epoch', 'batch_idx', 'global_step', 'lr', 'loss', 'loss_avg', 'acc1', 'acc1_avg', 'acc5', 'acc5_avg', 'time']
+    step_log_df = pd.DataFrame(columns=step_log_columns)
+
+    total_epochs = args.epochs
+    # global step counter (persists across epochs during training)
+    global_step = 0
+
+    epoch = start_epoch
+    # loop until either the epoch criterion or the step criterion is met
+    while True:
+        if train_sampler is not None:
+            if args.seed_sampler:
+                train_sampler.set_epoch(epoch + args.seed)
+            else:
+                train_sampler.set_epoch(epoch)
+
+        if rank == 0:
+            print(f"==== Epoch {epoch}/{total_epochs - 1} ====")
+
+        # if args.steps (>0) was given, pass the remaining allowed step count to
+        # train_one_epoch; otherwise max_global_steps=None (run full epochs)
+        if args.steps and args.steps > 0:
+            max_global_steps = args.steps
+        else:
+            max_global_steps = None
+
+        train_log, step_logs, global_step = train_one_epoch(
+            args, epoch, model, criterion, optimizer, train_loader, device, scaler,
+            scheduler, train_sampler, global_step_start=global_step, max_global_steps=max_global_steps
+        )
+
+        if rank == 0 and step_logs:
+            # persist the per-step logs collected by train_one_epoch
+            step_log_df = pd.concat([step_log_df, pd.DataFrame(step_logs)], ignore_index=True)
+            step_log_df.to_csv(os.path.join(save_dir, 'step_log.csv'), index=False)
+
+        # if step mode is enabled and the limit was reached, leave main() directly (skip validate)
+        if max_global_steps is not None and global_step >= max_global_steps:
+            if rank == 0:
+                print(f"[Main] Reached max_global_steps={max_global_steps} (global_step={global_step}); stopping early (skipping validation).")
+            # return from main() directly; skip the validate / save logic below
+            return
+
+        # validate and record epoch-level logs (in step mode training may stop mid-epoch
+        # above, but when we get here we still validate once)
+        val_log = validate(args, model, val_loader, criterion, device)
+        current_lr = optimizer.param_groups[0]['lr']
+
+        if rank == 0:
+            # epoch summary print, format aligned with the sample output
+            print(f"Epoch[{epoch}]: epoch_train_loss {train_log['loss']:.4f} acc1 {train_log['acc1']:.2f} acc5 {train_log['acc5']:.2f} | "
+                  f"val_loss {val_log['loss']:.4f} acc1 {val_log['acc1']:.2f} acc5 {val_log['acc5']:.2f} lr {current_lr:.6f}")
+            row = {
+                'epoch': epoch,
+                'lr': current_lr,
+                'loss': train_log['loss'],
+                'acc1': train_log['acc1'],
+                'acc5': train_log['acc5'],
+                'val_loss': val_log['loss'],
+                'val_acc1': val_log['acc1'],
+                'val_acc5': val_log['acc5'],
+            }
+            new_row_df = pd.DataFrame([row])
+            log_df = pd.concat([log_df, new_row_df], ignore_index=True)
+            log_df.to_csv(os.path.join(save_dir, 'log.csv'), index=False)
+
+            is_best = val_log['acc1'] > best_acc
+            if is_best:
+                best_acc = val_log['acc1']
+            if (epoch % args.save_freq == 0) or is_best or ((max_global_steps is None) and (epoch == total_epochs - 1)):
+                state = {
+                    'epoch': epoch,
+                    'state_dict': model.module.state_dict() if isinstance(model, DDP) else model.state_dict(),
+                    'best_acc': best_acc,
+                    'optimizer': optimizer.state_dict(),
+                    'args': vars(args)
+                }
+                save_checkpoint(state, is_best, save_dir, filename=f'checkpoint_epoch_{epoch}.pth')
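+                # Resume sketch: pass --resume models/<name>/checkpoint_epoch_<E>.pth on a later
+                # run; main() then restores state_dict, optimizer state, epoch, and best_acc (see above).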
+
+        # increment the epoch
+        epoch += 1
+
+        # stopping conditions:
+        # 1) if step mode is enabled and the step budget was reached -> stop
+        if args.steps and args.steps > 0:
+            if global_step >= args.steps:
+                if rank == 0:
+                    print(f"[Main] Reached the requested steps={args.steps} (global_step={global_step}); training finished.")
+                break
+
+        # 2) if steps are not used, stop when epoch >= epochs
+        else:
+            if epoch >= total_epochs:
+                if rank == 0:
+                    print(f"[Main] Reached the requested epochs={total_epochs} (epoch={epoch}); training finished.")
+                break
+
+    if dist.is_initialized():
+        dist.barrier()
+    if rank == 0:
+        print("Training finished. Best val acc1: {:.2f}".format(best_acc))
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file