From 01b5c192bb3c50d874821178579d144a3a6f8fc6 Mon Sep 17 00:00:00 2001
From: wong <11776388+chesonsbwong@user.noreply.gitee.com>
Date: Sat, 20 Apr 2024 00:06:39 +0800
Subject: [PATCH 1/2] feat: add Chinese Clip

---
 all_clip/cn_clip.py | 55 +++++++++++++++++++++++++++++++++++++++++++++
 all_clip/main.py    |  2 ++
 tests/test_main.py  |  1 +
 3 files changed, 58 insertions(+)
 create mode 100644 all_clip/cn_clip.py

diff --git a/all_clip/cn_clip.py b/all_clip/cn_clip.py
new file mode 100644
index 0000000..32db740
--- /dev/null
+++ b/all_clip/cn_clip.py
@@ -0,0 +1,55 @@
+"""https://github.com/OFA-Sys/Chinese-CLIP"""
+import os.path
+from typing import Dict
+
+import cn_clip.clip
+import torch
+from torch import nn
+
+
+class CnCLIPForBenchmark(nn.Module):
+    """
+    enable to do model.encode_text(dict_tensor)
+    """
+
+    def __init__(self, model, device):
+        super().__init__()
+        self.model = model
+        self.device = torch.device(device=device)
+
+    def encode_image(self, image):
+        return self.model.encode_image(image)
+
+    def encode_text(self, text):
+        return self.model.encode_text(text)
+
+    def forward(self, *args, **kwargs):
+        return self.model(*args, **kwargs)
+
+
+def load_chinese_clip(clip_model, use_jit, device, clip_cache_path):  # pylint: disable=unused-argument
+    """load chinese clip"""
+    try:
+        from cn_clip.clip.utils import create_model, image_transform, _MODEL_INFO  # pylint: disable=import-outside-toplevel
+    except ImportError as exc:
+        raise ImportError(
+            "Install `Chinese-CLIP` by `pip install git+https://github.com/OFA-Sys/Chinese-CLIP.git`"
+        ) from exc
+    cache_dir = clip_cache_path
+    model_info = clip_model.split('/')
+
+    clip_model_parts = clip_model.split("/")
+    model_name = clip_model_parts[0]
+    checkpoint_file = "/".join(clip_model_parts[1:])
+
+    model_name = _MODEL_INFO[model_name]['struct']
+    checkpoint = None
+    if os.path.isfile(checkpoint_file):
+        with open(checkpoint_file, 'rb') as opened_file:
+            # loading saved checkpoint
+            checkpoint = torch.load(opened_file, 
+                                    map_location="cpu")
+    model = create_model(model_name, checkpoint)
+    model.to(device=device, dtype=torch.float32)
+    processor = image_transform()
+
+    return CnCLIPForBenchmark(model, device), processor, cn_clip.clip.tokenize
diff --git a/all_clip/main.py b/all_clip/main.py
index d31c928..2d72bcf 100644
--- a/all_clip/main.py
+++ b/all_clip/main.py
@@ -9,6 +9,7 @@
 from .open_clip import load_open_clip
 from .openai_clip import load_openai_clip
 from .ja_clip import load_japanese_clip
+from .cn_clip import load_chinese_clip
 
 
 _CLIP_REGISTRY = {
@@ -17,6 +18,7 @@
     "nm:": load_deepsparse,
     "ja_clip:": load_japanese_clip,
     "openai_clip:": load_openai_clip,
+    "cn_clip:": load_chinese_clip,
     "": load_openai_clip,
 }
 
diff --git a/tests/test_main.py b/tests/test_main.py
index 049b228..67dab88 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -13,6 +13,7 @@
         "hf_clip:patrickjohncyh/fashion-clip",
         "nm:mgoin/CLIP-ViT-B-32-laion2b_s34b_b79k-ds",
         "ja_clip:rinna/japanese-clip-vit-b-16",
+        "cn_clip:ViT-B-16/no_checkpoint"
     ],
 )
 def test_load_clip(model):

From 44370e810c1467827e740d30312abeab5753eaaa Mon Sep 17 00:00:00 2001
From: Romain Beaumont
Date: Sat, 16 Aug 2025 00:34:14 +0200
Subject: [PATCH 2/2] Update ci.yml

---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6b4e2a1..cb45c9b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,7 +31,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        model: ['openai_clip', 'open_clip', 'hf_clip']
+        model: ['openai_clip', 'open_clip', 'hf_clip', 'cn_clip']
     steps:
       - uses: actions/checkout@v4
 