-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathilm-2b_watermark.py
More file actions
33 lines (26 loc) · 1.01 KB
/
ilm-2b_watermark.py
File metadata and controls
33 lines (26 loc) · 1.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
#!/bin/env python
# Watermark filter: uses InternLM-XComposer2-VL-1.8B to process ~5 512x512
# images/second on an RTX 4090.
# Reads image paths from stdin (one per line); prints only those paths for
# which the model answers "yes" to the watermark question.
import torch, os
from transformers import AutoModel, AutoTokenizer

# Inference only — disable autograd globally.
torch.set_grad_enabled(False)

# .cuda() already moves the weights to the GPU; no extra .to("cuda") needed.
model = AutoModel.from_pretrained('internlm/internlm-xcomposer2-vl-1_8b',
    trust_remote_code=True).cuda().eval()
tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2-vl-1_8b',
    trust_remote_code=True)
# BUGFIX: the original called tokenizer.to("cuda"), which raises
# AttributeError — HF tokenizers are CPU objects and have no .to() method.

query = "<ImageHere> Does this image contain a watermark,copyright,or signature?"

while True:
    # One image path per line; stop on EOF or a blank line.
    try:
        image_path = input()
    except EOFError:
        break
    if not image_path:
        break
    # torch.no_grad() is redundant with the global switch above but kept as a
    # local safety net; autocast gives mixed-precision inference on CUDA.
    with torch.no_grad():
        with torch.cuda.amp.autocast():
            response, _ = model.chat(tokenizer, query=query, image=image_path,
                                     history=[], do_sample=False)
    # do_sample=False makes the answer deterministic; treat any "yes..."
    # prefix (case-insensitive) as a positive detection.
    if response.strip().lower().startswith("yes"):
        print(image_path)