-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy pathdemo.py
More file actions
39 lines (31 loc) · 1.15 KB
/
demo.py
File metadata and controls
39 lines (31 loc) · 1.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from namo.api.vl import VLInfer
import os
from termcolor import colored
import torch
def chat():
    """Run an interactive vision-language chat loop on the console.

    Each turn the user types either a filesystem path to an image (which
    becomes the pending image for the next prompt) or free text (which is
    sent to the model, together with the pending image if one is set).
    A default image is pre-loaded for the first image-grounded turn.
    Exits cleanly on Ctrl-C / Ctrl-D instead of dumping a traceback.
    """
    # Prefer the GPU when available; fall back to CPU otherwise.
    model = VLInfer(
        model_type="namo", device="cuda:0" if torch.cuda.is_available() else "cpu"
    )
    # crt_input = [pending image path or None, latest text prompt or None]
    crt_input = ["images/cats.jpg", None]
    while True:
        try:
            img_or_txt = input(colored("\nUser (txt/img_path): ", "cyan")).strip()
        except (EOFError, KeyboardInterrupt):
            # Graceful exit on Ctrl-D / Ctrl-C rather than a traceback.
            print(colored("\nSystem: Bye.", "green"))
            break
        # Input whose first token is an existing path updates the pending
        # image; the empty-string guard avoids probing os.path.exists("").
        if img_or_txt and os.path.exists(img_or_txt.split(" ")[0]):
            crt_input[0] = img_or_txt
            print(colored("System: Image updated.", "green"))
            continue
        crt_input[1] = img_or_txt
        if crt_input[0] and crt_input[1]:
            # Image + text: consume the pending image with this prompt.
            print(colored("Assistant:", "green"), end=" ")
            model.generate(images=crt_input[0], prompt=crt_input[1], verbose=False)
            crt_input[0] = None
        elif crt_input[1]:
            # Pure text turn (no pending image).
            print(colored("Assistant:", "green"), end=" ")
            model.generate(images=None, prompt=crt_input[1], verbose=False)
        else:
            # Empty input and no pending image: nothing to do.
            print(
                colored("System: Please provide either an image or text input.", "red")
            )
# Script entry point: start the interactive chat loop when run directly.
if __name__ == "__main__":
    chat()