5_load_directly_chat_stream.py
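"""Stream a chat completion from Llama 3.2 1B Instruct token by token.

Instead of calling `model.generate()`, this script samples the next token
directly from the model's logits in a loop and prints each token as soon
as it is produced.
"""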
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM

from models import MODEL_IDS
model_id = MODEL_IDS["llama_3.2_1b_instruct"]
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# Ensure the tokenizer has a pad token
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # <|end_of_text|>

# Pick the best available device: Apple Silicon (MPS), CUDA GPU, or CPU
device = (
    "mps" if torch.backends.mps.is_available()
    else "cuda" if torch.cuda.is_available()
    else "cpu"
)
model = model.to(device)
model.eval()  # Set model to evaluation mode
# Construct the instruct-model prompt using the Llama 3 chat format
user_input = "What is the meaning of life?"
prompt = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>

You are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>

{user_input}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
# Tokenize the prompt; skip the tokenizer's automatic BOS token since the
# prompt string already starts with <|begin_of_text|>
input_ids = tokenizer(
    prompt, return_tensors="pt", add_special_tokens=False
).input_ids.to(device)
# Markers that delimit the assistant's reply in the decoded output
start_marker = "<|start_header_id|>assistant<|end_header_id|>"
end_marker = "<|eot_id|>"

# Start the generated sequence from the prompt tokens
generated_ids = input_ids

# Maximum number of new tokens to generate
max_new_tokens = 512
# Generate tokens one by one; each step re-runs the model on the full
# sequence (no KV cache), which is simple but not the fastest approach
with torch.no_grad():
    for _ in range(max_new_tokens):
        # Get the logits for the last position only
        outputs = model(generated_ids)
        logits = outputs.logits[:, -1, :]
        # Convert logits to a probability distribution
        probs = F.softmax(logits, dim=-1)
        # Sample the next token from the distribution
        next_token_id = torch.multinomial(probs, num_samples=1)
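        # For deterministic (greedy) decoding, take the most likely token instead:
        # next_token_id = torch.argmax(probs, dim=-1, keepdim=True)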
        # Append the new token to the sequence
        generated_ids = torch.cat([generated_ids, next_token_id], dim=-1)
        # Decode and print the token as soon as it is generated
        next_token = tokenizer.decode(next_token_id[0], skip_special_tokens=False)
        print(next_token, end="", flush=True)
        # Stop once the model emits the end-of-turn marker
        if end_marker in next_token:
            break
# Optionally, process the full response if needed
response = tokenizer.decode(generated_ids[0], skip_special_tokens=False)
# Extract the assistant's reply
if start_marker in response:
response = response.split(start_marker, 1)[-1].strip()
if end_marker in response:
response = response.split(end_marker, 1)[0].strip()
# Optionally, print the cleaned response
# print("\n" + response)
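# Note: transformers also ships a built-in streaming helper that achieves a
# similar effect with model.generate(); roughly:
# from transformers import TextStreamer
# streamer = TextStreamer(tokenizer, skip_prompt=True)
# model.generate(input_ids, streamer=streamer, max_new_tokens=512)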