2_load_directly_mps.py

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from models import MODEL_IDS  # local helper module mapping short names to HF repo ids
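# NOTE: models.py is not shown here. A minimal sketch of what it presumably
# contains; the key is taken from the usage below, and the Hugging Face repo
# id is the public Llama 3.2 1B checkpoint (an assumption about this repo's
# actual contents):
#
#   MODEL_IDS = {
#       "llama_3.2_1b": "meta-llama/Llama-3.2-1B",
#   }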
model_id = MODEL_IDS["llama_3.2_1b"]
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Ensure the tokenizer has a pad token (Llama tokenizers ship without one)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # <|end_of_text|>

# Run on Apple's MPS backend when available, otherwise fall back to CPU
device = "mps" if torch.backends.mps.is_available() else "cpu"
model = model.to(device)

user_input = "Who are you?"

# Tokenize the input; returns input_ids and attention_mask
inputs = tokenizer(user_input, return_tensors="pt", padding=True)
input_ids = inputs.input_ids.to(device)
attention_mask = inputs.attention_mask.to(device)

# Generate with the attention mask; max_length=50 counts prompt + new tokens
output = model.generate(
    input_ids,
    attention_mask=attention_mask,
    pad_token_id=tokenizer.pad_token_id,  # avoids the missing-pad-token warning
    max_length=50,
    do_sample=True,
)
print("Output:", output)  # raw tensor of token ids

# Decode the full sequence (the prompt is included in the output)
response = tokenizer.decode(output[0], skip_special_tokens=True)
print("Model Response:", response)