# mistrali.py
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch
# Model name
model_name = "mistralai/Mistral-7B-v0.1"
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the model in 4-bit so it fits in ~12 GB of VRAM.
# Passing load_in_4bit directly to from_pretrained is deprecated;
# the idiomatic route is a BitsAndBytesConfig.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # compute in fp16 to save memory
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # place layers on the available GPU(s) automatically
    quantization_config=quantization_config,
)
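# Optional sanity check (a minimal sketch, not part of the original script):
# transformers' get_memory_footprint() reports how much memory the loaded
# weights occupy, useful for confirming the 4-bit model fits in 12 GB of VRAM.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")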
# Your prompt
prompt = """Explain how cancer starts to develop in the human body in detail."""
# Tokenize and move to GPU
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
# Generate long output
output = model.generate(
    **inputs,
    max_new_tokens=500,  # increase for longer text
    do_sample=True,      # enables sampling for more natural output
    temperature=0.7,     # randomness
    top_p=0.9,           # nucleus sampling
    top_k=50,            # limit to the top 50 choices per token
    pad_token_id=tokenizer.eos_token_id,
)
# Decode and print
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_text)
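# Optional: stream tokens to the terminal as they are generated instead of
# waiting for the full completion. A minimal sketch using transformers'
# TextStreamer, assuming the model, tokenizer, and inputs loaded above;
# the sampling parameters mirror the generate() call above.
from transformers import TextStreamer

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    **inputs,
    max_new_tokens=500,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    top_k=50,
    pad_token_id=tokenizer.eos_token_id,
    streamer=streamer,  # prints decoded tokens as they arrive
)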