Skip to content

Commit 65f1f7f

Browse files
committed
fix
1 parent 987d89e commit 65f1f7f

File tree

2 files changed

+16
-15
lines changed

2 files changed

+16
-15
lines changed

.pre-commit-config.yaml

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -28,17 +28,17 @@ repos:
2828
rev: v6.0.0
2929
hooks:
3030
- id: trailing-whitespace
31-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
31+
exclude: ^(client_tools/|src/twinkle_client/)
3232
- id: check-yaml
33-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
33+
exclude: ^(client_tools/|src/twinkle_client/)
3434
- id: end-of-file-fixer
35-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
35+
exclude: ^(client_tools/|src/twinkle_client/)
3636
- id: requirements-txt-fixer
37-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
37+
exclude: ^(client_tools/|src/twinkle_client/)
3838
- id: double-quote-string-fixer
39-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
39+
exclude: ^(client_tools/|src/twinkle_client/)
4040
- id: check-merge-conflict
41-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
41+
exclude: ^(client_tools/|src/twinkle_client/)
4242
- id: mixed-line-ending
4343
args: ["--fix=lf"]
44-
exclude: ^(examples/|cookbook/|client_tools/|src/twinkle_client/)
44+
exclude: ^(client_tools/|src/twinkle_client/)

cookbook/client/tinker/sample.py

Lines changed: 9 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -9,19 +9,20 @@
99
from twinkle.data_format import Message, Trajectory
1010
from twinkle.template import Template
1111
from twinkle_client import init_tinker_compat_client
12-
from twinkle.data_format import Message, Trajectory
13-
from twinkle.template import Template
1412

1513
# Step 1: Define the base model and connect to the server
16-
base_model = 'Qwen/Qwen2.5-7B-Instruct'
17-
service_client = init_tinker_compat_client(base_url='http://localhost:8000')
18-
14+
base_model = 'Qwen/Qwen3-30B-A3B-Instruct-2507'
15+
service_client = init_tinker_compat_client(
16+
base_url='http://www.modelscope.cn/twinkle',
17+
api_key=os.environ.get('MODELSCOPE_SDK_TOKEN')
18+
)
1919
# Step 2: Create a sampling client by loading weights from a saved checkpoint.
2020
# The model_path is a twinkle:// URI pointing to a previously saved LoRA checkpoint.
2121
# The server will load the base model and apply the LoRA adapter weights.
22-
sampling_client = service_client.create_sampling_client(
23-
model_path='twinkle://20260212_174205-Qwen_Qwen2_5-7B-Instruct-51edc9ed/weights/twinkle-lora-2',
24-
base_model=base_model)
22+
service_client.create_sampling_client(
23+
model_path='twinkle://xxx-Qwen_Qwen3-30B-A3B-Instruct-2507-xxx/weights/twinkle-lora-1',
24+
base_model=base_model
25+
)
2526

2627
# Step 3: Load the tokenizer locally to encode the prompt and decode the results
2728
print(f'Using model {base_model}')

0 commit comments

Comments (0)