forked from turboderp/exllama
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: test_compat.sh
More file actions
executable file
·23 lines (23 loc) · 3.05 KB
/
test_compat.sh
File metadata and controls
executable file
·23 lines (23 loc) · 3.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#!/usr/bin/env bash
# test_compat.sh — compatibility sweep for test_benchmark_inference.py.
#
# Runs the inference benchmark (verbose, sequence length 1024) against a fixed
# set of locally stored GPTQ-quantized models, printing a divider before each
# run. Each model is attempted independently: a failing benchmark does not stop
# the remaining runs (this matches the original script, where && only chained
# the divider echo to its own benchmark invocation).

# Root directory holding all test models; override by exporting MODEL_ROOT.
MODEL_ROOT=${MODEL_ROOT:-/mnt/str/models/_test_models}

# run_model <model-subdir> [extra benchmark args, e.g. -gs 1,20]
# Prints a divider, then benchmarks the model at $MODEL_ROOT/<model-subdir>.
run_model() {
  local model=$1
  shift
  echo "---------" && python test_benchmark_inference.py -v -l 1024 -d "$MODEL_ROOT/$model" "$@"
}

run_model Neko-Institute-of-Science_LLaMA-7B-4bit-128g -gs 1,20
run_model Neko-Institute-of-Science_LLaMA-13B-4bit-128g -gs 3,20
run_model Neko-Institute-of-Science_LLaMA-30B-4bit-32g
run_model Neko-Institute-of-Science_LLaMA-30B-4bit-128g
run_model reeducator_bluemoonrp-13b
run_model TehVenom_Metharme-13b-4bit-GPTQ
run_model TheBloke_airoboros-13B-GPTQ
run_model TheBloke_gpt4-x-vicuna-13B-GPTQ
run_model TheBloke_GPT4All-13B-snoozy-GPTQ
run_model TheBloke_guanaco-33B-GPTQ/
run_model TheBloke_h2ogpt-oasst1-512-30B-GPTQ # [1]
run_model TheBloke_koala-13B-GPTQ-4bit-128g
run_model TheBloke_Manticore-13B-GPTQ
run_model TheBloke_medalpaca-13B-GPTQ-4bit
run_model TheBloke_medalpaca-13B-GPTQ-4bit_compat
run_model TheBloke_vicuna-13B-1.1-GPTQ-4bit-128g
run_model TheBloke_VicUnlocked-30B-LoRA-GPTQ
run_model TheBloke_wizard-mega-13B-GPTQ
run_model TheBloke_Wizard-Vicuna-7B-Uncensored-GPTQ
run_model TheBloke_Wizard-Vicuna-13B-Uncensored-GPTQ
run_model TheBloke_WizardLM-7B-uncensored-GPTQ
run_model TheBloke_WizardLM-30B-Uncensored-GPTQ
run_model Yhyu13_chimera-inst-chat-13b-gptq-4bit