{
  "model_lib": "TinyLlama-1.1b-q4f16_1",
  "local_id": "TinyLlama-1.1B-Chat-v0.3-q4f16_1",
  "estimated_vram_req": 1000000000,
  "conv_config": {
    "seps": ["\n", "<|im_end|>"],
    "stop_tokens": [2],
    "offset": 0,
    "separator_style": 0,
    "messages": [],
    "stop_str": "<|im_end|>",
    "roles": ["<|im_start|>user", "<|im_start|>assistant"],
    "role_msg_sep": ": ",
    "role_empty_sep": "\n",
    "system": "<|im_start|>system you are a helpful assistant<|im_end|>",
    "add_bos": true,
    "prefix_tokens": [],
    "name": "chatml2"
  },
  "temperature": 0.7,
  "repetition_penalty": 1.0,
  "top_p": 0.95,
  "mean_gen_len": 128,
  "max_gen_len": 512,
  "max_window_size": 2048,
  "num_shards": 1,
  "shift_fill_factor": 0.3,
  "tokenizer_files": [
    "added_tokens.json",
    "tokenizer.json",
    "tokenizer.model"
  ],
  "model_category": "llama",
  "model_name": "TinyLlama-1.1B-Chat-v0.3",
  "vocab_size": 32003
}
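
For reference, below is a minimal sketch of how the conv_config fields above (system, roles, seps, role_msg_sep, role_empty_sep, stop_str) can combine into a ChatML-style prompt for a single turn. This is an illustrative assumption, not MLC-LLM's actual template code; the build_prompt helper and the inlined dictionary are hypothetical.

    # Illustrative only: hypothetical helper showing how the conversation
    # template fields from the config above assemble a prompt string.
    conv_config = {
        "system": "<|im_start|>system you are a helpful assistant<|im_end|>",
        "roles": ["<|im_start|>user", "<|im_start|>assistant"],
        "seps": ["\n", "<|im_end|>"],
        "role_msg_sep": ": ",
        "role_empty_sep": "\n",
        "stop_str": "<|im_end|>",
    }

    def build_prompt(user_message: str) -> str:
        """Assemble a single-turn prompt from the conversation template."""
        return "".join([
            # System prompt, followed by the in-turn separator.
            conv_config["system"] + conv_config["seps"][0],
            # User turn: role tag, role/message separator, message, end-of-turn.
            conv_config["roles"][0] + conv_config["role_msg_sep"] + user_message
            + conv_config["seps"][1] + conv_config["seps"][0],
            # Assistant turn is left open (empty) so the model completes it.
            conv_config["roles"][1] + conv_config["role_empty_sep"],
        ])

    print(build_prompt("What is TinyLlama?"))
    # Decoding would stop when the model emits stop_str ("<|im_end|>")
    # or the stop token id 2 (the Llama EOS token) listed in stop_tokens.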
|
|