Update config.json
config.json: +2 -2
@@ -32,10 +32,10 @@
     "prompt_format_dict": {"main_start": "<human>: ", "main_stop": "<classify> sentiment </classify>\n", "start_llm_response": "<bot>:"},
     "tokenizer_local": "tokenizer_tl.json",
     "tokenizer_config": {"bos_id":[1], "bos_token":["<s>"], "eos_id":[2],"eos_token":["</s>"]},
-    "model_parent": "llmware/
+    "model_parent": "llmware/slim-sentiment",
     "description": "Sentiment function calling model from llmware - finetuned on tiny-llama - 1.1 parameter base",
     "quantization": "int4",
-    "model_family": "
+    "model_family": "ONNXGenerativeModel",
     "parameters": 1.1,
     "output_format": "{'sentiment': ['positive']}",
     "primary_keys": ["sentiment"],