watt-tool-8B-GGUF / model-index.json
{
  "_name_or_path": "legionarius/watt-tool-8B-GGUF",
  "architectures": ["LlamaForCausalLM"],
  "model_type": "llama",
  "quantization_config": {
    "bits": 4,
    "format": "gguf"
  },
  "default_revision": "main",
  "default_variant": {
    "filename": "watt-tool-8B-GGUF-Q4_K_M.gguf"
  }
}
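
The index names the repo, the revision, and the default Q4_K_M GGUF file. Below is a minimal sketch of how a client might consume it: read the JSON, download the named variant with huggingface_hub, and load it with llama-cpp-python. The loading flow, the n_ctx value, and the use of llama-cpp-python are assumptions for illustration; only the field names and values come from the file above.

# Minimal sketch (assumptions noted above): resolve the default GGUF variant
# listed in model-index.json and load it with llama-cpp-python.
import json

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Parse the index to find which .gguf file to fetch.
with open("model-index.json") as f:
    index = json.load(f)

repo_id = index["_name_or_path"]                 # "legionarius/watt-tool-8B-GGUF"
revision = index.get("default_revision", "main")
filename = index["default_variant"]["filename"]  # "watt-tool-8B-GGUF-Q4_K_M.gguf"

# Download the 4-bit quantized weights (Q4_K_M, per quantization_config) ...
model_path = hf_hub_download(repo_id=repo_id, filename=filename, revision=revision)

# ... and load them as a llama.cpp model.
llm = Llama(model_path=model_path, n_ctx=4096)
print(llm("Hello", max_tokens=16)["choices"][0]["text"])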