File size: 294 Bytes · commit 7b12067
{
  "_name_or_path": "legionarius/watt-tool-8B-GGUF",
  "architectures": ["LlamaForCausalLM"],
  "model_type": "llama",
  "quantization_config": {
    "bits": 4,
    "format": "gguf"
  },
  "default_revision": "main",
  "default_variant": {
    "filename": "watt-tool-8B-GGUF-Q4_K_M.gguf"
  }
}
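
Note: quantization_config marks the weights as 4-bit GGUF, while default_revision and default_variant are not standard transformers config fields; they read as repo-specific hints telling a client which GGUF file to fetch. Below is a minimal sketch of how a downloader could consume these hints, assuming this file is the repo's config.json and that huggingface_hub and llama-cpp-python are installed; the n_ctx value and the prompt are purely illustrative.

import json

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

REPO_ID = "legionarius/watt-tool-8B-GGUF"

# Fetch the config shown above (assumed to live at config.json in the repo)
# and read the custom download hints out of it.
config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json")
with open(config_path) as fh:
    config = json.load(fh)

# Custom keys, not standard transformers fields; fall back to sane defaults.
revision = config.get("default_revision", "main")
gguf_file = config["default_variant"]["filename"]  # the Q4_K_M quant per the config

# Download the 4-bit (Q4_K_M) weights and load them with llama.cpp.
model_path = hf_hub_download(repo_id=REPO_ID, filename=gguf_file, revision=revision)
llm = Llama(model_path=model_path, n_ctx=4096)  # n_ctx is an illustrative choice

out = llm("List the tools you can call.", max_tokens=64)
print(out["choices"][0]["text"])

llama-cpp-python is used here because it is the common runtime for Q4_K_M GGUF files; recent transformers releases can also load GGUF weights via from_pretrained(..., gguf_file=...), though support varies by version.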