william590y committed on
Commit b9eab4f · verified · 1 Parent(s): 54477e7

Update config.json

Files changed (1): config.json (+6 −2)
config.json CHANGED
@@ -13,7 +13,7 @@
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 32768,
-  "model_type": "llama", // Changed from "mistral" to "llama"
+  "model_type": "llama", // Ensure it's "llama"
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
@@ -24,5 +24,9 @@
   "torch_dtype": "float16",
   "transformers_version": "4.47.0",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 32000,
+  "auto_map": {
+    "AutoModelForCausalLM": "transformers.models.llama.modeling_llama.LlamaForCausalLM",
+    "AutoTokenizer": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+  }
   }
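
With "model_type" set to "llama", the checkpoint should resolve through the standard transformers Auto classes. A minimal sketch of that load path follows; the repo id is a hypothetical placeholder, since the commit does not name the repository. Note also that standard JSON parsers reject "//" comments, so the inline comments shown in the diff must not appear in the committed config.json itself.

# Minimal sketch: loading a checkpoint whose config.json declares
# model_type "llama", as in this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "william590y/some-model"  # hypothetical placeholder, not from the commit

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# torch.float16 matches the "torch_dtype": "float16" entry in the config
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)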