taozi555 committed on
Commit bc3c2fa · verified · 1 Parent(s): a7fd15e

Upload folder using huggingface_hub
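The commit title matches the message huggingface_hub produces for folder uploads. A minimal sketch of the kind of call that creates such a commit; the local folder path and destination repo id are assumptions, not taken from this page:

```python
# Sketch only: folder path and repo id are assumptions for illustration.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from HF_TOKEN or the cached login
api.upload_folder(
    folder_path="./quantized-model",            # hypothetical local folder
    repo_id="taozi555/hiwaifu-12b-v1.1-fp8",    # assumed destination repo
    commit_message="Upload folder using huggingface_hub",
)
```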

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "taozi555/hiwaifu-12b-v1.1-fp8",
+  "_name_or_path": "MarinaraSpaghetti/NemoMix-Unleashed-12B",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -19,47 +19,7 @@
   "quantization_config": {
     "activation_scheme": "static",
     "ignored_layers": [
-      "model.layers.38.mlp.gate_proj",
-      "model.layers.21.mlp.gate_proj",
-      "model.layers.28.mlp.gate_proj",
-      "model.layers.31.mlp.gate_proj",
-      "model.layers.0.mlp.gate_proj",
-      "model.layers.30.mlp.gate_proj",
-      "model.layers.6.mlp.gate_proj",
-      "model.layers.25.mlp.gate_proj",
-      "model.layers.33.mlp.gate_proj",
-      "model.layers.8.mlp.gate_proj",
-      "model.layers.36.mlp.gate_proj",
-      "model.layers.24.mlp.gate_proj",
-      "model.layers.11.mlp.gate_proj",
-      "model.layers.1.mlp.gate_proj",
-      "model.layers.13.mlp.gate_proj",
-      "model.layers.23.mlp.gate_proj",
-      "model.layers.15.mlp.gate_proj",
-      "model.layers.17.mlp.gate_proj",
-      "model.layers.5.mlp.gate_proj",
-      "model.layers.26.mlp.gate_proj",
-      "model.layers.29.mlp.gate_proj",
-      "model.layers.37.mlp.gate_proj",
-      "model.layers.12.mlp.gate_proj",
-      "model.layers.4.mlp.gate_proj",
-      "model.layers.39.mlp.gate_proj",
-      "model.layers.9.mlp.gate_proj",
-      "model.layers.19.mlp.gate_proj",
-      "lm_head",
-      "model.layers.34.mlp.gate_proj",
-      "model.layers.3.mlp.gate_proj",
-      "model.layers.10.mlp.gate_proj",
-      "model.layers.2.mlp.gate_proj",
-      "model.layers.20.mlp.gate_proj",
-      "model.layers.32.mlp.gate_proj",
-      "model.layers.7.mlp.gate_proj",
-      "model.layers.16.mlp.gate_proj",
-      "model.layers.22.mlp.gate_proj",
-      "model.layers.14.mlp.gate_proj",
-      "model.layers.35.mlp.gate_proj",
-      "model.layers.27.mlp.gate_proj",
-      "model.layers.18.mlp.gate_proj"
+      "lm_head"
     ],
     "quant_method": "fp8"
   },
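After this change, `_name_or_path` points at `MarinaraSpaghetti/NemoMix-Unleashed-12B`, and the FP8 `ignored_layers` list shrinks from the `mlp.gate_proj` projection of all 40 layers plus `lm_head` down to `lm_head` alone, so the gate projections are no longer excluded from quantization. A minimal sketch for inspecting the resulting config; the repo id is an assumption, substitute the repository this commit actually targets:

```python
# Sketch only: the repo id is an assumption, not taken from this diff.
import json
from huggingface_hub import hf_hub_download

config_path = hf_hub_download(
    repo_id="taozi555/hiwaifu-12b-v1.1-fp8",  # assumed repo id
    filename="config.json",
)
with open(config_path) as f:
    config = json.load(f)

quant = config["quantization_config"]
print(quant["quant_method"])       # "fp8"
print(quant["activation_scheme"])  # "static"
print(quant["ignored_layers"])     # ["lm_head"] after this commit
```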
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad233591fe7a96e2897a35382264012968d2a82fb0289f8aca03a9bfb1a30945
+size 4939092856
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7673efdb5b7fc5102571c06489f4fd4c83d28fc8772e5fc4c5dcce1cc50697f6
+size 4981149584
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51945de8fda901de2425d4a3c41419837c80fc12d4c36956a66c3475579dcd0e
+size 3670231584
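The three shard entries above are Git LFS pointer files: each records the LFS spec version, the SHA-256 of the real payload, and its size in bytes. A small sketch for checking a downloaded shard against the first pointer, assuming the file sits next to the script; the oid and size are copied from the pointer shown above:

```python
# Sketch only: assumes model-00001-of-00003.safetensors has been downloaded locally.
import hashlib

OID = "ad233591fe7a96e2897a35382264012968d2a82fb0289f8aca03a9bfb1a30945"
SIZE = 4939092856

def sha256_and_size(path: str) -> tuple[str, int]:
    h = hashlib.sha256()
    total = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
            total += len(chunk)
    return h.hexdigest(), total

digest, size = sha256_and_size("model-00001-of-00003.safetensors")
print(digest == OID and size == SIZE)  # True if the shard matches the LFS pointer
```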
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
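The index follows the standard sharded-safetensors layout: a `metadata.total_size` field plus a `weight_map` from tensor names to the shard files added above. A sketch for resolving which shard holds a given tensor; the tensor name is purely illustrative since the diff itself is not rendered here:

```python
# Sketch only: the tensor name is illustrative; the real keys live in the
# (unrendered) index file.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])            # total bytes across all shards
print(index["weight_map"].get("lm_head.weight"))  # e.g. "model-00003-of-00003.safetensors"
```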