gururise committed on
Commit
325fdb5
·
1 Parent(s): 137d353

switch to quantized 14b model

Browse files
Files changed (1) hide show
  1. config.py +12 -12
config.py CHANGED
@@ -69,23 +69,23 @@ quantized = {
69
  # [Vram usage: 15.0GB]
70
  # [File size: 14.4GB]
71
 
72
- # config = {
73
- # "path": "https://huggingface.co/Hazzzardous/RWKV-8Bit/resolve/main/RWKV-4-Pile-14B-20230204-7324.pqth"
74
- # }
75
 
76
- # title = "RWKV-4 (14b Quantized)"
77
 
78
  # RWKV 14B (latest as of feb 9)
79
  # Approximate
80
  # [Vram usage: 27.0GB]
81
  # [File size: 28.4GB]
82
 
83
- config = {
84
- "path": "https://huggingface.co/BlinkDL/rwkv-4-pile-14b/resolve/main/RWKV-4-Pile-14B-20230204-7324.pth",
85
- "mode": TORCH,
86
- "runtimedtype": torch.bfloat16,
87
- "useGPU": torch.cuda.is_available(),
88
- "dtype": torch.bfloat16
89
- }
90
 
91
- title = "RWKV-4 (14b Feb 4 Snapshot)"
 
69
  # [Vram usage: 15.0GB]
70
  # [File size: 14.4GB]
71
 
72
+ config = {
73
+ "path": "https://huggingface.co/Hazzzardous/RWKV-8Bit/resolve/main/RWKV-4-Pile-14B-20230204-7324.pqth"
74
+ }
75
 
76
+ title = "RWKV-4 (14b Quantized)"
77
 
78
  # RWKV 14B (latest as of feb 9)
79
  # Approximate
80
  # [Vram usage: 27.0GB]
81
  # [File size: 28.4GB]
82
 
83
+ # config = {
84
+ # "path": "https://huggingface.co/BlinkDL/rwkv-4-pile-14b/resolve/main/RWKV-4-Pile-14B-20230204-7324.pth",
85
+ # "mode": TORCH,
86
+ # "runtimedtype": torch.bfloat16,
87
+ # "useGPU": torch.cuda.is_available(),
88
+ # "dtype": torch.bfloat16
89
+ # }
90
 
91
+ # title = "RWKV-4 (14b Feb 4 Snapshot)"