yintongl committed (verified)
Commit 11fd213 · 1 Parent(s): 0858111

Update README.md

Files changed (1)
  1. README.md +25 -21
README.md CHANGED
@@ -16,6 +16,31 @@ Inference of this model is compatible with AutoGPTQ's Kernel.
 
 
 
+ ### Reproduce the model
+
+ Here is the sample command to reproduce the model
+
+ ```bash
+ git clone https://github.com/intel/auto-round
+ cd auto-round/examples/language-modeling
+ pip install -r requirements.txt
+ python3 main.py \
+ --model_name EleutherAI/gpt-j-6b \
+ --device 0 \
+ --group_size 128 \
+ --bits 4 \
+ --iters 1000 \
+ --minmax_lr 2e-3 \
+ --deployment_device 'gpu' \
+ --disable_quanted_input \
+ --output_dir "./tmp_autoround" \
+
+ ```
+
+
+
+
 
 ### Evaluate the model
 
 
@@ -41,27 +66,6 @@ lm_eval --model hf --model_args pretrained="Intel/gpt-j-6b-int4-inc",autogptq=Tr
 | arc_challenge | 0.3396 | 0.3430 |
 
 
- ### Reproduce the model
-
- Here is the sample command to reproduce the model
-
- ```bash
- git clone https://github.com/intel/auto-round
- cd auto-round/examples/language-modeling
- pip install -r requirements.txt
- python3 main.py \
- --model_name EleutherAI/gpt-j-6b \
- --device 0 \
- --group_size 128 \
- --bits 4 \
- --iters 1000 \
- --minmax_lr 2e-3 \
- --deployment_device 'gpu' \
- --disable_quanted_input \
- --output_dir "./tmp_autoround" \
-
- ```
-
 
 
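The `lm_eval` command in the second hunk header above is cut off by the diff view. As a minimal sketch only, here is what a full evaluation call could look like, assuming lm-evaluation-harness with AutoGPTQ support; the completed `autogptq=True` argument and the task list, device, and batch size are illustrative assumptions rather than values taken from this commit.

```bash
# Hedged sketch (not part of this commit): evaluate the INT4 checkpoint with lm-eval.
# The task list, device, and batch size below are illustrative placeholders.
pip install auto-gptq lm-eval
lm_eval --model hf \
  --model_args pretrained="Intel/gpt-j-6b-int4-inc",autogptq=True \
  --device cuda:0 \
  --tasks lambada_openai,arc_challenge \
  --batch_size 16
```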
 
 
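The first hunk's context line notes that inference of this model is compatible with AutoGPTQ's kernel. Below is a minimal loading sketch, assuming the transformers + optimum + auto-gptq integration and a CUDA device; the package list and prompt are assumptions, not part of this commit.

```bash
# Hedged sketch (not part of this commit): load the GPTQ-format checkpoint and
# run a short generation. Assumes a CUDA GPU and recent transformers/optimum/auto-gptq.
pip install transformers accelerate optimum auto-gptq
python3 - <<'EOF'
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Intel/gpt-j-6b-int4-inc"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# With optimum and auto-gptq installed, the quantization config stored in the
# checkpoint should be detected so the AutoGPTQ kernels handle the int4 weights.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("There is a girl who likes adventure,", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
EOF
```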