fbaldassarri
committed on
Update README.md
README.md CHANGED
@@ -70,8 +70,8 @@ python -m pip install git+https://github.com/intel/auto-round.git
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 from auto_round import AutoRound
-bits, group_size, sym = 4, 128, False
-autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym)
+bits, group_size, sym, device, amp = 4, 128, False, 'cpu', False
+autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym, device=device, amp=amp)
 autoround.quantize()
 output_dir = "./AutoRound/EleutherAI_pythia-410m-deduped-autoawq-int4-gs128-asym"
 autoround.save_quantized(output_dir, format='auto_awq', inplace=True)
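For context, here is a minimal end-to-end sketch of the updated recipe from the README excerpt above. The `model_name` value and the `transformers` imports are assumptions not shown in the diff (the model name is inferred from the output directory); the AutoRound calls themselves are taken verbatim from the changed lines.

```python
# Minimal sketch of the updated recipe: CPU-only, AMP disabled.
# model_name is an assumption inferred from the output directory in the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRound

model_name = "EleutherAI/pythia-410m-deduped"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# 4-bit, group size 128, asymmetric quantization, run on CPU without mixed precision
bits, group_size, sym, device, amp = 4, 128, False, 'cpu', False
autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512,
                      batch_size=4, bits=bits, group_size=group_size, sym=sym,
                      device=device, amp=amp)
autoround.quantize()

# Export in AutoAWQ format, matching the directory name used in the README
output_dir = "./AutoRound/EleutherAI_pythia-410m-deduped-autoawq-int4-gs128-asym"
autoround.save_quantized(output_dir, format='auto_awq', inplace=True)
```

The change in this commit only adds `device='cpu'` and `amp=False` so the calibration runs on CPU with mixed precision turned off; the quantization settings (int4, group size 128, asymmetric) are unchanged.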