cicdatopea
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -5,7 +5,7 @@ datasets:
|
|
5 |
|
6 |
## Model Details
|
7 |
|
8 |
-
This model is an int4 model with group_size 128 and symmetric quantization of [meta-llama/Llama-3.2-90B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision-Instruct). Load the model with revision="
|
9 |
|
10 |
## How To Use
|
11 |
|
@@ -27,7 +27,7 @@ model = MllamaForConditionalGeneration.from_pretrained(
|
|
27 |
quantized_model_path,
|
28 |
torch_dtype="auto",
|
29 |
device_map="auto",
|
30 |
-
##revision="
|
31 |
)
|
32 |
processor = AutoProcessor.from_pretrained(quantized_model_path)
|
33 |
image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
|
|
|
5 |
|
6 |
## Model Details
|
7 |
|
8 |
+
This model is an int4 model with group_size 128 and symmetric quantization of [meta-llama/Llama-3.2-90B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision-Instruct). Load the model with revision="64f5493" to use AutoGPTQ format.
|
9 |
|
10 |
## How To Use
|
11 |
|
|
|
27 |
quantized_model_path,
|
28 |
torch_dtype="auto",
|
29 |
device_map="auto",
|
30 |
+
##revision="64f5493" ##AutoGPTQ format
|
31 |
)
|
32 |
processor = AutoProcessor.from_pretrained(quantized_model_path)
|
33 |
image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
|