Update README.md
README.md CHANGED

@@ -17,10 +17,10 @@ Quantized and unquantized embedding models in GGUF format for use with `llama.cpp`
 
 | Filename | Quantization | Size |
 |:-------- | ------------ | ---- |
-| [bge-base-en-v1.5-f32.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-f32.gguf) | F32 |
-| [bge-base-en-v1.5-f16.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-f16.gguf) | F16 |
-| [bge-base-en-v1.5-q8_0.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-q8_0.gguf) | Q8_0 |
-| [bge-base-en-v1.5-q4_k_m.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-q4_k_m.gguf) | Q4_K_M |
+| [bge-base-en-v1.5-f32.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-f32.gguf) | F32 | 417 MB |
+| [bge-base-en-v1.5-f16.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-f16.gguf) | F16 | 209 MB |
+| [bge-base-en-v1.5-q8_0.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-q8_0.gguf) | Q8_0 | 113 MB |
+| [bge-base-en-v1.5-q4_k_m.gguf](https://huggingface.co/CompendiumLabs/bge-base-en-v1.5-gguf/blob/main/bge-base-en-v1.5-q4_k_m.gguf) | Q4_K_M | 66 MB |
 
 </div>
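The hunk above documents GGUF embedding models intended to be loaded with `llama.cpp`. As a minimal sketch of how one of the listed files might be used, assuming the third-party `llama-cpp-python` bindings and a hypothetical local path (neither is part of this commit; exact pooling behavior depends on the bindings version):

```python
# Minimal sketch: assumes llama-cpp-python (pip install llama-cpp-python);
# the local model path below is hypothetical.
from llama_cpp import Llama

# Load one of the GGUF files from the table above in embedding mode.
llm = Llama(model_path="./bge-base-en-v1.5-q8_0.gguf", embedding=True)

# Compute an embedding for a single piece of text.
result = llm.create_embedding("GGUF embedding models for llama.cpp")
vector = result["data"][0]["embedding"]
print(len(vector))  # embedding dimension (768 for bge-base-en-v1.5)
```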