Commit: Hopefully now LLM download from hub should work

Files changed:
- funcs/embeddings.py  +0 -3
- funcs/representation_model.py  +16 -8
funcs/embeddings.py CHANGED
@@ -1,9 +1,6 @@
 import time
 import numpy as np
 from torch import cuda
-from sklearn.pipeline import make_pipeline
-from sklearn.decomposition import TruncatedSVD
-from sklearn.feature_extraction.text import TfidfVectorizer
 
 random_seed = 42
 
funcs/representation_model.py CHANGED
@@ -3,7 +3,7 @@ from bertopic.representation import LlamaCPP
 from llama_cpp import Llama
 from pydantic import BaseModel
 import torch.cuda
-from huggingface_hub import hf_hub_download
+from huggingface_hub import hf_hub_download, snapshot_download
 
 from bertopic.representation import KeyBERTInspired, MaximalMarginalRelevance, BaseRepresentation
 from funcs.prompts import capybara_prompt, capybara_start, open_hermes_prompt, open_hermes_start, stablelm_prompt, stablelm_start
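Note: the changed import line pulls in both `hf_hub_download`, which fetches a single file into the shared Hugging Face cache and returns its local path, and `snapshot_download`, which fetches a whole repo and returns the snapshot folder (only the former actually ends up used below). A minimal sketch of the difference, with a hypothetical repo id and filename that are not from this commit:

```python
from huggingface_hub import hf_hub_download, snapshot_download

# Single file: downloads (or reuses the cache) and returns the absolute
# path of the cached file.
file_path = hf_hub_download(repo_id="TheBloke/phi-2-GGUF",    # hypothetical repo
                            filename="phi-2.Q4_K_M.gguf")     # hypothetical file

# Whole repo, optionally filtered by pattern: returns the snapshot folder.
folder_path = snapshot_download(repo_id="TheBloke/phi-2-GGUF",
                                allow_patterns="config.json")
```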
@@ -119,17 +119,25 @@ def find_model_file(hf_model_name, hf_model_file, search_folder):
 
     # Specify your custom directory
     # Get HF_HOME environment variable or default to "~/.cache/huggingface/hub"
-    hf_home_value = search_folder
+    #hf_home_value = search_folder
 
     # Check if the directory exists, create it if it doesn't
-    if not os.path.exists(hf_home_value):
-        os.makedirs(hf_home_value)
+    #if not os.path.exists(hf_home_value):
+    #    os.makedirs(hf_home_value)
 
-
+
 
-    hf_hub_download(repo_id=hf_model_name, filename=hf_model_file, local_dir=hf_home_value)
+    found_file = hf_hub_download(repo_id=hf_model_name, filename=hf_model_file)#, local_dir=hf_home_value) # cache_dir
+
+    #path = snapshot_download(
+    #    repo_id=hf_model_name,
+    #    allow_patterns="config.json",
+    #    local_files_only=False
+    #)
+
+    print("Downloaded model to: ", found_file)
 
-    found_file = find_file(
+    #found_file = find_file(path, file_to_find)
     return found_file
 
 
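This hunk is the substantive fix: rather than creating a download folder by hand and searching it afterwards, `find_model_file` now lets `hf_hub_download` manage the standard cache (`HF_HOME`, defaulting to `~/.cache/huggingface/hub`) and returns the path it reports. A minimal sketch of the resulting flow, under the assumption that only the returned path is needed downstream (`fetch_model` is an illustrative name, not from this commit):

```python
from huggingface_hub import hf_hub_download

def fetch_model(repo_id: str, filename: str) -> str:
    # hf_hub_download caches under HF_HOME (default ~/.cache/huggingface/hub)
    # and returns the absolute local path; repeated calls hit the cache
    # instead of re-downloading, so no makedirs/find_file step is needed.
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    print("Downloaded model to:", local_path)
    return local_path
```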
@@ -158,7 +166,7 @@ def create_representation_model(representation_type, llm_config, hf_model_name,
 
     found_file = find_model_file(hf_model_name, hf_model_file, hf_home_value)
 
-    llm = Llama(model_path=found_file, stop=chosen_start_tag, n_gpu_layers=llm_config.n_gpu_layers, n_ctx=llm_config.n_ctx, rope_freq_scale=0.5) #**llm_config.model_dump())#
+    llm = Llama(model_path=found_file, stop=chosen_start_tag, n_gpu_layers=llm_config.n_gpu_layers, n_ctx=llm_config.n_ctx, rope_freq_scale=0.5, seed=seed) #**llm_config.model_dump())#
     #print(llm.n_gpu_layers)
     llm_model = LlamaCPP(llm, prompt=chosen_prompt)#, **gen_config.model_dump())
 
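The only change in this last hunk is threading `seed` through to the `Llama` constructor, which fixes the sampler's RNG so topic-representation generations are repeatable across runs. A minimal sketch, with a hypothetical model path and seed value:

```python
from llama_cpp import Llama

# llama-cpp-python accepts `seed` at construction time; with a fixed seed
# (and otherwise identical parameters) sampling becomes deterministic.
llm = Llama(model_path="model.gguf",  # hypothetical local GGUF path
            n_ctx=4096,
            n_gpu_layers=0,
            seed=42)                  # fixed seed for reproducible output
```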