Commit 7bc059d · param changed
ashishanand committed
1 Parent(s): 4a9200b
app.py CHANGED
@@ -231,8 +231,8 @@ def process_query(query):
     # Use global variables
     global available_car_models, collection
 
-    print("Input Query:",query)
-    print(type(query))
+    # print("Input Query:",query)
+    # print(type(query))
 
     car_model = is_car_model_available(query, available_car_models)
     if not car_model:
@@ -284,7 +284,7 @@ def initialize():
 
     # Check for CUDA availability
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    print(f"Using device: {device}")
+    print(f"Using device: {device}. new commit")
 
     # tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") # For token counting
 
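For context on the second hunk, below is a minimal, self-contained sketch of the GPU/CPU fallback that initialize() relies on. The pick_device helper and the __main__ usage are illustrative assumptions; only the torch.cuda.is_available() check and the "Using device" print come from the diff, where the expression is written inline.

import torch

def pick_device() -> str:
    # Prefer the GPU when CUDA is available, otherwise fall back to the CPU.
    # (pick_device is a hypothetical helper; app.py inlines this expression.)
    return 'cuda' if torch.cuda.is_available() else 'cpu'

if __name__ == "__main__":
    device = pick_device()
    print(f"Using device: {device}")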