trust the remote code
Browse files
README.md
CHANGED
@@ -34,7 +34,7 @@ MAX_RESPONSE_TOKENS = 512

 model_name = "lunahr/thea-pro-2b-100r"

-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto", trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_name)

 prompt = "Which is greater 9.9 or 9.11 ??"