Update README.md
README.md CHANGED
@@ -9,40 +9,39 @@ base_model: bfuzzy1/acheron-m
 widget:
 - messages:
   - role: user
-    content: What is
+    content: What is 2 + 2 - 3?
 license: other
 datasets:
 - ai2-adapt-dev/gsm8k_math_ifeval_ground_truth_mixed
 ---
 
-# Model Trained Using AutoTrain
-
-This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).
-
 # Usage
 
 ```python
 
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-model_path = "
+model_path = "bfuzzy1/acheron-m1a-llama"
 
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
     device_map="auto",
-    torch_dtype='auto'
-)
+    torch_dtype='auto',
+    trust_remote_code=True
+)
 
-# Prompt content: "hi"
 messages = [
-    {"role": "user", "content": "
+    {"role": "user", "content": "What's 2 + 2 -3?"}
 ]
 
 input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
-output_ids = model.generate(
+output_ids = model.generate(
+    input_ids.to('mps' if torch.backends.mps.is_available() else 'cpu'),
+    max_new_tokens=100
+)
 response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
 
-# Model response: "Hello! How can I assist you today?"
 print(response)
 ```
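For reference, the committed snippet assembles into the self-contained script below. This is a minimal sketch rather than part of the commit: it keeps the commit's checkpoint name, loading arguments, and `max_new_tokens=100`, but sends the inputs to `model.device` instead of hard-coding an `mps`/`cpu` fallback, since `device_map="auto"` may also place the model on CUDA, where the committed fallback would leave the inputs on the CPU; the prompt spacing is also normalized.

```python
# Minimal sketch of the post-commit usage flow. Deriving the input
# device from model.device is an editorial adjustment, not part of
# the committed README, which hard-codes an mps/cpu fallback.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "bfuzzy1/acheron-m1a-llama"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",      # let accelerate pick cuda / mps / cpu
    torch_dtype="auto",
    trust_remote_code=True,
)

messages = [{"role": "user", "content": "What's 2 + 2 - 3?"}]

# Apply the model's chat template and append the generation prompt.
input_ids = tokenizer.apply_chat_template(
    conversation=messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)

# Move the prompt to whichever device the model was placed on.
output_ids = model.generate(input_ids.to(model.device), max_new_tokens=100)

# Decode only the newly generated tokens, skipping the prompt.
response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
print(response)
```

On Apple silicon this behaves like the committed snippet; the only design change is deriving the input device from the loaded model rather than probing `torch.backends.mps` directly.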