reciprocate committed · Commit 726d19f · 1 Parent(s): 5e63f8e
Create README.md
README.md ADDED
@@ -0,0 +1,28 @@
---
language:
- en
---

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_path = "reciprocate/rm-beluga-7b-hh-full"

model = AutoModelForSequenceClassification.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
# for SequenceClassification models the padding side should be "right";
# truncating from the left keeps the end of long conversations
tokenizer.padding_side = "right"
tokenizer.truncation_side = "left"
reward_fn = pipeline("text-classification", model=model, tokenizer=tokenizer, truncation=True, batch_size=32, max_length=2048, device=0)
output = reward_fn(["### User: Complete this sentence: I'm 99 percent sure it was someone being an...\n\n### Assistant:\n I'm 99 percent sure it was someone being an idiot.</s>"])
scores = [x["score"] for x in output]
scores
```
```
>>> [0.02713249810039997]
```

```python
import numpy as np

# optionally normalize with the mean and std computed on the training data
scores = (np.array(scores) - 0.6816716283619826) / 0.3198637874065531
```
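
As an illustrative sketch (not part of the original card): since the pipeline returns one scalar score per input string, the same `reward_fn` can be used to rank several candidate completions for a prompt. The prompt and completion strings below are made up for demonstration only.

```python
# hypothetical example: rank candidate completions with the reward_fn defined above
candidates = [
    "### User: Name one primary color.\n\n### Assistant:\n Blue is a primary color.</s>",
    "### User: Name one primary color.\n\n### Assistant:\n I refuse to answer.</s>",
]
candidate_scores = [x["score"] for x in reward_fn(candidates)]
# keep the completion the reward model scores highest
best = candidates[candidate_scores.index(max(candidate_scores))]
```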