Update README.md
Browse files
README.md
CHANGED
@@ -40,6 +40,31 @@ result = happy_tt.generate_text("grammar: Hihowareyoudoingtaday?.", args=args)
|
|
40 |
print(result.text) # This sentence has bad grammar and is compressed.
|
41 |
```
|
42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
# Model Details
|
44 |
|
45 |
## Model Description
|
|
|
40 |
print(result.text) # This sentence has bad grammar and is compressed.
|
41 |
```
|
42 |
|
43 |
+
or using vanilla transformers
|
44 |
+
|
45 |
+
```python
|
46 |
+
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
47 |
+
|
48 |
+
# Load the tokenizer and model
|
49 |
+
model_name = "willwade/t5-small-spoken-typo"
|
50 |
+
tokenizer = T5Tokenizer.from_pretrained(model_name)
|
51 |
+
model = T5ForConditionalGeneration.from_pretrained(model_name)
|
52 |
+
|
53 |
+
# Prepare the input text with the "grammar: " prefix
|
54 |
+
input_text = "grammar: Hihowareyoudoingtaday?."
|
55 |
+
input_ids = tokenizer.encode(input_text, return_tensors="pt")
|
56 |
+
|
57 |
+
# Generate text
|
58 |
+
# Adjust num_beams and min_length to your needs
|
59 |
+
output = model.generate(input_ids, num_beams=5, min_length=1, max_new_tokens=50, early_stopping=True)
|
60 |
+
|
61 |
+
# Decode the generated text
|
62 |
+
decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
|
63 |
+
|
64 |
+
print(decoded_output)
|
65 |
+
|
66 |
+
```
|
67 |
+
|
68 |
# Model Details
|
69 |
|
70 |
## Model Description
|