Replaced direct model load with pipeline
app.py CHANGED
@@ -10,21 +10,22 @@ if text:
     out = pipe(text)
     st.json(out)
 '''
-
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import pipeline
 import torch
 
 model_name = "abacusai/Smaug-72B-v0.1"
 
-
-model = AutoModelForCausalLM.from_pretrained(model_name)
+pipe = pipeline("text-generation", model=model_name)
 
-#
-input_text = "
-input_ids = tokenizer.encode(input_text, return_tensors='pt')
+# Prompting the user for input text
+input_text = input("Enter your prompt: ")
 
-#
-
+# Generating text based on the input
+# Adjust parameters like max_length according to your needs
+generated_texts = pipe(input_text, max_length=50, num_return_sequences=1)
 
-#
-
+# Displaying the generated text
+# Assuming we only want the first generated sequence for simplicity
+print("Generated text:")
+for generated_text in generated_texts:
+    print(generated_text['generated_text'])
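For reference, here is the pipeline-based script this commit lands on, assembled as one runnable file. The torch_dtype and device_map arguments are assumptions added for illustration and are not part of the commit: Smaug-72B-v0.1 is a 72-billion-parameter model, so without reduced-precision weights and automatic device placement (which requires the accelerate package) it will not fit in memory on most machines.

# Minimal sketch of the pipeline approach this commit switches to.
# torch_dtype/device_map are additions for illustration, not in the diff:
# a 72B model will not load at the default float32 on typical hardware.
from transformers import pipeline
import torch

model_name = "abacusai/Smaug-72B-v0.1"

# device_map="auto" spreads layers across available devices (needs
# `accelerate`); float16 halves memory use versus float32 weights.
pipe = pipeline(
    "text-generation",
    model=model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

input_text = input("Enter your prompt: ")

# max_length counts prompt plus generated tokens;
# num_return_sequences=1 asks for a single completion.
generated_texts = pipe(input_text, max_length=50, num_return_sequences=1)

# The pipeline returns a list of dicts keyed by 'generated_text'.
print("Generated text:")
for generated_text in generated_texts:
    print(generated_text["generated_text"])

To test the flow on hardware that cannot hold this model at all, swapping model_name for a small checkpoint such as gpt2 works without the dtype and device_map arguments.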