bvencel committed
Commit e4b60c9 · verified · 1 Parent(s): db46476

Replaced direct model load with pipeline

Files changed (1)
  1. app.py  +12 -11
app.py CHANGED
@@ -10,21 +10,22 @@ if text:
     out = pipe(text)
     st.json(out)
 '''
-
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import pipeline
 import torch
 
 model_name = "abacusai/Smaug-72B-v0.1"
 
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+pipe = pipeline("text-generation", model=model_name)
 
-# Encode some input text
-input_text = "Who are you?"
-input_ids = tokenizer.encode(input_text, return_tensors='pt')
+# Prompting the user for input text
+input_text = input("Enter your prompt: ")
 
-# Generate text using the model
-output = model.generate(input_ids, max_length=50)
+# Generating text based on the input
+# Adjust parameters like max_length according to your needs
+generated_texts = pipe(input_text, max_length=50, num_return_sequences=1)
 
-# Decode and print the output
-print("Decoded output: " + tokenizer.decode(output[0], skip_special_tokens=True))
+# Displaying the generated text
+# Assuming we only want the first generated sequence for simplicity
+print("Generated text:")
+for generated_text in generated_texts:
+    print(generated_text['generated_text'])
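
One note on the new version: import torch is kept but no longer used by any of the added lines. A minimal sketch, assuming the Space's hardware can actually host a 72B-parameter checkpoint, of how the retained import could be put to work when constructing the pipeline; torch_dtype and device_map are standard pipeline() keyword arguments, but the specific values below are assumptions and not part of this commit.

# Sketch only, not part of this commit: memory-conscious pipeline construction.
# device_map="auto" additionally assumes the accelerate package is installed.
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="abacusai/Smaug-72B-v0.1",
    torch_dtype=torch.bfloat16,  # half-precision weights instead of float32
    device_map="auto",           # spread layers across the available devices
)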
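
For context, the docstring fragment preserved at the top of the hunk (out = pipe(text) / st.json(out) under if text:) suggests a Streamlit front end. A rough sketch of how the new pipeline could back that UI, assuming this app.py runs as a Streamlit Space and that a st.text_input widget supplies the prompt; the widget choice is an assumption, not something this commit adds.

# Sketch only, not part of this commit: reviving the commented-out Streamlit flow.
import streamlit as st
from transformers import pipeline

@st.cache_resource  # build the pipeline once per Space, not on every rerun
def load_pipe():
    return pipeline("text-generation", model="abacusai/Smaug-72B-v0.1")

pipe = load_pipe()

text = st.text_input("Enter your prompt:")  # assumed input widget
if text:
    out = pipe(text, max_length=50, num_return_sequences=1)
    st.json(out)  # mirrors the snippet kept in the docstring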