# GPT-Kalki / app.py
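# Streamlit demo for the tsaditya/GPT-Kalki model: takes a Tamil prompt and
# samples a continuation from a GPT-2-style causal language model.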
import streamlit as st
from transformers import AutoTokenizer, AutoModelWithLMHead
def getit(prompt):
    # Encode the prompt, prefixed with the model's start-of-text marker.
    generated = tokenizer(f'<|startoftext|> {prompt}', return_tensors="pt").input_ids.cpu()
    # Sample a single continuation with top-k / nucleus sampling.
    sample_outputs = model.generate(
        generated,
        do_sample=True,
        max_length=512,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        no_repeat_ngram_size=3,
        temperature=0.7,
    )
    predicted_text = tokenizer.decode(sample_outputs[0], skip_special_tokens=True)
    # Return only the text past the prompt length, dropping the echoed prompt.
    return predicted_text[len(prompt):]
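# Load the fine-tuned model and tokenizer from the Hugging Face Hub.
# Note: AutoModelWithLMHead is deprecated in recent transformers releases;
# AutoModelForCausalLM is the drop-in replacement for causal LMs like this one.
# Wrapping these loaders in an @st.cache_resource function would avoid
# re-downloading and reloading the weights on every Streamlit rerun.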
model_name = 'tsaditya/GPT-Kalki'
model = AutoModelWithLMHead.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
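# Minimal UI: a text box pre-filled with a Tamil prompt
# ("Aditha Karikalar immediately agreed to go to Thanjavur.") and a button
# that runs generation and prints the continuation.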
inp = st.text_input(label="Enter prompt",
                    value="ஆதித்த கரிகாலர் தஞ்சைக்குச் செல்ல உடனடியாக ஒப்புக்கொண்டார்.")
if st.button("Generate!"):
    out = getit(inp)
    st.write(out)
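# Show a demo clip below the generator; assumes myvideo.mp4 ships alongside app.py.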
with open('myvideo.mp4', 'rb') as video_file:
    video_bytes = video_file.read()
st.video(video_bytes)