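"""Streamlit demo that synthesizes bash commands from natural-language prompts with BashGPTNeo."""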
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
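# GPT-Neo checkpoint fine-tuned for English -> bash synthesis (prompts are framed as "<english> ... <bash>" below)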
MODEL_NAME = "reshinthadith/BashGPTNeo"

@st.cache_resource  # cache the loaded weights so the model is not reloaded on every Streamlit rerun
def load_model_and_tokenizer(model_name):
    """Load the model and tokenizer once so they stay in memory across reruns."""
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return tokenizer, model


tokenizer, model = load_model_and_tokenizer(MODEL_NAME)

# Cap on generated tokens; generate() should receive only one of max_length / max_new_tokens, so the
# redundant max_length limit is dropped.
MAX_NEW_TOKS = 128


def generate_text(prompt):
    """Wrap the prompt in the model's control tokens and sample a bash command."""
    prompt = "<english> " + prompt + " <bash>"
    inputs = tokenizer(prompt, truncation=True, return_tensors="pt")
    output_seq = model.generate(
        input_ids=inputs.input_ids,
        max_new_tokens=MAX_NEW_TOKS,
        do_sample=True,
        temperature=0.8,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # GPT-Neo has no pad token; reuse EOS to avoid the generate() warning
    )
    outputs = tokenizer.batch_decode(output_seq, skip_special_tokens=False)
    return outputs


st.set_page_config(
    page_title="Code Representation Learning",
    initial_sidebar_state="expanded",
)
st.sidebar.title("Code Representation Learning")
workflow = st.sidebar.selectbox("Select a task", ["Bash Synthesis"])

if workflow == "Bash Synthesis":
    st.title("Program Synthesis for Bash")
    prompt = st.text_input("Natural language prompt", "list all the files in the directory 'data'")
    button = st.button("Synthesize")
    if button:
        generated_text = generate_text(prompt)
        st.code(generated_text[0], language="bash")  # show the single generated sequence as bash