# LlamaReviews / app.py
import subprocess

import gradio as gr

model_file = "best_model_4800it_40M.bin"
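# The checkpoint name above suggests a ~40M-parameter llama2.c-style model saved
# after 4800 training iterations and trained to write product reviews; this is an
# assumption read from the filename, not documented in this file.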

# Build the optimized ./run inference binary (the "runfast" Makefile target)
# before the app starts serving requests.
try:
    subprocess.run(["make", "runfast"], check=True, capture_output=True, text=True)
    print("Model compilation successful.")
except subprocess.CalledProcessError as e:
    print("Error:", e)
    print(e.stderr)

def chatbot(prompt, temperature, topt, maxtoken):
    """Generate a review by shelling out to the compiled ./run binary."""
    # Flags: -t temperature, -p top-p cutoff, -n number of tokens to generate, -i input prompt.
    command = ["./run", model_file, "-t", str(temperature), "-p", str(topt), "-n", str(maxtoken), "-i", str(prompt)]
    try:
        result = subprocess.run(command, capture_output=True, text=True, check=True)
        response = result.stdout
    except subprocess.CalledProcessError:
        response = "Error occurred while processing the request."
    return response
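
# For reference, with the UI defaults below the call above is equivalent to running
#   ./run best_model_4800it_40M.bin -t 0.8 -p 0.9 -n 256 -i "great laptop for the price"
# from a shell, with stdout returned as the generated review.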

with gr.Blocks() as demo:
    gr.Markdown("HF Spaces for Product Review Writer")
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(placeholder="Type the title of the product review")
            with gr.Row():
                with gr.Column():
                    temperature_slider = gr.Slider(minimum=0.1, maximum=2.0, value=0.8, label="Temperature")
                    topt_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, label="Top-p")
                    maxtoken_slider = gr.Slider(minimum=64, maximum=1024, value=256, label="Max Tokens")
            out = gr.Textbox()
            btn = gr.Button("Run")
    gr.Examples(examples=[["best sci-fi book ever"], ["great laptop for the price"]], inputs=[inp])
    btn.click(fn=chatbot, inputs=[inp, temperature_slider, topt_slider, maxtoken_slider], outputs=out)
demo.launch()
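# Note: demo.launch() above with no arguments is what Hugging Face Spaces expects;
# when running locally, Gradio's standard share=True option can be passed to get a
# temporary public link (not used by this Space).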