# AIBootCampQA / app.py
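"""Gradio playground for experimenting with Hugging Face pipelines.

The Text tab lets you pick a task (text generation, fill-mask, summarization,
named entity recognition, or question answering), optionally specify a model,
and run a prompt through transformers.pipeline. The Image and Audio tabs and
the "Fine Tune" option are placeholders that are not wired up yet.
"""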
import gradio as gr
from transformers import pipeline
from fine_tune_hf import FinetuneHFModel

# Instantiated for the "Fine Tune" option; not yet used by the UI below.
fine_tune_model = FinetuneHFModel()
playground = gr.Blocks()


def review_training_choices(choice):
    """Show the pipeline controls row only when 'Use Pipeline' is selected."""
    print(choice)
    if choice == "Use Pipeline":
        return gr.Row(visible=True)
    else:
        return gr.Row(visible=False)


def show_optional_fields(task):
    """Show the context text area only for question-answering tasks."""
    if task == "question-answering":
        return gr.TextArea(visible=True)
    return gr.TextArea(visible=False)


def test_pipeline(task, model=None, prompt=None, context=None):
    """Run the selected task through a transformers pipeline and return the text to display."""
    if model:
        test = pipeline(task, model=model)
    elif task == "ner":
        # Group sub-word tokens into whole entities for readable output.
        test = pipeline(task, grouped_entities=True)
    else:
        test = pipeline(task)
    if task == "question-answering":
        if not context:
            return "Context is required"
        result = test(question=prompt, context=context)
    else:
        result = test(prompt)
    match task:
        case "text-generation":
            return gr.TextArea(result[0]["generated_text"])
        case "fill-mask":
            return gr.TextArea(result[0]["sequence"])
        case "summarization":
            return gr.TextArea(result[0]["summary_text"])
        case "ner":
            ner_result = "\n".join(
                f"{k}={v}"
                for item in result
                for k, v in item.items()
                if k not in ["start", "end", "index"]
            )
            return gr.TextArea(ner_result.rstrip("\n"))
        case "question-answering":
            # The QA pipeline returns a dict; show only the answer text.
            return gr.TextArea(result["answer"])
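

# Build the UI: a Text tab wired to test_pipeline, plus placeholder Image and Audio tabs.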
with playground:
    gr.Markdown("Try your ideas here. Select from the Text, Image, or Audio tabs.")
    with gr.Tabs():
        with gr.TabItem("Text"):
            with gr.Row():
                with gr.Column(scale=4):
                    radio = gr.Radio(
                        ["Use Pipeline", "Fine Tune"],
                        label="Select Use Pipeline to try out HF models, or Fine Tune to train on your own datasets",
                        value="Use Pipeline",
                        interactive=True,
                    )
                with gr.Column(scale=1):
                    test_pipeline_button = gr.Button(
                        value="Test", variant="primary", size="sm")
            with gr.Row(visible=True) as use_pipeline:
                with gr.Column():
                    task_dropdown = gr.Dropdown(
                        [
                            ("Text Generation", "text-generation"),
                            ("Fill Mask", "fill-mask"),
                            ("Summarization", "summarization"),
                            ("Named Entity Recognition", "ner"),
                            ("Question Answering", "question-answering"),
                        ],
                        label="task",
                    )
                    model_dropdown = gr.Dropdown(
                        [],
                        label="model",
                        allow_custom_value=True,
                        interactive=True,
                    )
                    prompt_textarea = gr.TextArea(
                        label="prompt",
                        placeholder="Enter your prompt here",
                        text_align="left",
                    )
                    context_for_question_answer = gr.TextArea(
                        label="Context",
                        placeholder="Enter context for your question here",
                        visible=False,
                        interactive=True,
                    )
                    # Reveal the context box only for question answering.
                    task_dropdown.change(
                        show_optional_fields,
                        inputs=[task_dropdown],
                        outputs=[context_for_question_answer],
                    )
                with gr.Column():
                    text = gr.TextArea(label="Generated Text")
            # Toggle the pipeline controls when switching between modes.
            radio.change(review_training_choices, inputs=radio, outputs=use_pipeline)
            test_pipeline_button.click(
                test_pipeline,
                inputs=[task_dropdown, model_dropdown, prompt_textarea, context_for_question_answer],
                outputs=text,
            )
with gr.TabItem("Image"):
with gr.Row():
with gr.Column(scale=3):
radio = gr.Radio(
["Use Pipeline", "Fine Tune"],
label="Select Use Pipeline to try out HF models or Fine Tune to test it on your own datasets",
value="Use Pipeline",
interactive=True
)
with gr.Column(scale=1):
test_pipeline_button = gr.Button(
value="Test", variant="primary", size="sm")
with gr.TabItem("Audio"):
with gr.Row():
with gr.Column(scale=3):
radio = gr.Radio(
["Use Pipeline", "Fine Tune"],
label="Select Use Pipeline to try out HF models or Fine Tune to test it on your own datasets",
value="Use Pipeline",
interactive=True
)
with gr.Column(scale=1):
test_pipeline_button = gr.Button(
value="Test", variant="primary", size="sm")
playground.launch(share=True)