flicc-agent / app.py
"""
Module for detecting fallacies in text.
"""
import gradio as gr
from structured.core import HamburgerStyle
from context.palm import rebuttal_generator
from single.GPT4 import rebuttal_gen
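# HamburgerStyle provides the structured fact/myth/fallacy/fact rebuttal pipeline (Mixtral-backed, per the description below).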
rebuttal = HamburgerStyle()
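# Tab 1: single comprehensive prompt sent to GPT-4, framed with a climate change analyst expert persona.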
gpt4 = gr.Interface(
    fn=rebuttal_gen,
    inputs=[
        gr.Textbox(
            label="input myth", lines=4, placeholder="climate change misinformation"
        ),
        gr.Textbox(
            label="openai_api_key",
            lines=1,
            placeholder="your OpenAI API key, defaults to None",
        ),
        gr.Textbox(
            label="openai_organization",
            lines=1,
            placeholder="your OpenAI organization, defaults to None",
        ),
    ],
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="Single, comprehensive prompt that assigns GPT-4 the role of a climate change analyst as an expert persona to debunk misinformation",
)
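# Tab 2: single prompt with dynamic context relevant to the input myth, answered by PaLM 2.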
palm = gr.Interface(
    fn=rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="Single prompt with dynamic context relevant to the input myth, uses the PaLM 2 LLM to debunk misinformation",
)
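# Tab 3: four separate prompts, one per component of the output debunking, answered by Mixtral.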
mix = gr.Interface(
    fn=rebuttal.rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="Four separate prompts, one per component of the output debunking, uses the Mixtral LLM to debunk misinformation",
)
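# Combine the three rebuttal generators into one tabbed Gradio app.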
demo = gr.TabbedInterface(
    [gpt4, palm, mix],
    [
        "Single prompt, no context",
        "Single prompt, with context",
        "Structured prompt, with context",
    ],
    title="Generative Debunking of Climate Misinformation",
)
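# Launch the app with request queuing enabled when run as a script.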
if __name__ == "__main__":
    demo.queue().launch()