import os
from dotenv import load_dotenv
from groq import Groq
import streamlit as st
# Load environment variables
load_dotenv()
# Initialize the Groq client with API key
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
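# Typical local setup: put GROQ_API_KEY=<your key> in a .env file next to app.py
# (load_dotenv() reads it), install dependencies with
# `pip install streamlit groq python-dotenv`, then launch with `streamlit run app.py`.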
# Function to generate MCQs from the user text
def generate_mcqs_from_text(user_text):
    prompt = f"""
    You are an educator with 35 years of experience who specializes in crafting challenging and insightful MCQs.
    Based on the following text, generate between 30 and 50 multiple-choice questions (MCQs).
    Each question should:
    1. Test critical thinking and understanding of the content.
    2. Include four options (A, B, C, D).
    3. Follow this format:
    Question: [Your question here]
    A. [Option 1]
    B. [Option 2]
    C. [Option 3]
    D. [Option 4]
    Additionally, provide the correct answers in a separate section titled "Correct Answers" in the format:
    Question 1: [Correct Option]
    Question 2: [Correct Option], etc.
    Text: {user_text}
    """
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="gemma2-9b-it",  # Replace with a valid Groq model
    )
    return chat_completion.choices[0].message.content

# Function to evaluate user's answers
def evaluate_answers(mcqs, correct_answers, user_answers):
    prompt = f"""
    You are a teacher with 35 years of experience evaluating a student's answers to the following MCQs.
    For each question, provide detailed feedback on whether the student's answer is correct or incorrect,
    and explain the reasoning.
    MCQs: {mcqs}
    Correct Answers: {correct_answers}
    User Answers: {user_answers}
    """
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="gemma2-9b-it",  # Replace with a valid Groq model
    )
    return chat_completion.choices[0].message.content

# Streamlit app
st.title("Experienced Teacher's MCQ Generator and Evaluator")
st.sidebar.title("MCQ Generator & Evaluator")
# User input for text data
user_text = st.sidebar.text_area("Enter the text for generating MCQs:")
if "mcqs" not in st.session_state:
st.session_state["mcqs"] = None
if "correct_answers" not in st.session_state:
st.session_state["correct_answers"] = None
# Generate MCQs
if st.sidebar.button("Generate MCQs"):
    if user_text.strip():
        with st.spinner("Generating MCQs..."):
            mcqs_and_answers = generate_mcqs_from_text(user_text)

            # Debugging the response
            st.write("Raw Response from API:")
            st.text(mcqs_and_answers)

            # Try to split into MCQs and answers
            try:
                mcqs, correct_answers = mcqs_and_answers.split("Correct Answers:")
                st.session_state["mcqs"] = mcqs.strip()
                st.session_state["correct_answers"] = correct_answers.strip()
                st.success("MCQs generated successfully!")
            except ValueError:
                st.error(
                    "The API response is not in the expected format. Please check the output or adjust the prompt."
                )
                st.text(mcqs_and_answers)  # Show full response for debugging
    else:
        st.error("Please enter text for generating MCQs.")
# Display generated MCQs
if st.session_state["mcqs"]:
st.subheader("Generated MCQs")
st.text(st.session_state["mcqs"])
# User input for their answers
user_answers = st.text_area(
"Enter your answers for the MCQs (e.g., A, B, C, D for each question):"
)
# Evaluate answers
if st.button("Evaluate Answers"):
if st.session_state["mcqs"] and st.session_state["correct_answers"]:
with st.spinner("Evaluating answers..."):
evaluation_result = evaluate_answers(
st.session_state["mcqs"], st.session_state["correct_answers"], user_answers
)
st.subheader("Evaluation Result")
st.text(evaluation_result)
else:
st.error("Please generate MCQs first!")