zeeshanali01 commited on
Commit
1b92adf
·
verified ·
1 Parent(s): 512a326

Upload 7 files

Browse files
Files changed (8) hide show
  1. .env +1 -0
  2. .gitattributes +1 -0
  3. Dockerfile +20 -0
  4. IMDB_Dataset.csv +3 -0
  5. main.py +73 -0
  6. model.py +36 -0
  7. movie_review_classifier.joblib +3 -0
  8. requirements.txt +8 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ GOOGLE_API_KEY=REDACTED  # SECURITY: a live Google API key was committed here and is now public — revoke/rotate it immediately; never commit secrets, add .env to .gitignore
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ IMDB_Dataset.csv filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Python runtime as a parent image
FROM python:3.8-slim

# Set the working directory in the container
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Expose the port uvicorn actually binds in main.py (was 80, which did not
# match the server's port 8000, so the documented port was unreachable)
EXPOSE 8000

# Define environment variable (key=value form; the legacy space-separated
# "ENV NAME World" syntax is deprecated)
ENV NAME=World

# Run main.py when the container launches
CMD ["python", "main.py"]
IMDB_Dataset.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfc447764f82be365fa9c2beef4e8df89d3919e3da95f5088004797d79695aa2
3
+ size 66212309
main.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from dotenv import load_dotenv
import os
import google.generativeai as genai
import joblib

# Load environment variables from .env (expects GOOGLE_API_KEY).
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Load the pre-trained sentiment pipeline once at import time so the
# service fails fast if the artifact is missing or corrupt.
try:
    model = joblib.load('./movie_review_classifier.joblib')
except Exception as e:
    # RuntimeError, not ImportError: the failure is a missing/unreadable
    # model artifact, not a Python module import problem. Chain the cause
    # so the original traceback is preserved.
    raise RuntimeError(f"Failed to load model: {e}") from e

app = FastAPI()
19
+
20
# ---- Request schemas ----

class QueryRequest(BaseModel):
    """Payload for the /rag endpoint: a free-form user question."""
    question: str


class Review(BaseModel):
    """Payload for the /classification endpoint: raw movie-review text."""
    text: str
26
+
27
# Initialize the Gemini chat model once at import time. NOTE(review): `chat`
# carries conversation history and is shared across ALL requests/users —
# confirm a single global conversation is intended.
gemini_model = genai.GenerativeModel("gemini-pro")
chat = gemini_model.start_chat(history=[])

# System prompt prepended to every /rag question (runtime string — do not edit
# casually; it shapes the model's behavior).
mental_health_prompt = """
You are an expert in providing mental health support. When a user describes their mental health issues,
you should provide relevant articles or blog posts to assist them.
"""
35
+
36
def get_gemini_response(question, prompt):
    """Send *prompt* + *question* to the shared Gemini chat session.

    The reply is requested as a stream; each streamed chunk's text is
    collected and the chunks are returned as a list in arrival order.
    """
    stream = chat.send_message(f"{prompt} {question}", stream=True)
    chunks = []
    for part in stream:
        chunks.append(part.text)
    return chunks
40
+
41
def get_articles(query):
    """Return a curated list of mental-health articles.

    Currently a static placeholder: *query* is accepted for interface
    stability but is not yet used for retrieval.
    """
    anxiety_article = {
        "title": "Understanding Anxiety",
        "url": "https://newsinhealth.nih.gov/2016/03/understanding-anxiety-disorders",
        "summary": "A comprehensive guide on anxiety disorders.",
    }
    depression_article = {
        "title": "Coping with Depression",
        "url": "https://www.helpguide.org/articles/depression/coping-with-depression.htm",
        "summary": "Effective strategies for dealing with depression.",
    }
    return [anxiety_article, depression_article]
47
+
48
# Mental health support endpoint
@app.post("/rag")
async def mental_health_support(request: QueryRequest):
    """RAG-style endpoint: Gemini answer chunks plus related articles."""
    try:
        return {
            "responses": get_gemini_response(request.question, mental_health_prompt),
            "articles": get_articles(request.question),
        }
    except Exception as e:
        # Surface any failure (e.g. Gemini API errors) as an HTTP 500.
        raise HTTPException(status_code=500, detail=str(e))
58
+
59
# Classification endpoint
@app.post("/classification")
async def classify_review(review: Review):
    """Predict the sentiment label of a single movie review."""
    try:
        # The pipeline expects an iterable of documents; take the first
        # (and only) prediction.
        predicted = model.predict([review.text])[0]
        return {"predicted_sentiment": predicted}
    except Exception as e:
        # Surface model failures as an HTTP 500 with the error detail.
        raise HTTPException(status_code=500, detail=str(e))
67
+
68
# Entry point: launch the ASGI server when executed directly.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
72
+
73
+
model.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from sklearn.model_selection import train_test_split
3
+ from sklearn.feature_extraction.text import CountVectorizer
4
+ from sklearn.naive_bayes import MultinomialNB
5
+ from sklearn.pipeline import make_pipeline
6
+ import joblib
7
+
8
def load_data():
    """Load the IMDB reviews CSV and normalise review text to lowercase.

    Returns a DataFrame with (at least) 'review' and 'sentiment' columns.
    """
    data = pd.read_csv('./IMDB_Dataset.csv')
    # Vectorised lowercase: equivalent to the per-row apply(lambda) but runs
    # in C and tolerates missing values (NaN stays NaN instead of raising
    # AttributeError on float).
    data['review'] = data['review'].str.lower()
    return data
13
+
14
def train_model(data):
    """Fit a bag-of-words Naive Bayes classifier on 80% of *data*.

    The deterministic (random_state=42) 20% hold-out split is discarded
    here; no evaluation is performed in this function.
    """
    X_train, _, y_train, _ = train_test_split(
        data['review'],
        data['sentiment'],
        test_size=0.2,
        random_state=42,
    )

    # CountVectorizer tokenises/counts terms; MultinomialNB classifies them.
    pipeline = make_pipeline(CountVectorizer(), MultinomialNB())
    pipeline.fit(X_train, y_train)
    return pipeline
24
+
25
def save_model(model):
    """Persist the trained pipeline to disk as a joblib artifact."""
    artifact_path = './movie_review_classifier.joblib'
    joblib.dump(model, artifact_path)
28
+
29
def main():
    """Train the sentiment classifier end-to-end and persist it to disk."""
    reviews = load_data()
    classifier = train_model(reviews)
    save_model(classifier)
    print("Model trained and saved successfully.")
34
+
35
# Train and persist the classifier when run as a script.
if __name__ == "__main__":
    main()
movie_review_classifier.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adc6b14f6e523184f23ad8650bf4bb6461c82d8c8c528f582f086754928d18b4
3
+ size 4291770
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ numpy
2
+ scikit-learn
3
+ fastapi
4
+ joblib
5
+ python-dotenv
6
+ uvicorn
7
+ pandas
8
+ google-generativeai