# gradio final ver ----------------------------
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
import numpy as np
from tqdm import tqdm, tqdm_notebook
import pandas as pd
import ast
# Import the model and tokenizer from Hugging Face
from kobert_tokenizer import KoBERTTokenizer
from transformers import BertModel
from transformers import AdamW
from transformers.optimization import get_cosine_schedule_with_warmup
n_devices = torch.cuda.device_count()
print(n_devices)
for i in range(n_devices):
    print(torch.cuda.get_device_name(i))

if torch.cuda.is_available():
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    device = torch.device("cpu")
    print('No GPU available, using the CPU instead.')
# KoBERT classifier with a softmax output head
class BERTClassifier(nn.Module):
    def __init__(self,
                 bert,
                 hidden_size=768,
                 num_classes=6,
                 dr_rate=None,
                 params=None):
        super(BERTClassifier, self).__init__()
        self.bert = bert
        self.dr_rate = dr_rate
        self.softmax = nn.Softmax(dim=1)  # softmax over the class logits
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features=hidden_size, out_features=512),
            nn.Linear(in_features=512, out_features=num_classes),
        )
        # layer normalization on the pooled output
        self.layer_norm = nn.LayerNorm(768)
        # dropout (guard against dr_rate=None, which nn.Dropout rejects)
        self.dropout = nn.Dropout(p=dr_rate if dr_rate else 0.0)

    def gen_attention_mask(self, token_ids, valid_length):
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length):
            attention_mask[i][:v] = 1
        return attention_mask.float()

    def forward(self, token_ids, valid_length, segment_ids):
        attention_mask = self.gen_attention_mask(token_ids, valid_length)
        _, pooler = self.bert(
            input_ids=token_ids,
            token_type_ids=segment_ids.long(),
            attention_mask=attention_mask.float().to(token_ids.device),
        )
        # LayerNorm, optional dropout, then the classifier head
        pooler = self.layer_norm(pooler)
        if self.dr_rate:
            pooler = self.dropout(pooler)
        logits = self.classifier(pooler)      # class logits
        probabilities = self.softmax(logits)  # per-class probabilities
        return probabilities
model = torch.load('./model_weights_softmax(model).pth', map_location=torch.device('cpu'))
model.to(device)  # move to the selected device so inputs and weights match
model.eval()
# Load the Melon music data
melon_data = pd.read_csv('./melon_data.csv')
melon_emotions = pd.read_csv('./melon_emotions_final.csv')
melon_emotions = pd.merge(melon_emotions, melon_data, left_on='Title', right_on='title', how='inner')
melon_emotions = melon_emotions[['singer', 'Title', 'genre', 'Emotions']]
melon_emotions = melon_emotions.drop_duplicates(subset='Title', keep='first')
melon_emotions['Emotions'] = melon_emotions['Emotions'].apply(lambda x: ast.literal_eval(x))
emotions = melon_emotions['Emotions'].to_list()
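# Assumption: each entry of `emotions` is a 6-float vector aligned with the six
# emotion classes used by the classifier and the CLIP labels below
# (happy, joyful, loving, angry, melancholic, lonely); the cosine-similarity
# matching later relies on this alignment.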
# gradio
import os
import numpy as np
import pandas as pd
import requests
from PIL import Image
import torch
from transformers import AutoProcessor, AutoModelForZeroShotImageClassification, pipeline
import gradio as gr
import openai
from sklearn.metrics.pairwise import cosine_similarity
import ast
###### basic setup ######
# Read the OpenAI API key from the environment; never hard-code secrets
openai.api_key = os.getenv("OPENAI_API_KEY")
# Load the CLIP model and processor, plus the KoBERT tokenizer
processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
model_clip = AutoModelForZeroShotImageClassification.from_pretrained("openai/clip-vit-large-patch14")
tokenizer = KoBERTTokenizer.from_pretrained('skt/kobert-base-v1')
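# NOTE: `BERTDataset`, `vocab`, and `max_len` are used by predict_text_emotion()
# below but are never defined in this script. The following is a minimal sketch
# based on the standard KoBERT fine-tuning recipe; treat the exact values
# (e.g. max_len=64) as assumptions that must match the original training setup.
max_len = 64  # assumed maximum sequence length
vocab = nlp.vocab.BERTVocab.from_sentencepiece(tokenizer.vocab_file, padding_token='[PAD]')

class BERTDataset(Dataset):
    def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, vocab, max_len, pad, pair):
        # accept either a tokenize function or a full tokenizer object
        tok_fn = getattr(bert_tokenizer, 'tokenize', bert_tokenizer)
        transform = nlp.data.BERTSentenceTransform(
            tok_fn, max_seq_length=max_len, vocab=vocab, pad=pad, pair=pair)
        self.sentences = [transform([i[sent_idx]]) for i in dataset]
        self.labels = [np.int32(i[label_idx]) for i in dataset]

    def __getitem__(self, i):
        return self.sentences[i] + (self.labels[i],)

    def __len__(self):
        return len(self.labels)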
# Prediction labels for zero-shot face-emotion classification
labels = ['a photo of a happy face', 'a photo of a joyful face', 'a photo of a loving face',
          'a photo of an angry face', 'a photo of a melancholic face', 'a photo of a lonely face']
###### face-emotion vector prediction ######
def predict_face_emotion(image):
    # return a zero vector when no (or an invalid) image is given
    if image is None:
        return np.zeros(len(labels))
    # convert the PIL image to RGB
    image = image.convert("RGB")
    # preprocess text and image with the CLIP processor
    inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
    # pixel_values has shape (batch_size, channels, height, width)
    pixel_values = inputs["pixel_values"]
    # CLIP forward pass with both image and text inputs
    with torch.no_grad():
        outputs = model_clip(pixel_values=pixel_values, input_ids=inputs["input_ids"])
    # per-label probabilities for the single image
    probs = outputs.logits_per_image.softmax(dim=1)[0]
    return probs.numpy()
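# Example usage (illustrative; assumes a local image file exists):
#   face_probs = predict_face_emotion(Image.open("face.jpg"))
#   face_probs is a length-6 array aligned with `labels` above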
###### text-emotion vector prediction ######
def predict_text_emotion(predict_sentence):
    if not isinstance(predict_sentence, str):
        predict_sentence = str(predict_sentence)
    data = [predict_sentence, '0']  # dummy label for the dataset wrapper
    dataset_another = [data]
    another_test = BERTDataset(dataset_another, 0, 1, tokenizer, vocab, max_len, True, False)
    test_dataloader = torch.utils.data.DataLoader(another_test, batch_size=1, num_workers=0)
    model.eval()
    sentence_emotions = []  # local, so repeated calls do not accumulate results
    with torch.no_grad():
        for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(test_dataloader):
            token_ids = token_ids.long().to(device)
            segment_ids = segment_ids.long().to(device)
            out = model(token_ids, valid_length, segment_ids)
            for i in out:
                # note: named to avoid shadowing the global `emotions` song list
                sentence_emotions.append([value.item() for value in i])
    return sentence_emotions[0]  # emotion vector for the single input sentence
###### final emotion vector ######
def generate_final_emotion_vector(diary_input, image_input):
    # text emotion vector
    text_vector = predict_text_emotion(diary_input)
    # face emotion vector
    image_vector = predict_face_emotion(image_input)
    text_vector = np.array(text_vector, dtype=float)
    image_vector = np.array(image_vector, dtype=float)
    print(text_vector)   # debug output
    print(image_vector)  # debug output
    # weighted combination: the text signal dominates, the face image refines
    return (text_vector * 0.7) + (image_vector * 0.3)
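# Illustrative example (made-up numbers): text [0.6, 0.2, 0.1, 0.1, 0.0, 0.0]
# and face [0.2, 0.5, 0.1, 0.1, 0.05, 0.05] blend to
# 0.7 * text + 0.3 * face = [0.48, 0.29, 0.1, 0.1, 0.015, 0.015].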
###### cosine similarity ######
def cosine_similarity_fn(vec1, vec2):
    dot_product = np.dot(vec1, vec2)
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    if norm_vec1 == 0 or norm_vec2 == 0:
        return np.nan  # NaN for zero vectors so they can be filtered out later
    return dot_product / (norm_vec1 * norm_vec2)
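# Sanity check (illustrative): identical vectors score 1.0, orthogonal ones 0.0.
#   cosine_similarity_fn(np.array([1, 0]), np.array([1, 0]))  # -> 1.0
#   cosine_similarity_fn(np.array([1, 0]), np.array([0, 1]))  # -> 0.0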
###### image download helper (returns a PIL object) ######
def download_image(image_url):
    try:
        response = requests.get(image_url, stream=True)
        response.raise_for_status()
        return Image.open(response.raw)  # reuse the single streamed response
    except Exception as e:
        print(f"Image download error: {e}")
        return None
# chatbot style options
options = {
    1: "🌼 Friendly",
    2: "🔥 Trendy MZ-generation",
    3: "😄 Humorous prankster",
    4: "🧘 Calm meditator",
    5: "🎨 Creative artist",
}
# diary analysis function
def chatbot_diary_with_image(style_option, diary_input, image_input, playlist_input):
    style = options.get(int(style_option.split('.')[0]), "🌼 Friendly")
    # GPT response (diary comment)
    try:
        response_comment = openai.ChatCompletion.create(
            model="gpt-4-turbo",
            messages=[{"role": "system", "content": f"You are a {style} chatbot."},
                      {"role": "user", "content": diary_input}],
        )
        comment = response_comment.choices[0].message.content
    except Exception as e:
        comment = f"💬 Error: {e}"
    # GPT-based diary topic suggestions
    try:
        topics = get_initial_response(style_option, diary_input)
    except Exception as e:
        topics = f"📝 Topic suggestion error: {e}"
    # DALL·E 3 image generation request (3D-style character)
    try:
        response = openai.Image.create(
            model="dall-e-3",
            prompt=(
                f"Draw a 3D-style illustrated character that expresses the emotion of this diary entry: {diary_input}. "
                "The character should have a soft, rounded design with a facial expression that clearly conveys the emotion. "
                "Include a situation or props that visualize the emotion. "
                "Use vivid, clean colors that match the mood, and give the character a dynamic, playful pose. "
                "Show only a single character in the image. "
                "Keep the background simple and brightly colored so the character stands out."
            ),
            size="1024x1024",
            n=1
        )
        # fetch the generated image URL and download it
        image_url = response['data'][0]['url']
        print(f"Generated Image URL: {image_url}")  # log the URL
        image = download_image(image_url)
    except Exception as e:
        print(f"Image generation error: {e}")  # print error details
        image = None
    # user's final emotion vector
    final_user_emotions = generate_final_emotion_vector(diary_input, image_input)
    # cosine similarity between the user vector and every song vector
    similarities = [cosine_similarity_fn(final_user_emotions, song_vec) for song_vec in emotions]
    # keep only valid (non-NaN) similarities, remembering their original song indices
    valid_indices = [i for i, sim in enumerate(similarities) if not np.isnan(sim)]
    # sort valid songs by descending similarity, mapped back to the original indices
    recommendations = [valid_indices[i] for i in np.argsort([similarities[i] for i in valid_indices])[::-1]]
    results_df = pd.DataFrame({
        'Singer': melon_emotions['singer'].iloc[recommendations].values,
        'title': melon_emotions['Title'].iloc[recommendations].values,
        'genre': melon_emotions['genre'].iloc[recommendations].values,
        'Cosine Similarity': [similarities[idx] for idx in recommendations]
    })
    # gamma weights song-song similarity against user-song similarity
    gamma = 0.3
    similar_playlists = results_df.head(5)
    similar_playlists = pd.merge(similar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
    similar_playlists = similar_playlists[["title", "Emotions", "singer"]]
    dissimilar_playlists = results_df.tail(5)
    dissimilar_playlists = pd.merge(dissimilar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
    dissimilar_playlists = dissimilar_playlists[["title", "Emotions", "singer"]]
    results = []  # filled by whichever branch below matches playlist_input
    # playlist similar to the user's emotion
    if playlist_input == 'Similar':
        results = []
        seen_songs = set(similar_playlists["title"].values)  # seed seen_songs with the seed songs themselves
        # user emotion vector
        user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
        for index, row in similar_playlists.iterrows():
            song_title = row["title"]
            song_singer = row["singer"]
            song_vector = np.array(row["Emotions"]).reshape(1, -1)
            song_results = []
            for i, emotion_vec in enumerate(emotions):
                emotion_title = melon_emotions.iloc[i]["Title"]
                emotion_singer = melon_emotions.iloc[i]["singer"]
                emotion_vec = np.array(emotion_vec).reshape(1, -1)
                # skip the seed song itself and anything already recommended
                if (
                    emotion_title != song_title and
                    emotion_title not in seen_songs
                ):
                    try:
                        # song-song similarity
                        song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
                        # user-song similarity
                        user_song_similarity = cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
                        # final score: gamma blends the two similarities
                        final_score = gamma * song_song_similarity + (1 - gamma) * user_song_similarity
                        song_results.append({
                            "Title": emotion_title,
                            "Singer": emotion_singer,
                            "Song-Song Similarity": song_song_similarity,
                            "User-Song Similarity": user_song_similarity,
                            "Final Score": final_score
                        })
                    except ValueError as e:
                        print(f"Error with {song_title} vs {emotion_title}: {e}")
                        continue
            # keep the top 3 songs by final score
            song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
            seen_songs.update([entry["Title"] for entry in song_results])
            results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
        # print results
        for result in results:
            print(f"{result['Singer']} - {result['Song Title']}")
            for entry in result["Top 3 Similarities"]:
                print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
                print(f"  (Song-Song Similarity: {entry['Song-Song Similarity']:.4f}, User-Song Similarity: {entry['User-Song Similarity']:.4f})")
            print("-" * 30)
    # playlist opposite to the user's emotion
    if playlist_input == 'Opposite':
        results = []
        seen_songs = set()
        # user emotion vector
        user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
        for index, row in dissimilar_playlists.iterrows():
            song_title = row["title"]
            song_singer = row["singer"]
            song_vector = np.array(row["Emotions"]).reshape(1, -1)
            song_results = []
            for i, emotion_vec in enumerate(emotions):
                emotion_title = melon_emotions.iloc[i]["Title"]
                emotion_singer = melon_emotions.iloc[i]["singer"]
                emotion_vec = np.array(emotion_vec).reshape(1, -1)
                if (
                    emotion_title != song_title and
                    emotion_title not in dissimilar_playlists["title"].values and
                    emotion_title not in seen_songs
                ):
                    try:
                        # song-song similarity
                        song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
                        # dissimilarity to the user's emotion vector
                        opposite_user_song_similarity = 1 - cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
                        # final score: blends similarity to the seed song with dissimilarity to the user
                        final_score = gamma * song_song_similarity + (1 - gamma) * opposite_user_song_similarity
                        song_results.append({
                            "Title": emotion_title,
                            "Singer": emotion_singer,
                            "Song-Song Similarity": song_song_similarity,
                            "User-Song Dissimilarity": opposite_user_song_similarity,
                            "Final Score": final_score
                        })
                    except ValueError as e:
                        print(f"Error with {song_title} vs {emotion_title}: {e}")
                        continue
            # keep the top 3 songs by final score (higher score = more opposite to the user)
            song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
            seen_songs.update(entry["Title"] for entry in song_results)
            results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
        # print results
        for result in results:
            print(f"{result['Singer']} - {result['Song Title']}")
            for entry in result["Top 3 Similarities"]:
                print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
                print(f'  (Song-Song Similarity: {entry["Song-Song Similarity"]:.4f}, User-Song Dissimilarity: {entry["User-Song Dissimilarity"]:.4f})')
            print("-" * 30)
    # build rows for the results dataframe
    df_rows = []
    for result in results:
        song_title = result['Song Title']
        song_singer = result['Singer']
        main_song_info = f"{song_singer} - {song_title}"
        for entry in result["Top 3 Similarities"]:
            combined_info = f"{entry['Singer']} - {entry['Title']}"
            df_rows.append({"1st Recommended Playlist": main_song_info, "2nd Recommended Playlist": combined_info})
    # create the dataframe
    final_music_playlist_recommendation = pd.DataFrame(df_rows)
    # within each seed-song group, show the seed title only on the first row
    final_music_playlist_recommendation["1st Recommended Playlist"] = final_music_playlist_recommendation.groupby(
        "1st Recommended Playlist")["1st Recommended Playlist"].transform(
        lambda x: [x.iloc[0]] + [""] * (len(x) - 1)
    )
    return final_music_playlist_recommendation, comment, topics, image
# diary topic suggestion function
def get_initial_response(style, sentence):
    style = options.get(int(style.split('.')[0]), "🌼 Friendly")
    system_prompt_momentum = (
        f"You are a {style} chatbot. Based on the user's diary entry, suggest 4-5 concrete "
        "diary prompts or questions that help them organize their thoughts and reflect on their inner state."
    )
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "system", "content": system_prompt_momentum},
                {"role": "user", "content": sentence}
            ],
            temperature=1
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"📝 Topic suggestion error: {e}"
# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# ✨ Smart Emotion Diary Service ✨\n\nRecord your day, and we will automatically generate a matching playlist and diary reflection content!")
    with gr.Row():
        with gr.Column():
            chatbot_style = gr.Radio(
                choices=[f"{k}. {v}" for k, v in options.items()],
                label="🤖 Choose a chatbot style"
            )
            diary_input = gr.Textbox(label="📝 Record your day", placeholder="e.g. I went to the movies today and ate lots of tasty food, so I was really excited")
            image_input = gr.Image(type="pil", label="📷 Upload a photo of your facial expression")
            playlist_input = gr.Radio(["Similar", "Opposite"], label="🎧 Get a playlist that is similar or opposite to today's emotions")
            submit_btn = gr.Button("🚀 Start analysis")
        with gr.Column():
            output_playlist = gr.Dataframe(label="🎧 Recommended playlist")
            output_comment = gr.Textbox(label="💬 AI comment")
            output_topics = gr.Textbox(label="📝 Suggested diary prompts")
            output_image = gr.Image(label="🖼️ Today's generated emotion character", type="pil", width=512, height=512)
    # wire the button click to the analysis function
    submit_btn.click(
        fn=chatbot_diary_with_image,
        inputs=[chatbot_style, diary_input, image_input, playlist_input],
        outputs=[output_playlist, output_comment, output_topics, output_image]
    )
# launch the app
app.launch(debug=True)