# gradio final ver ----------------------------
###### Basic setup ######
import os, requests
import numpy as np
import pandas as pd
import torch
import openai
import gradio as gr
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoProcessor, AutoModelForZeroShotImageClassification
from kobert_tokenizer import KoBERTTokenizer  # SKT's KoBERT tokenizer package

# OpenAI API key (read from the environment instead of hardcoding the secret)
openai.api_key = os.environ["OPENAI_API_KEY"]

# Load the models and processor
processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
model_clip = AutoModelForZeroShotImageClassification.from_pretrained("openai/clip-vit-large-patch14")
tokenizer = KoBERTTokenizer.from_pretrained('skt/kobert-base-v1')

# Prediction labels
labels = ['a photo of a happy face', 'a photo of a joyful face', 'a photo of a loving face',
          'a photo of an angry face', 'a photo of a melancholic face', 'a photo of a lonely face']
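# NOTE (assumption): the KoBERT classifier pieces used below -- BERTDataset, model,
# vocab, max_len, device -- and the song data (emotions, melon_emotions) are expected
# to be defined/loaded elsewhere in this Space; they are not created in this file.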
###### Face emotion vector prediction ######
def predict_face_emotion(image):
    # Return an empty vector when no image is given
    if image is None:
        return np.zeros(len(labels))
    # Convert the PIL image to RGB
    image = image.convert("RGB")
    # Preprocess with the CLIP processor
    inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
    # pixel_values has shape (batch_size, channels, height, width)
    pixel_values = inputs["pixel_values"]
    # CLIP forward pass with the proper inputs
    with torch.no_grad():
        outputs = model_clip(pixel_values=pixel_values, input_ids=inputs["input_ids"])
    # Convert image-text logits to probabilities
    probs = outputs.logits_per_image.softmax(dim=1)[0]
    return probs.numpy()
###### Text emotion vector prediction ######
def predict_text_emotion(predict_sentence):
    if not isinstance(predict_sentence, str):
        predict_sentence = str(predict_sentence)
    data = [predict_sentence, '0']
    dataset_another = [data]
    another_test = BERTDataset(dataset_another, 0, 1, tokenizer, vocab, max_len, True, False)
    test_dataloader = torch.utils.data.DataLoader(another_test, batch_size=1, num_workers=5)

    model.eval()
    sentence_emotions = []  # local list so repeated calls don't return stale results
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(test_dataloader):
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        out = model(token_ids, valid_length, segment_ids)
        for i in out:
            sentence_emotions.append([value.item() for value in i])
    return sentence_emotions[0]  # emotion vector of the single input sentence
###### Final emotion vector ######
def generate_final_emotion_vector(diary_input, image_input):
    # Text emotion vector
    text_vector = predict_text_emotion(diary_input)
    # Face emotion vector
    image_vector = predict_face_emotion(image_input)
    text_vector = np.array(text_vector, dtype=float)
    image_vector = np.array(image_vector, dtype=float)
    print(text_vector)
    print(image_vector)
    # Weighted combination of the text and image emotions
    return (text_vector * 0.7) + (image_vector * 0.3)
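# Note: the 0.7/0.3 weights are this app's chosen text-vs-face balance, and the sum
# assumes both vectors live in the same 6-dimensional emotion space as `labels`
# (the external KoBERT classifier is expected to emit 6 scores in the same order).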
####### ์ฝ”์‚ฌ์ธ ์œ ์‚ฌ๋„ ํ•จ์ˆ˜ ######
def cosine_similarity_fn(vec1, vec2):
dot_product = np.dot(vec1, vec2)
norm_vec1 = np.linalg.norm(vec1)
norm_vec2 = np.linalg.norm(vec2)
if norm_vec1 == 0 or norm_vec2 == 0:
return np.nan # ์ œ๋กœ ๋ฒกํ„ฐ์ธ ๊ฒฝ์šฐ NaN ๋ฐ˜ํ™˜
return dot_product / (norm_vec1 * norm_vec2)
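# Quick sanity check (illustrative): parallel vectors give 1.0, orthogonal give 0.0.
#   cosine_similarity_fn(np.array([1.0, 0.0]), np.array([2.0, 0.0]))  # -> 1.0
#   cosine_similarity_fn(np.array([1.0, 0.0]), np.array([0.0, 3.0]))  # -> 0.0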
###### Image download (returns a PIL object) ######
def download_image(image_url):
    try:
        response = requests.get(image_url, stream=True)
        response.raise_for_status()
        # Reuse the streamed response instead of issuing a second request
        return Image.open(response.raw)
    except Exception as e:
        print(f"Image download error: {e}")
        return None
# ์Šคํƒ€์ผ ์˜ต์…˜
options = {
1: "๐ŸŒผ ์นœ๊ทผํ•œ",
2: "๐Ÿ”ฅ ํŠธ๋ Œ๋””ํ•œ MZ์„ธ๋Œ€",
3: "๐Ÿ˜„ ์œ ๋จธ๋Ÿฌ์Šคํ•œ ์žฅ๋‚œ๊พธ๋Ÿฌ๊ธฐ",
4: "๐Ÿง˜ ์ฐจ๋ถ„ํ•œ ๋ช…์ƒ๊ฐ€",
5: "๐ŸŽจ ์ฐฝ์˜์ ์ธ ์˜ˆ์ˆ ๊ฐ€",
}
# Diary analysis function
def chatbot_diary_with_image(style_option, diary_input, image_input, playlist_input):
style = options.get(int(style_option.split('.')[0]), "๐ŸŒผ ์นœ๊ทผํ•œ")
# GPT ์‘๋‹ต (์ผ๊ธฐ ์ฝ”๋ฉ˜ํŠธ)
try:
response_comment = openai.ChatCompletion.create(
model="gpt-4-turbo",
messages=[{"role": "system", "content": f"๋„ˆ๋Š” {style} ์ฑ—๋ด‡์ด์•ผ."}, {"role": "user", "content": diary_input}],
)
comment = response_comment.choices[0].message.content
except Exception as e:
comment = f"๐Ÿ’ฌ ์˜ค๋ฅ˜: {e}"
    # GPT-based diary topic suggestions
try:
topics = get_initial_response(style_option, diary_input)
except Exception as e:
topics = f"๐Ÿ“ ์ฃผ์ œ ์ถ”์ฒœ ์˜ค๋ฅ˜: {e}"
    # DALL·E 3 image generation request (3D-style character)
try:
response = openai.Image.create(
model="dall-e-3",
prompt=(
f"{diary_input}๋ฅผ ๋ฐ˜์˜ํ•ด์„œ ๊ฐ์ •์„ ํ‘œํ˜„ํ•˜๋Š” 3D ์Šคํƒ€์ผ์˜ ์ผ๋Ÿฌ์ŠคํŠธ ์บ๋ฆญํ„ฐ๋ฅผ ๊ทธ๋ ค์ค˜. "
"์บ๋ฆญํ„ฐ๋Š” ๋ถ€๋“œ๋Ÿฝ๊ณ  ๋‘ฅ๊ทผ ๋””์ž์ธ์— ํ‘œ์ •์ด ๊ฐ์ •์„ ์ž˜ ๋“œ๋Ÿฌ๋‚ด์•ผ ํ•ด. "
"๊ฐ์ •์„ ์‹œ๊ฐ์ ์œผ๋กœ ํ‘œํ˜„ํ•  ์ˆ˜ ์žˆ๋Š” ์†Œํ’ˆ์ด๋‚˜ ์ž‘์€ ์ƒ์ง•์„ ํฌํ•จํ•ด์ค˜. "
"๊ฐ์ •์˜ ๋ถ„์œ„๊ธฐ๋ฅผ ๋ฐ˜์˜ํ•˜๋Š” ์„ ๋ช…ํ•˜๊ณ  ๊นจ๋—ํ•œ ์ƒ‰์ƒ์„ ์‚ฌ์šฉํ•˜๊ณ , ์บ๋ฆญํ„ฐ๊ฐ€ ์—ญ๋™์ ์ด๊ณ  ์žฌ๋ฏธ์žˆ๋Š” ์ž์„ธ๋ฅผ ์ทจํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•ด์ค˜. "
"์ด๋ฏธ์ง€์—๋Š” ํ•˜๋‚˜์˜ ์บ๋ฆญํ„ฐ๋งŒ ๋‚˜์˜ค๊ฒŒ ํ•ด์ค˜."
"๋ฐฐ๊ฒฝ์€ ๋‹จ์ˆœํ•˜๊ณ  ๋ฐ์€ ์ƒ‰์ƒ์œผ๋กœ ์„ค์ •ํ•ด์„œ ์บ๋ฆญํ„ฐ๊ฐ€ ๊ฐ•์กฐ๋  ์ˆ˜ ์žˆ๋„๋ก ํ•ด์ค˜."
),
size="1024x1024",
n=1
)
        # Get the image URL and download it
        image_url = response['data'][0]['url']
        print(f"Generated Image URL: {image_url}")  # log the URL
image = download_image(image_url)
except Exception as e:
print(f"์ด๋ฏธ์ง€ ์ƒ์„ฑ ์˜ค๋ฅ˜: {e}") # ์˜ค๋ฅ˜ ์ƒ์„ธ ์ถœ๋ ฅ
image = None
# ์‚ฌ์šฉ์ž ์ตœ์ข… ๊ฐ์ • ๋ฒกํ„ฐ
final_user_emotions = generate_final_emotion_vector(diary_input,image_input)
# ๊ฐ ๋…ธ๋ž˜์— ๋Œ€ํ•œ ์ฝ”์‚ฌ์ธ ์œ ์‚ฌ๋„ ๊ณ„์‚ฐ
similarities = [cosine_similarity_fn(final_user_emotions, song_vec) for song_vec in emotions]
#์œ ํšจํ•œ ์œ ์‚ฌ๋„ ํ•„ํ„ฐ๋ง
valid_indices = [i for i, sim in enumerate(similarities) if not np.isnan(sim)]
filtered_similarities = [similarities[i] for i in valid_indices]
recommendations = np.argsort(filtered_similarities)[::-1] # ๋†’์€ ์œ ์‚ฌ๋„ ์ˆœ์œผ๋กœ ์ •๋ ฌ
results_df = pd.DataFrame({
'Singer' : melon_emotions['singer'].iloc[recommendations].values,
'title' : melon_emotions['Title'].iloc[recommendations].values,
'genre' : melon_emotions['genre'].iloc[recommendations].values,
'Cosine Similarity': [similarities[idx] for idx in recommendations]
})
    # Weight between song-song similarity and user-song (dis)similarity in the final score
gamma = 0.3
similar_playlists = results_df.head(5)
similar_playlists = pd.merge(similar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
similar_playlists = similar_playlists[["title", "Emotions", "singer"]]
dissimilar_playlists = results_df.tail(5)
dissimilar_playlists = pd.merge(dissimilar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
dissimilar_playlists = dissimilar_playlists[["title", "Emotions", "singer"]]
#๊ฐ์ •๊ณผ ์œ ์‚ฌํ•œ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ
if playlist_input == '๋น„์Šทํ•œ':
results = []
seen_songs = set(similar_playlists["title"].values) # ์ดˆ๊ธฐ seen_songs์— similar_playlists์˜ ๊ณก๋“ค์„ ์ถ”๊ฐ€
# ์‚ฌ์šฉ์ž ๊ฐ์ • ๋ฒกํ„ฐ
user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
for index, row in similar_playlists.iterrows():
song_title = row["title"]
song_singer = row["singer"]
song_vector = np.array(row["Emotions"]).reshape(1, -1)
song_results = []
for i, emotion_vec in enumerate(emotions):
emotion_title = melon_emotions.iloc[i]["Title"]
emotion_singer = melon_emotions.iloc[i]["singer"]
emotion_vec = np.array(emotion_vec).reshape(1, -1)
                # Skip songs already in similar_playlists or already recommended (seen_songs)
if (
emotion_title != song_title and
emotion_title not in seen_songs
):
try:
                        # Song-to-song similarity
song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
# ์‚ฌ์šฉ์ž ๊ฐ์ • ๋ฒกํ„ฐ์™€์˜ ์œ ์‚ฌ๋„(User-Song Similarity)
user_song_similarity = cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
                        # Compute the final score
final_score = gamma * song_song_similarity + (1 - gamma) * user_song_similarity
song_results.append({
"Title": emotion_title,
"Singer": emotion_singer,
"Song-Song Similarity": song_song_similarity,
"User-Song Similarity": user_song_similarity,
"Final Score": final_score
})
except ValueError as e:
print(f"Error with {song_title} vs {emotion_title}: {e}")
continue
            # Keep the top 3 songs by final score
song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
seen_songs.update([entry["Title"] for entry in song_results])
results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
        # Print the results
for result in results:
print(f"{result['Singer']} - {result['Song Title']}")
for entry in result["Top 3 Similarities"]:
print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
print(f" (Song-Song Similarity: {entry['Song-Song Similarity']:.4f}, User-Song Similarity: {entry['User-Song Similarity']:.4f})")
print("-" * 30)
    # Playlist opposite to the user's emotion
if playlist_input == '์ƒ๋ฐ˜๋œ':
results = []
seen_songs = set()
# ์‚ฌ์šฉ์ž ๊ฐ์ • ๋ฒกํ„ฐ
user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
for index, row in dissimilar_playlists.iterrows():
song_title = row["title"]
song_singer = row["singer"]
song_vector = np.array(row["Emotions"]).reshape(1, -1)
song_results = []
for i, emotion_vec in enumerate(emotions):
emotion_title = melon_emotions.iloc[i]["Title"]
emotion_singer = melon_emotions.iloc[i]["singer"]
emotion_vec = np.array(emotion_vec).reshape(1, -1)
if (
emotion_title != song_title and
emotion_title not in dissimilar_playlists["title"].values and
emotion_title not in seen_songs
):
try:
                        # Song-to-song similarity
song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
# ์‚ฌ์šฉ์ž ๊ฐ์ • ๋ฒกํ„ฐ์™€์˜ ๋ฐ˜๋Œ€ ์œ ์‚ฌ๋„(User-Song Dissimilarity)
opposite_user_song_similarity = 1 - cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
                        # Compute the final score
final_score = gamma * song_song_similarity + (1 - gamma) * opposite_user_song_similarity
song_results.append({
"Title": emotion_title,
"Singer": emotion_singer,
"Song-Song Similarity": song_song_similarity,
"User-Song Dissimilarity": opposite_user_song_similarity,
"Final Score": final_score
})
except ValueError as e:
print(f"Error with {song_title} vs {emotion_title}: {e}")
continue
            # Keep the top 3 by final score (higher = more opposite to the user's emotion)
song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
seen_songs.update(entry["Title"] for entry in song_results)
results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
        # Print the results
for result in results:
print(f"{result['Singer']} - {result['Song Title']}")
for entry in result["Top 3 Similarities"]:
print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
print(f' (Song-Song Similarity: {entry["Song-Song Similarity"]:.4f}, User-Song Dissimilarity: {entry["User-Song Dissimilarity"]:.4f})')
print("-" * 30)
# ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ๋ณ€ํ™˜์„ ์œ„ํ•œ ๋ฆฌ์ŠคํŠธ ์ƒ์„ฑ
df_rows = []
for result in results:
song_title = result['Song Title']
song_singer = result['Singer']
main_song_info = f"{song_singer} - {song_title}"
for entry in result["Top 3 Similarities"]:
combined_info = f"{entry['Singer']} - {entry['Title']}"
df_rows.append({"1st ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ": main_song_info, "2nd ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ": combined_info})
# ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ์ƒ์„ฑ
final_music_playlist_recommendation = pd.DataFrame(df_rows)
    # Within each first-level song group, show the title only on the first row
final_music_playlist_recommendation["1st ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ"] = final_music_playlist_recommendation.groupby("1st ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ")["1st ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ"].transform(
lambda x: [x.iloc[0]] + [""] * (len(x) - 1)
)
return final_music_playlist_recommendation, comment, topics, image
# Diary topic suggestion function
def get_initial_response(style, sentence):
style = options.get(int(style.split('.')[0]), "๐ŸŒผ ์นœ๊ทผํ•œ")
system_prompt_momentum = (
f"๋„ˆ๋Š” {style}์˜ ์ฑ—๋ด‡์ด์•ผ. ์‚ฌ์šฉ์ž๊ฐ€ ์ž‘์„ฑํ•œ ์ผ๊ธฐ๋ฅผ ๋ฐ”ํƒ•์œผ๋กœ ์ƒ๊ฐ์„ ์ •๋ฆฌํ•˜๊ณ  ๋‚ด๋ฉด์„ ๋Œ์•„๋ณผ ์ˆ˜ ์žˆ๋„๋ก "
"๋„์™€์ฃผ๋Š” ๊ตฌ์ฒด์ ์ธ ์ผ๊ธฐ ์ฝ˜ํ…์ธ ๋‚˜ ์งˆ๋ฌธ 4-5๊ฐœ๋ฅผ ์ถ”์ฒœํ•ด์ค˜."
)
try:
response = openai.ChatCompletion.create(
model="gpt-4-turbo",
messages=[
{"role": "system", "content": system_prompt_momentum},
{"role": "user", "content": sentence}
],
temperature=1
)
return response.choices[0].message.content
except Exception as e:
return f"๐Ÿ“ ์ฃผ์ œ ์ถ”์ฒœ ์˜ค๋ฅ˜: {e}"
# Gradio ์ธํ„ฐํŽ˜์ด์Šค
with gr.Blocks() as app:
gr.Markdown("# โœจ ์Šค๋งˆํŠธ ๊ฐ์ • ์ผ๊ธฐ ์„œ๋น„์Šค โœจ\n\n ์˜ค๋Š˜์˜ ํ•˜๋ฃจ๋ฅผ ๊ธฐ๋กํ•˜๋ฉด, ๊ทธ์— ๋งž๋Š” ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ์™€ ์ผ๊ธฐ ํšŒ๊ณ  ์ฝ˜ํ…์ธ ๋ฅผ ์ž๋™์œผ๋กœ ์ƒ์„ฑํ•ด๋“œ๋ฆฝ๋‹ˆ๋‹ค!")
with gr.Row():
with gr.Column():
chatbot_style = gr.Radio(
choices=[f"{k}. {v}" for k, v in options.items()],
label="๐Ÿค– ์›ํ•˜๋Š” ์ฑ—๋ด‡ ์Šคํƒ€์ผ ์„ ํƒ"
)
diary_input = gr.Textbox(label="๐Ÿ“œ ์˜ค๋Š˜์˜ ํ•˜๋ฃจ ๊ธฐ๋กํ•˜๊ธฐ", placeholder="ex)์˜ค๋Š˜ ์†Œํ’๊ฐ€์„œ ๋ง›์žˆ๋Š” ๊ฑธ ๋งŽ์ด ๋จน์–ด์„œ ์—„์ฒญ ์‹ ๋‚ฌ์–ด")
image_input = gr.Image(type="pil", label="๐Ÿ“ท ์–ผ๊ตด ํ‘œ์ • ์‚ฌ์ง„ ์—…๋กœ๋“œ")
playlist_input = gr.Radio(["๋น„์Šทํ•œ", "์ƒ๋ฐ˜๋œ"], label="๐ŸŽง ์˜ค๋Š˜์˜ ๊ฐ์ •๊ณผ ใ…‡ใ…‡๋˜๋Š” ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ ์ถ”์ฒœ ๋ฐ›๊ธฐ")
submit_btn = gr.Button("๐Ÿš€ ๋ถ„์„ ์‹œ์ž‘")
with gr.Column():
output_playlist = gr.Dataframe(label="๐ŸŽง ์ถ”์ฒœ ํ”Œ๋ ˆ์ด๋ฆฌ์ŠคํŠธ ")
output_comment = gr.Textbox(label="๐Ÿ’ฌ AI ์ฝ”๋ฉ˜ํŠธ")
output_topics = gr.Textbox(label="๐Ÿ“ ์ถ”์ฒœ ์ผ๊ธฐ ์ฝ˜ํ…์ธ ")
output_image = gr.Image(label="๐Ÿ–ผ๏ธ ์ƒ์„ฑ๋œ ์˜ค๋Š˜์˜ ๊ฐ์ • ์บ๋ฆญํ„ฐ", type="pil", width=512, height=512)
# ๋ฒ„ํŠผ ํด๋ฆญ ์ด๋ฒคํŠธ ์—ฐ๊ฒฐ
submit_btn.click(
fn=chatbot_diary_with_image,
inputs=[chatbot_style, diary_input, image_input, playlist_input],
outputs=[output_playlist, output_comment, output_topics, output_image]
)
# Launch the app
app.launch(debug=True)