# Streamlit chat app for the microsoft/GRIN-MoE model, served through the
# Hugging Face Inference API. (Header residue from the Space web page removed.)
import streamlit as st
from huggingface_hub import InferenceClient

# Load the Hugging Face API token from Streamlit / Spaces secrets.
HF_TOKEN = st.secrets["HF_TOKEN"]

# Inference client; the GRIN-MoE model name is passed per call below.
client = InferenceClient(token=HF_TOKEN)

# Streamlit page setup.
# NOTE(review): the icon and title strings below look mojibake-corrupted
# (UTF-8 Korean decoded as TIS-620); restore the intended text ("🤖",
# Korean title) from the original source before shipping.
st.set_page_config(page_title="GRIN-MoE AI Chat", page_icon="๐ค")
st.title("GRIN-MoE ๋ชจ๋ธ๊ณผ ๋ํํด๋ณด์ธ์!")

# Chat history is kept in session state so it survives Streamlit reruns.
if 'messages' not in st.session_state:
    st.session_state.messages = []

# User input box.
user_input = st.text_input("์ง๋ฌธ์ ์ ๋ ฅํ์ธ์:")
# ์คํธ๋ฆฌ๋ฐ ์๋ต ํจ์ | |
def generate_streaming_response(prompt):
    """Stream the model's reply to *prompt*, yielding text deltas.

    Args:
        prompt: The user's question, sent as a single-turn user message.

    Yields:
        Non-empty text fragments of the assistant reply, in order.
    """
    for chunk in client.chat_completion(
        model="microsoft/GRIN-MoE",  # pass the model name explicitly
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
        stream=True,
    ):
        delta = chunk.choices[0].delta.content
        # The final streamed chunk typically carries content=None; the
        # original concatenated it unconditionally, which raises TypeError.
        # Guard so callers only ever receive real text. (The original also
        # accumulated the full text locally but never used it — removed.)
        if delta:
            yield delta
# Handle a newly submitted question.
if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Collect the streamed reply and display it once it is complete.
    # NOTE(review): for true token-by-token display, write the growing text
    # into an st.empty() placeholder inside the loop instead of one st.write.
    with st.spinner('AI๊ฐ ์๋ตํ๋ ์ค...'):
        response_text = "".join(generate_streaming_response(user_input))
        st.write(response_text)

    st.session_state.messages.append({"role": "assistant", "content": response_text})

# Render the conversation history.
# NOTE(review): this includes the exchange appended just above, so the latest
# reply is shown twice per rerun — inherited from the original layout; confirm
# whether the live reply block or this history loop should be dropped.
for msg in st.session_state.messages:
    if msg["role"] == "user":
        st.write(f"**์ฌ์ฉ์:** {msg['content']}")
    else:
        st.write(f"**AI:** {msg['content']}")