Spaces: Runtime error

Commit 8647e9d · Parent(s): 07e4199

add fast api app

Files changed:
- app.py (+33, -0)
- requirements.txt (+4, -0)
app.py ADDED
@@ -0,0 +1,33 @@
+from fastapi import FastAPI
+from pydantic import BaseModel
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+app = FastAPI()
+
+# Load the Mongolian Llama model and tokenizer
+model_name = "Dorjzodovsuren/Mongolian_Llama3-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+class UserInput(BaseModel):
+    text: str
+
+@app.post("/generate/")
+def generate_response(user_input: UserInput):
+    # Tokenize the input text
+    inputs = tokenizer(user_input.text, return_tensors="pt")
+
+    # Generate a response
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=100,  # adjust for desired response length (counts only generated tokens, unlike max_length)
+            num_return_sequences=1,
+            do_sample=True, temperature=0.7,  # sampling must be enabled for temperature to take effect; adjust for creativity
+            top_p=0.9  # adjust for response diversity
+        )
+
+    # Decode the generated text
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return {"response": response}
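Not part of the commit, but a quick way to exercise the endpoint once the app is running. This is a minimal sketch: the port (7860, the default Hugging Face Spaces expects), the sample prompt, and the timeout are assumptions, not anything this commit contains.

# Sketch of a local test. Assumes the server was started with:
#   uvicorn app:app --host 0.0.0.0 --port 7860
import requests

resp = requests.post(
    "http://localhost:7860/generate/",
    json={"text": "Сайн байна уу?"},  # sample Mongolian prompt ("Hello")
    timeout=120,  # the first request is slow while the model warms up
)
resp.raise_for_status()
print(resp.json()["response"])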
requirements.txt ADDED
@@ -0,0 +1,4 @@
+fastapi
+uvicorn
+transformers
+torch
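The Space's status above is "Runtime error". The page carries no logs, so the actual cause is not recoverable here, but one common failure mode for a Llama-3-scale checkpoint on a default Space is exhausting memory during the full-precision from_pretrained() load. A hedged sketch of a lower-memory load follows; the dtype choice and the low_cpu_mem_usage flag are suggestions, not part of this commit.

# Sketch only: load the checkpoint in half precision, roughly halving the
# memory needed versus the default float32 load in app.py.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Dorjzodovsuren/Mongolian_Llama3-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,   # half precision; bfloat16 also works on hardware that supports it
    low_cpu_mem_usage=True,      # avoid materializing a second full copy of the weights while loading
)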