import gradio as gr
import torch
from transformers import GPT2Tokenizer

# Define your model
class GPTLanguageModel(torch.nn.Module):
    def __init__(self, vocab_size, hidden_size):
        super(GPTLanguageModel, self).__init__()
        self.embedding = torch.nn.Embedding(vocab_size, hidden_size)
        # Add other layers as needed

    def forward(self, input_ids):
        return self.embedding(input_ids)  # Placeholder for the forward pass

    def generate(self, input_ids, max_length=100):
        # Custom generation logic here
        return input_ids  # Placeholder
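
# A minimal greedy-decoding sketch (an assumption, not the author's implementation):
# it assumes an `lm_head` linear layer that maps hidden states to vocabulary logits,
# which the GPTLanguageModel placeholder above does not define yet.
def greedy_generate(model, lm_head, input_ids, max_length=100):
    with torch.no_grad():
        while input_ids.size(1) < max_length:
            hidden = model(input_ids)            # (batch, seq_len, hidden_size)
            logits = lm_head(hidden[:, -1, :])   # logits for the last position
            next_id = logits.argmax(dim=-1, keepdim=True)
            input_ids = torch.cat([input_ids, next_id], dim=1)
    return input_ids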

# Define the Custom Text Generation Pipeline
class CustomTextGenerationPipeline:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    def __call__(self, prompt, max_length=100):
        input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
        generated_ids = self.model.generate(input_ids, max_length=max_length)
        return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
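
# Note (assumption): for inference-only use, the generate call above could be
# wrapped in torch.no_grad() to avoid building a gradient graph, e.g.:
#     with torch.no_grad():
#         generated_ids = self.model.generate(input_ids, max_length=max_length)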

# Load tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")  # Or your custom tokenizer
vocab_size = tokenizer.vocab_size
model = GPTLanguageModel(vocab_size=vocab_size, hidden_size=768)  # Set sizes appropriately

# Load model weights
try:
    state_dict = torch.load("model.pth", map_location=torch.device('cpu'), weights_only=True)
    model.load_state_dict(state_dict, strict=False)
except (FileNotFoundError, RuntimeError) as e:
    print(f"Error loading model weights: {e}")
model.eval()
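
# Note (assumption): "model.pth" is expected to hold a state_dict saved during
# training, e.g. torch.save(model.state_dict(), "model.pth"); strict=False above
# tolerates missing or unexpected keys if the saved architecture differs.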

# Create the pipeline
pipeline = CustomTextGenerationPipeline(model, tokenizer)

# Define the Gradio response function
def respond(message):
    return pipeline(message, max_length=100)

# Create the Gradio interface
demo = gr.Interface(
    fn=respond,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()
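
# Note (assumption): to run this Space locally, install the dependencies
# (e.g. pip install gradio torch transformers) and place the trained weights
# in model.pth next to this script before launching.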