praty7717 committed on
Commit
dc6bd3e
·
verified ·
1 Parent(s): 8df7ba7

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -19
app.py CHANGED
@@ -1,19 +1,19 @@
1
  import gradio as gr
2
  import torch
3
- from transformers import GPT2Tokenizer
4
 
5
# Placeholder language model used before the real architecture was wired in.
class GPTLanguageModel(torch.nn.Module):
    """Stub model: ``forward`` is a no-op and ``generate`` echoes its input.

    NOTE(review): this is scaffolding only — both methods must be replaced
    with real model logic; as-is the pipeline just parrots the prompt back.
    """

    def forward(self, input_ids):
        # Placeholder forward pass; intentionally produces no output.
        # (Explicit `return None` — behaviorally identical to the bare `pass`.)
        return None

    def generate(self, input_ids, max_length=100):
        # Placeholder "generation": return the prompt tokens unchanged so the
        # surrounding pipeline can run end-to-end without a trained model.
        return input_ids
16
 
 
17
class CustomTextGenerationPipeline:
    """Minimal text-generation pipeline: encode prompt -> generate -> decode."""

    def __init__(self, model, tokenizer):
        self.model = model
        # NOTE(review): this assignment sits in a diff-elided region of the
        # original (its line 20); it must exist for __call__ to work — confirm.
        self.tokenizer = tokenizer

    def __call__(self, prompt, max_length=100):
        # Encode the prompt into model input ids (PyTorch tensor layout).
        input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
        # Delegate token generation to the wrapped model.
        generated_ids = self.model.generate(input_ids, max_length=max_length)
        # Decode the first (only) generated sequence back into text.
        return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
27
 
28
# Load model and tokenizer.
model = GPTLanguageModel()
# SECURITY NOTE(review): torch.load unpickles arbitrary Python objects —
# only load "model.pth" from a trusted source (consider weights_only=True
# on torch >= 1.13). map_location keeps the weights on CPU.
model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')))
model.eval()  # inference mode: disables dropout / training-only behavior

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Create the pipeline
pipeline = CustomTextGenerationPipeline(model, tokenizer)
37
 
 
1
  import gradio as gr
2
  import torch
3
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
4
 
5
# Define the model class as a thin subclass of GPT2LMHeadModel so the
# project-specific name is kept for from_pretrained / isinstance checks.
class GPTLanguageModel(GPT2LMHeadModel):
    """GPT-2 LM-head model under the project's own class name.

    The original defined ``__init__(self, config)`` whose body only called
    ``super().__init__(config)``; that override was redundant (identical to
    the inherited constructor) and has been removed.
    """
 
9
 
10
# Load tokenizer and model.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")  # swap in your tokenizer path if customised
model = GPTLanguageModel.from_pretrained("gpt2")   # load the GPT-2 architecture + base weights
# SECURITY NOTE(review): torch.load unpickles arbitrary Python objects —
# only load "model.pth" from a trusted source (consider weights_only=True
# on torch >= 1.13). map_location keeps the weights on CPU.
# NOTE(review): the checkpoint's state-dict keys must match GPT2LMHeadModel's
# parameter names or load_state_dict will raise — verify how model.pth was saved.
model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')))
model.eval()  # evaluation mode: disables dropout etc. for inference
15
 
16
# Define a custom text generation pipeline
class CustomTextGenerationPipeline:
    """Minimal text-generation pipeline: encode prompt -> generate -> decode."""

    def __init__(self, model, tokenizer):
        self.model = model
        # NOTE(review): this assignment falls in a diff-elided region of the
        # original (its line 20); it must exist for __call__ to work — confirm.
        self.tokenizer = tokenizer

    def __call__(self, prompt, max_length=100):
        # Encode the prompt into model input ids (PyTorch tensor layout).
        input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
        # Delegate token generation to the wrapped model.
        generated_ids = self.model.generate(input_ids, max_length=max_length)
        # Decode the first (only) generated sequence back into text.
        return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
26
 
 
 
 
 
 
 
 
27
# Create the pipeline from the module-level model/tokenizer defined above.
pipeline = CustomTextGenerationPipeline(model, tokenizer)
29