praty7717 commited on
Commit
8df7ba7
·
verified ·
1 Parent(s): 2f4a4bb

Upload updated app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -5,10 +5,14 @@ from transformers import GPT2Tokenizer
5
# Assuming 'GPTLanguageModel' is already defined
class GPTLanguageModel(torch.nn.Module):
    """Placeholder GPT-style language model.

    The real architecture is defined elsewhere; this stub only provides the
    `forward` / `generate` interface that `CustomTextGenerationPipeline`
    relies on.
    """

    def forward(self, input_ids):
        # Placeholder forward pass; a real model would compute logits here.
        pass

    def generate(self, input_ids, max_length=100):
        # BUG FIX: the original returned torch.tensor([[input_ids]]), which
        # wraps an already-batched (batch, seq) tensor in two extra list
        # levels — that raises for tensor input and would break
        # tokenizer.decode(generated_ids[0]) in the pipeline below.
        # Return the input tensor unchanged as the placeholder output.
        # NOTE(review): max_length is currently unused — real generation
        # should honor it.
        return input_ids
 
 
 
12
 
13
  class CustomTextGenerationPipeline:
14
  def __init__(self, model, tokenizer):
@@ -17,12 +21,13 @@ class CustomTextGenerationPipeline:
17
 
18
  def __call__(self, prompt, max_length=100):
19
  input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
 
20
  generated_ids = self.model.generate(input_ids, max_length=max_length)
21
  return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
22
 
23
# Load model and tokenizer
model = GPTLanguageModel()
# FIX: map_location remaps checkpoint tensors to CPU, so a checkpoint saved
# on a GPU machine still loads on a CPU-only host (torch.load would otherwise
# raise when CUDA is unavailable).
# NOTE(review): torch.load unpickles arbitrary objects — only load
# checkpoints from a trusted source.
model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')))
model.eval()  # inference mode: disables dropout / batch-norm updates

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
 
5
# Assuming 'GPTLanguageModel' is already defined
class GPTLanguageModel(torch.nn.Module):
    """Stub language model exposing the interface the pipeline expects."""

    def forward(self, input_ids):
        # Placeholder forward function — no computation yet.
        pass

    def generate(self, input_ids, max_length=100):
        # Placeholder generation: echo the input ids back unchanged to mimic
        # generation. A real implementation would sample new tokens here and
        # honor max_length.
        return input_ids
17
  class CustomTextGenerationPipeline:
18
  def __init__(self, model, tokenizer):
 
21
 
22
  def __call__(self, prompt, max_length=100):
23
  input_ids = self.tokenizer.encode(prompt, return_tensors='pt')
24
+ # Generate text using the model (this is currently simplified)
25
  generated_ids = self.model.generate(input_ids, max_length=max_length)
26
  return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
27
 
28
# Load the model and its tokenizer
model = GPTLanguageModel()
# Load weights onto CPU: map_location remaps checkpoint tensors so loading
# works even on machines without a GPU.
state_dict = torch.load("model.pth", map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
model.eval()

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")