Ahmed235 committed on
Commit
341ccc1
·
verified ·
1 Parent(s): a0a0944

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -3,14 +3,14 @@ import re
3
  import gradio as gr
4
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
5
  import torch
6
- import torch
7
- torch.device("cpu")
8
  import torch.nn.functional as F
9
  from transformers import pipeline
10
 
11
  # Load the pre-trained model and tokenizer
12
  tokenizer = AutoTokenizer.from_pretrained("Ahmed235/roberta_classification")
13
  model = AutoModelForSequenceClassification.from_pretrained("Ahmed235/roberta_classification")
 
 
14
 
15
  # Create a summarization pipeline
16
  summarizer = pipeline("summarization", model="Falconsai/text_summarization")
@@ -30,6 +30,7 @@ def predict_pptx_content(file_path):
30
 
31
  # Tokenize and encode the cleaned text
32
  input_encoding = tokenizer(cleaned_text, truncation=True, padding=True, return_tensors="pt")
 
33
 
34
  # Perform inference
35
  with torch.no_grad():
 
3
  import gradio as gr
4
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
5
  import torch
 
 
6
  import torch.nn.functional as F
7
  from transformers import pipeline
8
 
9
  # Load the pre-trained model and tokenizer
10
  tokenizer = AutoTokenizer.from_pretrained("Ahmed235/roberta_classification")
11
  model = AutoModelForSequenceClassification.from_pretrained("Ahmed235/roberta_classification")
12
+ device = torch.device("cpu")
13
+ model = model.to(device) # Move the model to the CPU
14
 
15
  # Create a summarization pipeline
16
  summarizer = pipeline("summarization", model="Falconsai/text_summarization")
 
30
 
31
  # Tokenize and encode the cleaned text
32
  input_encoding = tokenizer(cleaned_text, truncation=True, padding=True, return_tensors="pt")
33
+ input_encoding = {key: val.to(device) for key, val in input_encoding.items()} # Move input tensor to CPU
34
 
35
  # Perform inference
36
  with torch.no_grad():