Commit 21ead8a · Parent(s): b9954bc

Using our own models.
mi_clase.py CHANGED (+8 -4)

@@ -17,8 +17,12 @@ from nltk.corpus import stopwords
 from nltk.tokenize import word_tokenize
 from transformers import AutoTokenizer, AutoModel
 
-
-GENERATOR_MODEL="/
+
+GENERATOR_MODEL = "JosueElias/pipeline_generator_model"
+GENERATOR_TOKENIZER = "JosueElias/pipeline_generator_tokenizer"
+QUERY_MODEL = "JosueElias/pipeline_query_model"
+QUERY_TOKENIZER = "JosueElias/pipeline_query_tokenizer"
+
 DEVICE = "cpu" # cpu or cuda
 
 class Pipeline:
@@ -27,9 +31,9 @@ class Pipeline:
 
     def __init__(self):
         self.model = AutoModelForMultipleChoice.from_pretrained(GENERATOR_MODEL)
-        self.tokenizer = AutoTokenizer.from_pretrained(
+        self.tokenizer = AutoTokenizer.from_pretrained(GENERATOR_TOKENIZER)
         self.semModel = AutoModel.from_pretrained(QUERY_MODEL)
-        self.semTokenizer = AutoTokenizer.from_pretrained(
+        self.semTokenizer = AutoTokenizer.from_pretrained(QUERY_TOKENIZER)
         self.device = torch.device(DEVICE)
 
         self.semModel.to(self.device)
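With this commit the Space pulls all four artifacts (generator model, generator tokenizer, query model, query tokenizer) from the Hugging Face Hub instead of local paths. Below is a minimal standalone sketch of what the updated __init__ effectively does, assuming the four JosueElias/* repositories are publicly downloadable; AutoModelForMultipleChoice is imported explicitly here so the snippet is self-contained, since mi_clase.py presumably imports it higher up in the file.

# Minimal sketch: load the same four artifacts the updated __init__ uses.
# Assumes the JosueElias/* repos are reachable on the Hugging Face Hub.
import torch
from transformers import AutoTokenizer, AutoModel, AutoModelForMultipleChoice

GENERATOR_MODEL = "JosueElias/pipeline_generator_model"
GENERATOR_TOKENIZER = "JosueElias/pipeline_generator_tokenizer"
QUERY_MODEL = "JosueElias/pipeline_query_model"
QUERY_TOKENIZER = "JosueElias/pipeline_query_tokenizer"
DEVICE = "cpu"  # cpu or cuda

# Generator (multiple-choice head) and its tokenizer.
model = AutoModelForMultipleChoice.from_pretrained(GENERATOR_MODEL)
tokenizer = AutoTokenizer.from_pretrained(GENERATOR_TOKENIZER)

# Query encoder and its tokenizer.
sem_model = AutoModel.from_pretrained(QUERY_MODEL)
sem_tokenizer = AutoTokenizer.from_pretrained(QUERY_TOKENIZER)

# Only the query encoder is moved to the target device, mirroring __init__.
sem_model.to(torch.device(DEVICE))

Because the tokenizers live in Hub repos separate from their models, their repo ids have to be passed explicitly to AutoTokenizer.from_pretrained, which is exactly what the two new lines in __init__ do.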