app modified
app.py CHANGED
@@ -269,16 +269,16 @@ class ModeloDataset:
 
             self.model = RobertaForTokenClassification.from_pretrained("BSC-LT/roberta_model_for_anonimization")
             with torch.no_grad():
-                logits = model(input_ids).logits
+                logits = self.model(input_ids).logits
                 predicted_token_class_ids = logits.argmax(-1)
                 i=0
                 _predicted_tokens_classes=[]
                 for a in predicted_token_class_ids:
                     #_predicted_tokens_classes[i]=[model.config.id2label[t.item()] for t in predicted_token_class_ids[i]]
-                    _predicted_tokens_classes.append([model.config.id2label[t.item()] for t in predicted_token_class_ids[i]])
+                    _predicted_tokens_classes.append([self.model.config.id2label[t.item()] for t in predicted_token_class_ids[i]])
                     i=i+1
                 labels = predicted_token_class_ids
-                loss = model(input_ids, labels=labels).loss
+                loss = self.model(input_ids, labels=labels).loss
                 #print(round(loss.item(), 2))
         else:
 
@@ -296,16 +296,16 @@ class ModeloDataset:
 
             self.model = AutoModelForTokenClassification.from_pretrained("FacebookAI/xlm-roberta-large-finetuned-conll03-english")
             with torch.no_grad():
-                logits = model(input_ids).logits
+                logits = self.model(input_ids).logits
                 predicted_token_class_ids = logits.argmax(-1)
                 i=0
                 _predicted_tokens_classes=[]
                 for a in predicted_token_class_ids:
                     #_predicted_tokens_classes[i]=[model.config.id2label[t.item()] for t in predicted_token_class_ids[i]]
-                    _predicted_tokens_classes.append([model.config.id2label[t.item()] for t in predicted_token_class_ids[i]])
+                    _predicted_tokens_classes.append([self.model.config.id2label[t.item()] for t in predicted_token_class_ids[i]])
                     i=i+1
                 labels = predicted_token_class_ids
-                loss = model(input_ids, labels=labels).loss
+                loss = self.model(input_ids, labels=labels).loss
                 #print(round(loss.item(), 2))
         return ids, _predicted_tokens_classes
     def salida_texto( self,ids,pre_tokens):
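The change is identical in both hunks: the model is stored on the instance as self.model, but the old code then referenced the bare name model, which is undefined in that scope and raises NameError at inference time. Below is a minimal sketch of the corrected token-classification pattern the diff converges on; the method name predecir and the example sentence are illustrative assumptions, not taken from app.py.

# Minimal sketch of the fixed pattern (method name and input are illustrative).
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

class ModeloDataset:
    def predecir(self, input_ids):
        self.model = AutoModelForTokenClassification.from_pretrained(
            "FacebookAI/xlm-roberta-large-finetuned-conll03-english"
        )
        with torch.no_grad():
            # self.model, not the undefined bare name `model`
            logits = self.model(input_ids).logits
        # Best label id per token, shape (batch, seq_len)
        predicted_token_class_ids = logits.argmax(-1)
        # Map every predicted label id to its string label, row by row
        return [
            [self.model.config.id2label[t.item()] for t in row]
            for row in predicted_token_class_ids
        ]

tokenizer = AutoTokenizer.from_pretrained(
    "FacebookAI/xlm-roberta-large-finetuned-conll03-english"
)
ids = tokenizer("Angela Merkel visited Madrid", return_tensors="pt").input_ids
print(ModeloDataset().predecir(ids))

Note that the loss = self.model(input_ids, labels=labels).loss line in the diff feeds the model's own argmax predictions back in as labels; since its value only reaches a commented-out print, it reads as a leftover debugging check rather than part of the returned output.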