from typing import Any, Dict, List

from gliner import GLiNER


class EndpointHandler:
    def __init__(self, path: str = ""):
        # `path` is the local model directory provided by Inference Endpoints;
        # this handler loads the GLiNER checkpoint from the Hub instead.
        self.model = GLiNER.from_pretrained("urchade/gliner_large-v2.1")

    def __call__(self, data: Dict[str, Any]) -> List[List[Dict[str, Any]]]:
        # Expected payload: {"inputs": ["text", ...], "labels": ["entity type", ...]}
        inputs = data.pop("inputs", data)
        labels = data.pop("labels", None)

        # Accept a single string as well as a list of texts.
        if isinstance(inputs, str):
            inputs = [inputs]
        if labels is None:
            raise ValueError("Request must include 'labels': a list of entity types to extract.")

        # Run zero-shot NER over the batch of texts; one list of entity dicts per input text.
        prediction = self.model.batch_predict_entities(inputs, labels)
        return prediction
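

# ---------------------------------------------------------------------------
# Minimal local smoke test (illustrative sketch, not part of the deployed
# handler). The payload shape mirrors what the handler above expects; the
# example text and the "person"/"location" label set are assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    handler = EndpointHandler()
    payload = {
        "inputs": ["My name is Ada Lovelace and I live in London."],
        "labels": ["person", "location"],
    }
    # Each input text yields a list of entity dicts, e.g.
    # {"text": "Ada Lovelace", "label": "person", "start": ..., "end": ..., "score": ...}
    print(handler(payload))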