import ast

import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer

from src.utils.mapper import configmapper


@configmapper.map("datasets", "toxic_spans_crf_tokens")
class ToxicSpansCRFTokenDataset:
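    """Token-level dataset for toxic span detection with a CRF head.

    Loads the train/eval CSV files, tokenizes the text, and aligns the
    character-level span annotations to per-token labels and a prediction mask.
    """
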
    def __init__(self, config):
        self.config = config
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.config.model_checkpoint_name
        )
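        # train_files / eval_files are mappings from split name to CSV path,
        # passed directly to datasets.load_dataset("csv", data_files=...).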
        self.dataset = load_dataset("csv", data_files=dict(self.config.train_files))
        self.test_dataset = load_dataset("csv", data_files=dict(self.config.eval_files))

        self.tokenized_inputs = self.dataset.map(
            self.tokenize_and_align_labels_for_train, batched=True
        )
        self.test_tokenized_inputs = self.test_dataset.map(
            self.tokenize_for_test, batched=True
        )

    def tokenize_and_align_labels_for_train(self, examples):
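        """Tokenize a batch and project character-level toxic spans onto tokens.

        Label scheme: 1 = toxic token, 0 = non-toxic token, 2 = dummy label
        for [CLS] and other special positions.
        """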
        tokenized_inputs = self.tokenizer(
            examples["text"], **self.config.tokenizer_params
        )

        example_spans = []
        labels = []
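        # prediction_mask starts all-zero; it is set to 1 below for [CLS] and for
        # real tokens, and left 0 for padding/other special positions.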
        prediction_mask = np.zeros_like(np.array(tokenized_inputs["input_ids"]))
        offsets_mapping = tokenized_inputs["offset_mapping"]

        for i, offset_mapping in enumerate(offsets_mapping):
            labels.append([])

            # "spans" is stored as a stringified list of toxic character offsets;
            # parse it with ast.literal_eval instead of eval for safety.
            spans = ast.literal_eval(examples["spans"][i])
            example_spans.append(spans)
            cls_label = 2  ## DUMMY LABEL
            for j, offsets in enumerate(offset_mapping):
                if tokenized_inputs["input_ids"][i][j] in [
                    self.tokenizer.sep_token_id,
                    self.tokenizer.pad_token_id,
                ]:
                    tokenized_inputs["attention_mask"][i][j] = 0

                if tokenized_inputs["input_ids"][i][j] == self.tokenizer.cls_token_id:
                    labels[-1].append(cls_label)
                    prediction_mask[i][j] = 1

                elif offsets[0] == offsets[1] and offsets[0] == 0:
                    labels[-1].append(2)  ## DUMMY

                else:
                    toxic_offsets = [x in spans for x in range(offsets[0], offsets[1])]
                    ## If any part of the token falls inside a toxic span, mark it as toxic
                    if any(toxic_offsets):
                        labels[-1].append(1)
                    else:
                        labels[-1].append(0)
                    prediction_mask[i][j] = 1

        tokenized_inputs["labels"] = labels
        tokenized_inputs["prediction_mask"] = prediction_mask
        return tokenized_inputs

    def tokenize_for_test(self, examples):
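        """Tokenize a test batch; labels are zero placeholders and the
        prediction mask covers every token except [SEP]/padding."""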
        tokenized_inputs = self.tokenizer(
            examples["text"], **self.config.tokenizer_params
        )
        prediction_mask = np.zeros_like(np.array(tokenized_inputs["input_ids"]))
        labels = np.zeros_like(np.array(tokenized_inputs["input_ids"]))
        
        offsets_mapping = tokenized_inputs["offset_mapping"]

        for i, offset_mapping in enumerate(offsets_mapping):
            for j, offsets in enumerate(offset_mapping):
                if tokenized_inputs["input_ids"][i][j] in [
                    self.tokenizer.sep_token_id,
                    self.tokenizer.pad_token_id,
                ]:
                    tokenized_inputs["attention_mask"][i][j] = 0
                else:
                    prediction_mask[i][j] = 1
        
        tokenized_inputs["prediction_mask"] = prediction_mask
        tokenized_inputs["labels"] = labels
        return tokenized_inputs
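

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes that
    # configmapper.map simply registers and returns the class, and that the
    # config object exposes model_checkpoint_name, train_files, eval_files and
    # tokenizer_params; tokenizer_params must request offset mappings and pad
    # to a fixed length so np.zeros_like sees a rectangular array. The file
    # paths and checkpoint name below are hypothetical placeholders.
    from types import SimpleNamespace

    config = SimpleNamespace(
        model_checkpoint_name="bert-base-uncased",
        train_files={"train": "data/tsd_train.csv"},
        eval_files={"test": "data/tsd_test.csv"},
        tokenizer_params={
            "truncation": True,
            "padding": "max_length",
            "max_length": 128,
            "return_offsets_mapping": True,
        },
    )

    dataset = ToxicSpansCRFTokenDataset(config)
    print(dataset.tokenized_inputs)
    print(dataset.test_tokenized_inputs)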