parquet-converter committed
Commit ecd6696 · 1 Parent(s): 311ae00

Update parquet files

README.md DELETED
@@ -1,1797 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - other
4
- language:
5
- - as
6
- - bn
7
- - en
8
- - gu
9
- - hi
10
- - kn
11
- - ml
12
- - mr
13
- - or
14
- - pa
15
- - ta
16
- - te
17
- language_creators:
18
- - found
19
- license:
20
- - other
21
- multilinguality:
22
- - multilingual
23
- pretty_name: IndicGLUE
24
- size_categories:
25
- - 100K<n<1M
26
- source_datasets:
27
- - extended|other
28
- task_categories:
29
- - text-classification
30
- - token-classification
31
- - multiple-choice
32
- task_ids:
33
- - topic-classification
34
- - natural-language-inference
35
- - sentiment-analysis
36
- - semantic-similarity-scoring
37
- - named-entity-recognition
38
- - multiple-choice-qa
39
- paperswithcode_id: null
40
- tags:
41
- - discourse-mode-classification
42
- - paraphrase-identification
43
- - cross-lingual-similarity
44
- - headline-classification
45
- dataset_info:
46
- - config_name: wnli.en
47
- features:
48
- - name: hypothesis
49
- dtype: string
50
- - name: premise
51
- dtype: string
52
- - name: label
53
- dtype:
54
- class_label:
55
- names:
56
- 0: not_entailment
57
- 1: entailment
58
- 2: None
59
- splits:
60
- - name: train
61
- num_bytes: 104577
62
- num_examples: 635
63
- - name: validation
64
- num_bytes: 11886
65
- num_examples: 71
66
- - name: test
67
- num_bytes: 37305
68
- num_examples: 146
69
- download_size: 591249
70
- dataset_size: 153768
71
- - config_name: wnli.hi
72
- features:
73
- - name: hypothesis
74
- dtype: string
75
- - name: premise
76
- dtype: string
77
- - name: label
78
- dtype:
79
- class_label:
80
- names:
81
- 0: not_entailment
82
- 1: entailment
83
- 2: None
84
- splits:
85
- - name: train
86
- num_bytes: 253342
87
- num_examples: 635
88
- - name: validation
89
- num_bytes: 28684
90
- num_examples: 71
91
- - name: test
92
- num_bytes: 90831
93
- num_examples: 146
94
- download_size: 591249
95
- dataset_size: 372857
96
- - config_name: wnli.gu
97
- features:
98
- - name: hypothesis
99
- dtype: string
100
- - name: premise
101
- dtype: string
102
- - name: label
103
- dtype:
104
- class_label:
105
- names:
106
- 0: not_entailment
107
- 1: entailment
108
- 2: None
109
- splits:
110
- - name: train
111
- num_bytes: 251562
112
- num_examples: 635
113
- - name: validation
114
- num_bytes: 28183
115
- num_examples: 71
116
- - name: test
117
- num_bytes: 94586
118
- num_examples: 146
119
- download_size: 591249
120
- dataset_size: 374331
121
- - config_name: wnli.mr
122
- features:
123
- - name: hypothesis
124
- dtype: string
125
- - name: premise
126
- dtype: string
127
- - name: label
128
- dtype:
129
- class_label:
130
- names:
131
- 0: not_entailment
132
- 1: entailment
133
- 2: None
134
- splits:
135
- - name: train
136
- num_bytes: 256657
137
- num_examples: 635
138
- - name: validation
139
- num_bytes: 29226
140
- num_examples: 71
141
- - name: test
142
- num_bytes: 97136
143
- num_examples: 146
144
- download_size: 591249
145
- dataset_size: 383019
146
- - config_name: copa.en
147
- features:
148
- - name: premise
149
- dtype: string
150
- - name: choice1
151
- dtype: string
152
- - name: choice2
153
- dtype: string
154
- - name: question
155
- dtype: string
156
- - name: label
157
- dtype: int32
158
- splits:
159
- - name: train
160
- num_bytes: 46049
161
- num_examples: 400
162
- - name: validation
163
- num_bytes: 11695
164
- num_examples: 100
165
- - name: test
166
- num_bytes: 55862
167
- num_examples: 500
168
- download_size: 757679
169
- dataset_size: 113606
170
- - config_name: copa.hi
171
- features:
172
- - name: premise
173
- dtype: string
174
- - name: choice1
175
- dtype: string
176
- - name: choice2
177
- dtype: string
178
- - name: question
179
- dtype: string
180
- - name: label
181
- dtype: int32
182
- splits:
183
- - name: train
184
- num_bytes: 93392
185
- num_examples: 362
186
- - name: validation
187
- num_bytes: 23575
188
- num_examples: 88
189
- - name: test
190
- num_bytes: 112846
191
- num_examples: 449
192
- download_size: 757679
193
- dataset_size: 229813
194
- - config_name: copa.gu
195
- features:
196
- - name: premise
197
- dtype: string
198
- - name: choice1
199
- dtype: string
200
- - name: choice2
201
- dtype: string
202
- - name: question
203
- dtype: string
204
- - name: label
205
- dtype: int32
206
- splits:
207
- - name: train
208
- num_bytes: 92113
209
- num_examples: 362
210
- - name: validation
211
- num_bytes: 23466
212
- num_examples: 88
213
- - name: test
214
- num_bytes: 110013
215
- num_examples: 448
216
- download_size: 757679
217
- dataset_size: 225592
218
- - config_name: copa.mr
219
- features:
220
- - name: premise
221
- dtype: string
222
- - name: choice1
223
- dtype: string
224
- - name: choice2
225
- dtype: string
226
- - name: question
227
- dtype: string
228
- - name: label
229
- dtype: int32
230
- splits:
231
- - name: train
232
- num_bytes: 93457
233
- num_examples: 362
234
- - name: validation
235
- num_bytes: 23890
236
- num_examples: 88
237
- - name: test
238
- num_bytes: 112071
239
- num_examples: 449
240
- download_size: 757679
241
- dataset_size: 229418
242
- - config_name: sna.bn
243
- features:
244
- - name: text
245
- dtype: string
246
- - name: label
247
- dtype:
248
- class_label:
249
- names:
250
- 0: kolkata
251
- 1: state
252
- 2: national
253
- 3: sports
254
- 4: entertainment
255
- 5: international
256
- splits:
257
- - name: train
258
- num_bytes: 46070054
259
- num_examples: 11284
260
- - name: validation
261
- num_bytes: 5648130
262
- num_examples: 1411
263
- - name: test
264
- num_bytes: 5799983
265
- num_examples: 1411
266
- download_size: 11803096
267
- dataset_size: 57518167
268
- - config_name: csqa.as
269
- features:
270
- - name: question
271
- dtype: string
272
- - name: answer
273
- dtype: string
274
- - name: category
275
- dtype: string
276
- - name: title
277
- dtype: string
278
- - name: options
279
- sequence: string
280
- - name: out_of_context_options
281
- sequence: string
282
- splits:
283
- - name: test
284
- num_bytes: 3800555
285
- num_examples: 2942
286
- download_size: 65099316
287
- dataset_size: 3800555
288
- - config_name: csqa.bn
289
- features:
290
- - name: question
291
- dtype: string
292
- - name: answer
293
- dtype: string
294
- - name: category
295
- dtype: string
296
- - name: title
297
- dtype: string
298
- - name: options
299
- sequence: string
300
- - name: out_of_context_options
301
- sequence: string
302
- splits:
303
- - name: test
304
- num_bytes: 54671146
305
- num_examples: 38845
306
- download_size: 65099316
307
- dataset_size: 54671146
308
- - config_name: csqa.gu
309
- features:
310
- - name: question
311
- dtype: string
312
- - name: answer
313
- dtype: string
314
- - name: category
315
- dtype: string
316
- - name: title
317
- dtype: string
318
- - name: options
319
- sequence: string
320
- - name: out_of_context_options
321
- sequence: string
322
- splits:
323
- - name: test
324
- num_bytes: 29131703
325
- num_examples: 22861
326
- download_size: 65099316
327
- dataset_size: 29131703
328
- - config_name: csqa.hi
329
- features:
330
- - name: question
331
- dtype: string
332
- - name: answer
333
- dtype: string
334
- - name: category
335
- dtype: string
336
- - name: title
337
- dtype: string
338
- - name: options
339
- sequence: string
340
- - name: out_of_context_options
341
- sequence: string
342
- splits:
343
- - name: test
344
- num_bytes: 40409475
345
- num_examples: 35140
346
- download_size: 65099316
347
- dataset_size: 40409475
348
- - config_name: csqa.kn
349
- features:
350
- - name: question
351
- dtype: string
352
- - name: answer
353
- dtype: string
354
- - name: category
355
- dtype: string
356
- - name: title
357
- dtype: string
358
- - name: options
359
- sequence: string
360
- - name: out_of_context_options
361
- sequence: string
362
- splits:
363
- - name: test
364
- num_bytes: 21199880
365
- num_examples: 13666
366
- download_size: 65099316
367
- dataset_size: 21199880
368
- - config_name: csqa.ml
369
- features:
370
- - name: question
371
- dtype: string
372
- - name: answer
373
- dtype: string
374
- - name: category
375
- dtype: string
376
- - name: title
377
- dtype: string
378
- - name: options
379
- sequence: string
380
- - name: out_of_context_options
381
- sequence: string
382
- splits:
383
- - name: test
384
- num_bytes: 47220932
385
- num_examples: 26537
386
- download_size: 65099316
387
- dataset_size: 47220932
388
- - config_name: csqa.mr
389
- features:
390
- - name: question
391
- dtype: string
392
- - name: answer
393
- dtype: string
394
- - name: category
395
- dtype: string
396
- - name: title
397
- dtype: string
398
- - name: options
399
- sequence: string
400
- - name: out_of_context_options
401
- sequence: string
402
- splits:
403
- - name: test
404
- num_bytes: 13667238
405
- num_examples: 11370
406
- download_size: 65099316
407
- dataset_size: 13667238
408
- - config_name: csqa.or
409
- features:
410
- - name: question
411
- dtype: string
412
- - name: answer
413
- dtype: string
414
- - name: category
415
- dtype: string
416
- - name: title
417
- dtype: string
418
- - name: options
419
- sequence: string
420
- - name: out_of_context_options
421
- sequence: string
422
- splits:
423
- - name: test
424
- num_bytes: 2562397
425
- num_examples: 1975
426
- download_size: 65099316
427
- dataset_size: 2562397
428
- - config_name: csqa.pa
429
- features:
430
- - name: question
431
- dtype: string
432
- - name: answer
433
- dtype: string
434
- - name: category
435
- dtype: string
436
- - name: title
437
- dtype: string
438
- - name: options
439
- sequence: string
440
- - name: out_of_context_options
441
- sequence: string
442
- splits:
443
- - name: test
444
- num_bytes: 5806129
445
- num_examples: 5667
446
- download_size: 65099316
447
- dataset_size: 5806129
448
- - config_name: csqa.ta
449
- features:
450
- - name: question
451
- dtype: string
452
- - name: answer
453
- dtype: string
454
- - name: category
455
- dtype: string
456
- - name: title
457
- dtype: string
458
- - name: options
459
- sequence: string
460
- - name: out_of_context_options
461
- sequence: string
462
- splits:
463
- - name: test
464
- num_bytes: 61868609
465
- num_examples: 38590
466
- download_size: 65099316
467
- dataset_size: 61868609
468
- - config_name: csqa.te
469
- features:
470
- - name: question
471
- dtype: string
472
- - name: answer
473
- dtype: string
474
- - name: category
475
- dtype: string
476
- - name: title
477
- dtype: string
478
- - name: options
479
- sequence: string
480
- - name: out_of_context_options
481
- sequence: string
482
- splits:
483
- - name: test
484
- num_bytes: 58785157
485
- num_examples: 41338
486
- download_size: 65099316
487
- dataset_size: 58785157
488
- - config_name: wstp.as
489
- features:
490
- - name: sectionText
491
- dtype: string
492
- - name: correctTitle
493
- dtype: string
494
- - name: titleA
495
- dtype: string
496
- - name: titleB
497
- dtype: string
498
- - name: titleC
499
- dtype: string
500
- - name: titleD
501
- dtype: string
502
- - name: url
503
- dtype: string
504
- splits:
505
- - name: train
506
- num_bytes: 13581364
507
- num_examples: 5000
508
- - name: validation
509
- num_bytes: 1698996
510
- num_examples: 625
511
- - name: test
512
- num_bytes: 1697678
513
- num_examples: 626
514
- download_size: 242008091
515
- dataset_size: 16978038
516
- - config_name: wstp.bn
517
- features:
518
- - name: sectionText
519
- dtype: string
520
- - name: correctTitle
521
- dtype: string
522
- - name: titleA
523
- dtype: string
524
- - name: titleB
525
- dtype: string
526
- - name: titleC
527
- dtype: string
528
- - name: titleD
529
- dtype: string
530
- - name: url
531
- dtype: string
532
- splits:
533
- - name: train
534
- num_bytes: 143340597
535
- num_examples: 47580
536
- - name: validation
537
- num_bytes: 17759264
538
- num_examples: 5947
539
- - name: test
540
- num_bytes: 17633893
541
- num_examples: 5948
542
- download_size: 242008091
543
- dataset_size: 178733754
544
- - config_name: wstp.gu
545
- features:
546
- - name: sectionText
547
- dtype: string
548
- - name: correctTitle
549
- dtype: string
550
- - name: titleA
551
- dtype: string
552
- - name: titleB
553
- dtype: string
554
- - name: titleC
555
- dtype: string
556
- - name: titleD
557
- dtype: string
558
- - name: url
559
- dtype: string
560
- splits:
561
- - name: train
562
- num_bytes: 39353520
563
- num_examples: 10004
564
- - name: validation
565
- num_bytes: 4887780
566
- num_examples: 1251
567
- - name: test
568
- num_bytes: 4699186
569
- num_examples: 1251
570
- download_size: 242008091
571
- dataset_size: 48940486
572
- - config_name: wstp.hi
573
- features:
574
- - name: sectionText
575
- dtype: string
576
- - name: correctTitle
577
- dtype: string
578
- - name: titleA
579
- dtype: string
580
- - name: titleB
581
- dtype: string
582
- - name: titleC
583
- dtype: string
584
- - name: titleD
585
- dtype: string
586
- - name: url
587
- dtype: string
588
- splits:
589
- - name: train
590
- num_bytes: 158529718
591
- num_examples: 44069
592
- - name: validation
593
- num_bytes: 19371932
594
- num_examples: 5509
595
- - name: test
596
- num_bytes: 19593029
597
- num_examples: 5509
598
- download_size: 242008091
599
- dataset_size: 197494679
600
- - config_name: wstp.kn
601
- features:
602
- - name: sectionText
603
- dtype: string
604
- - name: correctTitle
605
- dtype: string
606
- - name: titleA
607
- dtype: string
608
- - name: titleB
609
- dtype: string
610
- - name: titleC
611
- dtype: string
612
- - name: titleD
613
- dtype: string
614
- - name: url
615
- dtype: string
616
- splits:
617
- - name: train
618
- num_bytes: 139950425
619
- num_examples: 35379
620
- - name: validation
621
- num_bytes: 17789810
622
- num_examples: 4422
623
- - name: test
624
- num_bytes: 17897059
625
- num_examples: 4423
626
- download_size: 242008091
627
- dataset_size: 175637294
628
- - config_name: wstp.ml
629
- features:
630
- - name: sectionText
631
- dtype: string
632
- - name: correctTitle
633
- dtype: string
634
- - name: titleA
635
- dtype: string
636
- - name: titleB
637
- dtype: string
638
- - name: titleC
639
- dtype: string
640
- - name: titleD
641
- dtype: string
642
- - name: url
643
- dtype: string
644
- splits:
645
- - name: train
646
- num_bytes: 88360588
647
- num_examples: 27527
648
- - name: validation
649
- num_bytes: 11193368
650
- num_examples: 3441
651
- - name: test
652
- num_bytes: 11150942
653
- num_examples: 3441
654
- download_size: 242008091
655
- dataset_size: 110704898
656
- - config_name: wstp.mr
657
- features:
658
- - name: sectionText
659
- dtype: string
660
- - name: correctTitle
661
- dtype: string
662
- - name: titleA
663
- dtype: string
664
- - name: titleB
665
- dtype: string
666
- - name: titleC
667
- dtype: string
668
- - name: titleD
669
- dtype: string
670
- - name: url
671
- dtype: string
672
- splits:
673
- - name: train
674
- num_bytes: 28302397
675
- num_examples: 10446
676
- - name: validation
677
- num_bytes: 3328826
678
- num_examples: 1306
679
- - name: test
680
- num_bytes: 3631712
681
- num_examples: 1306
682
- download_size: 242008091
683
- dataset_size: 35262935
684
- - config_name: wstp.or
685
- features:
686
- - name: sectionText
687
- dtype: string
688
- - name: correctTitle
689
- dtype: string
690
- - name: titleA
691
- dtype: string
692
- - name: titleB
693
- dtype: string
694
- - name: titleC
695
- dtype: string
696
- - name: titleD
697
- dtype: string
698
- - name: url
699
- dtype: string
700
- splits:
701
- - name: train
702
- num_bytes: 10900034
703
- num_examples: 4015
704
- - name: validation
705
- num_bytes: 1264963
706
- num_examples: 502
707
- - name: test
708
- num_bytes: 1344680
709
- num_examples: 502
710
- download_size: 242008091
711
- dataset_size: 13509677
712
- - config_name: wstp.pa
713
- features:
714
- - name: sectionText
715
- dtype: string
716
- - name: correctTitle
717
- dtype: string
718
- - name: titleA
719
- dtype: string
720
- - name: titleB
721
- dtype: string
722
- - name: titleC
723
- dtype: string
724
- - name: titleD
725
- dtype: string
726
- - name: url
727
- dtype: string
728
- splits:
729
- - name: train
730
- num_bytes: 22189758
731
- num_examples: 8772
732
- - name: validation
733
- num_bytes: 2789214
734
- num_examples: 1097
735
- - name: test
736
- num_bytes: 2685795
737
- num_examples: 1097
738
- download_size: 242008091
739
- dataset_size: 27664767
740
- - config_name: wstp.ta
741
- features:
742
- - name: sectionText
743
- dtype: string
744
- - name: correctTitle
745
- dtype: string
746
- - name: titleA
747
- dtype: string
748
- - name: titleB
749
- dtype: string
750
- - name: titleC
751
- dtype: string
752
- - name: titleD
753
- dtype: string
754
- - name: url
755
- dtype: string
756
- splits:
757
- - name: train
758
- num_bytes: 151929358
759
- num_examples: 48940
760
- - name: validation
761
- num_bytes: 18817195
762
- num_examples: 6117
763
- - name: test
764
- num_bytes: 18815099
765
- num_examples: 6118
766
- download_size: 242008091
767
- dataset_size: 189561652
768
- - config_name: wstp.te
769
- features:
770
- - name: sectionText
771
- dtype: string
772
- - name: correctTitle
773
- dtype: string
774
- - name: titleA
775
- dtype: string
776
- - name: titleB
777
- dtype: string
778
- - name: titleC
779
- dtype: string
780
- - name: titleD
781
- dtype: string
782
- - name: url
783
- dtype: string
784
- splits:
785
- - name: train
786
- num_bytes: 151696915
787
- num_examples: 80000
788
- - name: validation
789
- num_bytes: 19003197
790
- num_examples: 10000
791
- - name: test
792
- num_bytes: 18991941
793
- num_examples: 10000
794
- download_size: 242008091
795
- dataset_size: 189692053
796
- - config_name: inltkh.gu
797
- features:
798
- - name: text
799
- dtype: string
800
- - name: label
801
- dtype:
802
- class_label:
803
- names:
804
- 0: entertainment
805
- 1: business
806
- 2: tech
807
- 3: sports
808
- 4: state
809
- 5: spirituality
810
- 6: tamil-cinema
811
- 7: positive
812
- 8: negative
813
- 9: neutral
814
- splits:
815
- - name: train
816
- num_bytes: 883067
817
- num_examples: 5269
818
- - name: validation
819
- num_bytes: 111205
820
- num_examples: 659
821
- - name: test
822
- num_bytes: 110761
823
- num_examples: 659
824
- download_size: 2054771
825
- dataset_size: 1105033
826
- - config_name: inltkh.ml
827
- features:
828
- - name: text
829
- dtype: string
830
- - name: label
831
- dtype:
832
- class_label:
833
- names:
834
- 0: entertainment
835
- 1: business
836
- 2: tech
837
- 3: sports
838
- 4: state
839
- 5: spirituality
840
- 6: tamil-cinema
841
- 7: positive
842
- 8: negative
843
- 9: neutral
844
- splits:
845
- - name: train
846
- num_bytes: 1108149
847
- num_examples: 5036
848
- - name: validation
849
- num_bytes: 140059
850
- num_examples: 630
851
- - name: test
852
- num_bytes: 138851
853
- num_examples: 630
854
- download_size: 2054771
855
- dataset_size: 1387059
856
- - config_name: inltkh.mr
857
- features:
858
- - name: text
859
- dtype: string
860
- - name: label
861
- dtype:
862
- class_label:
863
- names:
864
- 0: entertainment
865
- 1: business
866
- 2: tech
867
- 3: sports
868
- 4: state
869
- 5: spirituality
870
- 6: tamil-cinema
871
- 7: positive
872
- 8: negative
873
- 9: neutral
874
- splits:
875
- - name: train
876
- num_bytes: 1462618
877
- num_examples: 9672
878
- - name: validation
879
- num_bytes: 180310
880
- num_examples: 1210
881
- - name: test
882
- num_bytes: 180562
883
- num_examples: 1210
884
- download_size: 2054771
885
- dataset_size: 1823490
886
- - config_name: inltkh.ta
887
- features:
888
- - name: text
889
- dtype: string
890
- - name: label
891
- dtype:
892
- class_label:
893
- names:
894
- 0: entertainment
895
- 1: business
896
- 2: tech
897
- 3: sports
898
- 4: state
899
- 5: spirituality
900
- 6: tamil-cinema
901
- 7: positive
902
- 8: negative
903
- 9: neutral
904
- splits:
905
- - name: train
906
- num_bytes: 2659573
907
- num_examples: 5346
908
- - name: validation
909
- num_bytes: 316087
910
- num_examples: 669
911
- - name: test
912
- num_bytes: 320469
913
- num_examples: 669
914
- download_size: 2054771
915
- dataset_size: 3296129
916
- - config_name: inltkh.te
917
- features:
918
- - name: text
919
- dtype: string
920
- - name: label
921
- dtype:
922
- class_label:
923
- names:
924
- 0: entertainment
925
- 1: business
926
- 2: tech
927
- 3: sports
928
- 4: state
929
- 5: spirituality
930
- 6: tamil-cinema
931
- 7: positive
932
- 8: negative
933
- 9: neutral
934
- splits:
935
- - name: train
936
- num_bytes: 1361671
937
- num_examples: 4328
938
- - name: validation
939
- num_bytes: 170475
940
- num_examples: 541
941
- - name: test
942
- num_bytes: 173153
943
- num_examples: 541
944
- download_size: 2054771
945
- dataset_size: 1705299
946
- - config_name: bbca.hi
947
- features:
948
- - name: label
949
- dtype: string
950
- - name: text
951
- dtype: string
952
- splits:
953
- - name: train
954
- num_bytes: 22126213
955
- num_examples: 3467
956
- - name: test
957
- num_bytes: 5501156
958
- num_examples: 866
959
- download_size: 5770136
960
- dataset_size: 27627369
961
- - config_name: cvit-mkb-clsr.en-bn
962
- features:
963
- - name: sentence1
964
- dtype: string
965
- - name: sentence2
966
- dtype: string
967
- splits:
968
- - name: test
969
- num_bytes: 2002009
970
- num_examples: 5522
971
- download_size: 3702442
972
- dataset_size: 2002009
973
- - config_name: cvit-mkb-clsr.en-gu
974
- features:
975
- - name: sentence1
976
- dtype: string
977
- - name: sentence2
978
- dtype: string
979
- splits:
980
- - name: test
981
- num_bytes: 2316311
982
- num_examples: 6463
983
- download_size: 3702442
984
- dataset_size: 2316311
985
- - config_name: cvit-mkb-clsr.en-hi
986
- features:
987
- - name: sentence1
988
- dtype: string
989
- - name: sentence2
990
- dtype: string
991
- splits:
992
- - name: test
993
- num_bytes: 1866335
994
- num_examples: 5169
995
- download_size: 3702442
996
- dataset_size: 1866335
997
- - config_name: cvit-mkb-clsr.en-ml
998
- features:
999
- - name: sentence1
1000
- dtype: string
1001
- - name: sentence2
1002
- dtype: string
1003
- splits:
1004
- - name: test
1005
- num_bytes: 1999869
1006
- num_examples: 4886
1007
- download_size: 3702442
1008
- dataset_size: 1999869
1009
- - config_name: cvit-mkb-clsr.en-mr
1010
- features:
1011
- - name: sentence1
1012
- dtype: string
1013
- - name: sentence2
1014
- dtype: string
1015
- splits:
1016
- - name: test
1017
- num_bytes: 2142129
1018
- num_examples: 5760
1019
- download_size: 3702442
1020
- dataset_size: 2142129
1021
- - config_name: cvit-mkb-clsr.en-or
1022
- features:
1023
- - name: sentence1
1024
- dtype: string
1025
- - name: sentence2
1026
- dtype: string
1027
- splits:
1028
- - name: test
1029
- num_bytes: 276385
1030
- num_examples: 752
1031
- download_size: 3702442
1032
- dataset_size: 276385
1033
- - config_name: cvit-mkb-clsr.en-ta
1034
- features:
1035
- - name: sentence1
1036
- dtype: string
1037
- - name: sentence2
1038
- dtype: string
1039
- splits:
1040
- - name: test
1041
- num_bytes: 2576460
1042
- num_examples: 5637
1043
- download_size: 3702442
1044
- dataset_size: 2576460
1045
- - config_name: cvit-mkb-clsr.en-te
1046
- features:
1047
- - name: sentence1
1048
- dtype: string
1049
- - name: sentence2
1050
- dtype: string
1051
- splits:
1052
- - name: test
1053
- num_bytes: 1781235
1054
- num_examples: 5049
1055
- download_size: 3702442
1056
- dataset_size: 1781235
1057
- - config_name: cvit-mkb-clsr.en-ur
1058
- features:
1059
- - name: sentence1
1060
- dtype: string
1061
- - name: sentence2
1062
- dtype: string
1063
- splits:
1064
- - name: test
1065
- num_bytes: 290450
1066
- num_examples: 1006
1067
- download_size: 3702442
1068
- dataset_size: 290450
1069
- - config_name: iitp-mr.hi
1070
- features:
1071
- - name: text
1072
- dtype: string
1073
- - name: label
1074
- dtype:
1075
- class_label:
1076
- names:
1077
- 0: negative
1078
- 1: neutral
1079
- 2: positive
1080
- splits:
1081
- - name: train
1082
- num_bytes: 6704909
1083
- num_examples: 2480
1084
- - name: validation
1085
- num_bytes: 822222
1086
- num_examples: 310
1087
- - name: test
1088
- num_bytes: 702377
1089
- num_examples: 310
1090
- download_size: 1742048
1091
- dataset_size: 8229508
1092
- - config_name: iitp-pr.hi
1093
- features:
1094
- - name: text
1095
- dtype: string
1096
- - name: label
1097
- dtype:
1098
- class_label:
1099
- names:
1100
- 0: negative
1101
- 1: neutral
1102
- 2: positive
1103
- splits:
1104
- - name: train
1105
- num_bytes: 945593
1106
- num_examples: 4182
1107
- - name: validation
1108
- num_bytes: 120104
1109
- num_examples: 523
1110
- - name: test
1111
- num_bytes: 121914
1112
- num_examples: 523
1113
- download_size: 266545
1114
- dataset_size: 1187611
1115
- - config_name: actsa-sc.te
1116
- features:
1117
- - name: text
1118
- dtype: string
1119
- - name: label
1120
- dtype:
1121
- class_label:
1122
- names:
1123
- 0: positive
1124
- 1: negative
1125
- splits:
1126
- - name: train
1127
- num_bytes: 1370911
1128
- num_examples: 4328
1129
- - name: validation
1130
- num_bytes: 166093
1131
- num_examples: 541
1132
- - name: test
1133
- num_bytes: 168295
1134
- num_examples: 541
1135
- download_size: 378882
1136
- dataset_size: 1705299
1137
- - config_name: md.hi
1138
- features:
1139
- - name: sentence
1140
- dtype: string
1141
- - name: discourse_mode
1142
- dtype: string
1143
- - name: story_number
1144
- dtype: int32
1145
- - name: id
1146
- dtype: int32
1147
- splits:
1148
- - name: train
1149
- num_bytes: 1672117
1150
- num_examples: 7974
1151
- - name: validation
1152
- num_bytes: 211195
1153
- num_examples: 997
1154
- - name: test
1155
- num_bytes: 210183
1156
- num_examples: 997
1157
- download_size: 1048441
1158
- dataset_size: 2093495
1159
- - config_name: wiki-ner.as
1160
- features:
1161
- - name: tokens
1162
- sequence: string
1163
- - name: ner_tags
1164
- sequence:
1165
- class_label:
1166
- names:
1167
- 0: B-LOC
1168
- 1: B-ORG
1169
- 2: B-PER
1170
- 3: I-LOC
1171
- 4: I-ORG
1172
- 5: I-PER
1173
- 6: O
1174
- - name: additional_info
1175
- sequence:
1176
- sequence: string
1177
- splits:
1178
- - name: train
1179
- num_bytes: 375007
1180
- num_examples: 1021
1181
- - name: validation
1182
- num_bytes: 49336
1183
- num_examples: 157
1184
- - name: test
1185
- num_bytes: 50480
1186
- num_examples: 160
1187
- download_size: 5980272
1188
- dataset_size: 474823
1189
- - config_name: wiki-ner.bn
1190
- features:
1191
- - name: tokens
1192
- sequence: string
1193
- - name: ner_tags
1194
- sequence:
1195
- class_label:
1196
- names:
1197
- 0: B-LOC
1198
- 1: B-ORG
1199
- 2: B-PER
1200
- 3: I-LOC
1201
- 4: I-ORG
1202
- 5: I-PER
1203
- 6: O
1204
- - name: additional_info
1205
- sequence:
1206
- sequence: string
1207
- splits:
1208
- - name: train
1209
- num_bytes: 7502896
1210
- num_examples: 20223
1211
- - name: validation
1212
- num_bytes: 988707
1213
- num_examples: 2985
1214
- - name: test
1215
- num_bytes: 985965
1216
- num_examples: 2690
1217
- download_size: 5980272
1218
- dataset_size: 9477568
1219
- - config_name: wiki-ner.gu
1220
- features:
1221
- - name: tokens
1222
- sequence: string
1223
- - name: ner_tags
1224
- sequence:
1225
- class_label:
1226
- names:
1227
- 0: B-LOC
1228
- 1: B-ORG
1229
- 2: B-PER
1230
- 3: I-LOC
1231
- 4: I-ORG
1232
- 5: I-PER
1233
- 6: O
1234
- - name: additional_info
1235
- sequence:
1236
- sequence: string
1237
- splits:
1238
- - name: train
1239
- num_bytes: 1571612
1240
- num_examples: 2343
1241
- - name: validation
1242
- num_bytes: 192828
1243
- num_examples: 297
1244
- - name: test
1245
- num_bytes: 197901
1246
- num_examples: 255
1247
- download_size: 5980272
1248
- dataset_size: 1962341
1249
- - config_name: wiki-ner.hi
1250
- features:
1251
- - name: tokens
1252
- sequence: string
1253
- - name: ner_tags
1254
- sequence:
1255
- class_label:
1256
- names:
1257
- 0: B-LOC
1258
- 1: B-ORG
1259
- 2: B-PER
1260
- 3: I-LOC
1261
- 4: I-ORG
1262
- 5: I-PER
1263
- 6: O
1264
- - name: additional_info
1265
- sequence:
1266
- sequence: string
1267
- splits:
1268
- - name: train
1269
- num_bytes: 3762529
1270
- num_examples: 9463
1271
- - name: validation
1272
- num_bytes: 468702
1273
- num_examples: 1114
1274
- - name: test
1275
- num_bytes: 475277
1276
- num_examples: 1256
1277
- download_size: 5980272
1278
- dataset_size: 4706508
1279
- - config_name: wiki-ner.kn
1280
- features:
1281
- - name: tokens
1282
- sequence: string
1283
- - name: ner_tags
1284
- sequence:
1285
- class_label:
1286
- names:
1287
- 0: B-LOC
1288
- 1: B-ORG
1289
- 2: B-PER
1290
- 3: I-LOC
1291
- 4: I-ORG
1292
- 5: I-PER
1293
- 6: O
1294
- - name: additional_info
1295
- sequence:
1296
- sequence: string
1297
- splits:
1298
- - name: train
1299
- num_bytes: 1352051
1300
- num_examples: 2679
1301
- - name: validation
1302
- num_bytes: 179562
1303
- num_examples: 412
1304
- - name: test
1305
- num_bytes: 180815
1306
- num_examples: 476
1307
- download_size: 5980272
1308
- dataset_size: 1712428
1309
- - config_name: wiki-ner.ml
1310
- features:
1311
- - name: tokens
1312
- sequence: string
1313
- - name: ner_tags
1314
- sequence:
1315
- class_label:
1316
- names:
1317
- 0: B-LOC
1318
- 1: B-ORG
1319
- 2: B-PER
1320
- 3: I-LOC
1321
- 4: I-ORG
1322
- 5: I-PER
1323
- 6: O
1324
- - name: additional_info
1325
- sequence:
1326
- sequence: string
1327
- splits:
1328
- - name: train
1329
- num_bytes: 7678935
1330
- num_examples: 15620
1331
- - name: validation
1332
- num_bytes: 969971
1333
- num_examples: 2067
1334
- - name: test
1335
- num_bytes: 991126
1336
- num_examples: 2042
1337
- download_size: 5980272
1338
- dataset_size: 9640032
1339
- - config_name: wiki-ner.mr
1340
- features:
1341
- - name: tokens
1342
- sequence: string
1343
- - name: ner_tags
1344
- sequence:
1345
- class_label:
1346
- names:
1347
- 0: B-LOC
1348
- 1: B-ORG
1349
- 2: B-PER
1350
- 3: I-LOC
1351
- 4: I-ORG
1352
- 5: I-PER
1353
- 6: O
1354
- - name: additional_info
1355
- sequence:
1356
- sequence: string
1357
- splits:
1358
- - name: train
1359
- num_bytes: 5431537
1360
- num_examples: 12151
1361
- - name: validation
1362
- num_bytes: 701661
1363
- num_examples: 1498
1364
- - name: test
1365
- num_bytes: 655706
1366
- num_examples: 1329
1367
- download_size: 5980272
1368
- dataset_size: 6788904
1369
- - config_name: wiki-ner.or
1370
- features:
1371
- - name: tokens
1372
- sequence: string
1373
- - name: ner_tags
1374
- sequence:
1375
- class_label:
1376
- names:
1377
- 0: B-LOC
1378
- 1: B-ORG
1379
- 2: B-PER
1380
- 3: I-LOC
1381
- 4: I-ORG
1382
- 5: I-PER
1383
- 6: O
1384
- - name: additional_info
1385
- sequence:
1386
- sequence: string
1387
- splits:
1388
- - name: train
1389
- num_bytes: 493782
1390
- num_examples: 1077
1391
- - name: validation
1392
- num_bytes: 58592
1393
- num_examples: 132
1394
- - name: test
1395
- num_bytes: 62235
1396
- num_examples: 153
1397
- download_size: 5980272
1398
- dataset_size: 614609
1399
- - config_name: wiki-ner.pa
1400
- features:
1401
- - name: tokens
1402
- sequence: string
1403
- - name: ner_tags
1404
- sequence:
1405
- class_label:
1406
- names:
1407
- 0: B-LOC
1408
- 1: B-ORG
1409
- 2: B-PER
1410
- 3: I-LOC
1411
- 4: I-ORG
1412
- 5: I-PER
1413
- 6: O
1414
- - name: additional_info
1415
- sequence:
1416
- sequence: string
1417
- splits:
1418
- - name: train
1419
- num_bytes: 520268
1420
- num_examples: 1408
1421
- - name: validation
1422
- num_bytes: 61194
1423
- num_examples: 186
1424
- - name: test
1425
- num_bytes: 61812
1426
- num_examples: 179
1427
- download_size: 5980272
1428
- dataset_size: 643274
1429
- - config_name: wiki-ner.ta
1430
- features:
1431
- - name: tokens
1432
- sequence: string
1433
- - name: ner_tags
1434
- sequence:
1435
- class_label:
1436
- names:
1437
- 0: B-LOC
1438
- 1: B-ORG
1439
- 2: B-PER
1440
- 3: I-LOC
1441
- 4: I-ORG
1442
- 5: I-PER
1443
- 6: O
1444
- - name: additional_info
1445
- sequence:
1446
- sequence: string
1447
- splits:
1448
- - name: train
1449
- num_bytes: 10117152
1450
- num_examples: 20466
1451
- - name: validation
1452
- num_bytes: 1267212
1453
- num_examples: 2586
1454
- - name: test
1455
- num_bytes: 1321650
1456
- num_examples: 2611
1457
- download_size: 5980272
1458
- dataset_size: 12706014
1459
- - config_name: wiki-ner.te
1460
- features:
1461
- - name: tokens
1462
- sequence: string
1463
- - name: ner_tags
1464
- sequence:
1465
- class_label:
1466
- names:
1467
- 0: B-LOC
1468
- 1: B-ORG
1469
- 2: B-PER
1470
- 3: I-LOC
1471
- 4: I-ORG
1472
- 5: I-PER
1473
- 6: O
1474
- - name: additional_info
1475
- sequence:
1476
- sequence: string
1477
- splits:
1478
- - name: train
1479
- num_bytes: 3881235
1480
- num_examples: 7978
1481
- - name: validation
1482
- num_bytes: 458533
1483
- num_examples: 841
1484
- - name: test
1485
- num_bytes: 507830
1486
- num_examples: 1110
1487
- download_size: 5980272
1488
- dataset_size: 4847598
1489
- ---
1490
-
1491
- # Dataset Card for "indic_glue"
1492
-
1493
- ## Table of Contents
1494
- - [Dataset Description](#dataset-description)
1495
- - [Dataset Summary](#dataset-summary)
1496
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
1497
- - [Languages](#languages)
1498
- - [Dataset Structure](#dataset-structure)
1499
- - [Data Instances](#data-instances)
1500
- - [Data Fields](#data-fields)
1501
- - [Data Splits](#data-splits)
1502
- - [Dataset Creation](#dataset-creation)
1503
- - [Curation Rationale](#curation-rationale)
1504
- - [Source Data](#source-data)
1505
- - [Annotations](#annotations)
1506
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
1507
- - [Considerations for Using the Data](#considerations-for-using-the-data)
1508
- - [Social Impact of Dataset](#social-impact-of-dataset)
1509
- - [Discussion of Biases](#discussion-of-biases)
1510
- - [Other Known Limitations](#other-known-limitations)
1511
- - [Additional Information](#additional-information)
1512
- - [Dataset Curators](#dataset-curators)
1513
- - [Licensing Information](#licensing-information)
1514
- - [Citation Information](#citation-information)
1515
- - [Contributions](#contributions)
1516
-
1517
- ## Dataset Description
1518
-
1519
- - **Homepage:** https://ai4bharat.iitm.ac.in/indic-glue
1520
- - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1521
- - **Paper:** [IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages](https://aclanthology.org/2020.findings-emnlp.445/)
1522
- - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1523
- - **Size of downloaded dataset files:** 3351.18 MB
1524
- - **Size of the generated dataset:** 1573.33 MB
1525
- - **Total amount of disk used:** 4924.51 MB
1526
-
1527
- ### Dataset Summary
1528
-
1529
- IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
1530
- variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
1531
-
1532
- The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
1533
- in which a system must read a sentence with a pronoun and select the referent of that pronoun from
1534
- a list of choices. The examples are manually constructed to foil simple statistical methods: Each
1535
- one is contingent on contextual information provided by a single word or phrase in the sentence.
1536
- To convert the problem into sentence pair classification, we construct sentence pairs by replacing
1537
- the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
1538
- pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
1539
- new examples derived from fiction books that was shared privately by the authors of the original
1540
- corpus. While the included training set is balanced between two classes, the test set is imbalanced
1541
- between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
1542
- hypotheses are sometimes shared between training and development examples, so if a model memorizes the
1543
- training examples, they will predict the wrong label on corresponding development set
1544
- example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
1545
- between a model's score on this task and its score on the unconverted original task. We
1546
- call converted dataset WNLI (Winograd NLI). This dataset is translated and publicly released for 3
1547
- Indian languages by AI4Bharat.
1548
-
1549
- ### Supported Tasks and Leaderboards
1550
-
1551
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1552
-
1553
- ### Languages
1554
-
1555
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1556
-
1557
- ## Dataset Structure
1558
-
1559
- ### Data Instances
1560
-
1561
- #### actsa-sc.te
1562
-
1563
- - **Size of downloaded dataset files:** 0.36 MB
1564
- - **Size of the generated dataset:** 1.63 MB
1565
- - **Total amount of disk used:** 1.99 MB
1566
-
1567
- An example of 'validation' looks as follows.
1568
- ```
1569
- This example was too long and was cropped:
1570
-
1571
- {
1572
- "label": 0,
1573
- "text": "\"ప్రయాణాల్లో ఉన్నవారికోసం బస్ స్టేషన్లు, రైల్వే స్టేషన్లలో పల్స్పోలియో బూతులను ఏర్పాటు చేసి చిన్నారులకు పోలియో చుక్కలు వేసేలా ఏర..."
1574
- }
1575
- ```
1576
-
1577
- #### bbca.hi
1578
-
1579
- - **Size of downloaded dataset files:** 5.50 MB
1580
- - **Size of the generated dataset:** 26.35 MB
1581
- - **Total amount of disk used:** 31.85 MB
1582
-
1583
- An example of 'train' looks as follows.
1584
- ```
1585
- This example was too long and was cropped:
1586
-
1587
- {
1588
- "label": "pakistan",
1589
- "text": "\"नेटिजन यानि इंटरनेट पर सक्रिय नागरिक अब ट्विटर पर सरकार द्वारा लगाए प्रतिबंधों के समर्थन या विरोध में अपने विचार व्यक्त करते है..."
1590
- }
1591
- ```
1592
-
1593
- #### copa.en
1594
-
1595
- - **Size of downloaded dataset files:** 0.72 MB
1596
- - **Size of the generated dataset:** 0.11 MB
1597
- - **Total amount of disk used:** 0.83 MB
1598
-
1599
- An example of 'validation' looks as follows.
1600
- ```
1601
- {
1602
- "choice1": "I swept the floor in the unoccupied room.",
1603
- "choice2": "I shut off the light in the unoccupied room.",
1604
- "label": 1,
1605
- "premise": "I wanted to conserve energy.",
1606
- "question": "effect"
1607
- }
1608
- ```
1609
-
1610
- #### copa.gu
1611
-
1612
- - **Size of downloaded dataset files:** 0.72 MB
1613
- - **Size of the generated dataset:** 0.22 MB
1614
- - **Total amount of disk used:** 0.94 MB
1615
-
1616
- An example of 'train' looks as follows.
1617
- ```
1618
- This example was too long and was cropped:
1619
-
1620
- {
1621
- "choice1": "\"સ્ત્રી જાણતી હતી કે તેનો મિત્ર મુશ્કેલ સમયમાંથી પસાર થઈ રહ્યો છે.\"...",
1622
- "choice2": "\"મહિલાને લાગ્યું કે તેના મિત્રએ તેની દયાળુ લાભ લીધો છે.\"...",
1623
- "label": 0,
1624
- "premise": "મહિલાએ તેના મિત્રની મુશ્કેલ વર્તન સહન કરી.",
1625
- "question": "cause"
1626
- }
1627
- ```
1628
-
1629
- #### copa.hi
1630
-
1631
- - **Size of downloaded dataset files:** 0.72 MB
1632
- - **Size of the generated dataset:** 0.22 MB
1633
- - **Total amount of disk used:** 0.94 MB
1634
-
1635
- An example of 'validation' looks as follows.
1636
- ```
1637
- {
1638
- "choice1": "मैंने उसका प्रस्ताव ठुकरा दिया।",
1639
- "choice2": "उन्होंने मुझे उत्पाद खरीदने के लिए राजी किया।",
1640
- "label": 0,
1641
- "premise": "मैंने सेल्समैन की पिच पर शक किया।",
1642
- "question": "effect"
1643
- }
1644
- ```
1645
-
1646
- ### Data Fields
1647
-
1648
- The data fields are the same among all splits.
1649
-
1650
- #### actsa-sc.te
1651
- - `text`: a `string` feature.
1652
- - `label`: a classification label, with possible values including `positive` (0), `negative` (1).
1653
-
1654
- #### bbca.hi
1655
- - `label`: a `string` feature.
1656
- - `text`: a `string` feature.
1657
-
1658
- #### copa.en
1659
- - `premise`: a `string` feature.
1660
- - `choice1`: a `string` feature.
1661
- - `choice2`: a `string` feature.
1662
- - `question`: a `string` feature.
1663
- `label`: an `int32` feature.
1664
-
1665
- #### copa.gu
1666
- - `premise`: a `string` feature.
1667
- - `choice1`: a `string` feature.
1668
- - `choice2`: a `string` feature.
1669
- - `question`: a `string` feature.
1670
- `label`: an `int32` feature.
1671
-
1672
- #### copa.hi
1673
- - `premise`: a `string` feature.
1674
- - `choice1`: a `string` feature.
1675
- - `choice2`: a `string` feature.
1676
- - `question`: a `string` feature.
1677
- `label`: an `int32` feature.
1678
-
1679
- ### Data Splits
1680
-
1681
- #### actsa-sc.te
1682
-
1683
- | |train|validation|test|
1684
- |-----------|----:|---------:|---:|
1685
- |actsa-sc.te| 4328| 541| 541|
1686
-
1687
- #### bbca.hi
1688
-
1689
- | |train|test|
1690
- |-------|----:|---:|
1691
- |bbca.hi| 3467| 866|
1692
-
1693
- #### copa.en
1694
-
1695
- | |train|validation|test|
1696
- |-------|----:|---------:|---:|
1697
- |copa.en| 400| 100| 500|
1698
-
1699
- #### copa.gu
1700
-
1701
- | |train|validation|test|
1702
- |-------|----:|---------:|---:|
1703
- |copa.gu| 362| 88| 448|
1704
-
1705
- #### copa.hi
1706
-
1707
- | |train|validation|test|
1708
- |-------|----:|---------:|---:|
1709
- |copa.hi| 362| 88| 449|
1710
-
1711
- ## Dataset Creation
1712
-
1713
- ### Curation Rationale
1714
-
1715
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1716
-
1717
- ### Source Data
1718
-
1719
- #### Initial Data Collection and Normalization
1720
-
1721
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1722
-
1723
- #### Who are the source language producers?
1724
-
1725
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1726
-
1727
- ### Annotations
1728
-
1729
- #### Annotation process
1730
-
1731
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1732
-
1733
- #### Who are the annotators?
1734
-
1735
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1736
-
1737
- ### Personal and Sensitive Information
1738
-
1739
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1740
-
1741
- ## Considerations for Using the Data
1742
-
1743
- ### Social Impact of Dataset
1744
-
1745
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1746
-
1747
- ### Discussion of Biases
1748
-
1749
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1750
-
1751
- ### Other Known Limitations
1752
-
1753
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1754
-
1755
- ## Additional Information
1756
-
1757
- ### Dataset Curators
1758
-
1759
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1760
-
1761
- ### Licensing Information
1762
-
1763
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1764
-
1765
- ### Citation Information
1766
-
1767
- ```
1768
- @inproceedings{kakwani-etal-2020-indicnlpsuite,
1769
- title = "{I}ndic{NLPS}uite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for {I}ndian Languages",
1770
- author = "Kakwani, Divyanshu and
1771
- Kunchukuttan, Anoop and
1772
- Golla, Satish and
1773
- N.C., Gokul and
1774
- Bhattacharyya, Avik and
1775
- Khapra, Mitesh M. and
1776
- Kumar, Pratyush",
1777
- booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
1778
- month = nov,
1779
- year = "2020",
1780
- address = "Online",
1781
- publisher = "Association for Computational Linguistics",
1782
- url = "https://aclanthology.org/2020.findings-emnlp.445",
1783
- doi = "10.18653/v1/2020.findings-emnlp.445",
1784
- pages = "4948--4961",
1785
- }
1786
-
1787
- @inproceedings{Levesque2011TheWS,
1788
- title={The Winograd Schema Challenge},
1789
- author={H. Levesque and E. Davis and L. Morgenstern},
1790
- booktitle={KR},
1791
- year={2011}
1792
- }
1793
- ```
1794
-
1795
- ### Contributions
1796
-
1797
- Thanks to [@sumanthd17](https://github.com/sumanthd17) for adding this dataset.
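
For reference, a minimal usage sketch for the configs documented in the card above, using the Hugging Face `datasets` library. This is not part of the diff; the hub id `indic_glue` and config names such as `wnli.hi` are taken from the card's YAML metadata, and the split sizes shown in the comments mirror the card's tables.

```python
# Sketch: load one IndicGLUE config and inspect a record.
# Config names ("wnli.hi", "actsa-sc.te", ...) come from the card's
# YAML metadata; the hub id "indic_glue" is assumed from this repo.
from datasets import load_dataset

wnli_hi = load_dataset("indic_glue", "wnli.hi")
print(wnli_hi)              # DatasetDict: train (635), validation (71), test (146)
print(wnli_hi["train"][0])  # {'hypothesis': ..., 'premise': ..., 'label': ...}
```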
cvit-mkb-clsr.en-gu/indic_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba4f944ec8748ec6d13ea24d10642b35505287dccd96e0a36d712865e4c66f56
3
+ size 1093313
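
The three added lines are a Git LFS pointer (spec version, sha256 oid, and byte size) rather than the parquet payload itself. A sketch of reading the converted split directly, assuming the hub's usual `resolve/main` URL layout for this repo; the URL is illustrative, not part of the diff, and the expected shape comes from the card's YAML (`cvit-mkb-clsr.en-gu` test: 6463 examples, fields `sentence1`/`sentence2`):

```python
# Sketch: read the converted parquet split with pandas (needs pyarrow
# or fastparquet installed). The URL layout is an assumption about how
# the hub serves the file added in this commit.
import pandas as pd

url = (
    "https://huggingface.co/datasets/indic_glue/resolve/main/"
    "cvit-mkb-clsr.en-gu/indic_glue-test.parquet"
)
df = pd.read_parquet(url)
print(len(df))              # expected 6463 rows, per the card's YAML
print(df.columns.tolist())  # expected ['sentence1', 'sentence2']
```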
dataset_infos.json DELETED
The diff for this file is too large to render. See raw diff
 
indic_glue.py DELETED
@@ -1,979 +0,0 @@
1
- """The IndicGLUE benchmark."""
2
-
3
-
4
- import csv
5
- import json
6
- import textwrap
7
-
8
- import pandas as pd
9
-
10
- import datasets
11
-
12
-
13
- _INDIC_GLUE_CITATION = """\
14
- @inproceedings{kakwani2020indicnlpsuite,
15
- title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
16
- author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
17
- year={2020},
18
- booktitle={Findings of EMNLP},
19
- }
20
- """
21
-
22
- _INDIC_GLUE_DESCRIPTION = """\
23
- IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
24
- variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
25
- """
26
-
27
- _DESCRIPTIONS = {
28
- "wnli": textwrap.dedent(
29
- """
30
- The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
31
- in which a system must read a sentence with a pronoun and select the referent of that pronoun from
32
- a list of choices. The examples are manually constructed to foil simple statistical methods: Each
33
- one is contingent on contextual information provided by a single word or phrase in the sentence.
34
- To convert the problem into sentence pair classification, we construct sentence pairs by replacing
35
- the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
36
- pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
37
- new examples derived from fiction books that was shared privately by the authors of the original
38
- corpus. While the included training set is balanced between two classes, the test set is imbalanced
39
- between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
40
- hypotheses are sometimes shared between training and development examples, so if a model memorizes the
41
- training examples, it will predict the wrong label on the corresponding development set
42
- example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
43
- between a model's score on this task and its score on the unconverted original task. We
44
- call the converted dataset WNLI (Winograd NLI). This dataset has been translated and publicly released for 3
45
- Indian languages by AI4Bharat.
46
- """
47
- ),
48
- "copa": textwrap.dedent(
49
- """
50
- The Choice Of Plausible Alternatives (COPA) evaluation provides researchers with a tool for assessing
51
- progress in open-domain commonsense causal reasoning. COPA consists of 1000 questions, split equally
52
- into development and test sets of 500 questions each. Each question is composed of a premise and two
53
- alternatives, where the task is to select the alternative that more plausibly has a causal relation
54
- with the premise. The correct alternative is randomized so that the expected performance of randomly
55
- guessing is 50%. This dataset has been translated and publicly released for 3 languages by AI4Bharat.
56
- """
57
- ),
58
- "sna": textwrap.dedent(
59
- """
60
- This dataset is a collection of Bengali news articles. The dataset is used for classifying articles into
61
- 6 different classes, namely national, international, state, kolkata, entertainment and sports.
62
- """
63
- ),
64
- "csqa": textwrap.dedent(
65
- """
66
- Given a text with an entity randomly masked, the task is to predict that masked entity from a list of 4
67
- candidate entities. The dataset contains around 239k examples across 11 languages.
68
- """
69
- ),
70
- "wstp": textwrap.dedent(
71
- """
72
- Predict the correct title for a Wikipedia section from a given list of four candidate titles.
73
- The dataset has 400k examples across 11 Indian languages.
74
- """
75
- ),
76
- "inltkh": textwrap.dedent(
77
- """
78
- Obtained from the inltk project. The corpus is a collection of headlines tagged with their news category.
79
- Available for languages: gu, ml, mr and ta.
80
- """
81
- ),
82
- "bbca": textwrap.dedent(
83
- """
84
- This release consists of 4335 Hindi documents with tags from the BBC Hindi News website.
85
- """
86
- ),
87
- "cvit-mkb-clsr": textwrap.dedent(
88
- """
89
- CVIT Maan ki Baat Dataset - Given a sentence in language $L_1$, the task is to retrieve its translation
90
- from a set of candidate sentences in language $L_2$.
91
- The dataset contains around 39k parallel sentence pairs across 8 Indian languages.
92
- """
93
- ),
94
- "iitp-mr": textwrap.dedent(
95
- """
96
- IIT Patna Product Reviews: Sentiment analysis corpus for product reviews posted in Hindi.
97
- """
98
- ),
99
- "iitp-pr": textwrap.dedent(
100
- """
101
- IIT Patna Product Reviews: Sentiment analysis corpus for product reviews posted in Hindi.
102
- """
103
- ),
104
- "actsa-sc": textwrap.dedent(
105
- """
106
- ACTSA Corpus: Sentiment analysis corpus for Telugu sentences.
107
- """
108
- ),
109
- "md": textwrap.dedent(
110
- """
111
- The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences.
112
- It contains sentences from stories written by 11 famous authors from the 20th century. 4-5 stories by
113
- each author that were available in the public domain have been selected, resulting in a collection of 53 stories.
114
- Most of these short stories were originally written in Hindi but some of them were written in other Indian languages
115
- and later translated to Hindi.
116
- """
117
- ),
118
- "wiki-ner": textwrap.dedent(
119
- """
120
- The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been constructed using
121
- the linked entities in Wikipedia pages for 282 different languages.
122
- """
123
- ),
124
- }
125
-
- _CITATIONS = {
-     "wnli": textwrap.dedent(
-         """
-         @inproceedings{kakwani2020indicnlpsuite,
-             title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
-             author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
-             year={2020},
-             booktitle={Findings of EMNLP},
-         }
-         @inproceedings{Levesque2011TheWS,
-             title={The Winograd Schema Challenge},
-             author={H. Levesque and E. Davis and L. Morgenstern},
-             booktitle={KR},
-             year={2011}
-         }
-         """
-     ),
-     "copa": textwrap.dedent(
-         """
-         @inproceedings{kakwani2020indicnlpsuite,
-             title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
-             author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
-             year={2020},
-             booktitle={Findings of EMNLP},
-         }
-         @inproceedings{Gordon2011SemEval2012T7,
-             title={SemEval-2012 Task 7: Choice of Plausible Alternatives: An Evaluation of Commonsense Causal Reasoning},
-             author={Andrew S. Gordon and Zornitsa Kozareva and Melissa Roemmele},
-             booktitle={SemEval@NAACL-HLT},
-             year={2011}
-         }
-         """
-     ),
-     "sna": textwrap.dedent(
-         """
-         https://www.kaggle.com/csoham/classification-bengali-news-articles-indicnlp
-         """
-     ),
-     "csqa": textwrap.dedent(
-         """
-         @inproceedings{kakwani2020indicnlpsuite,
-             title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
-             author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
-             year={2020},
-             booktitle={Findings of EMNLP},
-         }
-         """
-     ),
-     "wstp": textwrap.dedent(
-         """
-         @inproceedings{kakwani2020indicnlpsuite,
-             title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
-             author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
-             year={2020},
-             booktitle={Findings of EMNLP},
-         }
-         """
-     ),
-     "inltkh": textwrap.dedent(
-         """
-         https://github.com/goru001/inltk
-         """
-     ),
-     "bbca": textwrap.dedent(
-         """
-         https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1
-         """
-     ),
-     "cvit-mkb-clsr": textwrap.dedent(
-         """
-         @inproceedings{siripragada-etal-2020-multilingual,
-             title = "A Multilingual Parallel Corpora Collection Effort for {I}ndian Languages",
-             author = "Siripragada, Shashank and
-               Philip, Jerin and
-               Namboodiri, Vinay P. and
-               Jawahar, C V",
-             booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
-             month = may,
-             year = "2020",
-             address = "Marseille, France",
-             publisher = "European Language Resources Association",
-             url = "https://www.aclweb.org/anthology/2020.lrec-1.462",
-             pages = "3743--3751",
-             abstract = "We present sentence aligned parallel corpora across 10 Indian Languages - Hindi, Telugu, Tamil, Malayalam, Gujarati, Urdu, Bengali, Oriya, Marathi, Punjabi, and English - many of which are categorized as low resource. The corpora are compiled from online sources which have content shared across languages. The corpora presented significantly extends present resources that are either not large enough or are restricted to a specific domain (such as health). We also provide a separate test corpus compiled from an independent online source that can be independently used for validating the performance in 10 Indian languages. Alongside, we report on the methods of constructing such corpora using tools enabled by recent advances in machine translation and cross-lingual retrieval using deep neural network based methods.",
-             language = "English",
-             ISBN = "979-10-95546-34-4",
-         }
-         """
-     ),
- "iitp-mr": textwrap.dedent(
216
- """
217
- @inproceedings{akhtar-etal-2016-hybrid,
218
- title = "A Hybrid Deep Learning Architecture for Sentiment Analysis",
219
- author = "Akhtar, Md Shad and
220
- Kumar, Ayush and
221
- Ekbal, Asif and
222
- Bhattacharyya, Pushpak",
223
- booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
224
- month = dec,
225
- year = "2016",
226
- address = "Osaka, Japan",
227
- publisher = "The COLING 2016 Organizing Committee",
228
- url = "https://www.aclweb.org/anthology/C16-1047",
229
- pages = "482--493",
230
- abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.",
231
- }
232
- """
233
- ),
234
- "iitp-pr": textwrap.dedent(
235
- """
236
- @inproceedings{akhtar-etal-2016-hybrid,
237
- title = "A Hybrid Deep Learning Architecture for Sentiment Analysis",
238
- author = "Akhtar, Md Shad and
239
- Kumar, Ayush and
240
- Ekbal, Asif and
241
- Bhattacharyya, Pushpak",
242
- booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
243
- month = dec,
244
- year = "2016",
245
- address = "Osaka, Japan",
246
- publisher = "The COLING 2016 Organizing Committee",
247
- url = "https://www.aclweb.org/anthology/C16-1047",
248
- pages = "482--493",
249
- abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.",
250
- }
251
- """
252
- ),
253
- "actsa-sc": textwrap.dedent(
254
- """
255
- @inproceedings{mukku-mamidi-2017-actsa,
256
- title = "{ACTSA}: Annotated Corpus for {T}elugu Sentiment Analysis",
257
- author = "Mukku, Sandeep Sricharan and
258
- Mamidi, Radhika",
259
- booktitle = "Proceedings of the First Workshop on Building Linguistically Generalizable {NLP} Systems",
260
- month = sep,
261
- year = "2017",
262
- address = "Copenhagen, Denmark",
263
- publisher = "Association for Computational Linguistics",
264
- url = "https://www.aclweb.org/anthology/W17-5408",
265
- doi = "10.18653/v1/W17-5408",
266
- pages = "54--58",
267
- abstract = "Sentiment analysis deals with the task of determining the polarity of a document or sentence and has received a lot of attention in recent years for the English language. With the rapid growth of social media these days, a lot of data is available in regional languages besides English. Telugu is one such regional language with abundant data available in social media, but it{'}s hard to find a labelled data of sentences for Telugu Sentiment Analysis. In this paper, we describe an effort to build a gold-standard annotated corpus of Telugu sentences to support Telugu Sentiment Analysis. The corpus, named ACTSA (Annotated Corpus for Telugu Sentiment Analysis) has a collection of Telugu sentences taken from different sources which were then pre-processed and manually annotated by native Telugu speakers using our annotation guidelines. In total, we have annotated 5457 sentences, which makes our corpus the largest resource currently available. The corpus and the annotation guidelines are made publicly available.",
268
- }
269
- """
270
- ),
271
- "md": textwrap.dedent(
272
- """
273
- @inproceedings{Dhanwal2020AnAD,
274
- title={An Annotated Dataset of Discourse Modes in Hindi Stories},
275
- author={Swapnil Dhanwal and Hritwik Dutta and Hitesh Nankani and Nilay Shrivastava and Y. Kumar and Junyi Jessy Li and Debanjan Mahata and Rakesh Gosangi and Haimin Zhang and R. R. Shah and Amanda Stent},
276
- booktitle={LREC},
277
- year={2020}
278
- }
279
- """
280
- ),
281
- "wiki-ner": textwrap.dedent(
282
- """
283
- @inproceedings{pan-etal-2017-cross,
284
- title = "Cross-lingual Name Tagging and Linking for 282 Languages",
285
- author = "Pan, Xiaoman and
286
- Zhang, Boliang and
287
- May, Jonathan and
288
- Nothman, Joel and
289
- Knight, Kevin and
290
- Ji, Heng",
291
- booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
292
- month = jul,
293
- year = "2017",
294
- address = "Vancouver, Canada",
295
- publisher = "Association for Computational Linguistics",
296
- url = "https://www.aclweb.org/anthology/P17-1178",
297
- doi = "10.18653/v1/P17-1178",
298
- pages = "1946--1958",
299
- abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.",
300
- }
301
- """
302
- ),
303
- }
304
-
- _TEXT_FEATURES = {
-     "wnli": {"hypothesis": "sentence1", "premise": "sentence2"},
-     "copa": {"premise": "premise", "choice1": "choice1", "choice2": "choice2", "question": "question"},
-     "sna": {"text": "text"},
-     "csqa": {"question": "question", "answer": "answer", "category": "category", "title": "title"},
-     "wstp": {
-         "sectionText": "sectionText",
-         "correctTitle": "correctTitle",
-         "titleA": "titleA",
-         "titleB": "titleB",
-         "titleC": "titleC",
-         "titleD": "titleD",
-         "url": "url",
-     },
-     "inltkh": {"text": "text"},
-     "bbca": {"label": "label", "text": "text"},
-     "cvit-mkb-clsr": {"sentence1": "sentence1", "sentence2": "sentence2"},
-     "iitp-mr": {"text": "text"},
-     "iitp-pr": {"text": "text"},
-     "actsa-sc": {"text": "text"},
-     "md": {"sentence": "sentence", "discourse_mode": "discourse_mode"},
-     "wiki-ner": {},
- }
-
- _DATA_URLS = {
-     "wnli": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/wnli-translated.tar.gz",
-     "copa": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/copa-translated.tar.gz",
-     "sna": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/soham-articles.tar.gz",
-     "csqa": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/wiki-cloze.tar.gz",
-     "wstp": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/wiki-section-titles.tar.gz",
-     "inltkh": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/inltk-headlines.tar.gz",
-     "bbca": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/bbc-articles.tar.gz",
-     "cvit-mkb-clsr": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/cvit-mkb.tar.gz",
-     "iitp-mr": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/iitp-movie-reviews.tar.gz",
-     "iitp-pr": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/iitp-product-reviews.tar.gz",
-     "actsa-sc": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/actsa.tar.gz",
-     "md": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/midas-discourse.tar.gz",
-     "wiki-ner": "https://ai4b-public-nlu-nlg.objectstore.e2enetworks.net/IndicGLUE/wikiann-ner.tar.gz",
- }
-
- _URLS = {
-     "wnli": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "copa": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "sna": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "csqa": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "wstp": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "inltkh": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "bbca": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "cvit-mkb-clsr": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "iitp-mr": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "iitp-pr": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "actsa-sc": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "md": "https://ai4bharat.iitm.ac.in/indic-glue",
-     "wiki-ner": "https://ai4bharat.iitm.ac.in/indic-glue",
- }
-
- _INDIC_GLUE_URL = "https://ai4bharat.iitm.ac.in/indic-glue"
-
- _WNLI_LANGS = ["en", "hi", "gu", "mr"]
- _COPA_LANGS = ["en", "hi", "gu", "mr"]
- _SNA_LANGS = ["bn"]
- _CSQA_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
- _WSTP_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
- _iNLTKH_LANGS = ["gu", "ml", "mr", "ta", "te"]
- _BBCA_LANGS = ["hi"]
- _CVIT_MKB_CLSR = ["en-bn", "en-gu", "en-hi", "en-ml", "en-mr", "en-or", "en-ta", "en-te", "en-ur"]
- _IITP_MR_LANGS = ["hi"]
- _IITP_PR_LANGS = ["hi"]
- _ACTSA_LANGS = ["te"]
- _MD_LANGS = ["hi"]
- _WIKI_NER_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
-
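- # Build one BuilderConfig name per (task, language) combination, e.g. "wnli.hi",
- # "csqa.ta" or "cvit-mkb-clsr.en-ta".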
- _NAMES = []
-
- for lang in _WNLI_LANGS:
-     _NAMES.append(f"wnli.{lang}")
-
- for lang in _COPA_LANGS:
-     _NAMES.append(f"copa.{lang}")
-
- for lang in _SNA_LANGS:
-     _NAMES.append(f"sna.{lang}")
-
- for lang in _CSQA_LANGS:
-     _NAMES.append(f"csqa.{lang}")
-
- for lang in _WSTP_LANGS:
-     _NAMES.append(f"wstp.{lang}")
-
- for lang in _iNLTKH_LANGS:
-     _NAMES.append(f"inltkh.{lang}")
-
- for lang in _BBCA_LANGS:
-     _NAMES.append(f"bbca.{lang}")
-
- for lang in _CVIT_MKB_CLSR:
-     _NAMES.append(f"cvit-mkb-clsr.{lang}")
-
- for lang in _IITP_MR_LANGS:
-     _NAMES.append(f"iitp-mr.{lang}")
-
- for lang in _IITP_PR_LANGS:
-     _NAMES.append(f"iitp-pr.{lang}")
-
- for lang in _ACTSA_LANGS:
-     _NAMES.append(f"actsa-sc.{lang}")
-
- for lang in _MD_LANGS:
-     _NAMES.append(f"md.{lang}")
-
- for lang in _WIKI_NER_LANGS:
-     _NAMES.append(f"wiki-ner.{lang}")
-
-
- class IndicGlueConfig(datasets.BuilderConfig):
-     """BuilderConfig for IndicGLUE."""
-
-     def __init__(self, data_url, citation, url, text_features, **kwargs):
-         """
-         Args:
-             data_url: `string`, url to download the zip file from.
-             citation: `string`, citation for the data set.
-             url: `string`, url for information about the data set.
-             text_features: `dict[string, string]`, map from the name of the feature
-                 dict for each text field to the name of the column in the csv/json file.
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(IndicGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.data_url = data_url
-         self.citation = citation
-         self.url = url
-         self.text_features = text_features
-
-
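- # One config is instantiated per name in _NAMES; the task key (the part before
- # the ".") selects the matching description, citation, URLs and text features.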
- class IndicGlue(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         IndicGlueConfig(
-             name=name,
-             description=_DESCRIPTIONS[name.split(".")[0]],
-             text_features=_TEXT_FEATURES[name.split(".")[0]],
-             data_url=_DATA_URLS[name.split(".")[0]],
-             citation=_CITATIONS[name.split(".")[0]],
-             url=_URLS[name.split(".")[0]],
-         )
-         for name in _NAMES
-     ]
-
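-     # The feature schema starts from the config's declared text features and is
-     # extended below with task-specific label and sequence columns.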
-     def _info(self):
-         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-
-         if self.config.name.startswith("copa"):
-             features["label"] = datasets.Value("int32")
-
-         if self.config.name.startswith("sna"):
-             features["label"] = datasets.features.ClassLabel(
-                 names=["kolkata", "state", "national", "sports", "entertainment", "international"]
-             )
-
-         if self.config.name.startswith("inltkh"):
-             features["label"] = datasets.features.ClassLabel(
-                 names=[
-                     "entertainment",
-                     "business",
-                     "tech",
-                     "sports",
-                     "state",
-                     "spirituality",
-                     "tamil-cinema",
-                     "positive",
-                     "negative",
-                     "neutral",
-                 ]
-             )
-
-         if self.config.name.startswith("iitp"):
-             features["label"] = datasets.features.ClassLabel(names=["negative", "neutral", "positive"])
-
-         if self.config.name.startswith("wnli"):
-             features["label"] = datasets.features.ClassLabel(names=["not_entailment", "entailment", "None"])
-
-         if self.config.name.startswith("actsa"):
-             features["label"] = datasets.features.ClassLabel(names=["positive", "negative"])
-
-         if self.config.name.startswith("csqa"):
-             features["options"] = datasets.features.Sequence(datasets.Value("string"))
-             features["out_of_context_options"] = datasets.features.Sequence(datasets.Value("string"))
-
-         if self.config.name.startswith("md"):
-             features["story_number"] = datasets.Value("int32")
-             features["id"] = datasets.Value("int32")
-
-         if self.config.name.startswith("wiki-ner"):
-             features["tokens"] = datasets.features.Sequence(datasets.Value("string"))
-             features["ner_tags"] = datasets.features.Sequence(
-                 datasets.features.ClassLabel(names=["B-LOC", "B-ORG", "B-PER", "I-LOC", "I-ORG", "I-PER", "O"])
-             )
-             features["additional_info"] = datasets.features.Sequence(
-                 datasets.features.Sequence(datasets.Value("string"))
-             )
-
-         return datasets.DatasetInfo(
-             description=_INDIC_GLUE_DESCRIPTION + "\n" + self.config.description,
-             features=datasets.Features(features),
-             homepage=self.config.url,
-             citation=_INDIC_GLUE_CITATION + "\n" + self.config.citation,
-         )
-
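-     # Archives are downloaded without extraction; each split generator streams the
-     # archive via dl_manager.iter_archive and matches its data file by path.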
-     def _split_generators(self, dl_manager):
-
-         if self.config.name.startswith("wnli"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "train.csv",
-                         "split": datasets.Split.TRAIN,
-                         "key": "train-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "dev.csv",
-                         "split": datasets.Split.VALIDATION,
-                         "key": "val-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "test.csv",
-                         "split": datasets.Split.TEST,
-                         "key": "test-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("copa"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "train.jsonl",
-                         "split": datasets.Split.TRAIN,
-                         "key": "train-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "val.jsonl",
-                         "split": datasets.Split.VALIDATION,
-                         "key": "val-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "test.jsonl",
-                         "split": datasets.Split.TEST,
-                         "key": "test-split",
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("sna"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "bn-train.csv",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "bn-valid.csv",
-                         "split": datasets.Split.VALIDATION,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "bn-test.csv",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("csqa"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}.json",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 )
-             ]
-
-         if self.config.name.startswith("wstp"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.json",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.json",
-                         "split": datasets.Split.VALIDATION,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.json",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if (
-             self.config.name.startswith("inltkh")
-             or self.config.name.startswith("iitp")
-             or self.config.name.startswith("actsa")
-         ):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.csv",
-                         "split": datasets.Split.VALIDATION,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("bbca"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("cvit"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": None,
-                         "src": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[0]}",
-                         "tgt": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[1]}",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 )
-             ]
-
-         if self.config.name.startswith("md"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "train.json",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "val.json",
-                         "split": datasets.Split.VALIDATION,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + "test.json",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
-         if self.config.name.startswith("wiki-ner"):
-             archive = dl_manager.download(self.config.data_url)
-             task_name = self._get_task_name_from_data_url(self.config.data_url)
-             dl_dir = task_name + "/" + self.config.name.split(".")[1]
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.txt",
-                         "split": datasets.Split.TRAIN,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.txt",
-                         "split": datasets.Split.VALIDATION,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.txt",
-                         "split": datasets.Split.TEST,
-                         "files": dl_manager.iter_archive(archive),
-                     },
-                 ),
-             ]
-
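-     # Each branch below scans the streamed archive for its split's file and
-     # yields examples in the schema declared by _info.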
-     def _generate_examples(self, **args):
-         """Yields examples."""
-         filepath = args["datafile"]
-         files = args["files"]
-
-         if self.config.name.startswith("wnli"):
-             if args["key"] == "test-split":
-                 for path, f in files:
-                     if path == filepath:
-                         data = csv.DictReader((line.decode("utf-8") for line in f))
-                         for id_, row in enumerate(data):
-                             yield id_, {"hypothesis": row["sentence1"], "premise": row["sentence2"], "label": "None"}
-                         break
-             else:
-                 for path, f in files:
-                     if path == filepath:
-                         data = csv.DictReader((line.decode("utf-8") for line in f))
-                         for id_, row in enumerate(data):
-                             # csv.DictReader yields strings, and "0" is truthy,
-                             # so compare against the literal value explicitly.
-                             label = "entailment" if row["label"] == "1" else "not_entailment"
-                             yield id_, {
-                                 "hypothesis": row["sentence1"],
-                                 "premise": row["sentence2"],
-                                 "label": label,
-                             }
-                         break
-
- if self.config.name.startswith("copa"):
834
- if args["key"] == "test-split":
835
- for path, f in files:
836
- if path == filepath:
837
- lines = f.readlines()
838
- data = map(lambda l: json.loads(l), lines)
839
- data = list(data)
840
- for id_, row in enumerate(data):
841
- yield id_, {
842
- "premise": row["premise"],
843
- "choice1": row["choice1"],
844
- "choice2": row["choice2"],
845
- "question": row["question"],
846
- "label": 0,
847
- }
848
- break
849
- else:
850
- for path, f in files:
851
- if path == filepath:
852
- lines = f.readlines()
853
- data = map(lambda l: json.loads(l), lines)
854
- data = list(data)
855
- for id_, row in enumerate(data):
856
- yield id_, {
857
- "premise": row["premise"],
858
- "choice1": row["choice1"],
859
- "choice2": row["choice2"],
860
- "question": row["question"],
861
- "label": row["label"],
862
- }
863
- break
864
-
865
- if self.config.name.startswith("sna"):
866
- for path, f in files:
867
- if path == filepath:
868
- df = pd.read_csv(f, names=["label", "text"])
869
- for id_, row in df.iterrows():
870
- yield id_, {"text": row["text"], "label": row["label"]}
871
- break
872
-
873
- if self.config.name.startswith("csqa"):
874
- for path, f in files:
875
- if path == filepath:
876
- data = json.load(f)
877
- df = pd.DataFrame(data["cloze_data"])
878
- df["out_of_context_options"].loc[df["out_of_context_options"].isnull()] = (
879
- df["out_of_context_options"].loc[df["out_of_context_options"].isnull()].apply(lambda x: [])
880
- )
881
- for id_, row in df.iterrows():
882
- yield id_, {
883
- "question": row["question"],
884
- "answer": row["answer"],
885
- "category": row["category"],
886
- "title": row["title"],
887
- "out_of_context_options": row["out_of_context_options"],
888
- "options": row["options"],
889
- }
890
- break
891
-
892
- if self.config.name.startswith("wstp"):
893
- for path, f in files:
894
- if path == filepath:
895
- df = pd.read_json(f)
896
- for id_, row in df.iterrows():
897
- yield id_, {
898
- "sectionText": row["sectionText"],
899
- "correctTitle": row["correctTitle"],
900
- "titleA": row["titleA"],
901
- "titleB": row["titleB"],
902
- "titleC": row["titleC"],
903
- "titleD": row["titleD"],
904
- "url": row["url"],
905
- }
906
- break
907
-
908
-         if (
-             self.config.name.startswith("inltkh")
-             or self.config.name.startswith("bbca")
-             or self.config.name.startswith("iitp")
-         ):
-             for path, f in files:
-                 if path == filepath:
-                     df = pd.read_csv(f, names=["label", "text"])
-                     for id_, row in df.iterrows():
-                         yield id_, {"text": row["text"], "label": row["label"]}
-                     break
-
- if self.config.name.startswith("actsa"):
921
- for path, f in files:
922
- if path == filepath:
923
- df = pd.read_csv(f, names=["label", "text"])
924
- for id_, row in df.iterrows():
925
- label = "positive" if row["label"] else "negative"
926
- yield id_, {"text": row["text"], "label": label}
927
- break
928
-
929
- if self.config.name.startswith("cvit"):
930
- source = args["src"]
931
- target = args["tgt"]
932
- src, tgt = None, None
933
- for path, f in files:
934
- if path == source:
935
- src = f.read().decode("utf-8").splitlines()
936
- elif path == target:
937
- tgt = f.read().decode("utf-8").splitlines()
938
- if src is not None and tgt is not None:
939
- for id_, row in enumerate(zip(src, tgt)):
940
- yield id_, {"sentence1": row[0], "sentence2": row[1]}
941
- break
942
-
943
- if self.config.name.startswith("md"):
944
- for path, f in files:
945
- if path == filepath:
946
- df = pd.read_json(f)
947
- for id_, row in df.iterrows():
948
- yield id_, {
949
- "story_number": row["Story_no"],
950
- "sentence": row["Sentence"],
951
- "discourse_mode": row["Discourse Mode"],
952
- "id": row["id"],
953
- }
954
- break
955
-
956
- if self.config.name.startswith("wiki-ner"):
957
- for path, f in files:
958
- if path == filepath:
959
- data = f.read().decode("utf-8").splitlines()
960
- tokens = []
961
- labels = []
962
- infos = []
963
- for id_, row in enumerate(data):
964
- row = row.split()
965
-
966
- if len(row) == 0:
967
- yield id_, {"tokens": tokens, "ner_tags": labels, "additional_info": infos}
968
- tokens = []
969
- labels = []
970
- infos = []
971
- continue
972
-
973
- tokens.append(row[0])
974
- labels.append(row[-1])
975
- infos.append(row[1:-1])
976
- break
977
-
978
-     def _get_task_name_from_data_url(self, data_url):
-         return data_url.split("/")[-1].split(".")[0]
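
For reference, a minimal usage sketch (assuming the `datasets` library is installed): each config name is one of the task.language strings built in _NAMES above, and the available splits and columns follow _split_generators and _info.

    from datasets import load_dataset

    # Load one (task, language) configuration of IndicGLUE.
    wnli_hi = load_dataset("indic_glue", "wnli.hi")
    print(wnli_hi["train"][0])  # {"hypothesis": ..., "premise": ..., "label": ...}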