model update
- README.md +13 -13
- config.json +1 -1
- eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json +1 -1
- eval/samples.test.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt +0 -0
- eval/samples.validation.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt +0 -0
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
- trainer_config.json +1 -1
README.md CHANGED
@@ -14,7 +14,7 @@ pipeline_tag: text2text-generation
 tags:
 - questions and answers generation
 widget:
-- text: "generate question and answer:
+- text: "generate question and answer: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records."
   example_title: "Questions & Answers Generation Example 1"
 model-index:
 - name: lmqg/t5-small-tweetqa-qag
@@ -29,25 +29,25 @@ model-index:
     metrics:
     - name: BLEU4
       type: bleu4
-      value: 0.
+      value: 0.10080358110819482
     - name: ROUGE-L
       type: rouge-l
-      value: 0.
+      value: 0.34193464058970124
     - name: METEOR
       type: meteor
-      value: 0.
+      value: 0.28019855592470416
     - name: BERTScore
       type: bertscore
-      value: 0.
+      value: 0.8964198049713776
     - name: MoverScore
       type: moverscore
-      value: 0.
+      value: 0.6047135052650878
     - name: QAAlignedF1Score (BERTScore)
       type: qa_aligned_f1_score_bertscore
-      value: 0.
+      value: 0.9142303181239072
     - name: QAAlignedF1Score (MoverScore)
       type: qa_aligned_f1_score_moverscore
-      value: 0.
+      value: 0.6307767033392071
 ---
 
 # Model Card of `lmqg/t5-small-tweetqa-qag`
@@ -89,7 +89,7 @@ from lmqg import TransformersQG
 # initialize model
 model = TransformersQG(language='en', model='lmqg/t5-small-tweetqa-qag')
 # model prediction
-question = model.generate_qa(
+question = model.generate_qa("William Turner was an English painter who specialised in watercolour landscapes")
 
 ```
 
@@ -100,7 +100,7 @@ from transformers import pipeline
 # initialize model
 pipe = pipeline("text2text-generation", 'lmqg/t5-small-tweetqa-qag')
 # question generation
-question = pipe('generate question and answer:
+question = pipe('generate question and answer: Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records.')
 
 ```
 
@@ -111,14 +111,14 @@ question = pipe('generate question and answer: Beyonce further expanded her act
 
 | Dataset | Type | BLEU4 | ROUGE-L | METEOR | BERTScore | MoverScore | Link |
 |:--------|:-----|------:|--------:|-------:|----------:|-----------:|-----:|
-| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.
+| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.101 | 0.342 | 0.28 | 0.896 | 0.605 | [link](https://huggingface.co/lmqg/t5-small-tweetqa-qag/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json) |
 
 
 ### Metrics (QAG)
 
 | Dataset | Type | QA Aligned F1 Score (BERTScore) | QA Aligned F1 Score (MoverScore) | Link |
 |:--------|:-----|--------------------------------:|---------------------------------:|-----:|
-| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.
+| [lmqg/qag_tweetqa](https://huggingface.co/datasets/lmqg/qag_tweetqa) | default | 0.914 | 0.631 | [link](https://huggingface.co/lmqg/t5-small-tweetqa-qag/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json) |
 
 
 
@@ -134,7 +134,7 @@ The following hyperparameters were used during fine-tuning:
 - model: t5-small
 - max_length: 256
 - max_length_output: 128
-- epoch:
+- epoch: 14
 - batch: 64
 - lr: 0.0001
 - fp16: False
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lmqg_output/t5_small_tweetqa/
+  "_name_or_path": "lmqg_output/t5_small_tweetqa/model_mzgdpa/epoch_10",
   "add_prefix": true,
   "architectures": [
     "T5ForConditionalGeneration"
eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_tweetqa.default.json CHANGED
@@ -1 +1 @@
-{"validation": {"Bleu_1": 0.
+{"validation": {"Bleu_1": 0.3345070422535018, "Bleu_2": 0.21544505942559114, "Bleu_3": 0.14314309581452783, "Bleu_4": 0.09683893632934976, "METEOR": 0.30224984331474797, "ROUGE_L": 0.35365280171487196, "BERTScore": 0.8922636171556869, "MoverScore": 0.6050952366700484, "QAAlignedF1Score (BERTScore)": 0.9052095424849287, "QAAlignedF1Score (MoverScore)": 0.6315811777418272}, "test": {"Bleu_1": 0.355266164039276, "Bleu_2": 0.22935230967802653, "Bleu_3": 0.15106487659216425, "Bleu_4": 0.10080358110819482, "METEOR": 0.28019855592470416, "ROUGE_L": 0.34193464058970124, "BERTScore": 0.8964198049713776, "MoverScore": 0.6047135052650878, "QAAlignedF1Score (BERTScore)": 0.9142303181239072, "QAAlignedF1Score (MoverScore)": 0.6307767033392071}}
eval/samples.test.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt CHANGED
The diff for this file is too large to render. See raw diff.

eval/samples.validation.hyp.paragraph.questions_answers.lmqg_qag_tweetqa.default.txt CHANGED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2eee0fb22c65db993ca331a5cf8ba436195b9d18c6701820d8d6a347d4ef134f
+size 242014971
tokenizer_config.json CHANGED
@@ -104,7 +104,7 @@
   "eos_token": "</s>",
   "extra_ids": 100,
   "model_max_length": 512,
-  "name_or_path": "lmqg_output/t5_small_tweetqa/
+  "name_or_path": "lmqg_output/t5_small_tweetqa/model_mzgdpa/epoch_10",
   "pad_token": "<pad>",
   "special_tokens_map_file": null,
   "tokenizer_class": "T5Tokenizer",
trainer_config.json CHANGED
@@ -1 +1 @@
-{"dataset_path": "lmqg/qag_tweetqa", "dataset_name": "default", "input_types": ["paragraph"], "output_types": ["questions_answers"], "prefix_types": ["qag"], "model": "t5-small", "max_length": 256, "max_length_output": 128, "epoch":
+{"dataset_path": "lmqg/qag_tweetqa", "dataset_name": "default", "input_types": ["paragraph"], "output_types": ["questions_answers"], "prefix_types": ["qag"], "model": "t5-small", "max_length": 256, "max_length_output": 128, "epoch": 14, "batch": 64, "lr": 0.0001, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 1, "label_smoothing": 0.0}