Training in progress, step 500
- .ipynb_checkpoints/lora_orpo-checkpoint.yaml +43 -0
- adapter_config.json +31 -0
- adapter_model.safetensors +3 -0
- lora_orpo.yaml +43 -0
- special_tokens_map.json +23 -0
- tokenizer.json +0 -0
- tokenizer_config.json +126 -0
- trainer_log.jsonl +51 -0
- training_args.bin +3 -0
.ipynb_checkpoints/lora_orpo-checkpoint.yaml
ADDED
@@ -0,0 +1,43 @@
+### model
+model_name_or_path: tiiuae/falcon-7b-instruct
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en,bct_non_cot_dpo_1000
+dataset_dir: data_private
+template: falcon
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Falcon-7B-Instruct/lora/orpo-salt
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Falcon-7B-Instruct-ORPO-SALT
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
adapter_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "tiiuae/falcon-7b-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "query_key_value",
+    "dense",
+    "dense_4h_to_h",
+    "dense_h_to_4h"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
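
The adapter applies rank-8 LoRA to all four Falcon linear blocks. A minimal sketch of loading it for inference with the PEFT library (assumes peft, transformers, and accelerate are installed; the repo id is taken from hub_model_id in lora_orpo.yaml):

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the frozen base model named in base_model_name_or_path.
base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b-instruct",
    torch_dtype="auto",
    device_map="auto",  # requires accelerate
)
# Attach the rank-8 LoRA adapter from this repo.
model = PeftModel.from_pretrained(base, "chchen/Falcon-7B-Instruct-ORPO-SALT")
tokenizer = AutoTokenizer.from_pretrained("chchen/Falcon-7B-Instruct-ORPO-SALT")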
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8c9c801ad0e0026117f5694d056da71527f0dad31739ebcac9a2abc96a1b8c4
+size 65309632
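
The weights file is stored as a Git LFS pointer: the oid is the SHA-256 of the real ~65 MB payload. A sketch for verifying a downloaded copy against the pointer (assumes the resolved file sits in the working directory):

import hashlib

with open("adapter_model.safetensors", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
# oid from the LFS pointer above
assert digest == "b8c9c801ad0e0026117f5694d056da71527f0dad31739ebcac9a2abc96a1b8c4"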
lora_orpo.yaml
ADDED
@@ -0,0 +1,43 @@
+### model
+model_name_or_path: tiiuae/falcon-7b-instruct
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en,bct_non_cot_dpo_1000
+dataset_dir: data_private
+template: falcon
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Falcon-7B-Instruct/lora/orpo-salt
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Falcon-7B-Instruct-ORPO-SALT
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
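
The keys (stage, finetuning_type, template, dataset_dir) match LLaMA-Factory-style YAML configs. A quick sanity check on the schedule this config implies, as a sketch assuming a single GPU (world size 1); the 1854 total steps come from trainer_log.jsonl below:

# Effective batch and implied dataset size for this run.
per_device_train_batch_size = 2
gradient_accumulation_steps = 8
num_train_epochs = 3.0
val_size = 0.1
total_steps = 1854  # reported in trainer_log.jsonl

effective_batch = per_device_train_batch_size * gradient_accumulation_steps  # 16
steps_per_epoch = total_steps / num_train_epochs                             # 618
train_examples_per_epoch = steps_per_epoch * effective_batch                 # 9888
implied_dataset_size = train_examples_per_epoch / (1 - val_size)             # ~10,987
print(effective_batch, steps_per_epoch, implied_dataset_size)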
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "additional_special_tokens": [
+    ">>TITLE<<",
+    ">>ABSTRACT<<",
+    ">>INTRODUCTION<<",
+    ">>SUMMARY<<",
+    ">>COMMENT<<",
+    ">>ANSWER<<",
+    ">>QUESTION<<",
+    ">>DOMAIN<<",
+    ">>PREFIX<<",
+    ">>SUFFIX<<",
+    ">>MIDDLE<<"
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>"
+}
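
Note that pad_token reuses <|endoftext|>, since the Falcon tokenizer ships no dedicated padding token. A quick check (sketch, assumes transformers is installed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chchen/Falcon-7B-Instruct-ORPO-SALT")
# Padding reuses the EOS token, per special_tokens_map.json above.
assert tok.pad_token == tok.eos_token == "<|endoftext|>"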
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,126 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": ">>TITLE<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": ">>ABSTRACT<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": ">>INTRODUCTION<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": ">>SUMMARY<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": ">>COMMENT<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": ">>ANSWER<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": ">>QUESTION<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": ">>DOMAIN<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8": {
+      "content": ">>PREFIX<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "9": {
+      "content": ">>SUFFIX<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "10": {
+      "content": ">>MIDDLE<<",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "11": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    ">>TITLE<<",
+    ">>ABSTRACT<<",
+    ">>INTRODUCTION<<",
+    ">>SUMMARY<<",
+    ">>COMMENT<<",
+    ">>ANSWER<<",
+    ">>QUESTION<<",
+    ">>DOMAIN<<",
+    ">>PREFIX<<",
+    ">>SUFFIX<<",
+    ">>MIDDLE<<"
+  ],
+  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'User: ' + content + '\nFalcon:' }}{% elif message['role'] == 'assistant' %}{{ content + '\n' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_input_names": [
+    "input_ids",
+    "attention_mask"
+  ],
+  "model_max_length": 2048,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "right",
+  "split_special_tokens": false,
+  "tokenizer_class": "PreTrainedTokenizerFast"
+}
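
The chat_template renders Falcon-style turns ("User: ...\nFalcon:"). A sketch of applying it (assumes a transformers version that supports apply_chat_template, i.e. >= 4.34):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chchen/Falcon-7B-Instruct-ORPO-SALT")
messages = [{"role": "user", "content": "What is ORPO?"}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)  # -> "User: What is ORPO?\nFalcon:"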
trainer_log.jsonl
ADDED
@@ -0,0 +1,51 @@
+{"current_steps": 10, "total_steps": 1854, "loss": 1.8299, "accuracy": 0.518750011920929, "learning_rate": 4.999648198770648e-06, "epoch": 0.01616488179430188, "percentage": 0.54, "elapsed_time": "0:01:30", "remaining_time": "4:37:14"}
+{"current_steps": 20, "total_steps": 1854, "loss": 2.0003, "accuracy": 0.48750001192092896, "learning_rate": 4.998578646361359e-06, "epoch": 0.03232976358860376, "percentage": 1.08, "elapsed_time": "0:03:02", "remaining_time": "4:39:09"}
+{"current_steps": 30, "total_steps": 1854, "loss": 1.9128, "accuracy": 0.46875, "learning_rate": 4.996791614004449e-06, "epoch": 0.04849464538290564, "percentage": 1.62, "elapsed_time": "0:04:37", "remaining_time": "4:41:02"}
+{"current_steps": 40, "total_steps": 1854, "loss": 2.0298, "accuracy": 0.45625001192092896, "learning_rate": 4.994287614855618e-06, "epoch": 0.06465952717720752, "percentage": 2.16, "elapsed_time": "0:06:01", "remaining_time": "4:33:35"}
+{"current_steps": 50, "total_steps": 1854, "loss": 2.0958, "accuracy": 0.4124999940395355, "learning_rate": 4.991067367951343e-06, "epoch": 0.0808244089715094, "percentage": 2.7, "elapsed_time": "0:07:45", "remaining_time": "4:40:06"}
+{"current_steps": 60, "total_steps": 1854, "loss": 1.9577, "accuracy": 0.5, "learning_rate": 4.987131798002389e-06, "epoch": 0.09698929076581128, "percentage": 3.24, "elapsed_time": "0:09:15", "remaining_time": "4:36:40"}
+{"current_steps": 70, "total_steps": 1854, "loss": 2.1089, "accuracy": 0.42500001192092896, "learning_rate": 4.982482035128285e-06, "epoch": 0.11315417256011315, "percentage": 3.78, "elapsed_time": "0:10:48", "remaining_time": "4:35:18"}
+{"current_steps": 80, "total_steps": 1854, "loss": 1.7468, "accuracy": 0.5375000238418579, "learning_rate": 4.9771194145328e-06, "epoch": 0.12931905435441504, "percentage": 4.31, "elapsed_time": "0:12:21", "remaining_time": "4:34:00"}
+{"current_steps": 90, "total_steps": 1854, "loss": 1.9067, "accuracy": 0.4375, "learning_rate": 4.971045476120532e-06, "epoch": 0.1454839361487169, "percentage": 4.85, "elapsed_time": "0:13:55", "remaining_time": "4:32:49"}
+{"current_steps": 100, "total_steps": 1854, "loss": 1.8297, "accuracy": 0.5062500238418579, "learning_rate": 4.964261964054713e-06, "epoch": 0.1616488179430188, "percentage": 5.39, "elapsed_time": "0:15:28", "remaining_time": "4:31:21"}
+{"current_steps": 110, "total_steps": 1854, "loss": 1.7844, "accuracy": 0.512499988079071, "learning_rate": 4.956770826256372e-06, "epoch": 0.17781369973732067, "percentage": 5.93, "elapsed_time": "0:17:02", "remaining_time": "4:30:05"}
+{"current_steps": 120, "total_steps": 1854, "loss": 1.7682, "accuracy": 0.5, "learning_rate": 4.94857421384497e-06, "epoch": 0.19397858153162256, "percentage": 6.47, "elapsed_time": "0:18:36", "remaining_time": "4:28:53"}
+{"current_steps": 130, "total_steps": 1854, "loss": 1.7352, "accuracy": 0.48124998807907104, "learning_rate": 4.939674480520701e-06, "epoch": 0.21014346332592443, "percentage": 7.01, "elapsed_time": "0:20:12", "remaining_time": "4:27:58"}
+{"current_steps": 140, "total_steps": 1854, "loss": 1.8234, "accuracy": 0.4625000059604645, "learning_rate": 4.930074181888613e-06, "epoch": 0.2263083451202263, "percentage": 7.55, "elapsed_time": "0:21:53", "remaining_time": "4:28:06"}
+{"current_steps": 150, "total_steps": 1854, "loss": 1.7178, "accuracy": 0.4625000059604645, "learning_rate": 4.91977607472475e-06, "epoch": 0.2424732269145282, "percentage": 8.09, "elapsed_time": "0:23:21", "remaining_time": "4:25:17"}
+{"current_steps": 160, "total_steps": 1854, "loss": 1.6837, "accuracy": 0.512499988079071, "learning_rate": 4.908783116184534e-06, "epoch": 0.2586381087088301, "percentage": 8.63, "elapsed_time": "0:24:54", "remaining_time": "4:23:47"}
+{"current_steps": 170, "total_steps": 1854, "loss": 1.6438, "accuracy": 0.5062500238418579, "learning_rate": 4.897098462953598e-06, "epoch": 0.27480299050313195, "percentage": 9.17, "elapsed_time": "0:26:27", "remaining_time": "4:22:07"}
+{"current_steps": 180, "total_steps": 1854, "loss": 1.5969, "accuracy": 0.574999988079071, "learning_rate": 4.884725470341331e-06, "epoch": 0.2909678722974338, "percentage": 9.71, "elapsed_time": "0:27:57", "remaining_time": "4:20:03"}
+{"current_steps": 190, "total_steps": 1854, "loss": 1.6442, "accuracy": 0.45625001192092896, "learning_rate": 4.871667691317377e-06, "epoch": 0.3071327540917357, "percentage": 10.25, "elapsed_time": "0:29:33", "remaining_time": "4:18:50"}
+{"current_steps": 200, "total_steps": 1854, "loss": 1.5575, "accuracy": 0.512499988079071, "learning_rate": 4.857928875491392e-06, "epoch": 0.3232976358860376, "percentage": 10.79, "elapsed_time": "0:31:02", "remaining_time": "4:16:42"}
+{"current_steps": 210, "total_steps": 1854, "loss": 1.5606, "accuracy": 0.4937500059604645, "learning_rate": 4.843512968036314e-06, "epoch": 0.33946251768033947, "percentage": 11.33, "elapsed_time": "0:32:31", "remaining_time": "4:14:34"}
+{"current_steps": 220, "total_steps": 1854, "loss": 1.6726, "accuracy": 0.518750011920929, "learning_rate": 4.828424108555486e-06, "epoch": 0.35562739947464134, "percentage": 11.87, "elapsed_time": "0:34:04", "remaining_time": "4:13:03"}
+{"current_steps": 230, "total_steps": 1854, "loss": 1.599, "accuracy": 0.45625001192092896, "learning_rate": 4.812666629893957e-06, "epoch": 0.3717922812689432, "percentage": 12.41, "elapsed_time": "0:35:35", "remaining_time": "4:11:19"}
+{"current_steps": 240, "total_steps": 1854, "loss": 1.6382, "accuracy": 0.4625000059604645, "learning_rate": 4.796245056894273e-06, "epoch": 0.3879571630632451, "percentage": 12.94, "elapsed_time": "0:37:14", "remaining_time": "4:10:29"}
+{"current_steps": 250, "total_steps": 1854, "loss": 1.5308, "accuracy": 0.550000011920929, "learning_rate": 4.779164105097148e-06, "epoch": 0.404122044857547, "percentage": 13.48, "elapsed_time": "0:38:51", "remaining_time": "4:09:22"}
+{"current_steps": 260, "total_steps": 1854, "loss": 1.5664, "accuracy": 0.518750011920929, "learning_rate": 4.761428679387373e-06, "epoch": 0.42028692665184886, "percentage": 14.02, "elapsed_time": "0:40:26", "remaining_time": "4:07:55"}
+{"current_steps": 270, "total_steps": 1854, "loss": 1.5916, "accuracy": 0.5062500238418579, "learning_rate": 4.7430438725853515e-06, "epoch": 0.4364518084461507, "percentage": 14.56, "elapsed_time": "0:42:02", "remaining_time": "4:06:38"}
+{"current_steps": 280, "total_steps": 1854, "loss": 1.5473, "accuracy": 0.45625001192092896, "learning_rate": 4.724014963984669e-06, "epoch": 0.4526166902404526, "percentage": 15.1, "elapsed_time": "0:43:40", "remaining_time": "4:05:30"}
+{"current_steps": 290, "total_steps": 1854, "loss": 1.4462, "accuracy": 0.48124998807907104, "learning_rate": 4.704347417836116e-06, "epoch": 0.4687815720347545, "percentage": 15.64, "elapsed_time": "0:45:12", "remaining_time": "4:03:50"}
+{"current_steps": 300, "total_steps": 1854, "loss": 1.456, "accuracy": 0.44999998807907104, "learning_rate": 4.684046881778603e-06, "epoch": 0.4849464538290564, "percentage": 16.18, "elapsed_time": "0:46:43", "remaining_time": "4:02:00"}
+{"current_steps": 310, "total_steps": 1854, "loss": 1.5057, "accuracy": 0.5249999761581421, "learning_rate": 4.663119185217409e-06, "epoch": 0.5011113356233583, "percentage": 16.72, "elapsed_time": "0:48:19", "remaining_time": "4:00:40"}
+{"current_steps": 320, "total_steps": 1854, "loss": 1.3866, "accuracy": 0.53125, "learning_rate": 4.641570337650232e-06, "epoch": 0.5172762174176602, "percentage": 17.26, "elapsed_time": "0:49:52", "remaining_time": "3:59:03"}
+{"current_steps": 330, "total_steps": 1854, "loss": 1.5835, "accuracy": 0.4625000059604645, "learning_rate": 4.61940652694154e-06, "epoch": 0.533441099211962, "percentage": 17.8, "elapsed_time": "0:51:30", "remaining_time": "3:57:53"}
+{"current_steps": 340, "total_steps": 1854, "loss": 1.6054, "accuracy": 0.512499988079071, "learning_rate": 4.596634117545689e-06, "epoch": 0.5496059810062639, "percentage": 18.34, "elapsed_time": "0:53:02", "remaining_time": "3:56:12"}
+{"current_steps": 350, "total_steps": 1854, "loss": 1.546, "accuracy": 0.5249999761581421, "learning_rate": 4.573259648679335e-06, "epoch": 0.5657708628005658, "percentage": 18.88, "elapsed_time": "0:54:41", "remaining_time": "3:54:59"}
+{"current_steps": 360, "total_steps": 1854, "loss": 1.5233, "accuracy": 0.5, "learning_rate": 4.549289832443663e-06, "epoch": 0.5819357445948676, "percentage": 19.42, "elapsed_time": "0:56:18", "remaining_time": "3:53:40"}
+{"current_steps": 370, "total_steps": 1854, "loss": 1.4381, "accuracy": 0.44999998807907104, "learning_rate": 4.524731551896978e-06, "epoch": 0.5981006263891695, "percentage": 19.96, "elapsed_time": "0:57:52", "remaining_time": "3:52:08"}
+{"current_steps": 380, "total_steps": 1854, "loss": 1.437, "accuracy": 0.53125, "learning_rate": 4.4995918590781925e-06, "epoch": 0.6142655081834714, "percentage": 20.5, "elapsed_time": "0:59:25", "remaining_time": "3:50:31"}
+{"current_steps": 390, "total_steps": 1854, "loss": 1.4849, "accuracy": 0.550000011920929, "learning_rate": 4.473877972981797e-06, "epoch": 0.6304303899777733, "percentage": 21.04, "elapsed_time": "1:01:02", "remaining_time": "3:49:09"}
+{"current_steps": 400, "total_steps": 1854, "loss": 1.3936, "accuracy": 0.48124998807907104, "learning_rate": 4.447597277484894e-06, "epoch": 0.6465952717720752, "percentage": 21.57, "elapsed_time": "1:02:32", "remaining_time": "3:47:19"}
+{"current_steps": 410, "total_steps": 1854, "loss": 1.5684, "accuracy": 0.4625000059604645, "learning_rate": 4.42075731922687e-06, "epoch": 0.6627601535663771, "percentage": 22.11, "elapsed_time": "1:04:04", "remaining_time": "3:45:40"}
+{"current_steps": 420, "total_steps": 1854, "loss": 1.4095, "accuracy": 0.48750001192092896, "learning_rate": 4.3933658054423465e-06, "epoch": 0.6789250353606789, "percentage": 22.65, "elapsed_time": "1:05:37", "remaining_time": "3:44:03"}
+{"current_steps": 430, "total_steps": 1854, "loss": 1.6431, "accuracy": 0.512499988079071, "learning_rate": 4.365430601748003e-06, "epoch": 0.6950899171549808, "percentage": 23.19, "elapsed_time": "1:07:08", "remaining_time": "3:42:21"}
+{"current_steps": 440, "total_steps": 1854, "loss": 1.4506, "accuracy": 0.4625000059604645, "learning_rate": 4.336959729883925e-06, "epoch": 0.7112547989492827, "percentage": 23.73, "elapsed_time": "1:08:41", "remaining_time": "3:40:43"}
+{"current_steps": 450, "total_steps": 1854, "loss": 1.5134, "accuracy": 0.45625001192092896, "learning_rate": 4.307961365410118e-06, "epoch": 0.7274196807435845, "percentage": 24.27, "elapsed_time": "1:10:19", "remaining_time": "3:39:24"}
+{"current_steps": 460, "total_steps": 1854, "loss": 1.4406, "accuracy": 0.550000011920929, "learning_rate": 4.278443835358854e-06, "epoch": 0.7435845625378864, "percentage": 24.81, "elapsed_time": "1:11:54", "remaining_time": "3:37:53"}
+{"current_steps": 470, "total_steps": 1854, "loss": 1.4775, "accuracy": 0.4437499940395355, "learning_rate": 4.248415615843523e-06, "epoch": 0.7597494443321883, "percentage": 25.35, "elapsed_time": "1:13:21", "remaining_time": "3:36:00"}
+{"current_steps": 480, "total_steps": 1854, "loss": 1.4137, "accuracy": 0.5562499761581421, "learning_rate": 4.217885329624666e-06, "epoch": 0.7759143261264903, "percentage": 25.89, "elapsed_time": "1:14:55", "remaining_time": "3:34:29"}
+{"current_steps": 490, "total_steps": 1854, "loss": 1.4904, "accuracy": 0.5, "learning_rate": 4.186861743633911e-06, "epoch": 0.7920792079207921, "percentage": 26.43, "elapsed_time": "1:16:26", "remaining_time": "3:32:46"}
+{"current_steps": 500, "total_steps": 1854, "loss": 1.5005, "accuracy": 0.46875, "learning_rate": 4.155353766456497e-06, "epoch": 0.808244089715094, "percentage": 26.97, "elapsed_time": "1:18:04", "remaining_time": "3:31:26"}
+{"current_steps": 500, "total_steps": 1854, "eval_loss": 1.5202080011367798, "epoch": 0.808244089715094, "percentage": 26.97, "elapsed_time": "1:21:32", "remaining_time": "3:40:48"}
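
Since plot_loss is enabled, the log lends itself to a quick curve. A sketch that replots the first 500 steps from this file (assumes trainer_log.jsonl is local and matplotlib is installed):

import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" in rec:  # skip eval-only records such as the final step-500 entry
            steps.append(rec["current_steps"])
            losses.append(rec["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss.png")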
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6e80ca9d1798b5e28e8fd1a1d6eb5c9849005b9eabbf1ad700c4f813e210c4e
+size 5240