Training in progress, step 100
- adapter_model.safetensors +1 -1
- tokenizer_config.json +1 -0
- trainer_log.jsonl +11 -0
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:acda72bb812aacf6b5ce45f87c28678a6f2e73c7b48f2258dd8d59ca2f5bf1aa
 size 29034840
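adapter_model.safetensors is stored via Git LFS, so the diff above only rewrites the pointer file: the object's sha256 and byte size. A minimal sketch (the local path is an assumption, not part of this commit) for checking a downloaded copy against the new pointer:

```python
# Verify a locally downloaded adapter_model.safetensors against the values
# recorded in the Git LFS pointer shown above. Path is a hypothetical example.
import hashlib
import os

EXPECTED_OID = "acda72bb812aacf6b5ce45f87c28678a6f2e73c7b48f2258dd8d59ca2f5bf1aa"
EXPECTED_SIZE = 29034840

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints never sit fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "adapter_model.safetensors"  # hypothetical local path
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch vs. LFS pointer"
assert sha256_of(path) == EXPECTED_OID, "sha256 mismatch vs. LFS pointer"
print("adapter file matches the LFS pointer")
```

Streaming in 1 MiB chunks keeps memory use flat regardless of checkpoint size.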
tokenizer_config.json CHANGED
@@ -137,6 +137,7 @@
   "model_max_length": 32768,
   "pad_token": "<|endoftext|>",
   "padding_side": "right",
+  "processor_class": "Qwen2VLProcessor",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null
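The only change to tokenizer_config.json is the new "processor_class" entry, recording that this Qwen2Tokenizer was saved alongside a Qwen2-VL multimodal processor. A minimal sketch of how that entry is typically consumed, assuming a hypothetical repo id and that the image-processor files Qwen2VLProcessor needs are also available:

```python
# Hypothetical repo id; the "processor_class" field in tokenizer_config.json is
# one of the hints AutoProcessor uses to resolve Qwen2VLProcessor instead of a
# bare tokenizer.
from transformers import AutoProcessor, AutoTokenizer

repo_id = "user/qwen2-vl-lora-adapter"  # assumption, not this repo's actual id

processor = AutoProcessor.from_pretrained(repo_id)  # expected: Qwen2VLProcessor
tokenizer = AutoTokenizer.from_pretrained(repo_id)  # expected: Qwen2Tokenizer
print(type(processor).__name__, type(tokenizer).__name__)
```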
trainer_log.jsonl CHANGED
@@ -9,3 +9,14 @@
 {"current_steps": 45, "total_steps": 3882, "loss": 0.9327, "lr": 2.307692307692308e-05, "epoch": 0.023177955189286635, "percentage": 1.16, "elapsed_time": "0:14:39", "remaining_time": "20:50:06", "throughput": 598.39, "total_tokens": 526384}
 {"current_steps": 50, "total_steps": 3882, "loss": 0.9233, "lr": 2.564102564102564e-05, "epoch": 0.025753283543651816, "percentage": 1.29, "elapsed_time": "0:16:09", "remaining_time": "20:38:40", "throughput": 603.11, "total_tokens": 584856}
 {"current_steps": 50, "total_steps": 3882, "eval_loss": 0.9281821846961975, "epoch": 0.025753283543651816, "percentage": 1.29, "elapsed_time": "0:16:57", "remaining_time": "21:40:17", "throughput": 574.53, "total_tokens": 584856}
+{"current_steps": 55, "total_steps": 3882, "loss": 0.897, "lr": 2.8205128205128207e-05, "epoch": 0.028328611898016998, "percentage": 1.42, "elapsed_time": "0:18:34", "remaining_time": "21:32:15", "throughput": 577.35, "total_tokens": 643344}
+{"current_steps": 60, "total_steps": 3882, "loss": 0.9169, "lr": 3.0769230769230774e-05, "epoch": 0.03090394025238218, "percentage": 1.55, "elapsed_time": "0:20:03", "remaining_time": "21:17:49", "throughput": 583.09, "total_tokens": 701808}
+{"current_steps": 65, "total_steps": 3882, "loss": 0.9019, "lr": 3.3333333333333335e-05, "epoch": 0.03347926860674736, "percentage": 1.67, "elapsed_time": "0:21:32", "remaining_time": "21:04:47", "throughput": 588.34, "total_tokens": 760304}
+{"current_steps": 70, "total_steps": 3882, "loss": 0.8996, "lr": 3.58974358974359e-05, "epoch": 0.036054596961112545, "percentage": 1.8, "elapsed_time": "0:23:02", "remaining_time": "20:55:11", "throughput": 592.04, "total_tokens": 818760}
+{"current_steps": 75, "total_steps": 3882, "loss": 0.9073, "lr": 3.846153846153846e-05, "epoch": 0.03862992531547772, "percentage": 1.93, "elapsed_time": "0:24:32", "remaining_time": "20:46:08", "throughput": 595.56, "total_tokens": 877256}
+{"current_steps": 80, "total_steps": 3882, "loss": 0.9081, "lr": 4.1025641025641023e-05, "epoch": 0.04120525366984291, "percentage": 2.06, "elapsed_time": "0:26:03", "remaining_time": "20:38:41", "throughput": 598.36, "total_tokens": 935752}
+{"current_steps": 85, "total_steps": 3882, "loss": 0.906, "lr": 4.358974358974359e-05, "epoch": 0.043780582024208085, "percentage": 2.19, "elapsed_time": "0:27:32", "remaining_time": "20:30:19", "throughput": 601.63, "total_tokens": 994216}
+{"current_steps": 90, "total_steps": 3882, "loss": 0.8952, "lr": 4.615384615384616e-05, "epoch": 0.04635591037857327, "percentage": 2.32, "elapsed_time": "0:29:02", "remaining_time": "20:23:48", "throughput": 604.04, "total_tokens": 1052704}
+{"current_steps": 95, "total_steps": 3882, "loss": 0.8996, "lr": 4.871794871794872e-05, "epoch": 0.04893123873293845, "percentage": 2.45, "elapsed_time": "0:30:31", "remaining_time": "20:17:00", "throughput": 606.61, "total_tokens": 1111176}
+{"current_steps": 100, "total_steps": 3882, "loss": 0.9024, "lr": 5.128205128205128e-05, "epoch": 0.05150656708730363, "percentage": 2.58, "elapsed_time": "0:32:01", "remaining_time": "20:10:52", "throughput": 608.88, "total_tokens": 1169664}
+{"current_steps": 100, "total_steps": 3882, "eval_loss": 0.911374032497406, "epoch": 0.05150656708730363, "percentage": 2.58, "elapsed_time": "0:32:20", "remaining_time": "20:23:12", "throughput": 602.74, "total_tokens": 1169664}
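trainer_log.jsonl holds one JSON object per logging step, with "loss" on training steps and "eval_loss" on evaluation steps. A minimal sketch (local path assumed) for splitting the two series out of the log:

```python
# Read trainer_log.jsonl (the file updated above) and separate training-loss
# entries from eval-loss entries. The local path is an assumption.
import json

train_points, eval_points = [], []
with open("trainer_log.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        step = record["current_steps"]
        if "loss" in record:
            train_points.append((step, record["loss"]))
        elif "eval_loss" in record:
            eval_points.append((step, record["eval_loss"]))

print("last train loss:", train_points[-1])
print("last eval loss: ", eval_points[-1])
```

On the lines added in this commit, that yields a training loss of 0.9024 and an eval loss of about 0.9114 at step 100.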