ben81828 committed
Commit 67f76c9 · verified · Parent: 58149a3

Training in progress, step 1600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81c47427b42705c35bffe681bec49fac21a6b19f781031ccb1d03b2dd6ac2efa
+oid sha256:36f722668f1a0618c0a9e43f223d905e82d0c1068f32211684745e00d5aaa272
 size 29034840
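
The files in this commit are Git LFS pointers: each one records the real blob by its `oid sha256` and `size`, and here the adapter pointer swaps to a new blob of the same 29034840 bytes. A minimal sketch for checking a pulled blob against its pointer, using only the standard library (the pointer/blob paths below are illustrative, since a checked-out LFS repo replaces the pointer text with the blob itself):

    import hashlib
    from pathlib import Path

    def parse_lfs_pointer(pointer_path: Path) -> dict:
        """Parse a Git LFS pointer file into its key/value fields."""
        fields = {}
        for line in pointer_path.read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    def verify_blob(pointer_path: Path, blob_path: Path) -> bool:
        """Compare the blob's sha256 and byte size against the pointer's oid/size."""
        fields = parse_lfs_pointer(pointer_path)
        expected_oid = fields["oid"].removeprefix("sha256:")
        expected_size = int(fields["size"])
        data = blob_path.read_bytes()
        return (hashlib.sha256(data).hexdigest() == expected_oid
                and len(data) == expected_size)

    # Hypothetical usage: pointer text saved aside vs. the resolved adapter weights.
    # print(verify_blob(Path("adapter_model.safetensors.pointer"),
    #                   Path("last-checkpoint/adapter_model.safetensors")))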
last-checkpoint/global_step1600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e091a8c7fceb415905f7821beab43c7ed45ca60df330f7eb6bca5d2495e0afd
+size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc8c286a1f817f5fa92ee5287137ba1da1dd812b56fe458a4b50e828a9ef57ce
+size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e860255e84d5ed5472a1e731852a76784b39bcc513d580bce574a8f9843c030d
+size 43429616
last-checkpoint/global_step1600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71678fd629b82fabf0f9b710e39e9496a4c007151a404a0928b1d62ea948f6b2
+size 43429616
last-checkpoint/global_step1600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d4afb4aaf2fe09b00f009636090ac8140a92a92061b0beb56860dbb1563a322
+size 637299
last-checkpoint/global_step1600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99752500f7623dfbbb0b804e1c22e1cbd188635e5967f6ff430fde200bbb865f
+size 637171
last-checkpoint/global_step1600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf3c8ffb4a388a7c685af82e29e62c38dac41b19f00d659db4227bfe73f04b08
+size 637171
last-checkpoint/global_step1600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bab2075b924174a0c53aaeb6926f645d6f84f031f1c7a5a84a1e25330a267f11
+size 637171
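
The new `global_step1600` directory holds the DeepSpeed ZeRO shards for this step: one `bf16_zero_pp_rank_*_optim_states.pt` and one `zero_pp_rank_*_model_states.pt` per data-parallel rank (four ranks here). If a single consolidated fp32 state dict were ever needed, DeepSpeed ships a helper for this; a hedged sketch, assuming DeepSpeed is installed and the LFS blobs have been pulled:

    # Sketch only: consolidate ZeRO-partitioned shards into one fp32 state dict.
    # Directory and tag names mirror this commit's layout.
    from deepspeed.utils.zero_to_fp32 import (
        get_fp32_state_dict_from_zero_checkpoint,
    )

    state_dict = get_fp32_state_dict_from_zero_checkpoint(
        "last-checkpoint",       # directory containing global_step1600/ and latest
        tag="global_step1600",   # which global step to consolidate
    )
    print(sum(p.numel() for p in state_dict.values()), "parameters recovered")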
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1550
+global_step1600
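
`latest` is DeepSpeed's tag file: it names the `global_step*` subdirectory the engine should resume from, and this commit bumps it from global_step1550 to global_step1600. A small, standard-library-only sketch for resolving and sanity-checking the tag (the `last-checkpoint` path comes from this diff):

    from pathlib import Path

    ckpt_dir = Path("last-checkpoint")
    tag = (ckpt_dir / "latest").read_text().strip()   # e.g. "global_step1600"
    step_dir = ckpt_dir / tag

    # List the per-rank shard files expected under the tagged directory.
    if step_dir.is_dir():
        for shard in sorted(step_dir.glob("*_rank_*")):
            print(shard.name, shard.stat().st_size, "bytes")
    else:
        raise FileNotFoundError(f"{step_dir} is missing; checkpoint is incomplete")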
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f1e48a120d69830576f7b582aa6cc46f0ca41d30015a7a674eaec3dcdfc0f09
+oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dbabb9273d3983e52a4a981b5f60f8c2e19da375765d05bb9f2caad284b9652
+oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:554ac925bb9c9ea292b7a41caac1cf75285511cf8aa440f37090891ee457a178
+oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5be5e00123fc0a321e41599b50e07be02f4c165504c601192e5c73f5f5437c30
+oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:009e196049daa80a75a37312338f9f37a038a260e8bdb3c8e7bae80b1332e3b1
+oid sha256:05a385f582b86d8b587b56eaada5930afef800d2a4f1e7413c113f427a6fcef2
 size 1064
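
The per-rank `rng_state_*.pth` files and `scheduler.pt` are ordinary `torch.save` pickles written by the Trainer. A quick inspection sketch, assuming a local checkout with the LFS blobs resolved; the exact dictionary keys depend on the transformers/DeepSpeed versions used:

    import torch

    # Recent PyTorch defaults to weights_only=True, which rejects these
    # pickled Python objects, so load them explicitly as full pickles.
    rng_state = torch.load("last-checkpoint/rng_state_0.pth",
                           map_location="cpu", weights_only=False)
    scheduler = torch.load("last-checkpoint/scheduler.pt",
                           map_location="cpu", weights_only=False)

    print(type(rng_state), list(rng_state) if isinstance(rng_state, dict) else rng_state)
    print(type(scheduler), list(scheduler) if isinstance(scheduler, dict) else scheduler)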
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.3963810205459595,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1500",
-  "epoch": 0.7983517898532063,
+  "epoch": 0.8241050733968581,
   "eval_steps": 50,
-  "global_step": 1550,
+  "global_step": 1600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2766,11 +2766,100 @@
       "eval_steps_per_second": 0.772,
       "num_input_tokens_seen": 18129304,
       "step": 1550
+    },
+    {
+      "epoch": 0.8009271182075715,
+      "grad_norm": 6.105522334359041,
+      "learning_rate": 7.002109533435066e-05,
+      "loss": 0.4305,
+      "num_input_tokens_seen": 18187728,
+      "step": 1555
+    },
+    {
+      "epoch": 0.8035024465619367,
+      "grad_norm": 7.852746488368037,
+      "learning_rate": 6.982571905530669e-05,
+      "loss": 0.4529,
+      "num_input_tokens_seen": 18246192,
+      "step": 1560
+    },
+    {
+      "epoch": 0.8060777749163018,
+      "grad_norm": 5.8884808032636275,
+      "learning_rate": 6.962998292604517e-05,
+      "loss": 0.4569,
+      "num_input_tokens_seen": 18304632,
+      "step": 1565
+    },
+    {
+      "epoch": 0.808653103270667,
+      "grad_norm": 8.738504630489588,
+      "learning_rate": 6.943389049930931e-05,
+      "loss": 0.3936,
+      "num_input_tokens_seen": 18363136,
+      "step": 1570
+    },
+    {
+      "epoch": 0.8112284316250322,
+      "grad_norm": 8.410645723765466,
+      "learning_rate": 6.923744533430937e-05,
+      "loss": 0.4083,
+      "num_input_tokens_seen": 18421592,
+      "step": 1575
+    },
+    {
+      "epoch": 0.8138037599793974,
+      "grad_norm": 7.654296364994705,
+      "learning_rate": 6.904065099665803e-05,
+      "loss": 0.4564,
+      "num_input_tokens_seen": 18480104,
+      "step": 1580
+    },
+    {
+      "epoch": 0.8163790883337626,
+      "grad_norm": 17.3461741974751,
+      "learning_rate": 6.884351105830568e-05,
+      "loss": 0.3928,
+      "num_input_tokens_seen": 18538600,
+      "step": 1585
+    },
+    {
+      "epoch": 0.8189544166881277,
+      "grad_norm": 16.63308936698189,
+      "learning_rate": 6.864602909747563e-05,
+      "loss": 0.4278,
+      "num_input_tokens_seen": 18597104,
+      "step": 1590
+    },
+    {
+      "epoch": 0.8215297450424929,
+      "grad_norm": 10.83446543348463,
+      "learning_rate": 6.84482086985991e-05,
+      "loss": 0.3956,
+      "num_input_tokens_seen": 18655584,
+      "step": 1595
+    },
+    {
+      "epoch": 0.8241050733968581,
+      "grad_norm": 7.98539427128297,
+      "learning_rate": 6.825005345225019e-05,
+      "loss": 0.3834,
+      "num_input_tokens_seen": 18714072,
+      "step": 1600
+    },
+    {
+      "epoch": 0.8241050733968581,
+      "eval_loss": 0.44774264097213745,
+      "eval_runtime": 19.316,
+      "eval_samples_per_second": 3.106,
+      "eval_steps_per_second": 0.777,
+      "num_input_tokens_seen": 18714072,
+      "step": 1600
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 18129304,
+  "num_input_tokens_seen": 18714072,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2785,7 +2874,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1196197017812992.0,
+  "total_flos": 1234783772868608.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null