ben81828 committed
Commit 72d66b5 · verified · 1 Parent(s): f9577c0

Training in progress, step 1550, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c9a15524ca69ba3d1a171c59eaf574e64f0642e13435155688ec32a8d7af040
+oid sha256:81c47427b42705c35bffe681bec49fac21a6b19f781031ccb1d03b2dd6ac2efa
 size 29034840
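
Note: the entry above is a Git LFS pointer diff. The repository only tracks the version/oid/size triplet; the actual ~29 MB adapter blob lives in LFS storage and must hash to the SHA-256 digest in the oid line. A minimal verification sketch in Python (an illustration, not part of the commit; it assumes the checkpoint has already been pulled locally and uses the file layout shown in this commit):

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoint shards need not fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected values copied from the new LFS pointer in this commit.
expected_oid = "81c47427b42705c35bffe681bec49fac21a6b19f781031ccb1d03b2dd6ac2efa"
expected_size = 29034840

path = Path("last-checkpoint/adapter_model.safetensors")
assert path.stat().st_size == expected_size, "size does not match the pointer"
assert sha256_of(path) == expected_oid, "sha256 does not match the pointer"
print("adapter_model.safetensors matches its LFS pointer")
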
last-checkpoint/global_step1550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf681e5efd3e47f682c063da3d38d4db386b4d08c53fea2bfbb83e783e63635a
+size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b67aec5c32eeb07d9ea90510bd992498a7b889457bdf24a2347c7ada1ac571ec
+size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b96525d4693047b0ce756922976df4a7eceb5cee4198eae42b69e16babccaa
+size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8797a0fb24bb222a68365b1418a17c430c8945d7791d9c4904b6cd53ccba1b57
+size 43429616
last-checkpoint/global_step1550/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a43dd54cbbc70405ab57738adb3ec4581722d997b526b922cea67d12d7a3154
+size 637299
last-checkpoint/global_step1550/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2160593af1ce08c3f12b9b90e1c3cbef9f94fa69d7b34c91af296952a4c32634
+size 637171
last-checkpoint/global_step1550/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a26b6fdb593b50a1684d8681f4281e3d55be3afd1e2403c04c5de137e4315716
+size 637171
last-checkpoint/global_step1550/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4062e6de2aea9cf48856d37c0a6068948c993876856dc4d6c5e69e86ba09a28e
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1500
+global_step1550
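
Note: the global_step1550/ directory added above holds the DeepSpeed ZeRO shards for this step — one bf16_zero_pp_rank_N_mp_rank_00_optim_states.pt (partitioned optimizer states, ~43 MB each) and one zero_pp_rank_N_mp_rank_00_model_states.pt per data-parallel rank — and the last-checkpoint/latest tag now reads global_step1550 so a resume picks up these shards. If a single consolidated fp32 state dict is needed, DeepSpeed's zero_to_fp32 utilities handle exactly this layout; a hedged sketch (not part of the commit; assumes DeepSpeed is installed and the whole checkpoint has been fetched):

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" is the directory from this commit; the tag matches the
# contents of last-checkpoint/latest and selects the global_step1550/ shards.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step1550")
print(f"consolidated {len(state_dict)} parameter tensors")

Checkpoints written through DeepSpeed usually also contain a standalone zero_to_fp32.py script that does the same consolidation from the command line.
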
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd3566049ad1f65d2f434d990deb65584d2b2dcb1aac8e89c68ea37dc533eab7
+oid sha256:7f1e48a120d69830576f7b582aa6cc46f0ca41d30015a7a674eaec3dcdfc0f09
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f96a47dc4176412bc893ccb49c004c8fa1cc8c306d67689d87ed20944233c62
+oid sha256:4dbabb9273d3983e52a4a981b5f60f8c2e19da375765d05bb9f2caad284b9652
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebd9fa3db1079ccf750b71f4eeedbf1f04422fc748026a4b866afe133f9fbfd1
+oid sha256:554ac925bb9c9ea292b7a41caac1cf75285511cf8aa440f37090891ee457a178
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:383e9e252cc8292eef0120c964bdc8033972e800c085c97c42af97379e6b4b5c
+oid sha256:5be5e00123fc0a321e41599b50e07be02f4c165504c601192e5c73f5f5437c30
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a102b72949813698fac5ec632a1bf25ec2ce711ec639920c48d52212dc276b1
+oid sha256:009e196049daa80a75a37312338f9f37a038a260e8bdb3c8e7bae80b1332e3b1
 size 1064
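
Note: the per-rank rng_state_{0..3}.pth files and scheduler.pt updated above are small torch-serialized states that the Trainer restores on resume, so data shuffling, dropout, and the learning-rate schedule continue from step 1550 instead of restarting. A quick inspection sketch (an assumption-laden example, not part of the commit; the exact keys depend on the Transformers/PyTorch versions used for training):

import torch

# weights_only=False because these files hold Python/NumPy RNG state objects,
# not just tensors (recent PyTorch versions default to weights_only=True).
rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)

for name, obj in [("rng_state_0", rng_state), ("scheduler", scheduler_state)]:
    keys = list(obj.keys()) if isinstance(obj, dict) else type(obj).__name__
    print(name, keys)
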
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.3963810205459595,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1500",
-  "epoch": 0.7725985063095545,
+  "epoch": 0.7983517898532063,
   "eval_steps": 50,
-  "global_step": 1500,
+  "global_step": 1550,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2677,11 +2677,100 @@
       "eval_steps_per_second": 0.779,
       "num_input_tokens_seen": 17544440,
       "step": 1500
+    },
+    {
+      "epoch": 0.7751738346639196,
+      "grad_norm": 10.358996803929989,
+      "learning_rate": 7.195428951199204e-05,
+      "loss": 0.3397,
+      "num_input_tokens_seen": 17602952,
+      "step": 1505
+    },
+    {
+      "epoch": 0.7777491630182848,
+      "grad_norm": 11.954413440576635,
+      "learning_rate": 7.176270566729904e-05,
+      "loss": 0.5039,
+      "num_input_tokens_seen": 17661440,
+      "step": 1510
+    },
+    {
+      "epoch": 0.78032449137265,
+      "grad_norm": 15.247523980815545,
+      "learning_rate": 7.157072681477069e-05,
+      "loss": 0.426,
+      "num_input_tokens_seen": 17719928,
+      "step": 1515
+    },
+    {
+      "epoch": 0.7828998197270152,
+      "grad_norm": 11.560314722738736,
+      "learning_rate": 7.137835643895305e-05,
+      "loss": 0.3587,
+      "num_input_tokens_seen": 17778424,
+      "step": 1520
+    },
+    {
+      "epoch": 0.7854751480813804,
+      "grad_norm": 9.214361802688169,
+      "learning_rate": 7.118559803149865e-05,
+      "loss": 0.4849,
+      "num_input_tokens_seen": 17836936,
+      "step": 1525
+    },
+    {
+      "epoch": 0.7880504764357456,
+      "grad_norm": 9.825924936343675,
+      "learning_rate": 7.099245509110299e-05,
+      "loss": 0.3795,
+      "num_input_tokens_seen": 17895392,
+      "step": 1530
+    },
+    {
+      "epoch": 0.7906258047901107,
+      "grad_norm": 28.893219975863104,
+      "learning_rate": 7.079893112344118e-05,
+      "loss": 0.4206,
+      "num_input_tokens_seen": 17953872,
+      "step": 1535
+    },
+    {
+      "epoch": 0.7932011331444759,
+      "grad_norm": 9.96463686082691,
+      "learning_rate": 7.060502964110418e-05,
+      "loss": 0.3567,
+      "num_input_tokens_seen": 18012320,
+      "step": 1540
+    },
+    {
+      "epoch": 0.7957764614988411,
+      "grad_norm": 16.21585017609129,
+      "learning_rate": 7.041075416353513e-05,
+      "loss": 0.4172,
+      "num_input_tokens_seen": 18070792,
+      "step": 1545
+    },
+    {
+      "epoch": 0.7983517898532063,
+      "grad_norm": 7.729155059823011,
+      "learning_rate": 7.02161082169654e-05,
+      "loss": 0.4666,
+      "num_input_tokens_seen": 18129304,
+      "step": 1550
+    },
+    {
+      "epoch": 0.7983517898532063,
+      "eval_loss": 0.42660555243492126,
+      "eval_runtime": 19.4313,
+      "eval_samples_per_second": 3.088,
+      "eval_steps_per_second": 0.772,
+      "num_input_tokens_seen": 18129304,
+      "step": 1550
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 17544440,
+  "num_input_tokens_seen": 18129304,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2696,7 +2785,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1157604242882560.0,
+  "total_flos": 1196197017812992.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null