ben81828 committed (verified)
Commit c8fc9ef · 1 Parent(s): dba6431

Training in progress, step 2100, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d8c07c45cd740f6c38df8b73c4bc4f9f8c11bd6712886d745d8476e5ba112852
+ oid sha256:a111e8798c83c643eeba02e2a149e66e3437bb97bb843d76a35308962c3f1d78
  size 29034840
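Note: every entry in this commit is a Git LFS pointer file (three lines: version, oid sha256, size), not the binary payload itself. Below is a minimal sketch for reading those fields from a local clone whose pointers have not yet been smudged by git-lfs; the helper name is illustrative and not part of the repository.

from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(ptr["oid"])   # sha256:a111e879... after this commit
print(ptr["size"])  # 29034840 (about 29 MB of adapter weights)

The actual binaries resolve through the oid; git lfs pull (or the Hugging Face Hub download helpers) fetches them in place of the pointers.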
last-checkpoint/global_step2099/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3908f123db8cae3a945e895d5aa1a25ac06fead96ab3d44f58999c89ede68a1e
+ size 43429616
last-checkpoint/global_step2099/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c87aab36c08b6bda15ecb4f5a883590379e836f53870ca7d87391088cd1f3778
+ size 43429616
last-checkpoint/global_step2099/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26d47c521ed24c4713fe79fad87b641b58f7e9b18218d342415d15e68d1675e8
+ size 43429616
last-checkpoint/global_step2099/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6332f495af7a2bd28833ac9d8356f39a73edd88a1a16862d6d97bb8e1ba7e240
+ size 43429616
last-checkpoint/global_step2099/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:170819cf1e1ec40f1c64f0f8a28739309ccba0083ab91beb471c76cc01917bb0
+ size 637299
last-checkpoint/global_step2099/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cea1f1d451e7ba08fb0317e93f359869010a66eb50256a9b0a830427271d537
+ size 637171
last-checkpoint/global_step2099/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a405978cb31a944c3cf457970ae9b8c0a96c904b42cddc15e4901dc4de49c64
+ size 637171
last-checkpoint/global_step2099/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59f57c64158a3a073300bf316e496206fb6d7f6f6ccd70d8f76a94d610905b5a
+ size 637171
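The global_step2099 directory added above is a DeepSpeed ZeRO checkpoint: four bf16_zero_pp_rank_*_optim_states.pt shards (one per data-parallel rank) plus the matching zero_pp_rank_*_model_states.pt partitions; the latest file below is updated to point at this tag. As a rough sketch only, assuming DeepSpeed's standard zero_to_fp32 utility accepts this checkpoint layout, the shards could be consolidated into a single fp32 state dict:

# Sketch, not part of this repository: relies on DeepSpeed's documented
# zero_to_fp32 helper; verify it supports this bf16 ZeRO checkpoint format.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# The tag argument is omitted, so it is resolved from last-checkpoint/latest
# (which this commit sets to "global_step2099").
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(len(state_dict), "consolidated parameter tensors")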
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2049
+ global_step2099
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d1ddd7e7b4dc44903837b0414e4659f8383cd8f16b41dd396d4eaf5b9829f79
+ oid sha256:b105708e2c99c8661b46698b8ccc5799ac83c1f0fc6a30c2d41c9fbfb349d480
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eeddfb77fe4d3b495c4e08307767e08df90e96ef241c3eb80d5f75adec393e80
+ oid sha256:bc44cd2015d8c8fc2f109f07c797876873a52f478c57b0350b8a2cf5dcb17f25
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e6260196fabb00061b1f1c8de6288382570dc14d02d2aa308050ca858880a97
+ oid sha256:9843dec201b5a542ebd69abfc596f99ad5a000cf81dab52c6a2c52a5b9224ea7
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ea023bc5b1def54e0c49389175c0fae812f5f764c502525ce775d993d5ab2c03
+ oid sha256:2296f0efda653dd4c7e861f5a867baa09d6d8bb50e57bc69af930268b40de9ef
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:041c9bf0c91e6ca81e97b14bb8b67e4083823b3129e241af387f768dc73002f8
+ oid sha256:7e06f479f50d6b90756aab0e5f1d2b8944be9014d46f9719fade9db1f0995f9b
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.2472737729549408,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1950",
- "epoch": 1.0556270924542879,
+ "epoch": 1.0813803759979397,
  "eval_steps": 50,
- "global_step": 2050,
+ "global_step": 2100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3656,11 +3656,100 @@
  "eval_steps_per_second": 0.773,
  "num_input_tokens_seen": 23971048,
  "step": 2050
+ },
+ {
+ "epoch": 1.0582024208086531,
+ "grad_norm": 11.207729973488336,
+ "learning_rate": 4.929706304405748e-05,
+ "loss": 0.217,
+ "num_input_tokens_seen": 24029544,
+ "step": 2055
+ },
+ {
+ "epoch": 1.0607777491630184,
+ "grad_norm": 10.285736762582813,
+ "learning_rate": 4.9084072903884345e-05,
+ "loss": 0.2132,
+ "num_input_tokens_seen": 24088008,
+ "step": 2060
+ },
+ {
+ "epoch": 1.0633530775173834,
+ "grad_norm": 6.469754305471942,
+ "learning_rate": 4.887109938840783e-05,
+ "loss": 0.2438,
+ "num_input_tokens_seen": 24146480,
+ "step": 2065
+ },
+ {
+ "epoch": 1.0659284058717486,
+ "grad_norm": 7.546288384087562,
+ "learning_rate": 4.8658146363241406e-05,
+ "loss": 0.2623,
+ "num_input_tokens_seen": 24204984,
+ "step": 2070
+ },
+ {
+ "epoch": 1.0685037342261139,
+ "grad_norm": 4.658066041307915,
+ "learning_rate": 4.844521769362654e-05,
+ "loss": 0.2041,
+ "num_input_tokens_seen": 24263456,
+ "step": 2075
+ },
+ {
+ "epoch": 1.071079062580479,
+ "grad_norm": 6.232031131112278,
+ "learning_rate": 4.823231724436271e-05,
+ "loss": 0.246,
+ "num_input_tokens_seen": 24321960,
+ "step": 2080
+ },
+ {
+ "epoch": 1.0736543909348442,
+ "grad_norm": 10.347547308861373,
+ "learning_rate": 4.801944887973714e-05,
+ "loss": 0.2857,
+ "num_input_tokens_seen": 24380464,
+ "step": 2085
+ },
+ {
+ "epoch": 1.0762297192892094,
+ "grad_norm": 7.357894649180612,
+ "learning_rate": 4.7806616463454715e-05,
+ "loss": 0.2577,
+ "num_input_tokens_seen": 24438976,
+ "step": 2090
+ },
+ {
+ "epoch": 1.0788050476435747,
+ "grad_norm": 9.485170393194913,
+ "learning_rate": 4.759382385856779e-05,
+ "loss": 0.2523,
+ "num_input_tokens_seen": 24497448,
+ "step": 2095
+ },
+ {
+ "epoch": 1.0813803759979397,
+ "grad_norm": 6.0022045554501045,
+ "learning_rate": 4.738107492740619e-05,
+ "loss": 0.2805,
+ "num_input_tokens_seen": 24555904,
+ "step": 2100
+ },
+ {
+ "epoch": 1.0813803759979397,
+ "eval_loss": 0.3205481469631195,
+ "eval_runtime": 19.3676,
+ "eval_samples_per_second": 3.098,
+ "eval_steps_per_second": 0.774,
+ "num_input_tokens_seen": 24555904,
+ "step": 2100
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 23971048,
+ "num_input_tokens_seen": 24555904,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -3675,7 +3764,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1581709155893248.0,
+ "total_flos": 1620301350436864.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null