ben81828 committed
Commit 69a4891 · verified · 1 Parent(s): bcf881c

Training in progress, step 2700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:764955504d6e3b1e90ca51e30bd2f4eb4f8626760b453dba421653e4148c6798
+ oid sha256:493ef098b4e142b34c8f8ffa03b228d10404d9550092e5415ecc2ddce2804e56
  size 29034840
last-checkpoint/global_step2699/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6479c80783220f5b315b8cdfb27c69f066a48968353bae9fdd0c6ad51f103be3
+ size 43429616
last-checkpoint/global_step2699/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b762b6ca4b4997a5ea83080a7f39306dd23f33bc2b94445246b8ce782127192
+ size 43429616
last-checkpoint/global_step2699/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7b8abf2472099a388a11d1952eee5504aba8ae96731236f488d7f52753ae7e3
+ size 43429616
last-checkpoint/global_step2699/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd79ab9b573afc088ff9dec4313acf1c44f97bb89ba0e4fb8695dd337c692876
+ size 43429616
last-checkpoint/global_step2699/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0db3b78beff3e56858d2436c1d0a2e545d33d040af80b20f8200dd2b4f2a56
+ size 637299
last-checkpoint/global_step2699/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de5fe7df8657de59dc79c17929c6c467fcfb8dde65fda88f1345df754f88c846
+ size 637171
last-checkpoint/global_step2699/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08279f88d37f7f8bbe8a892a3e3b3bc04d94c80affdedddfd9a3aef1850c302a
+ size 637171
last-checkpoint/global_step2699/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a071bcb76fa2506fab196add6a363d182ee704d850d67a025e43d7d63627b2c
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2649
+ global_step2699
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e599331812a34463d102d64a4034a0b702a893f362f752003aa577fe71dcc1d
+ oid sha256:5d8d3c7739f9787ea797b86ff1b3a51f9e68197835ba3178915a8a77558f67fc
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ed431e5e71393a0174ad2fd492755f8c1142596f1af3bfe7827c1f8f815dd80
+ oid sha256:a22a57799bc43e59db67d9a787ed73040020c5f35990602033f4dab1318787d7
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a2e70789f26a9f56b6b779e87cb1a405615af81562a256e5afe579f40972e827
+ oid sha256:29a624b936b77a04d6bfb6940acdd65a710bf39452e419e7ddb5c40fb2261072
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c8c18bc74d5211e761da269c814d7da0687633993838ec22e81ac939a14e91b
+ oid sha256:3a79306817d4440cd621149537e8cf216b60f847fc6f9531a6147426aa02bb07
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b2d21491c81c396d8a264b58575a84e3c59e8d20650a3ba41b1f862eb888fcb7
+ oid sha256:55fc50c620128befbe42c0ee9c82ae426d9d0e14e63b4929582516d207937ad9
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.18780523538589478,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-2650",
- "epoch": 1.3646664949781098,
+ "epoch": 1.3904197785217616,
  "eval_steps": 50,
- "global_step": 2650,
+ "global_step": 2700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4724,11 +4724,100 @@
  "eval_steps_per_second": 0.773,
  "num_input_tokens_seen": 30988344,
  "step": 2650
+ },
+ {
+ "epoch": 1.367241823332475,
+ "grad_norm": 7.2025231601123085,
+ "learning_rate": 2.4926244662411734e-05,
+ "loss": 0.2109,
+ "num_input_tokens_seen": 31046848,
+ "step": 2655
+ },
+ {
+ "epoch": 1.36981715168684,
+ "grad_norm": 4.944078503740031,
+ "learning_rate": 2.474217536081342e-05,
+ "loss": 0.2544,
+ "num_input_tokens_seen": 31105352,
+ "step": 2660
+ },
+ {
+ "epoch": 1.3723924800412053,
+ "grad_norm": 20.15441745302564,
+ "learning_rate": 2.4558564505825088e-05,
+ "loss": 0.2616,
+ "num_input_tokens_seen": 31163848,
+ "step": 2665
+ },
+ {
+ "epoch": 1.3749678083955703,
+ "grad_norm": 9.97997619530482,
+ "learning_rate": 2.4375415430107977e-05,
+ "loss": 0.1722,
+ "num_input_tokens_seen": 31222360,
+ "step": 2670
+ },
+ {
+ "epoch": 1.3775431367499356,
+ "grad_norm": 2.1763725749744465,
+ "learning_rate": 2.4192731457941805e-05,
+ "loss": 0.1966,
+ "num_input_tokens_seen": 31280840,
+ "step": 2675
+ },
+ {
+ "epoch": 1.3801184651043008,
+ "grad_norm": 10.60754914489294,
+ "learning_rate": 2.4010515905164243e-05,
+ "loss": 0.2578,
+ "num_input_tokens_seen": 31339264,
+ "step": 2680
+ },
+ {
+ "epoch": 1.382693793458666,
+ "grad_norm": 15.190148047284161,
+ "learning_rate": 2.3828772079110907e-05,
+ "loss": 0.3076,
+ "num_input_tokens_seen": 31397744,
+ "step": 2685
+ },
+ {
+ "epoch": 1.385269121813031,
+ "grad_norm": 20.889251542966374,
+ "learning_rate": 2.3647503278555233e-05,
+ "loss": 0.1911,
+ "num_input_tokens_seen": 31456240,
+ "step": 2690
+ },
+ {
+ "epoch": 1.3878444501673963,
+ "grad_norm": 4.797824757647722,
+ "learning_rate": 2.3466712793648638e-05,
+ "loss": 0.2204,
+ "num_input_tokens_seen": 31514744,
+ "step": 2695
+ },
+ {
+ "epoch": 1.3904197785217616,
+ "grad_norm": 16.36975645045513,
+ "learning_rate": 2.3286403905860733e-05,
+ "loss": 0.2269,
+ "num_input_tokens_seen": 31573240,
+ "step": 2700
+ },
+ {
+ "epoch": 1.3904197785217616,
+ "eval_loss": 0.24001093208789825,
+ "eval_runtime": 19.3768,
+ "eval_samples_per_second": 3.096,
+ "eval_steps_per_second": 0.774,
+ "num_input_tokens_seen": 31573240,
+ "step": 2700
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 30988344,
+ "num_input_tokens_seen": 31573240,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4743,7 +4832,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2044752555933696.0,
+ "total_flos": 2083346708430848.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null