ben81828 committed
Commit 95cf310 · verified · 1 Parent(s): 040a7d5

Training in progress, step 950, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dca569a5bee748c34c51ac0d5b0f16a410ab4a71a296a7c172247ce0d6801519
+ oid sha256:80df5e3a8d607d66820e6fd586076f2617165a54c1dd83c001587d2b85b8c1ef
  size 29034840
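
Each CHANGED or ADDED entry in this commit is a Git LFS pointer file (a version line, a sha256 oid, and a byte size), not the binary itself. As a minimal sketch, assuming you have both the pointer text and a locally resolved blob (the paths below are illustrative, not part of this commit), you could confirm that the blob matches its pointer like this:

    import hashlib
    from pathlib import Path

    def parse_lfs_pointer(pointer_path: Path) -> dict:
        # Pointer files are plain "key value" lines: version, oid, size.
        return dict(line.split(" ", 1) for line in pointer_path.read_text().splitlines() if line)

    def blob_matches_pointer(pointer_path: Path, blob_path: Path) -> bool:
        fields = parse_lfs_pointer(pointer_path)
        expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
        actual_oid = hashlib.sha256(blob_path.read_bytes()).hexdigest()
        return actual_oid == expected_oid and blob_path.stat().st_size == int(fields["size"])

    # Illustrative usage (hypothetical paths):
    # blob_matches_pointer(Path("pointers/adapter_model.safetensors"),
    #                      Path("blobs/adapter_model.safetensors"))
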
last-checkpoint/global_step950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e233d0aa5b09d0dc9346bf7bb973b9b39be72451e5bbc62feeec79eb51fbca3
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a18d7c135c18df72fb2bb6dc1f9d7d84cc2d4d9509446abf4e7f9b8eda81120
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44849a34cc26d6fe94df3a9755d666d0035040fbc9e9e5d34f8ef9a19737370e
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9329ec1841ccea84942072d3c9fef8201a303a99281369f732ec7fac5af3c024
+ size 43429616
last-checkpoint/global_step950/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de8d2452415f67f6cb314134176f636d47ded9477de6e168c2925a95f1e50399
+ size 637299
last-checkpoint/global_step950/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59a0c15d11bab9ab36ba888fa09e8a77ddc15c19181e193c722b9f734d07accd
+ size 637171
last-checkpoint/global_step950/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8010ba1747feb31117a6b69f86638d1fc0741ad088c9024682efeba498962221
+ size 637171
last-checkpoint/global_step950/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39c9dd240d6e20c6b0f5d0f32f43209c894bb5b7adbab13d992cf542fcdcdb1c
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step900
+ global_step950
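
The latest file is the DeepSpeed checkpoint tag: it now names the global_step950 directory whose bf16_zero_*_optim_states.pt and zero_pp_rank_*_model_states.pt shards were added above (one pair per data-parallel rank, four ranks here). If a single consolidated fp32 state dict is wanted instead of the per-rank ZeRO shards, a minimal sketch using DeepSpeed's zero_to_fp32 utility might look like the following; the output filename is illustrative and not part of this commit:

    import torch
    from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    # "last-checkpoint" holds the "latest" tag file, which points at "global_step950".
    state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step950")

    # Write one consolidated fp32 state dict alongside the sharded checkpoint.
    torch.save(state_dict, "last-checkpoint/consolidated_fp32_state_dict.pt")
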
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
+ oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
+ oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
+ oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
+ oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:728e48e0d034d64ded7c746a218c0b746489e6573b6bc352ba39a730bda96bbd
+ oid sha256:fe7b1ea5625ff613528c3f7f1e76a7521cccec3038e0a450f6a24711c8e1b799
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7039459347724915,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-850",
- "epoch": 0.4635591037857327,
+ "best_metric": 0.6830747723579407,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-950",
+ "epoch": 0.4893123873293845,
  "eval_steps": 50,
- "global_step": 900,
+ "global_step": 950,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1609,11 +1609,100 @@
  "eval_steps_per_second": 0.762,
  "num_input_tokens_seen": 10526712,
  "step": 900
+ },
+ {
+ "epoch": 0.46613443214009787,
+ "grad_norm": 7.649426233722995,
+ "learning_rate": 9.112590869053359e-05,
+ "loss": 0.6794,
+ "num_input_tokens_seen": 10585232,
+ "step": 905
+ },
+ {
+ "epoch": 0.46870976049446306,
+ "grad_norm": 6.066459270532772,
+ "learning_rate": 9.100438409496444e-05,
+ "loss": 0.6817,
+ "num_input_tokens_seen": 10643728,
+ "step": 910
+ },
+ {
+ "epoch": 0.47128508884882825,
+ "grad_norm": 7.144597127673979,
+ "learning_rate": 9.088211524207497e-05,
+ "loss": 0.6503,
+ "num_input_tokens_seen": 10702240,
+ "step": 915
+ },
+ {
+ "epoch": 0.4738604172031934,
+ "grad_norm": 9.676112884447143,
+ "learning_rate": 9.075910435112766e-05,
+ "loss": 0.6903,
+ "num_input_tokens_seen": 10760656,
+ "step": 920
+ },
+ {
+ "epoch": 0.4764357455575586,
+ "grad_norm": 12.206584747037537,
+ "learning_rate": 9.063535365485341e-05,
+ "loss": 0.6611,
+ "num_input_tokens_seen": 10819128,
+ "step": 925
+ },
+ {
+ "epoch": 0.47901107391192377,
+ "grad_norm": 8.724970113237934,
+ "learning_rate": 9.051086539941108e-05,
+ "loss": 0.6361,
+ "num_input_tokens_seen": 10877600,
+ "step": 930
+ },
+ {
+ "epoch": 0.48158640226628896,
+ "grad_norm": 26.26773221921971,
+ "learning_rate": 9.038564184434676e-05,
+ "loss": 0.7006,
+ "num_input_tokens_seen": 10936088,
+ "step": 935
+ },
+ {
+ "epoch": 0.48416173062065415,
+ "grad_norm": 6.223867131390233,
+ "learning_rate": 9.025968526255275e-05,
+ "loss": 0.7012,
+ "num_input_tokens_seen": 10994560,
+ "step": 940
+ },
+ {
+ "epoch": 0.4867370589750193,
+ "grad_norm": 6.1072541360418295,
+ "learning_rate": 9.013299794022622e-05,
+ "loss": 0.6968,
+ "num_input_tokens_seen": 11053016,
+ "step": 945
+ },
+ {
+ "epoch": 0.4893123873293845,
+ "grad_norm": 8.372921238626587,
+ "learning_rate": 9.00055821768278e-05,
+ "loss": 0.6825,
+ "num_input_tokens_seen": 11111520,
+ "step": 950
+ },
+ {
+ "epoch": 0.4893123873293845,
+ "eval_loss": 0.6830747723579407,
+ "eval_runtime": 19.7357,
+ "eval_samples_per_second": 3.04,
+ "eval_steps_per_second": 0.76,
+ "num_input_tokens_seen": 11111520,
+ "step": 950
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 10526712,
+ "num_input_tokens_seen": 11111520,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1628,7 +1717,7 @@
  "attributes": {}
  }
  },
- "total_flos": 694543977283584.0,
+ "total_flos": 733134262632448.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null