ben81828 committed
Commit 4155cd9 · verified · 1 Parent(s): 12e0240

Training in progress, step 350, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ebc02da19989435043b7b50280b5e9e82d323b3ede799685ab53edfcf8cfad6a
+ oid sha256:613cdc2baa395a9cb64ef1794cbc658fcd70f510e6627131b053d1c268735db7
  size 29034840
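
The adapter weights themselves live in Git LFS, so the tracked file is only a three-line pointer (spec version, SHA-256 oid, byte size); this commit swaps the oid for the step-350 adapter. A minimal sketch of how such a pointer can be checked against the downloaded blob, assuming the checkpoint has already been fetched with git lfs pull and that the path below is adjusted to the local clone:

import hashlib
from pathlib import Path

# Hypothetical local path; expected values copied from the LFS pointer above.
blob_path = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "613cdc2baa395a9cb64ef1794cbc658fcd70f510e6627131b053d1c268735db7"
expected_size = 29034840

# Hash in 1 MiB chunks so large checkpoint files are not read into memory at once.
sha = hashlib.sha256()
with blob_path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert blob_path.stat().st_size == expected_size, "size does not match LFS pointer"
assert sha.hexdigest() == expected_oid, "sha256 does not match LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
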
last-checkpoint/global_step350/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b374bd8ecb92e5e8650f29dad059410f7a4ac3f7993d5b8ae525e1f3831ee144
+ size 43429616
last-checkpoint/global_step350/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4642b12ab71eef5e6c6cf1dba8b7169b9e7d463b46b12f213038610fa573d62
+ size 43429616
last-checkpoint/global_step350/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b309b01513968b4c55883bb092d1cf0d859bac7f8fcbec6bd60ab5c0058a79cb
+ size 43429616
last-checkpoint/global_step350/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7eb6f7b4c70da77210c9f08d115aff2414de72ce93b00ad1cd111c4a026aad0
+ size 43429616
last-checkpoint/global_step350/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81931d0b9384ae31c6dabf80c0501d96a39631493fc01d9768303da174b201d0
+ size 637299
last-checkpoint/global_step350/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f499324fd68027eacf299cbbe7bc3fdb8132535402762a6d42a52093afb2e644
+ size 637171
last-checkpoint/global_step350/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc84fd8164b463b5d2b20670849f7afe073adacfef5d35bdc708fd108503c2e5
+ size 637171
last-checkpoint/global_step350/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af47b1cce71f0f2679a390a0d88f58018746837e2d03a9ea7b758abb44feb7a6
+ size 637171
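
The eight files added under global_step350/ are DeepSpeed ZeRO partitions: one bf16 optimizer-state shard and one model-state shard per data-parallel rank (ranks 0-3). When a single consolidated fp32 state dict is needed for inspection or export, DeepSpeed's zero_to_fp32 utility can merge the shards. A minimal sketch, assuming DeepSpeed is installed locally and the checkpoint layout is exactly the one shown in this diff (not something this commit itself runs):

# Sketch only: merges the per-rank ZeRO shards back into one fp32 state dict.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# tag="global_step350" selects the shards added in this commit; with tag=None
# the utility falls back to the tag recorded in last-checkpoint/latest.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step350")
print(f"consolidated {len(state_dict)} fp32 tensors")
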
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step300
+ global_step350
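
last-checkpoint/latest is a one-line tag file that DeepSpeed reads on resume to locate the newest shard directory; this commit advances the tag from global_step300 to global_step350. A small illustrative sketch of how the tag resolves to that directory (plain path handling, no DeepSpeed calls):

from pathlib import Path

ckpt_root = Path("last-checkpoint")                # layout as committed in this repo
tag = (ckpt_root / "latest").read_text().strip()   # -> "global_step350" after this commit
shard_dir = ckpt_root / tag

# Lists the *_optim_states.pt and *_model_states.pt shards added above.
print(tag, sorted(p.name for p in shard_dir.glob("*_states.pt")))
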
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef002048764051a71fb00f8f978e9ec32b780dc850bdb059af362cc56494234b
+ oid sha256:ee97cd82dba4d425fdd8dfdb88d4a43d0d4b1979b5c81ab4a24914fb00d4f332
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:37194a6d48612e1a46a2d5d317ead97c70d9fc4569b0118fcd5f84c3dc9daa5a
+ oid sha256:91dad95440fb85dc4a31745642117165c1a72173b2e389679ea8c0b2b6fcd7e2
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17c179483659a784aa1ace2427daff48c556a6bcc3c330e6f3274e4dc95e4b49
+ oid sha256:98698326b023c2af02c94f18726ce52c7f7a6fe290734dd7edbe99bc807fcfa0
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b56857c9b117629f35af2c3d64f522d33a9d8aa94faa81ec6956380a895118c4
+ oid sha256:708e7c6b5bf8a327e688779ebc08830ce249928bcb1ff5c82b1b1d0bf6d2660b
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4a1fa61ac1b274c53a992ffd1c961be55a8fbae66f9b62e05652e59d94531535
+ oid sha256:1a6b1313b92e71cbfe6fd28b5f67fce5b969bc1141062c32ede2fbf27949b277
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.8932263255119324,
    "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-300",
-   "epoch": 0.1545197012619109,
+   "epoch": 0.1802729848055627,
    "eval_steps": 50,
-   "global_step": 300,
+   "global_step": 350,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -541,11 +541,100 @@
        "eval_steps_per_second": 0.759,
        "num_input_tokens_seen": 3508888,
        "step": 300
+     },
+     {
+       "epoch": 0.15709502961627608,
+       "grad_norm": 0.36562722920113416,
+       "learning_rate": 9.978053721391578e-05,
+       "loss": 0.9042,
+       "num_input_tokens_seen": 3567368,
+       "step": 305
+     },
+     {
+       "epoch": 0.15967035797064125,
+       "grad_norm": 0.3765491511973325,
+       "learning_rate": 9.976014894458963e-05,
+       "loss": 0.9007,
+       "num_input_tokens_seen": 3625848,
+       "step": 310
+     },
+     {
+       "epoch": 0.16224568632500644,
+       "grad_norm": 0.5264420727347517,
+       "learning_rate": 9.973885749488589e-05,
+       "loss": 0.9036,
+       "num_input_tokens_seen": 3684336,
+       "step": 315
+     },
+     {
+       "epoch": 0.16482101467937163,
+       "grad_norm": 0.24680747784235688,
+       "learning_rate": 9.971666325125874e-05,
+       "loss": 0.8936,
+       "num_input_tokens_seen": 3742800,
+       "step": 320
+     },
+     {
+       "epoch": 0.1673963430337368,
+       "grad_norm": 0.4982571051665039,
+       "learning_rate": 9.969356661654876e-05,
+       "loss": 0.8989,
+       "num_input_tokens_seen": 3801280,
+       "step": 325
+     },
+     {
+       "epoch": 0.16997167138810199,
+       "grad_norm": 0.49943012602572584,
+       "learning_rate": 9.966956800997546e-05,
+       "loss": 0.8983,
+       "num_input_tokens_seen": 3859792,
+       "step": 330
+     },
+     {
+       "epoch": 0.17254699974246718,
+       "grad_norm": 0.37381050353079964,
+       "learning_rate": 9.964466786712984e-05,
+       "loss": 0.9038,
+       "num_input_tokens_seen": 3918272,
+       "step": 335
+     },
+     {
+       "epoch": 0.17512232809683234,
+       "grad_norm": 0.7501484170811903,
+       "learning_rate": 9.961886663996629e-05,
+       "loss": 0.8947,
+       "num_input_tokens_seen": 3976760,
+       "step": 340
+     },
+     {
+       "epoch": 0.17769765645119753,
+       "grad_norm": 0.5623847203835772,
+       "learning_rate": 9.959216479679458e-05,
+       "loss": 0.9179,
+       "num_input_tokens_seen": 4035240,
+       "step": 345
+     },
+     {
+       "epoch": 0.1802729848055627,
+       "grad_norm": 0.34381878607605765,
+       "learning_rate": 9.956456282227122e-05,
+       "loss": 0.9059,
+       "num_input_tokens_seen": 4093688,
+       "step": 350
+     },
+     {
+       "epoch": 0.1802729848055627,
+       "eval_loss": 0.8960411548614502,
+       "eval_runtime": 20.0734,
+       "eval_samples_per_second": 2.989,
+       "eval_steps_per_second": 0.747,
+       "num_input_tokens_seen": 4093688,
+       "step": 350
      }
    ],
    "logging_steps": 5,
    "max_steps": 3882,
-   "num_input_tokens_seen": 3508888,
+   "num_input_tokens_seen": 4093688,
    "num_train_epochs": 2,
    "save_steps": 50,
    "stateful_callbacks": {
@@ -560,7 +649,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 231471715450880.0,
+   "total_flos": 270058588995584.0,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null