ben81828 committed
Commit a1e4fc8 · verified · 1 Parent(s): 1ac6c0c

Training in progress, step 1650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:36f722668f1a0618c0a9e43f223d905e82d0c1068f32211684745e00d5aaa272
+ oid sha256:a98d193ab7c674712e3c18a30e49e6fde0bcc59efee6c60450baf3a08afe8f78
  size 29034840
last-checkpoint/global_step1650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c92b99d09b208a5e54f54701424a9351685533109a2b3f0141fdfff156b409a9
+ size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b524a5a5ce1f117b997580292cb026d46cd9b79da9f9b1c06f9530a5b8aa996c
+ size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5489eb43fcb6a44359d04a1bd2ae923acbf692327d488e0afc70ab73f91dbdd8
+ size 43429616
last-checkpoint/global_step1650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b247918492721ac2a81189d9733f60f35c3feef1bda359a86a48fd8fb11aa4c
+ size 43429616
last-checkpoint/global_step1650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4c79466c87dc7a7bb2278fbdb456e69e6c6b0a01149fe0952fd362a1881b0c3
+ size 637299
last-checkpoint/global_step1650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4009b72c3e7da18b4a07fc0d3d8de5ac6d846834d93ff9946ac1044932cdd63
+ size 637171
last-checkpoint/global_step1650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a7cde2a2fb17884fd88468a6046779089bf8060a77027c24f561b10b27cca30
+ size 637171
last-checkpoint/global_step1650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9f660827bea0980ccb09f606308a98aeb8817e9ee50af4a17e22e579fd1e42d
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1600
+ global_step1650
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
+ oid sha256:a90384755f5b036b42b1a465b39dbf24a925a02c04294f9d684bc1de7f4db1e5
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
+ oid sha256:7621b41e55056218f97d5b32ae116de3304a677b9f27b6a62170d83a2bbff176
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
+ oid sha256:997e9debadfd125b5c8b66ee6dd79ced3d40d353ff9250475f3814fd950012a6
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
+ oid sha256:0f0256032419959580948d742425f66782bc8eb029126a091669a42c6ee0eba4
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:05a385f582b86d8b587b56eaada5930afef800d2a4f1e7413c113f427a6fcef2
+ oid sha256:73abf8d1cae561939afcea6d9d77e3583686a8bac69cce204a4cb222cd869363
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.3963810205459595,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1500",
- "epoch": 0.8241050733968581,
+ "best_metric": 0.35129043459892273,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1650",
+ "epoch": 0.8498583569405099,
  "eval_steps": 50,
- "global_step": 1600,
+ "global_step": 1650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2855,11 +2855,100 @@
  "eval_steps_per_second": 0.777,
  "num_input_tokens_seen": 18714072,
  "step": 1600
+ },
+ {
+ "epoch": 0.8266804017512233,
+ "grad_norm": 6.7599286769908815,
+ "learning_rate": 6.805156695508075e-05,
+ "loss": 0.4136,
+ "num_input_tokens_seen": 18772552,
+ "step": 1605
+ },
+ {
+ "epoch": 0.8292557301055885,
+ "grad_norm": 5.6959660380818,
+ "learning_rate": 6.7852752809755e-05,
+ "loss": 0.3523,
+ "num_input_tokens_seen": 18830992,
+ "step": 1610
+ },
+ {
+ "epoch": 0.8318310584599536,
+ "grad_norm": 5.054175814859553,
+ "learning_rate": 6.765361462488424e-05,
+ "loss": 0.2983,
+ "num_input_tokens_seen": 18889480,
+ "step": 1615
+ },
+ {
+ "epoch": 0.8344063868143188,
+ "grad_norm": 8.9830923866359,
+ "learning_rate": 6.745415601496127e-05,
+ "loss": 0.3987,
+ "num_input_tokens_seen": 18947936,
+ "step": 1620
+ },
+ {
+ "epoch": 0.836981715168684,
+ "grad_norm": 9.543713321488678,
+ "learning_rate": 6.725438060029485e-05,
+ "loss": 0.5012,
+ "num_input_tokens_seen": 19006432,
+ "step": 1625
+ },
+ {
+ "epoch": 0.8395570435230492,
+ "grad_norm": 9.329917566254677,
+ "learning_rate": 6.705429200694396e-05,
+ "loss": 0.36,
+ "num_input_tokens_seen": 19064920,
+ "step": 1630
+ },
+ {
+ "epoch": 0.8421323718774144,
+ "grad_norm": 4.257164474078224,
+ "learning_rate": 6.685389386665197e-05,
+ "loss": 0.3816,
+ "num_input_tokens_seen": 19123376,
+ "step": 1635
+ },
+ {
+ "epoch": 0.8447077002317795,
+ "grad_norm": 5.420005475317537,
+ "learning_rate": 6.665318981678072e-05,
+ "loss": 0.3503,
+ "num_input_tokens_seen": 19181864,
+ "step": 1640
+ },
+ {
+ "epoch": 0.8472830285861447,
+ "grad_norm": 6.544911058180563,
+ "learning_rate": 6.645218350024456e-05,
+ "loss": 0.3644,
+ "num_input_tokens_seen": 19240352,
+ "step": 1645
+ },
+ {
+ "epoch": 0.8498583569405099,
+ "grad_norm": 9.593843549483235,
+ "learning_rate": 6.625087856544416e-05,
+ "loss": 0.475,
+ "num_input_tokens_seen": 19298848,
+ "step": 1650
+ },
+ {
+ "epoch": 0.8498583569405099,
+ "eval_loss": 0.35129043459892273,
+ "eval_runtime": 19.4001,
+ "eval_samples_per_second": 3.093,
+ "eval_steps_per_second": 0.773,
+ "num_input_tokens_seen": 19298848,
+ "step": 1650
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 18714072,
+ "num_input_tokens_seen": 19298848,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2874,7 +2963,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1234783772868608.0,
+ "total_flos": 1273370793476096.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null