MichaelHu03 committed · verified
Commit b761180 · 1 Parent(s): be1fd73

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,33 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ base_model: google/vit-base-patch16-224
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+ example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+ example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+ example_title: Palace
+ datasets:
+ - mvkvc/artifact-10k
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.34307238459587097
+
+ f1: 0.8702363724071394
+
+ precision: 0.8406337371854613
+
+ recall: 0.902
+
+ auc: 0.9263070000000001
+
+ accuracy: 0.8655
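The card above lists validation metrics but no usage snippet. Below is a minimal inference sketch using the `transformers` pipeline API; the repository id is a placeholder (the exact Hub id is not stated in this commit), and the sample image is one of the widget URLs from the front matter.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the Hub repository this model card is published under.
classifier = pipeline("image-classification", model="MichaelHu03/<this-repo>")

# A URL, local path, or PIL image works; the two labels are "ai" and "real".
url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
for prediction in classifier(url):
    print(prediction["label"], round(prediction["score"], 4))
```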
checkpoint-3000/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224",
+ "_num_labels": 2,
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "ai",
+ "1": "real"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "ai": 0,
+ "real": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.45.0"
+ }
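As a quick sanity check of this checkpoint config, the sketch below reads the label mapping and the ViT-Base hyperparameters back with `transformers`; it assumes the `checkpoint-3000` folder has been downloaded to a local path of the same name (a hypothetical path).

```python
from transformers import ViTConfig

# Hypothetical local path to the downloaded checkpoint-3000 folder.
config = ViTConfig.from_pretrained("./checkpoint-3000")

print(config.id2label)                        # {0: 'ai', 1: 'real'}
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 768 12 12
print(config.image_size, config.patch_size)   # 224 16 -> (224 // 16) ** 2 = 196 patches per image
```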
checkpoint-3000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15409dec5cdb2034e9c2bd474db69dc1bcee23fc0cfd708f6a5f597135fa05c2
+ size 343223968
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a47b67b75074d5c32e6e61c3c0ae89a8c3fbee57d0564482470469dc53866f5
+ size 686563258
checkpoint-3000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95650bf0f0eef9e8a3148906a9d1cc2640dc42f98d239c84584be6e389a6cc93
+ size 13990
checkpoint-3000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d63e80ad6bbc2adc3564ccc8a754139157db8b58f99722236b6035ec26a46fcd
+ size 1064
checkpoint-3000/trainer_state.json ADDED
@@ -0,0 +1,921 @@
+ {
+ "best_metric": 0.34307238459587097,
+ "best_model_checkpoint": "autotrain-ht4es-gbvmt/checkpoint-3000",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 3000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [ ...120 training entries and 3 evaluation entries, tabulated below... ],
+ "logging_steps": 25,
+ "max_steps": 3000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.01 },
+ "attributes": { "early_stopping_patience_counter": 0 }
+ },
+ "TrainerControl": {
+ "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.859807750750208e+18,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }

`log_history` — training entries (logged every 25 steps):

| epoch | step | learning_rate | loss | grad_norm |
|---|---|---|---|---|
| 0.025 | 25 | 4.166666666666667e-06 | 0.7053 | 7.62260103225708 |
| 0.05 | 50 | 8.333333333333334e-06 | 0.7098 | 9.429513931274414 |
| 0.075 | 75 | 1.25e-05 | 0.597 | 5.347685813903809 |
| 0.1 | 100 | 1.6666666666666667e-05 | 0.6223 | 5.363152503967285 |
| 0.125 | 125 | 2.0833333333333336e-05 | 0.6182 | 9.079790115356445 |
| 0.15 | 150 | 2.5e-05 | 0.6298 | 6.9367547035217285 |
| 0.175 | 175 | 2.916666666666667e-05 | 0.582 | 4.488036632537842 |
| 0.2 | 200 | 3.3333333333333335e-05 | 0.5998 | 6.229537487030029 |
| 0.225 | 225 | 3.7500000000000003e-05 | 0.6034 | 14.98177433013916 |
| 0.25 | 250 | 4.166666666666667e-05 | 0.5486 | 8.947093963623047 |
| 0.275 | 275 | 4.5833333333333334e-05 | 0.5592 | 5.548841953277588 |
| 0.3 | 300 | 5e-05 | 0.4745 | 6.519041538238525 |
| 0.325 | 325 | 4.9537037037037035e-05 | 0.6121 | 6.388887405395508 |
| 0.35 | 350 | 4.9074074074074075e-05 | 0.5122 | 4.3396315574646 |
| 0.375 | 375 | 4.8611111111111115e-05 | 0.5585 | 12.209185600280762 |
| 0.4 | 400 | 4.814814814814815e-05 | 0.6133 | 6.66325044631958 |
| 0.425 | 425 | 4.768518518518519e-05 | 0.5779 | 7.374906063079834 |
| 0.45 | 450 | 4.722222222222222e-05 | 0.58 | 6.596988677978516 |
| 0.475 | 475 | 4.675925925925926e-05 | 0.4981 | 8.377278327941895 |
| 0.5 | 500 | 4.62962962962963e-05 | 0.6069 | 7.247930526733398 |
| 0.525 | 525 | 4.5833333333333334e-05 | 0.6142 | 6.867646217346191 |
| 0.55 | 550 | 4.5370370370370374e-05 | 0.5109 | 5.440280437469482 |
| 0.575 | 575 | 4.490740740740741e-05 | 0.5326 | 8.325813293457031 |
| 0.6 | 600 | 4.4444444444444447e-05 | 0.5622 | 8.603256225585938 |
| 0.625 | 625 | 4.3981481481481486e-05 | 0.675 | 10.510509490966797 |
| 0.65 | 650 | 4.351851851851852e-05 | 0.5269 | 7.54085636138916 |
| 0.675 | 675 | 4.305555555555556e-05 | 0.5974 | 25.22871971130371 |
| 0.7 | 700 | 4.259259259259259e-05 | 0.5236 | 7.171282768249512 |
| 0.725 | 725 | 4.212962962962963e-05 | 0.4763 | 5.170557022094727 |
| 0.75 | 750 | 4.166666666666667e-05 | 0.5664 | 4.286471843719482 |
| 0.775 | 775 | 4.1203703703703705e-05 | 0.5413 | 7.5836286544799805 |
| 0.8 | 800 | 4.074074074074074e-05 | 0.5323 | 7.180052757263184 |
| 0.825 | 825 | 4.027777777777778e-05 | 0.5381 | 8.674583435058594 |
| 0.85 | 850 | 3.981481481481482e-05 | 0.5539 | 5.117196559906006 |
| 0.875 | 875 | 3.935185185185186e-05 | 0.4588 | 6.353272914886475 |
| 0.9 | 900 | 3.888888888888889e-05 | 0.5228 | 5.968225002288818 |
| 0.925 | 925 | 3.8425925925925924e-05 | 0.4842 | 17.49488639831543 |
| 0.95 | 950 | 3.7962962962962964e-05 | 0.4993 | 5.153933048248291 |
| 0.975 | 975 | 3.7500000000000003e-05 | 0.5157 | 7.89682149887085 |
| 1.0 | 1000 | 3.7037037037037037e-05 | 0.5277 | 6.91286039352417 |
| 1.025 | 1025 | 3.6574074074074076e-05 | 0.487 | 24.21603775024414 |
| 1.05 | 1050 | 3.611111111111111e-05 | 0.4983 | 11.67587947845459 |
| 1.075 | 1075 | 3.564814814814815e-05 | 0.5016 | 9.920230865478516 |
| 1.1 | 1100 | 3.518518518518519e-05 | 0.4673 | 6.970316410064697 |
| 1.125 | 1125 | 3.472222222222222e-05 | 0.3898 | 7.130370616912842 |
| 1.15 | 1150 | 3.425925925925926e-05 | 0.4404 | 9.946027755737305 |
| 1.175 | 1175 | 3.3796296296296295e-05 | 0.4961 | 1.6654151678085327 |
| 1.2 | 1200 | 3.3333333333333335e-05 | 0.4828 | 5.618824481964111 |
| 1.225 | 1225 | 3.2870370370370375e-05 | 0.4093 | 7.5699334144592285 |
| 1.25 | 1250 | 3.240740740740741e-05 | 0.4939 | 7.351467609405518 |
| 1.275 | 1275 | 3.194444444444444e-05 | 0.3547 | 6.4059648513793945 |
| 1.3 | 1300 | 3.148148148148148e-05 | 0.3945 | 4.268542289733887 |
| 1.325 | 1325 | 3.101851851851852e-05 | 0.3791 | 4.965268611907959 |
| 1.35 | 1350 | 3.055555555555556e-05 | 0.3727 | 9.438615798950195 |
| 1.375 | 1375 | 3.0092592592592593e-05 | 0.4429 | 6.795106410980225 |
| 1.4 | 1400 | 2.962962962962963e-05 | 0.4231 | 6.984402656555176 |
| 1.425 | 1425 | 2.916666666666667e-05 | 0.4342 | 4.931349277496338 |
| 1.45 | 1450 | 2.8703703703703706e-05 | 0.4478 | 5.537110805511475 |
| 1.475 | 1475 | 2.824074074074074e-05 | 0.3871 | 4.687628269195557 |
| 1.5 | 1500 | 2.777777777777778e-05 | 0.4005 | 5.930976390838623 |
| 1.525 | 1525 | 2.7314814814814816e-05 | 0.4503 | 9.062422752380371 |
| 1.55 | 1550 | 2.6851851851851855e-05 | 0.4317 | 5.416477203369141 |
| 1.575 | 1575 | 2.6388888888888892e-05 | 0.382 | 14.157992362976074 |
| 1.6 | 1600 | 2.5925925925925925e-05 | 0.4017 | 4.8775177001953125 |
| 1.625 | 1625 | 2.5462962962962965e-05 | 0.5269 | 5.229184150695801 |
| 1.65 | 1650 | 2.5e-05 | 0.4021 | 10.698781967163086 |
| 1.675 | 1675 | 2.4537037037037038e-05 | 0.3971 | 4.068216800689697 |
| 1.7 | 1700 | 2.4074074074074074e-05 | 0.434 | 9.067376136779785 |
| 1.725 | 1725 | 2.361111111111111e-05 | 0.4614 | 5.414083957672119 |
| 1.75 | 1750 | 2.314814814814815e-05 | 0.4149 | 9.188199043273926 |
| 1.775 | 1775 | 2.2685185185185187e-05 | 0.4274 | 6.829360008239746 |
| 1.8 | 1800 | 2.2222222222222223e-05 | 0.3448 | 5.337634563446045 |
| 1.825 | 1825 | 2.175925925925926e-05 | 0.4263 | 7.2519850730896 |
| 1.85 | 1850 | 2.1296296296296296e-05 | 0.4307 | 5.725991725921631 |
| 1.875 | 1875 | 2.0833333333333336e-05 | 0.3874 | 5.500362396240234 |
| 1.9 | 1900 | 2.037037037037037e-05 | 0.3281 | 7.834714412689209 |
| 1.925 | 1925 | 1.990740740740741e-05 | 0.4047 | 10.548686981201172 |
| 1.95 | 1950 | 1.9444444444444445e-05 | 0.4335 | 8.157145500183105 |
| 1.975 | 1975 | 1.8981481481481482e-05 | 0.41 | 9.798107147216797 |
| 2.0 | 2000 | 1.8518518518518518e-05 | 0.4131 | 10.720842361450195 |
| 2.025 | 2025 | 1.8055555555555555e-05 | 0.2708 | 2.1612956523895264 |
| 2.05 | 2050 | 1.7592592592592595e-05 | 0.4438 | 16.25421714782715 |
| 2.075 | 2075 | 1.712962962962963e-05 | 0.2589 | 22.16561508178711 |
| 2.1 | 2100 | 1.6666666666666667e-05 | 0.3357 | 13.343395233154297 |
| 2.125 | 2125 | 1.6203703703703704e-05 | 0.3356 | 8.876195907592773 |
| 2.15 | 2150 | 1.574074074074074e-05 | 0.3052 | 7.499932765960693 |
| 2.175 | 2175 | 1.527777777777778e-05 | 0.2836 | 3.757966995239258 |
| 2.2 | 2200 | 1.4814814814814815e-05 | 0.2933 | 10.760390281677246 |
| 2.225 | 2225 | 1.4351851851851853e-05 | 0.3104 | 8.144658088684082 |
| 2.25 | 2250 | 1.388888888888889e-05 | 0.2983 | 7.362667083740234 |
| 2.275 | 2275 | 1.3425925925925928e-05 | 0.3655 | 4.645088195800781 |
| 2.3 | 2300 | 1.2962962962962962e-05 | 0.2441 | 1.1926023960113525 |
| 2.325 | 2325 | 1.25e-05 | 0.215 | 13.049036979675293 |
| 2.35 | 2350 | 1.2037037037037037e-05 | 0.3903 | 9.891176223754883 |
| 2.375 | 2375 | 1.1574074074074075e-05 | 0.2469 | 5.911032199859619 |
| 2.4 | 2400 | 1.1111111111111112e-05 | 0.3542 | 10.652032852172852 |
| 2.425 | 2425 | 1.0648148148148148e-05 | 0.2719 | 5.722354888916016 |
| 2.45 | 2450 | 1.0185185185185185e-05 | 0.3102 | 7.493654727935791 |
| 2.475 | 2475 | 9.722222222222223e-06 | 0.2539 | 7.553443908691406 |
| 2.5 | 2500 | 9.259259259259259e-06 | 0.3312 | 3.0468859672546387 |
| 2.525 | 2525 | 8.796296296296297e-06 | 0.3174 | 9.9094820022583 |
| 2.55 | 2550 | 8.333333333333334e-06 | 0.3598 | 3.5765790939331055 |
| 2.575 | 2575 | 7.87037037037037e-06 | 0.2898 | 6.845639228820801 |
| 2.6 | 2600 | 7.4074074074074075e-06 | 0.2469 | 6.846150875091553 |
| 2.625 | 2625 | 6.944444444444445e-06 | 0.4315 | 10.330842018127441 |
| 2.65 | 2650 | 6.481481481481481e-06 | 0.4615 | 6.001290321350098 |
| 2.675 | 2675 | 6.0185185185185185e-06 | 0.358 | 4.979047775268555 |
| 2.7 | 2700 | 5.555555555555556e-06 | 0.3412 | 5.183391094207764 |
| 2.725 | 2725 | 5.092592592592592e-06 | 0.297 | 7.089044094085693 |
| 2.75 | 2750 | 4.6296296296296296e-06 | 0.2933 | 11.442684173583984 |
| 2.775 | 2775 | 4.166666666666667e-06 | 0.3735 | 4.924402713775635 |
| 2.8 | 2800 | 3.7037037037037037e-06 | 0.2839 | 12.057656288146973 |
| 2.825 | 2825 | 3.2407407407407406e-06 | 0.2204 | 9.038887977600098 |
| 2.85 | 2850 | 2.777777777777778e-06 | 0.33 | 3.07694673538208 |
| 2.875 | 2875 | 2.3148148148148148e-06 | 0.2462 | 1.35588800907135 |
| 2.9 | 2900 | 1.8518518518518519e-06 | 0.2932 | 10.075614929199219 |
| 2.925 | 2925 | 1.388888888888889e-06 | 0.2768 | 5.351684093475342 |
| 2.95 | 2950 | 9.259259259259259e-07 | 0.2972 | 11.825695037841797 |
| 2.975 | 2975 | 4.6296296296296297e-07 | 0.2454 | 14.367501258850098 |
| 3.0 | 3000 | 0.0 | 0.2264 | 5.5341925621032715 |

`log_history` — evaluation entries (one per epoch):

| epoch | step | eval_loss | eval_accuracy | eval_auc | eval_f1 | eval_precision | eval_recall | eval_runtime | eval_samples_per_second | eval_steps_per_second |
|---|---|---|---|---|---|---|---|---|---|---|
| 1.0 | 1000 | 0.4363800287246704 | 0.787 | 0.8603820000000001 | 0.7905604719764012 | 0.7775628626692457 | 0.804 | 663.7473 | 3.013 | 0.188 |
| 2.0 | 2000 | 0.3920663595199585 | 0.826 | 0.9061560000000001 | 0.84051329055912 | 0.7758037225042301 | 0.917 | 518.777 | 3.855 | 0.241 |
| 3.0 | 3000 | 0.34307238459587097 | 0.8655 | 0.9263070000000001 | 0.8702363724071394 | 0.8406337371854613 | 0.902 | 560.0101 | 3.571 | 0.223 |
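Since `log_history` interleaves training and evaluation records, the per-epoch metrics above can be pulled back out with a short filter. A small sketch using only the standard library; the file path is the hypothetical local location of this checkpoint's trainer state.

```python
import json

# Hypothetical local path to the trainer state file from this checkpoint folder.
with open("checkpoint-3000/trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log_history entries that carry eval_* keys.
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in evals:
    print(f'epoch {entry["epoch"]}: '
          f'loss={entry["eval_loss"]:.4f} acc={entry["eval_accuracy"]:.4f} '
          f'f1={entry["eval_f1"]:.4f} auc={entry["eval_auc"]:.4f}')
```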
checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0853a107cf83c199ff4884eea1247120ddb0ada59c587b09dbc15bfc652c264
+ size 5240
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224",
+ "_num_labels": 2,
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "ai",
+ "1": "real"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "ai": 0,
+ "real": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.45.0"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15409dec5cdb2034e9c2bd474db69dc1bcee23fc0cfd708f6a5f597135fa05c2
+ size 343223968
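The `.safetensors`, `.pt`, and `.bin` entries in this commit are Git LFS pointers (an oid plus a byte size), not the payloads themselves. A sketch of fetching the actual ~343 MB weight file with `huggingface_hub`; the repo id is again a placeholder for wherever this commit was pushed.

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id -- the Hub repository this commit belongs to.
weights_path = hf_hub_download(
    repo_id="MichaelHu03/<this-repo>",
    filename="model.safetensors",
)
print(weights_path)  # local cache path; the LFS pointer is resolved to the real file
```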
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
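Per this config, images are resized to 224x224, rescaled by 1/255, and normalized with mean and std of 0.5 per channel. A minimal sketch applying it through the saved processor; it assumes the JSON above sits in the current directory and that `example.jpg` is a hypothetical local image.

```python
from PIL import Image
from transformers import ViTImageProcessor

# Assumes preprocessor_config.json (above) is in the current directory.
processor = ViTImageProcessor.from_pretrained(".")

image = Image.open("example.jpg").convert("RGB")   # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)                # torch.Size([1, 3, 224, 224])
# Each value ends up as (pixel / 255 - 0.5) / 0.5, i.e. roughly in [-1, 1].
```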
runs/Oct04_18-41-19_r-michaelhu03-aiimageclassificationinitial-0lilrclk-ebbcd-prvb0/events.out.tfevents.1728067280.r-michaelhu03-aiimageclassificationinitial-0lilrclk-ebbcd-prvb0.222.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6f94a1ebfdfc96c1a635e4160a78cf3f8c06ce376c4cbabf42fb5893307ffaf
- size 30496
+ oid sha256:fd612a3143fa94b4606dce536c276995d61b8414ac1e7a90e563e3283a2df797
+ size 32213
runs/Oct04_18-41-19_r-michaelhu03-aiimageclassificationinitial-0lilrclk-ebbcd-prvb0/events.out.tfevents.1728091728.r-michaelhu03-aiimageclassificationinitial-0lilrclk-ebbcd-prvb0.222.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb4621eaa4b0fe880f4c74281cb959e68a1553ac35f53265881223224fdf8716
+ size 607
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0853a107cf83c199ff4884eea1247120ddb0ada59c587b09dbc15bfc652c264
+ size 5240
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "data_path": "mvkvc/artifact-10k",
+ "model": "google/vit-base-patch16-224",
+ "username": "MichaelHu03",
+ "lr": 5e-05,
+ "epochs": 3,
+ "batch_size": 8,
+ "warmup_ratio": 0.1,
+ "gradient_accumulation": 1,
+ "optimizer": "adamw_torch",
+ "scheduler": "linear",
+ "weight_decay": 0.0,
+ "max_grad_norm": 1.0,
+ "seed": 42,
+ "train_split": "train",
+ "valid_split": "test",
+ "logging_steps": -1,
+ "project_name": "autotrain-ht4es-gbvmt",
+ "auto_find_batch_size": false,
+ "mixed_precision": "fp16",
+ "save_total_limit": 1,
+ "push_to_hub": true,
+ "eval_strategy": "epoch",
+ "image_column": "image",
+ "target_column": "label",
+ "log": "tensorboard",
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.01
+ }
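For readers who want to reproduce this run outside AutoTrain, the parameters above map fairly directly onto `transformers.TrainingArguments`. The sketch below is an approximation under that assumption (argument names follow the transformers 4.45 release recorded in the configs), not a dump of AutoTrain's internal setup.

```python
from transformers import TrainingArguments

# Approximate equivalent of training_params.json for a vanilla Trainer run (assumed mapping).
args = TrainingArguments(
    output_dir="autotrain-ht4es-gbvmt",    # project_name
    learning_rate=5e-05,
    num_train_epochs=3,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=1,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                             # mixed_precision: "fp16"
    eval_strategy="epoch",
    save_total_limit=1,
    report_to="tensorboard",
    push_to_hub=True,
)
```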