lysandre (HF staff) committed
Commit d5cb37f · 1 Parent(s): 95fbc89

Upload dataset_infos.json with huggingface_hub
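The commit message indicates the metadata file was pushed with the huggingface_hub client. As a minimal sketch of that kind of upload (the repo id below is a placeholder; this page does not name the dataset repository), it might look like:

    from huggingface_hub import HfApi

    api = HfApi()
    # Push the regenerated metadata file to the dataset repo.
    # "user/dataset" is a placeholder repo id, not taken from this commit.
    api.upload_file(
        path_or_fileobj="dataset_infos.json",
        path_in_repo="dataset_infos.json",
        repo_id="user/dataset",
        repo_type="dataset",
        commit_message="Upload dataset_infos.json with huggingface_hub",
    )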

Files changed (1)
dataset_infos.json +47 -47
dataset_infos.json CHANGED
@@ -14,80 +14,80 @@
     }
   },
   "splits": {
-    "evaluate": {
-      "name": "evaluate",
-      "num_bytes": 11682,
-      "num_examples": 531,
-      "dataset_name": null
-    },
-    "huggingface_hub": {
-      "name": "huggingface_hub",
-      "num_bytes": 20592,
-      "num_examples": 936,
+    "gradio": {
+      "name": "gradio",
+      "num_bytes": 23100,
+      "num_examples": 1050,
       "dataset_name": null
     },
-    "optimum": {
-      "name": "optimum",
-      "num_bytes": 14696,
-      "num_examples": 668,
+    "tokenizers": {
+      "name": "tokenizers",
+      "num_bytes": 23100,
+      "num_examples": 1050,
       "dataset_name": null
     },
-    "peft": {
-      "name": "peft",
-      "num_bytes": 3938,
-      "num_examples": 179,
+    "transformers": {
+      "name": "transformers",
+      "num_bytes": 23782,
+      "num_examples": 1081,
       "dataset_name": null
     },
-    "gradio": {
-      "name": "gradio",
-      "num_bytes": 23078,
-      "num_examples": 1049,
+    "accelerate": {
+      "name": "accelerate",
+      "num_bytes": 19734,
+      "num_examples": 897,
       "dataset_name": null
     },
     "datasets": {
       "name": "datasets",
-      "num_bytes": 19712,
-      "num_examples": 896,
+      "num_bytes": 19734,
+      "num_examples": 897,
       "dataset_name": null
     },
-    "diffusers": {
-      "name": "diffusers",
-      "num_bytes": 9086,
-      "num_examples": 413,
+    "huggingface_hub": {
+      "name": "huggingface_hub",
+      "num_bytes": 20614,
+      "num_examples": 937,
       "dataset_name": null
     },
     "safetensors": {
       "name": "safetensors",
-      "num_bytes": 5148,
-      "num_examples": 234,
+      "num_bytes": 5170,
+      "num_examples": 235,
       "dataset_name": null
     },
-    "tokenizers": {
-      "name": "tokenizers",
-      "num_bytes": 23078,
-      "num_examples": 1049,
+    "peft": {
+      "name": "peft",
+      "num_bytes": 3960,
+      "num_examples": 180,
       "dataset_name": null
     },
     "pytorch_image_models": {
       "name": "pytorch_image_models",
-      "num_bytes": 23078,
-      "num_examples": 1049,
+      "num_bytes": 23100,
+      "num_examples": 1050,
       "dataset_name": null
     },
-    "accelerate": {
-      "name": "accelerate",
-      "num_bytes": 19712,
-      "num_examples": 896,
+    "evaluate": {
+      "name": "evaluate",
+      "num_bytes": 11704,
+      "num_examples": 532,
       "dataset_name": null
     },
-    "transformers": {
-      "name": "transformers",
-      "num_bytes": 23760,
-      "num_examples": 1080,
+    "optimum": {
+      "name": "optimum",
+      "num_bytes": 14718,
+      "num_examples": 669,
+      "dataset_name": null
+    },
+    "diffusers": {
+      "name": "diffusers",
+      "num_bytes": 9108,
+      "num_examples": 414,
       "dataset_name": null
     }
   },
-  "download_size": 116765,
-  "dataset_size": 197560,
-  "size_in_bytes": 314325
+  "download_size": 116988,
+  "dataset_size": 197824,
+  "size_in_bytes": 314812
   }}
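The split entries above carry the same fields that the datasets library exposes as SplitInfo (num_bytes, num_examples, dataset_name), alongside the config-level download_size, dataset_size, and size_in_bytes totals. A minimal sketch of reading these values back from the Hub, again with a placeholder repo id since the dataset repository is not named on this page, could be:

    from datasets import load_dataset_builder

    # Placeholder repo id; substitute the actual dataset repository.
    builder = load_dataset_builder("user/dataset")

    # Each SplitInfo mirrors one entry in the "splits" block of dataset_infos.json.
    for split_name, split_info in builder.info.splits.items():
        print(split_name, split_info.num_bytes, split_info.num_examples)

    # Config-level totals, matching download_size / dataset_size above.
    print(builder.info.download_size, builder.info.dataset_size)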