IlyasMoutawwakil (HF staff) committed (verified)
Commit 5405b71 · Parent: 5b7b5d6

Upload cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub

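The commit message states that the file was uploaded with huggingface_hub. As a minimal sketch (not the uploader's actual script), such an upload can be reproduced with `HfApi.upload_file`; the `repo_id` is a placeholder and `repo_type="dataset"` is an assumption about where these benchmark results are stored:

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the locally configured Hugging Face token

api.upload_file(
    # Local benchmark result produced by optimum-benchmark.
    path_or_fileobj="benchmark.json",
    # Target path inside the repo, matching the file changed in this commit.
    path_in_repo="cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
    repo_id="<namespace>/<benchmark-results-repo>",  # placeholder, not the actual repo
    repo_type="dataset",  # assumption: results live in a dataset repo
    commit_message="Upload benchmark.json with huggingface_hub",
)
```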
cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json CHANGED
@@ -5,11 +5,11 @@
  "name": "pytorch",
  "version": "2.5.1+cu124",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+ "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
+ "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
  "task": "text-generation",
  "library": "transformers",
  "model_type": "llama",
- "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
- "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
  "device": "cuda",
  "device_ids": "0",
  "seed": 42,
@@ -94,7 +94,7 @@
  "optimum_benchmark_commit": null,
  "transformers_version": "4.47.0",
  "transformers_commit": null,
- "accelerate_version": "1.2.0",
+ "accelerate_version": "1.2.1",
  "accelerate_commit": null,
  "diffusers_version": "0.31.0",
  "diffusers_commit": null,
@@ -112,7 +112,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1392.574464,
+ "max_ram": 1392.361472,
  "max_global_vram": 699.92448,
  "max_process_vram": 0.0,
  "max_reserved": 44.040192,
@@ -121,42 +121,42 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.2690089111328125,
- 0.009111552238464356,
- 0.008255488395690918,
- 0.00858521556854248,
- 0.0082575044631958
+ 0.2745579528808594,
+ 0.009665535926818849,
+ 0.00901632022857666,
+ 0.008461312294006347,
+ 0.008477696418762207
  ],
  "count": 5,
- "total": 0.30321867179870604,
- "mean": 0.06064373435974121,
- "p50": 0.00858521556854248,
- "p90": 0.1650499675750733,
- "p95": 0.21702943935394284,
- "p99": 0.2586130167770386,
- "stdev": 0.10418305760779996,
- "stdev_": 171.79525421337289
+ "total": 0.3101788177490235,
+ "mean": 0.0620357635498047,
+ "p50": 0.00901632022857666,
+ "p90": 0.16860098609924318,
+ "p95": 0.22157946949005122,
+ "p99": 0.26396225620269775,
+ "stdev": 0.10626200774652632,
+ "stdev_": 171.29152873441316
  },
  "throughput": {
  "unit": "samples/s",
- "value": 164.89749692325304
+ "value": 161.19733888616707
  },
  "energy": {
  "unit": "kWh",
- "cpu": 6.102458797916358e-06,
- "ram": 3.324868311959925e-06,
- "gpu": 7.795561792000184e-06,
- "total": 1.722288890187647e-05
+ "cpu": 6.200777884723231e-06,
+ "ram": 3.3791103984226146e-06,
+ "gpu": 8.102228704000031e-06,
+ "total": 1.7682116987145876e-05
  },
  "efficiency": {
  "unit": "samples/kWh",
- "value": 580622.6851356209
+ "value": 565543.142106206
  }
  },
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1392.574464,
+ "max_ram": 1392.361472,
  "max_global_vram": 699.92448,
  "max_process_vram": 0.0,
  "max_reserved": 44.040192,
@@ -165,22 +165,22 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.2690089111328125,
- 0.009111552238464356
+ 0.2745579528808594,
+ 0.009665535926818849
  ],
  "count": 2,
- "total": 0.2781204633712769,
- "mean": 0.13906023168563844,
- "p50": 0.13906023168563844,
- "p90": 0.24301917524337768,
- "p95": 0.25601404318809506,
- "p99": 0.266409937543869,
- "stdev": 0.1299486794471741,
- "stdev_": 93.44776567102085
+ "total": 0.2842234888076782,
+ "mean": 0.1421117444038391,
+ "p50": 0.1421117444038391,
+ "p90": 0.24806871118545534,
+ "p95": 0.2613133320331573,
+ "p99": 0.27190902871131895,
+ "stdev": 0.13244620847702027,
+ "stdev_": 93.19863677181228
  },
  "throughput": {
  "unit": "samples/s",
- "value": 28.76451413544641
+ "value": 28.14686440434645
  },
  "energy": null,
  "efficiency": null
@@ -188,7 +188,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1392.574464,
+ "max_ram": 1392.361472,
  "max_global_vram": 699.92448,
  "max_process_vram": 0.0,
  "max_reserved": 44.040192,
@@ -197,23 +197,23 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.008255488395690918,
- 0.00858521556854248,
- 0.0082575044631958
+ 0.00901632022857666,
+ 0.008461312294006347,
+ 0.008477696418762207
  ],
  "count": 3,
- "total": 0.0250982084274292,
- "mean": 0.008366069475809734,
- "p50": 0.0082575044631958,
- "p90": 0.008519673347473144,
- "p95": 0.008552444458007813,
- "p99": 0.008578661346435546,
- "stdev": 0.0001549618740251263,
- "stdev_": 1.8522661624218446
+ "total": 0.025955328941345217,
+ "mean": 0.008651776313781738,
+ "p50": 0.008477696418762207,
+ "p90": 0.00890859546661377,
+ "p95": 0.008962457847595215,
+ "p99": 0.00900554775238037,
+ "stdev": 0.0002578582417356606,
+ "stdev_": 2.980408096368702
  },
  "throughput": {
  "unit": "samples/s",
- "value": 717.1826647326848
+ "value": 693.4992055264277
  },
  "energy": null,
  "efficiency": null