Upload cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub
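The commit title indicates the result file was pushed with huggingface_hub; below is a minimal sketch of such an upload. The repo id and the local file path are placeholders, not taken from this commit, and the token is assumed to come from a prior `huggingface-cli login`.

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="benchmark.json",  # placeholder: local result file produced by the benchmark run
    path_in_repo="cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
    repo_id="<user-or-org>/<benchmark-results-repo>",  # placeholder: target repo is not named in this commit view
    repo_type="dataset",  # assumption: benchmark results stored in a dataset repo
    commit_message="Upload cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub",
)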
cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json
CHANGED
@@ -5,11 +5,11 @@
    "name": "pytorch",
    "version": "2.5.1+cu124",
    "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+   "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
+   "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
    "task": "text-generation",
    "library": "transformers",
    "model_type": "llama",
-   "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
-   "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
    "device": "cuda",
    "device_ids": "0",
    "seed": 42,
@@ -94,7 +94,7 @@
    "optimum_benchmark_commit": null,
    "transformers_version": "4.47.0",
    "transformers_commit": null,
-   "accelerate_version": "1.2.
+   "accelerate_version": "1.2.1",
    "accelerate_commit": null,
    "diffusers_version": "0.31.0",
    "diffusers_commit": null,
@@ -112,7 +112,7 @@
    "overall": {
      "memory": {
        "unit": "MB",
-       "max_ram": 1392.
+       "max_ram": 1392.361472,
        "max_global_vram": 699.92448,
        "max_process_vram": 0.0,
        "max_reserved": 44.040192,
@@ -121,42 +121,42 @@
      "latency": {
        "unit": "s",
        "values": [
-         0.
-         0.
-         0.
-         0.
-         0.
+         0.2745579528808594,
+         0.009665535926818849,
+         0.00901632022857666,
+         0.008461312294006347,
+         0.008477696418762207
        ],
        "count": 5,
-       "total": 0.
-       "mean": 0.
-       "p50": 0.
-       "p90": 0.
-       "p95": 0.
-       "p99": 0.
-       "stdev": 0.
-       "stdev_": 171.
+       "total": 0.3101788177490235,
+       "mean": 0.0620357635498047,
+       "p50": 0.00901632022857666,
+       "p90": 0.16860098609924318,
+       "p95": 0.22157946949005122,
+       "p99": 0.26396225620269775,
+       "stdev": 0.10626200774652632,
+       "stdev_": 171.29152873441316
      },
      "throughput": {
        "unit": "samples/s",
-       "value":
+       "value": 161.19733888616707
      },
      "energy": {
        "unit": "kWh",
-       "cpu": 6.
-       "ram": 3.
-       "gpu":
-       "total": 1.
+       "cpu": 6.200777884723231e-06,
+       "ram": 3.3791103984226146e-06,
+       "gpu": 8.102228704000031e-06,
+       "total": 1.7682116987145876e-05
      },
      "efficiency": {
        "unit": "samples/kWh",
-       "value":
+       "value": 565543.142106206
      }
    },
    "warmup": {
      "memory": {
        "unit": "MB",
-       "max_ram": 1392.
+       "max_ram": 1392.361472,
        "max_global_vram": 699.92448,
        "max_process_vram": 0.0,
        "max_reserved": 44.040192,
@@ -165,22 +165,22 @@
      "latency": {
        "unit": "s",
        "values": [
-         0.
-         0.
+         0.2745579528808594,
+         0.009665535926818849
        ],
        "count": 2,
-       "total": 0.
-       "mean": 0.
-       "p50": 0.
-       "p90": 0.
-       "p95": 0.
-       "p99": 0.
-       "stdev": 0.
-       "stdev_": 93.
+       "total": 0.2842234888076782,
+       "mean": 0.1421117444038391,
+       "p50": 0.1421117444038391,
+       "p90": 0.24806871118545534,
+       "p95": 0.2613133320331573,
+       "p99": 0.27190902871131895,
+       "stdev": 0.13244620847702027,
+       "stdev_": 93.19863677181228
      },
      "throughput": {
        "unit": "samples/s",
-       "value": 28.
+       "value": 28.14686440434645
      },
      "energy": null,
      "efficiency": null
@@ -188,7 +188,7 @@
    "train": {
      "memory": {
        "unit": "MB",
-       "max_ram": 1392.
+       "max_ram": 1392.361472,
        "max_global_vram": 699.92448,
        "max_process_vram": 0.0,
        "max_reserved": 44.040192,
@@ -197,23 +197,23 @@
      "latency": {
        "unit": "s",
        "values": [
-         0.
-         0.
-         0.
+         0.00901632022857666,
+         0.008461312294006347,
+         0.008477696418762207
        ],
        "count": 3,
-       "total": 0.
-       "mean": 0.
-       "p50": 0.
-       "p90": 0.
-       "p95": 0.
-       "p99": 0.
-       "stdev": 0.
-       "stdev_":
+       "total": 0.025955328941345217,
+       "mean": 0.008651776313781738,
+       "p50": 0.008477696418762207,
+       "p90": 0.00890859546661377,
+       "p95": 0.008962457847595215,
+       "p99": 0.00900554775238037,
+       "stdev": 0.0002578582417356606,
+       "stdev_": 2.980408096368702
      },
      "throughput": {
        "unit": "samples/s",
-       "value":
+       "value": 693.4992055264277
      },
      "energy": null,
      "efficiency": null
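For completeness, a small sketch of reading the uploaded benchmark.json and pulling out the "overall" latency statistics shown above. The local path is a placeholder, and the section is located by walking the JSON tree rather than assuming its exact nesting in the file.

import json

def find_section(node, key):
    # Depth-first search for the first dict stored under `key`.
    if isinstance(node, dict):
        if isinstance(node.get(key), dict):
            return node[key]
        for value in node.values():
            found = find_section(value, key)
            if found is not None:
                return found
    elif isinstance(node, list):
        for item in node:
            found = find_section(item, key)
            if found is not None:
                return found
    return None

with open("benchmark.json") as f:  # placeholder local path
    data = json.load(f)

latency = find_section(data, "overall")["latency"]
print(f"count={latency['count']}  mean={latency['mean']:.6f} {latency['unit']}")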