Upload cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark_config.json with huggingface_hub
Browse files
cuda_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark_config.json
CHANGED
@@ -4,11 +4,11 @@
|
|
4 |
"name": "pytorch",
|
5 |
"version": "2.5.1+cu124",
|
6 |
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
|
|
|
|
|
7 |
"task": "text-generation",
|
8 |
"library": "transformers",
|
9 |
"model_type": "llama",
|
10 |
-
"model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
|
11 |
-
"processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
|
12 |
"device": "cuda",
|
13 |
"device_ids": "0",
|
14 |
"seed": 42,
|
@@ -93,7 +93,7 @@
|
|
93 |
"optimum_benchmark_commit": null,
|
94 |
"transformers_version": "4.47.0",
|
95 |
"transformers_commit": null,
|
96 |
-
"accelerate_version": "1.2.0",
|
97 |
"accelerate_commit": null,
|
98 |
"diffusers_version": "0.31.0",
|
99 |
"diffusers_commit": null,
|
|
|
4 |
"name": "pytorch",
|
5 |
"version": "2.5.1+cu124",
|
6 |
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
|
7 |
+
"model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
|
8 |
+
"processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
|
9 |
"task": "text-generation",
|
10 |
"library": "transformers",
|
11 |
"model_type": "llama",
|
|
|
|
|
12 |
"device": "cuda",
|
13 |
"device_ids": "0",
|
14 |
"seed": 42,
|
|
|
93 |
"optimum_benchmark_commit": null,
|
94 |
"transformers_version": "4.47.0",
|
95 |
"transformers_commit": null,
|
96 |
+
"accelerate_version": "1.2.1",
|
97 |
"accelerate_commit": null,
|
98 |
"diffusers_version": "0.31.0",
|
99 |
"diffusers_commit": null,
|