{
    "name": "cuda_inference_transformers_text-classification_FacebookAI/roberta-base",
    "backend": {
        "name": "pytorch",
        "version": "2.5.1+cu124",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "text-classification",
        "library": "transformers",
        "model_type": "roberta",
        "model": "FacebookAI/roberta-base",
        "processor": "FacebookAI/roberta-base",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 2,
            "sequence_length": 16,
            "num_choices": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.248768,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.228-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.12",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.5.0.dev0",
        "optimum_benchmark_commit": null,
        "transformers_version": "4.47.0",
        "transformers_commit": null,
        "accelerate_version": "1.2.0",
        "accelerate_commit": null,
        "diffusers_version": "0.31.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.12",
        "timm_commit": null,
        "peft_version": "0.14.0",
        "peft_commit": null
    },
    "print_report": true,
    "log_report": true
}
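
An equivalent config can also be built programmatically instead of through Hydra/CLI. The following is a minimal sketch assuming optimum-benchmark's Python API (Benchmark, BenchmarkConfig, PyTorchConfig, InferenceConfig, ProcessConfig) as of the 0.5.0.dev0 version recorded in the environment block; constructor arguments are taken from the field names in the JSON above and defaults may differ between versions.

```python
# Minimal sketch (assumption: optimum-benchmark 0.5.x Python API; field names
# mirror the JSON config above, exact defaults may vary across versions).
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
)
from optimum_benchmark.logging_utils import setup_logging

setup_logging(level="INFO")

if __name__ == "__main__":
    # Backend: PyTorch on CUDA device 0, with randomly initialized weights
    # (no_weights=True), matching the "backend" block above.
    backend_config = PyTorchConfig(
        model="FacebookAI/roberta-base",
        task="text-classification",
        device="cuda",
        device_ids="0",
        no_weights=True,
    )

    # Scenario: a single 1-second inference sweep tracking latency, memory and
    # energy, matching the "scenario" block above.
    scenario_config = InferenceConfig(
        iterations=1,
        duration=1,
        warmup_runs=1,
        latency=True,
        memory=True,
        energy=True,
        input_shapes={"batch_size": 2, "sequence_length": 16},
    )

    # Launcher: spawn the benchmark in an isolated subprocess, matching the
    # "launcher" block above.
    launcher_config = ProcessConfig(
        device_isolation=True,
        device_isolation_action="error",
        start_method="spawn",
    )

    benchmark_config = BenchmarkConfig(
        name="cuda_inference_transformers_text-classification_FacebookAI/roberta-base",
        backend=backend_config,
        scenario=scenario_config,
        launcher=launcher_config,
    )

    # Launch the benchmark and print the resulting report, analogous to the
    # print_report/log_report flags above.
    benchmark_report = Benchmark.launch(benchmark_config)
    print(benchmark_report)
```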