Upload folder using huggingface_hub

Changed files:
- .hydra/hydra.yaml +3 -3
- cli.log +17 -17
- error.log +11 -15
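Note: the commit message above is the default one written by huggingface_hub's upload_folder. For reference, a minimal sketch of how a run folder like this gets pushed; the repo_id and repo_type below are placeholders, not read from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# upload_folder's default commit message is "Upload folder using huggingface_hub".
api.upload_folder(
    folder_path="runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.4816654",
    repo_id="user/benchmark-results",  # placeholder: the destination repo is not shown in this commit
    repo_type="dataset",               # assumption: benchmark results are often stored in dataset repos
)
```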
.hydra/hydra.yaml
CHANGED
@@ -1,6 +1,6 @@
 hydra:
   run:
-    dir: runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.
+    dir: runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.4816654
   sweep:
     dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
     subdir: ${hydra.job.num}
@@ -118,7 +118,7 @@ hydra:
   output_subdir: .hydra
   overrides:
     hydra:
-    - hydra.run.dir=runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.
+    - hydra.run.dir=runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.4816654
     - hydra.mode=RUN
     task:
     - backend.model=NousResearch/Hermes-3-Llama-3.1-8B
@@ -158,7 +158,7 @@ hydra:
   - path: ''
     schema: structured
     provider: schema
-  output_dir: /app/runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.
+  output_dir: /app/runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.4816654
   choices:
     benchmark: energy_star
     launcher: process
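Note: all three changed values are the same timestamped run directory, which Hydra records in three places: the run config, the stored CLI overrides, and the resolved output_dir. A minimal sketch of a Hydra app that produces this recording; the conf/ layout and script name are assumptions, not taken from this repo:

```python
import hydra
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig


@hydra.main(version_base=None, config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    # Hydra resolves hydra.run.dir at launch time and writes the resolved
    # config plus the raw CLI overrides into <run_dir>/.hydra/ -- which is
    # exactly the hydra.yaml being diffed above.
    print(HydraConfig.get().run.dir)


if __name__ == "__main__":
    main()

# Invocation matching the overrides recorded in the diff:
#   python main.py backend.model=NousResearch/Hermes-3-Llama-3.1-8B \
#       hydra.run.dir=runs/text_generation/NousResearch/Hermes-3-Llama-3.1-8B/1736822753.4816654
```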
cli.log
CHANGED
@@ -1,17 +1,17 @@
-[2025-01-14 02:45:
-[2025-01-14 02:45:
-[2025-01-14 02:45:
-[PROC-0][2025-01-14 02:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[PROC-0][2025-01-14 02:46:
-[2025-01-14 02:46:03,
+[2025-01-14 02:45:57,202][launcher][INFO] - Allocating process launcher
+[2025-01-14 02:45:57,202][process][INFO] - + Setting multiprocessing start method to spawn.
+[2025-01-14 02:45:57,220][process][INFO] - + Launched benchmark in isolated process 658.
+[PROC-0][2025-01-14 02:46:00,404][datasets][INFO] - PyTorch version 2.4.0 available.
+[PROC-0][2025-01-14 02:46:01,534][backend][INFO] - Allocating pytorch backend
+[PROC-0][2025-01-14 02:46:01,534][backend][INFO] - + Setting random seed to 42
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Using AutoModel class AutoModelForCausalLM
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Creating backend temporary directory
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Loading model with random weights
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Creating no weights model
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Creating no weights model directory
+[PROC-0][2025-01-14 02:46:02,148][pytorch][INFO] - + Creating no weights model state dict
+[PROC-0][2025-01-14 02:46:02,151][pytorch][INFO] - + Saving no weights model safetensors
+[PROC-0][2025-01-14 02:46:02,151][pytorch][INFO] - + Saving no weights model pretrained config
+[PROC-0][2025-01-14 02:46:02,152][pytorch][INFO] - + Loading no weights AutoModel
+[PROC-0][2025-01-14 02:46:02,152][pytorch][INFO] - + Loading model directly on device: cuda
+[2025-01-14 02:46:03,353][experiment][ERROR] - Error during experiment
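Note: the log walks through a "no weights model" loading strategy: build the model from its config with random weights, serialize it once to safetensors, then reload it straight onto cuda. A rough sketch of that sequence, reconstructed from the log messages rather than taken from the benchmark's source:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

MODEL = "NousResearch/Hermes-3-Llama-3.1-8B"

torch.manual_seed(42)  # "Setting random seed to 42"

# "Creating no weights model": instantiate from config alone, skipping the
# checkpoint download; weights are randomly initialized.
config = AutoConfig.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_config(config)

# "Saving no weights model safetensors" / "pretrained config".
model.save_pretrained("/tmp/no_weights_model", safe_serialization=True)

# "Loading model directly on device: cuda" -- the step that fails with the
# CUDA OOM recorded in error.log: ~8B fp32 parameters need roughly 32 GB.
model = AutoModelForCausalLM.from_pretrained("/tmp/no_weights_model", device_map="cuda")
```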
error.log
CHANGED
@@ -28,23 +28,19 @@ Traceback (most recent call last):
     self.pretrained_model = self.automodel_class.from_pretrained(
   File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
     return model_class.from_pretrained(
-  File "/opt/conda/lib/python3.9/site-packages/transformers/modeling_utils.py", line
-
-  File "/opt/conda/lib/python3.9/site-packages/transformers/
-
-  File "/opt/conda/lib/python3.9/site-packages/
-  [
-  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 902, in <listcomp>
-    [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
-  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 691, in __init__
-    self.mlp = LlamaMLP(config)
-  File "/opt/conda/lib/python3.9/site-packages/transformers/models/llama/modeling_llama.py", line 288, in __init__
-    self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
-  File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/linear.py", line 99, in __init__
-    self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
+  File "/opt/conda/lib/python3.9/site-packages/transformers/modeling_utils.py", line 3738, in from_pretrained
+    state_dict = load_state_dict(resolved_archive_file)
+  File "/opt/conda/lib/python3.9/site-packages/transformers/modeling_utils.py", line 556, in load_state_dict
+    return safe_load_file(checkpoint_file)
+  File "/opt/conda/lib/python3.9/site-packages/safetensors/torch.py", line 315, in load_file
+    result[k] = f.get_tensor(k)
   File "/opt/conda/lib/python3.9/site-packages/torch/utils/_device.py", line 79, in __torch_function__
     return func(*args, **kwargs)
-
+RuntimeError: CUDA error: out of memory
+CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
+For debugging consider passing CUDA_LAUNCH_BLOCKING=1
+Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
+


 Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
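Note: the updated traceback pins the OOM inside safetensors' get_tensor while weights are copied to the GPU one tensor at a time, meaning the device simply cannot hold the full fp32 model. A common mitigation, shown here as a sketch of general practice rather than the fix applied in this run, is to load in half precision and let accelerate place or offload whatever does not fit:

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Hermes-3-Llama-3.1-8B",
    torch_dtype=torch.float16,  # ~16 GB of weights instead of ~32 GB in fp32
    device_map="auto",          # requires `accelerate`; spills layers to CPU/disk if the GPU fills up
)
```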