La-matrice committed on
Upload normalization.py
Normalization process of our original dataset
- normalization.py +55 -0
normalization.py
ADDED
@@ -0,0 +1,55 @@
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Define normalization parameters
norm_params = {
    'LAT': {'min_val': -66.817333, 'max_val': 51.055833},
    'LON': {'min_val': -178.116667, 'max_val': 171.358333},
    'ALTI': {'min_val': 0.0, 'max_val': 3845.0},
    'AAAAMMJJHH': {'min_val': 1777010107, 'max_val': 2024030803},
    'ANNEE': {'min_val': 1777, 'max_val': 2024},
    'MOIS': {'min_val': 1, 'max_val': 12},
    'JOUR': {'min_val': 1, 'max_val': 31},
    'HEURE': {'min_val': 0, 'max_val': 23},
}

def normalize_column(column, min_val, max_val):
    """Normalize a pandas Series from [min_val, max_val] to [0, 1]."""
    # Ensure the column is treated as float so the division works properly.
    return (column.astype('float64') - min_val) / (max_val - min_val)
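
# Illustrative example (for clarity, not from the uploaded file): the stated
# bounds map to the ends of the unit interval, e.g. for LAT the recorded
# minimum and maximum normalize to exactly 0 and 1:
#   normalize_column(pd.Series([-66.817333, 51.055833]), -66.817333, 51.055833)
#   -> 0.0 and 1.0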

# Load the dataset in chunks
dataset_path = 'C:/Users/View/Desktop/oetem/dataset/dataset.parquet'
parquet_file = pq.ParquetFile(dataset_path)

# Determine the output file path
output_path = 'C:/Users/View/Desktop/oetem/dataset/dataset_normalized.parquet'

# Initialize variables for writing
writer = None
schema = None

# Process and normalize chunks
for i in range(parquet_file.num_row_groups):
    table = parquet_file.read_row_group(i, columns=list(norm_params.keys()) + [' T'])
    chunk = table.to_pandas()

    # Normalize the columns
    for col, params in norm_params.items():
        chunk[col] = normalize_column(chunk[col], min_val=params['min_val'], max_val=params['max_val'])

    # Convert the DataFrame back to a PyArrow Table for writing
    table = pa.Table.from_pandas(chunk, preserve_index=False)

    # If first chunk, initialize the writer with the schema
    if writer is None:
        schema = table.schema
        writer = pq.ParquetWriter(output_path, schema)

    writer.write_table(table)

# Close the writer to finalize the file
if writer is not None:
    writer.close()
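
Since the script never re-reads what it wrote, a quick read-back check is a natural follow-up. Below is a minimal verification sketch, not part of the commit: it assumes the output path and column bounds used above, streams the normalized file row group by row group, and confirms every normalized column lies in [0, 1]. The denormalize_column helper is hypothetical and simply inverts the min-max scaling (x * (max_val - min_val) + min_val).

import pyarrow.parquet as pq

# Same bounds as in normalization.py above (assumed unchanged).
norm_params = {
    'LAT': (-66.817333, 51.055833),
    'LON': (-178.116667, 171.358333),
    'ALTI': (0.0, 3845.0),
    'AAAAMMJJHH': (1777010107, 2024030803),
    'ANNEE': (1777, 2024),
    'MOIS': (1, 12),
    'JOUR': (1, 31),
    'HEURE': (0, 23),
}

def denormalize_column(column, min_val, max_val):
    """Hypothetical inverse of normalize_column: map [0, 1] back to [min_val, max_val]."""
    return column * (max_val - min_val) + min_val

parquet_file = pq.ParquetFile('C:/Users/View/Desktop/oetem/dataset/dataset_normalized.parquet')
for i in range(parquet_file.num_row_groups):
    chunk = parquet_file.read_row_group(i, columns=list(norm_params)).to_pandas()
    for col, (min_val, max_val) in norm_params.items():
        # Every normalized value should fall inside [0, 1]; flag the row group if not.
        assert chunk[col].between(0.0, 1.0).all(), f'{col} out of range in row group {i}'
print('All row groups pass the range check.')
# To recover physical units, e.g. latitude in degrees:
#   denormalize_column(chunk['LAT'], *norm_params['LAT'])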