import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Per-column min/max bounds (presumably the dataset-wide extremes) used for
# min-max scaling to [0, 1]. AAAAMMJJHH packs the timestamp as YYYYMMDDHH,
# e.g. 2024030803 = 2024-03-08 03:00.
norm_params = {
    'LAT': {'min_val': -66.817333, 'max_val': 51.055833},
    'LON': {'min_val': -178.116667, 'max_val': 171.358333},
    'ALTI': {'min_val': 0.0, 'max_val': 3845.0},
    'AAAAMMJJHH': {'min_val': 1777010107, 'max_val': 2024030803},
    'ANNEE': {'min_val': 1777, 'max_val': 2024},
    'MOIS': {'min_val': 1, 'max_val': 12},
    'JOUR': {'min_val': 1, 'max_val': 31},
    'HEURE': {'min_val': 0, 'max_val': 23},
}


def normalize_column(column, min_val, max_val):
    """Normalize a pandas Series from [min_val, max_val] to [0, 1]."""
    return (column.astype('float64') - min_val) / (max_val - min_val)
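

# Not in the original script: a minimal inverse of normalize_column, assuming
# one later needs to map normalized values (e.g. model outputs) back to
# physical units. It simply reverses the affine map above.
def denormalize_column(column, min_val, max_val):
    """Map a pandas Series from [0, 1] back to [min_val, max_val]."""
    return column * (max_val - min_val) + min_val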


dataset_path = 'C:/Users/View/Desktop/oetem/dataset/dataset.parquet'
parquet_file = pq.ParquetFile(dataset_path)
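
# Optional inspection (a sketch): row groups are the streaming unit below, so
# it helps to know how many there are before processing.
print(f'{parquet_file.num_row_groups} row groups, '
      f'{parquet_file.metadata.num_rows} rows total')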

output_path = 'C:/Users/View/Desktop/oetem/dataset/dataset_normalized.parquet'

writer = None
schema = None

# Process one row group at a time so the full dataset never has to fit in
# memory. Note the leading space in the target column name ' T'.
for i in range(parquet_file.num_row_groups):
    table = parquet_file.read_row_group(i, columns=list(norm_params.keys()) + [' T'])
    chunk = table.to_pandas()

    # Scale each feature column in place; ' T' passes through unchanged.
    for col, params in norm_params.items():
        chunk[col] = normalize_column(chunk[col], min_val=params['min_val'], max_val=params['max_val'])

    table = pa.Table.from_pandas(chunk, preserve_index=False)

    # Create the writer lazily from the first row group's schema, then append
    # every subsequent group to the same file.
    if writer is None:
        schema = table.schema
        writer = pq.ParquetWriter(output_path, schema)

    writer.write_table(table)

if writer is not None:
    writer.close()
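
# Optional sanity check (a sketch, reusing output_path from above): stream the
# normalized file back and confirm every scaled column lies within [0, 1].
check_file = pq.ParquetFile(output_path)
for i in range(check_file.num_row_groups):
    frame = check_file.read_row_group(i, columns=list(norm_params.keys())).to_pandas()
    for col in norm_params:
        assert frame[col].between(0.0, 1.0).all(), f'{col} out of range in row group {i}'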