"""
This script converts the raw ``newsSpace`` data into CSV files.

Usage:

    make newsSpace
    python convert.py
"""

import csv
import html
import os
import sys

import pandas as pd

from bs4 import BeautifulSoup


HEADER = [
    "source",
    "url",
    "title",
    "image",
    "category",
    "description",
    "rank",
    "pubdate",
]
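# Each newsSpace record is tab-separated with the fields listed in HEADER
# and is terminated by a literal "\t\N" sentinel (see the split in run()).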

OUTPUT_FILE_PATH = os.path.join("data", "original.csv")


def _clean_text(text):
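    """
    Unescape HTML entities and normalise backslash-escaped newlines.
    A field containing only ``\\N`` (the dump's NULL marker) becomes "".
    """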
    text = text.replace("\\\n", "\n")
    text = html.unescape(text)

    if text == "\\N":
        return ""

    return text


def _clean_html(text):
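    """
    Convert an HTML fragment to plain text, preserving paragraph and
    ``<br>`` breaks as newlines. Returns "" for empty or "null" content.
    """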
    html_code = _clean_text(text)
    # str.replace returns a new string, so the results must be assigned back.
    html_code = html_code.replace("</p>", "\n\n</p>")
    html_code = html_code.replace("<br>", "\n")

    soup = BeautifulSoup(html_code, "html.parser")
    text = soup.get_text(separator=" ")

    text = text.replace(" \n", "\n").replace("\n ", "\n")

    lines = [line.strip() for line in text.split("\n")]

    if lines:
        output = "\n".join(lines)
        output = output.strip()

        if output == "null":
            return ""

        return output

    return ""


def _clean_image(image):
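    """
    Map the literal string "none" to None so the CSV writer emits an
    empty field for missing images.
    """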
    if image == "none":
        return None
    return image


def _clean_rank(rank):
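    """
    Parse the rank field as an integer.
    """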
    return int(rank)


def run():
    """
    Run the conversion process.
    """
    rows = []
    categories = set()

    with open("newsSpace", encoding="ISO-8859-15") as f:
        doc = f.read()

    # Each record ends with a "\t\N" sentinel, so it doubles as the separator.
    for row in doc.split("\t\\N\n"):
        if not row:
            continue

        # Escaped tabs belong inside fields; drop them so the split below
        # yields exactly eight columns.
        row = row.replace("\\\t", "")

        try:
            source, url, title, image, category, description, rank, pubdate = row.split(
                "\t"
            )
        except ValueError:
            print(repr(row))
            sys.exit(1)

        categories.add(category)

        obj = {
            "source": source,
            "url": url,
            "title": _clean_text(title),
            "image": _clean_image(image),
            "category": category,
            "description": _clean_text(description),
            "rank": _clean_rank(rank),
            "pubdate": pubdate,
            "text": _clean_html(description),
        }

        # Keep only records whose description survives HTML stripping.
        if obj["text"]:
            rows.append(obj)

    _categories = sorted(categories)

    save_categories(_categories)

    # Label each row with the index of its category in the sorted list.
    for row in rows:
        row["label"] = _categories.index(row["category"])

    save_csv(rows)
    save_csv_categories(
        ["World", "Sports", "Business", "Sci/Tech"], "top4-balanced", is_balanced=True
    )


def save_csv(rows, fname=OUTPUT_FILE_PATH):
    """
    Save the processed data into a CSV file.
    """
    os.makedirs("data", exist_ok=True)

    # newline="" lets the csv module control line endings (avoids blank
    # rows on Windows).
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()

        for row in rows:
            writer.writerow(row)


def save_csv_categories(categories, config_name, is_balanced=True, **kwargs):
    """
    Filter the data by categories and save it to ``data/<config_name>.csv``.
    If is_balanced is True, each class is downsampled to the size of the
    class with the fewest examples.
    """
    df = pd.read_csv(OUTPUT_FILE_PATH)

    if is_balanced:
        dfs = []
        for category in categories:
            _df = df[df["category"] == category]
            dfs.append(_df)

        # Downsample every class to the size of the smallest one.
        min_size = min(len(_df) for _df in dfs)

        dfs = [_df.sample(min_size) for _df in dfs]
        df = pd.concat(dfs)
    else:
        df = df[df["category"].isin(categories)]

    # Re-map labels to the index of each category in the requested list.
    for i, row in df.iterrows():
        df.at[i, "label"] = categories.index(row["category"])

    df.to_csv(os.path.join("data", f"{config_name}.csv"), index=False)


def save_categories(categories, fname="categories.txt"):
    """
    Save the categories into a text file.
    """
    with open(fname, "w") as f:
        for category in categories:
            # Write "\n" rather than os.linesep: text mode already translates
            # it to the platform line ending, so os.linesep would produce
            # "\r\r\n" on Windows.
            f.write(category + "\n")


if __name__ == "__main__":
    run()