michaelmior
committed on
Move data files
- .gitignore +1 -1
- test.jsonl.gz → data/test.jsonl.gz +0 -0
- train.jsonl.gz → data/train.jsonl.gz +0 -0
- validation.jsonl.gz → data/validation.jsonl.gz +0 -0
- fetch_files.sh +2 -2
- train_split.py +1 -1
- validate_schemas.py +1 -1
.gitignore CHANGED
@@ -1,5 +1,5 @@
 .env
-
+fetched_data/
 valid_data/
 
 # Fasttext model
test.jsonl.gz → data/test.jsonl.gz RENAMED (file without changes)
train.jsonl.gz → data/train.jsonl.gz RENAMED (file without changes)
validation.jsonl.gz → data/validation.jsonl.gz RENAMED (file without changes)
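For reference, the three relocated splits can still be read straight from the new data/ directory. The sketch below is not part of this commit; it assumes the Hugging Face `datasets` library and its generic "json" loader, which handles gzipped JSON Lines files.

# Illustration only (not in this commit): load the relocated gzipped JSONL
# splits from data/ with the generic "json" loader of the `datasets` library.
from datasets import load_dataset

splits = load_dataset(
    "json",
    data_files={
        "train": "data/train.jsonl.gz",
        "validation": "data/validation.jsonl.gz",
        "test": "data/test.jsonl.gz",
    },
)
print(splits)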
fetch_files.sh CHANGED
@@ -5,8 +5,8 @@ pv commits.json |
 while read url; do
     # Strip the url prefix to get the path
     path=$(echo "$url" | cut -d/ -f4-)
-    if ! [ -f "
-        curl "$url" --silent --create-dirs -o "
+    if ! [ -f "fetched_data/$path" ]; then
+        curl "$url" --silent --create-dirs -o "fetched_data/$path"
         sleep 1
     fi
 done
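The updated script caches downloads under fetched_data/ (which the .gitignore change above keeps out of the repository) and skips any file that was already fetched. A rough Python equivalent of that loop, shown purely as an illustration and not part of the repository, might look like this; the URL layout mirrors the script above, everything else is assumed.

# Illustration only: mirror fetch_files.sh in Python, skipping files that
# already exist under fetched_data/ and pausing between downloads.
import time
import urllib.request
from pathlib import Path

def fetch(url: str) -> None:
    # Strip the scheme and host to get the path (like `cut -d/ -f4-`)
    path = "/".join(url.split("/")[3:])
    target = Path("fetched_data") / path
    if not target.exists():
        target.parent.mkdir(parents=True, exist_ok=True)  # like --create-dirs
        urllib.request.urlretrieve(url, str(target))
        time.sleep(1)  # stay polite between requests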
train_split.py CHANGED
@@ -31,7 +31,7 @@ def files_list(licenses):
 
 def write_schemas(filename, schema_list, schema_data):
     sys.stderr.write(f"Writing {filename}…\n")
-    with gzip.open(filename, "wt") as f:
+    with gzip.open(Path("data") / filename, "wt") as f:
         for schema in tqdm.tqdm(list(schema_list)):
             filename = str(os.path.join(*Path(schema).parts[1:]))
 
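The only behavioural change here is that write_schemas now writes each split under data/ instead of the repository root. A minimal, self-contained sketch of that write pattern is shown below; the records and file name are placeholders, not the actual write_schemas implementation.

# Sketch only: write gzipped JSON Lines into data/, as write_schemas now does.
import gzip
import json
from pathlib import Path

Path("data").mkdir(exist_ok=True)  # the target directory must exist
records = [{"type": "object"}, {"type": "string"}]  # placeholder schemas
with gzip.open(Path("data") / "train.jsonl.gz", "wt") as f:
    for schema in records:
        f.write(json.dumps(schema) + "\n")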
validate_schemas.py CHANGED
@@ -36,5 +36,5 @@ if __name__ == "__main__":
     # Increase the recursion limit to handle large schemas
     sys.setrecursionlimit(10000)
 
-    data_path = Path("
+    data_path = Path("fetched_data")
     process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)
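The validation driver now walks fetched_data/ rather than the old location. The real process_file is not shown in this diff, so the sketch below substitutes a placeholder that merely checks each file parses as JSON; it illustrates the process_map pattern, not the repository's actual validation logic.

# Sketch only: parallel pass over fetched_data/*.json with tqdm's process_map.
# process_file here is a stand-in; the repository's real version is not shown.
import json
import sys
from pathlib import Path

from tqdm.contrib.concurrent import process_map

def process_file(path):
    try:
        json.loads(path.read_text())
        return True
    except (json.JSONDecodeError, UnicodeDecodeError):
        return False

if __name__ == "__main__":
    # Increase the recursion limit to handle large schemas
    sys.setrecursionlimit(10000)

    data_path = Path("fetched_data")
    results = process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)
    print(f"{sum(results)}/{len(results)} files parsed cleanly")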