Modalities: Text · Formats: json · Languages: English · Libraries: Datasets, pandas
michaelmior committed (verified)
Commit 7bd5ffd · Parent: f6dd396

Move data files

.gitignore CHANGED
@@ -1,5 +1,5 @@
 .env
-data/
+fetched_data/
 valid_data/
 
 # Fasttext model
test.jsonl.gz → data/test.jsonl.gz RENAMED
File without changes
train.jsonl.gz → data/train.jsonl.gz RENAMED
File without changes
validation.jsonl.gz → data/validation.jsonl.gz RENAMED
File without changes
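With the splits now under data/, the files still load the same way from a local clone. A minimal sketch using the datasets library listed above, with the paths taken from the renames (the dataset's repository id is not shown on this page, so this reads the local files directly):

from datasets import load_dataset

# Load the relocated gzip-compressed JSON Lines splits from a local
# checkout; the "json" loader decompresses .jsonl.gz transparently.
dataset = load_dataset(
    "json",
    data_files={
        "train": "data/train.jsonl.gz",
        "validation": "data/validation.jsonl.gz",
        "test": "data/test.jsonl.gz",
    },
)
print(dataset)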
fetch_files.sh CHANGED
@@ -5,8 +5,8 @@ pv commits.json |
 while read url; do
     # Strip the url prefix to get the path
     path=$(echo "$url" | cut -d/ -f4-)
-    if ! [ -f "data/$path" ]; then
-        curl "$url" --silent --create-dirs -o "data/$path"
+    if ! [ -f "fetched_data/$path" ]; then
+        curl "$url" --silent --create-dirs -o "fetched_data/$path"
         sleep 1
     fi
 done
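The cut -d/ -f4- step keeps everything from the fourth /-separated field onward, dropping the scheme and host and leaving a repository-relative path, which is what lets the script mirror each URL under fetched_data/. A quick Python illustration with a made-up URL:

# Hypothetical URL; "/".join(url.split("/")[3:]) mirrors `cut -d/ -f4-`.
url = "https://raw.example.com/owner/repo/schema.json"
path = "/".join(url.split("/")[3:])
print(path)  # owner/repo/schema.json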
train_split.py CHANGED
@@ -31,7 +31,7 @@ def files_list(licenses):
 
 def write_schemas(filename, schema_list, schema_data):
     sys.stderr.write(f"Writing {filename}…\n")
-    with gzip.open(filename, "wt") as f:
+    with gzip.open(Path("data") / filename, "wt") as f:
         for schema in tqdm.tqdm(list(schema_list)):
             filename = str(os.path.join(*Path(schema).parts[1:]))
 
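One thing this change relies on: gzip.open() does not create missing directories, so data/ has to exist before write_schemas runs. A minimal sketch of the new write pattern, with a made-up record:

import gzip
import json
from pathlib import Path

out_dir = Path("data")
out_dir.mkdir(exist_ok=True)  # gzip.open() will not create this itself

# Write one gzip-compressed JSON Lines record under data/, as
# write_schemas now does for each split file.
with gzip.open(out_dir / "example.jsonl.gz", "wt") as f:
    f.write(json.dumps({"schema": "example.json"}) + "\n")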
validate_schemas.py CHANGED
@@ -36,5 +36,5 @@ if __name__ == "__main__":
     # Increase the recursion limit to handle large schemas
     sys.setrecursionlimit(10000)
 
-    data_path = Path("data")
+    data_path = Path("fetched_data")
     process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)
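For context, the call above matches the signature of tqdm.contrib.concurrent.process_map (the import is outside the hunk), which maps process_file over the schema files in a process pool with a progress bar. A self-contained sketch of the same pattern, where process_file is a stand-in rather than the script's actual validator:

import sys
from pathlib import Path

from tqdm.contrib.concurrent import process_map


def process_file(path):
    # Stand-in for the real validator; just report the file size.
    return path.stat().st_size


if __name__ == "__main__":
    # Match the script: allow deep recursion for heavily nested schemas.
    sys.setrecursionlimit(10000)

    data_path = Path("fetched_data")
    sizes = process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)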