| import json |
| import os |
| import gzip |
| import requests |
|
|
| import pandas as pd |
|
|
# Remote locations of the commonsense tuple splits; every archive lives
# under the same host directory, so build the map from the file names.
urls = {
    split: f'https://home.ttic.edu/~kgimpel/comsense_resources/{archive}'
    for split, archive in (
        ('dev1', 'dev1.txt.gz'),
        ('dev2', 'dev2.txt.gz'),
        ('test', 'test.txt.gz'),
        ('train', 'train600k.txt.gz'),
    )
}
# Output directory for the converted .jsonl files.
os.makedirs("dataset", exist_ok=True)
|
|
|
|
def wget(url, cache_dir: str = './cache'):
    """Download a ``.gz`` file and decompress it into *cache_dir*.

    Parameters
    ----------
    url : str
        Address of a gzip-compressed file to fetch.
    cache_dir : str
        Directory used to cache downloads (created if missing).

    Returns
    -------
    str
        Path of the decompressed file inside *cache_dir*.
    """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url)
    # Bug fix: path was hard-coded to f'{cache_dir}/(unknown)', so every URL
    # collided on the same bogus file and `filename` was never used.
    path = os.path.join(cache_dir, filename)
    decompressed = path.replace('.gz', '')
    # The .gz archive is removed after extraction, so the cache check must
    # look for the decompressed file (checking `path` never hit the cache).
    if os.path.exists(decompressed):
        return decompressed
    r = requests.get(url)
    r.raise_for_status()  # fail loudly instead of gzipping an error page
    with open(path, "wb") as _f:
        _f.write(r.content)
    with gzip.open(path, 'rb') as _f:
        with open(decompressed, 'wb') as f_write:
            f_write.write(_f.read())
    os.remove(path)
    return decompressed
|
|
|
|
def read_file(file_name):
    """Parse a tab-separated tuple file into a DataFrame.

    Each non-empty line is expected to be
    ``relation<TAB>head<TAB>tail<TAB>flag``. Rows whose flag is ``'0'``
    (negative examples) and rows whose relation starts with ``"Not"`` are
    dropped; the ``flag`` column is removed from the result.

    Parameters
    ----------
    file_name : str
        Path to the decompressed split file.

    Returns
    -------
    pd.DataFrame
        Columns ``['relation', 'head', 'tail']``.
    """
    with open(file_name) as f_reader:
        rows = [line.split('\t') for line in f_reader.read().split('\n') if line]
    df = pd.DataFrame(rows, columns=['relation', 'head', 'tail', 'flag'])
    df = df[df['flag'] != '0']
    df.pop('flag')
    # Vectorized equivalent of the original per-row startswith comprehension.
    df = df[~df['relation'].str.startswith("Not")]
    return df
|
|
|
|
def rows_to_jsonl(df):
    """Serialize each DataFrame row as one JSON string (column -> value)."""
    return [json.dumps(row.to_dict()) for _, row in df.iterrows()]


if __name__ == '__main__':
    # Each output split is assembled from one or more source files; 'valid'
    # concatenates dev1 and dev2. Download order (test, train, dev1, dev2)
    # matches the original script.
    splits = [
        ('test', ['test']),
        ('train', ['train']),
        ('valid', ['dev1', 'dev2']),
    ]
    for split, sources in splits:
        lines = []
        for source in sources:
            lines.extend(rows_to_jsonl(read_file(wget(urls[source]))))
        with open(f'dataset/{split}.jsonl', 'w') as f:
            f.write("\n".join(lines))
|
|