Skip to content
Open
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 6 additions & 5 deletions codes/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
import logging
import os
import random
import unicodedata

import numpy as np
import torch
Expand Down Expand Up @@ -123,7 +124,7 @@ def read_triple(file_path, entity2id, relation2id):
triples = []
with open(file_path) as fin:
for line in fin:
h, r, t = line.strip().split('\t')
h, r, t = map(lambda x: x.strip(), unicodedata.normalize('NFKC', line).split('\t'))
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

`str.strip` could be a shorter replacement for `lambda x: x.strip()`.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, didn't realize this was possible.

triples.append((entity2id[h], relation2id[r], entity2id[t]))
return triples

Expand Down Expand Up @@ -160,12 +161,12 @@ def log_metrics(mode, step, metrics):

def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test):
raise ValueError('one of train/val/test mode must be choosed.')
raise ValueError('one of train/val/test mode must be chosen.')

if args.init_checkpoint:
override_config(args)
elif args.data_path is None:
raise ValueError('one of init_checkpoint/data_path must be choosed.')
raise ValueError('one of init_checkpoint/data_path must be chosen.')

if args.do_train and args.save_path is None:
raise ValueError('Where do you want to save your trained model?')
Expand All @@ -179,13 +180,13 @@ def main(args):
with open(os.path.join(args.data_path, 'entities.dict')) as fin:
entity2id = dict()
for line in fin:
eid, entity = line.strip().split('\t')
eid, entity = map(lambda x: x.strip(), unicodedata.normalize('NFKC', line).split('\t'))
entity2id[entity] = int(eid)

with open(os.path.join(args.data_path, 'relations.dict')) as fin:
relation2id = dict()
for line in fin:
rid, relation = line.strip().split('\t')
rid, relation = map(lambda x: x.strip(), unicodedata.normalize('NFKC', line).split('\t'))
relation2id[relation] = int(rid)

# Read regions for Countries S* datasets
Expand Down