From 2936635892e17031c37facfd2115e8cfd6633222 Mon Sep 17 00:00:00 2001
From: Yigit Sever
Date: Sun, 22 Sep 2019 01:33:24 +0300
Subject: Introduce linter, stylize

---
 WMD_matching.py         | 114 +++++++++++++++++++++++++++++++-----------------
 WMD_retrieval.py        | 112 ++++++++++++++++++++++++++++++-----------------
 Wasserstein_Distance.py | 109 +++++++++++++++++++++++++++++----------------
 3 files changed, 219 insertions(+), 116 deletions(-)

diff --git a/WMD_matching.py b/WMD_matching.py
index 59b64f9..ea496b8 100644
--- a/WMD_matching.py
+++ b/WMD_matching.py
@@ -1,15 +1,19 @@
 import argparse
-import numpy as np
+import csv
 import random
+
+import numpy as np
 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
 from sklearn.preprocessing import normalize
-from Wasserstein_Distance import Wasserstein_Matcher
-from Wasserstein_Distance import load_embeddings, clean_corpus_using_embeddings_vocabulary
-import csv
+
+from Wasserstein_Distance import (Wasserstein_Matcher,
+                                  clean_corpus_using_embeddings_vocabulary,
+                                  load_embeddings)
+
 
 def main(args):
-    np.seterr(divide='ignore') # POT has issues with divide by zero errors
+    np.seterr(divide='ignore')  # POT has issues with divide by zero errors
 
     source_lang = args.source_lang
     target_lang = args.target_lang
@@ -25,32 +29,38 @@ def main(args):
     mode = args.mode
 
     runfor = list()
-    if (mode == 'all'):
-        runfor.extend(['wmd','snk'])
+    if mode == 'all':
+        runfor.extend(['wmd', 'snk'])
     else:
         runfor.append(mode)
 
-    defs_source = [line.rstrip('\n') for line in open(source_defs_filename, encoding='utf8')]
-    defs_target = [line.rstrip('\n') for line in open(target_defs_filename, encoding='utf8')]
+    defs_source = [
+        line.rstrip('\n')
+        for line in open(source_defs_filename, encoding='utf8')
+    ]
+    defs_target = [
+        line.rstrip('\n')
+        for line in open(target_defs_filename, encoding='utf8')
+    ]
 
     clean_src_corpus, clean_src_vectors, src_keys = clean_corpus_using_embeddings_vocabulary(
-            set(vectors_source.keys()),
-            defs_source,
-            vectors_source,
-            source_lang,
-            )
+        set(vectors_source.keys()),
+        defs_source,
+        vectors_source,
+        source_lang,
+    )
 
     clean_target_corpus, clean_target_vectors, target_keys = clean_corpus_using_embeddings_vocabulary(
-            set(vectors_target.keys()),
-            defs_target,
-            vectors_target,
-            target_lang,
-            )
+        set(vectors_target.keys()),
+        defs_target,
+        vectors_target,
+        target_lang,
+    )
 
     take = args.instances
 
     common_keys = set(src_keys).intersection(set(target_keys))
-    take = min(len(common_keys), take) # you can't sample more than length
+    take = min(len(common_keys), take)  # you can't sample more than length
     experiment_keys = random.sample(common_keys, take)
 
     instances = len(experiment_keys)
@@ -58,13 +68,18 @@ def main(args):
     clean_src_corpus = list(clean_src_corpus[experiment_keys])
     clean_target_corpus = list(clean_target_corpus[experiment_keys])
 
-    if (not batch):
-        print(f'{source_lang} - {target_lang} : document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}')
+    if not batch:
+        print(
+            f'{source_lang} - {target_lang} : document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}'
+        )
 
     del vectors_source, vectors_target, defs_source, defs_target
 
     vec = CountVectorizer().fit(clean_src_corpus + clean_target_corpus)
-    common = [word for word in vec.get_feature_names() if word in clean_src_vectors or word in clean_target_vectors]
+    common = [
+        word for word in vec.get_feature_names()
+        if word in clean_src_vectors or word in clean_target_vectors
+    ]
     W_common = []
     for w in common:
         if w in clean_src_vectors:
@@ -72,8 +87,10 @@ def main(args):
         else:
             W_common.append(np.array(clean_target_vectors[w]))
 
-    if (not batch):
-        print(f'{source_lang} - {target_lang}: the vocabulary size is {len(W_common)}')
+    if not batch:
+        print(
+            f'{source_lang} - {target_lang}: the vocabulary size is {len(W_common)}'
+        )
 
     W_common = np.array(W_common)
     W_common = normalize(W_common)
@@ -82,26 +99,28 @@ def main(args):
     X_train_idf = vect.transform(clean_src_corpus)
     X_test_idf = vect.transform(clean_target_corpus)
 
-    vect_tf = CountVectorizer(vocabulary=common, dtype=np.double)
-    vect_tf.fit(clean_src_corpus + clean_target_corpus)
-    X_train_tf = vect_tf.transform(clean_src_corpus)
-    X_test_tf = vect_tf.transform(clean_target_corpus)
-
     for metric in runfor:
-        if (not batch):
+        if not batch:
            print(f'{metric}: {source_lang} - {target_lang}')
 
-        clf = Wasserstein_Matcher(W_embed=W_common, n_neighbors=5, n_jobs=14, sinkhorn=(metric == 'snk'))
+        clf = Wasserstein_Matcher(W_embed=W_common,
+                                  n_neighbors=5,
+                                  n_jobs=14,
+                                  sinkhorn=(metric == 'snk'))
         clf.fit(X_train_idf[:instances], np.ones(instances))
-        row_ind, col_ind, a = clf.kneighbors(X_test_idf[:instances], n_neighbors=instances)
+        row_ind, col_ind, _ = clf.kneighbors(X_test_idf[:instances],
+                                             n_neighbors=instances)
         result = zip(row_ind, col_ind)
-        p_at_one = len([x for x,y in result if x == y])
+        p_at_one = len([x for x, y in result if x == y])
         percentage = p_at_one / instances * 100
 
-        if (not batch):
+        if not batch:
             print(f'P @ 1: {p_at_one}\ninstances: {instances}\n{percentage}%')
         else:
-            fields = [f'{source_lang}', f'{target_lang}', f'{instances}', f'{p_at_one}', f'{percentage}']
+            fields = [
+                f'{source_lang}', f'{target_lang}', f'{instances}',
+                f'{p_at_one}', f'{percentage}'
+            ]
             with open(f'{metric}_matching_results.csv', 'a') as f:
                 writer = csv.writer(f)
                 writer.writerow(fields)
@@ -109,16 +128,31 @@ def main(args):
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='matching using wmd and wasserstein distance')
+    parser = argparse.ArgumentParser(
+        description='matching using wmd and wasserstein distance')
     parser.add_argument('source_lang', help='source language short name')
     parser.add_argument('target_lang', help='target language short name')
     parser.add_argument('source_vector', help='path of the source vector')
     parser.add_argument('target_vector', help='path of the target vector')
     parser.add_argument('source_defs', help='path of the source definitions')
     parser.add_argument('target_defs', help='path of the target definitions')
-    parser.add_argument('-b', '--batch', action='store_true', help='running in batch (store results in csv) or running a single instance (output the results)')
-    parser.add_argument('mode', choices=['all', 'wmd', 'snk'], default='all', help='which methods to run')
-    parser.add_argument('-n', '--instances', help='number of instances in each language to retrieve', default=1000, type=int)
+    parser.add_argument(
+        '-b',
+        '--batch',
+        action='store_true',
+        help=
+        'running in batch (store results in csv) or running a single instance (output the results)'
+    )
+    parser.add_argument('mode',
+                        choices=['all', 'wmd', 'snk'],
+                        default='all',
+                        help='which methods to run')
+    parser.add_argument(
+        '-n',
+        '--instances',
+        help='number of instances in each language to retrieve',
+        default=1000,
+        type=int)
 
     args = parser.parse_args()
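
A note for readers: despite its name, kneighbors() here does not return
nearest neighbours at all; it feeds the pairwise WMD matrix to lapjv and
returns a one-to-one assignment, and P@1 counts the fixed points of that
assignment. A minimal sketch on a hypothetical 3x3 cost matrix (assuming
the lapjv package is installed; the x1000 scaling mirrors the comment in
kneighbors() below):

    import numpy as np
    from lapjv import lapjv

    # Stand-in for the pairwise WMD matrix; row i and column i correspond
    # to source/target definitions of the same dictionary key.
    dist = np.array([[0.1, 0.9, 0.8],
                     [0.7, 0.2, 0.9],
                     [0.8, 0.9, 0.3]]) * 1000

    row_ind, col_ind, _ = lapjv(dist)  # Jonker-Volgenant assignment

    # A pair counts as correct when the assignment is a fixed point,
    # because the sampled corpora are aligned by construction.
    p_at_one = len([x for x, y in zip(row_ind, col_ind) if x == y])
    print(p_at_one / dist.shape[0] * 100)  # 100.0 on this toy matrix
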
diff --git a/WMD_retrieval.py b/WMD_retrieval.py
index f32372f..3328023 100644
--- a/WMD_retrieval.py
+++ b/WMD_retrieval.py
@@ -1,15 +1,19 @@
 import argparse
-import numpy as np
+import csv
 import random
+
+import numpy as np
 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
 from sklearn.preprocessing import normalize
-from Wasserstein_Distance import Wasserstein_Retriever
-from Wasserstein_Distance import load_embeddings, clean_corpus_using_embeddings_vocabulary, mrr_precision_at_k
-import csv
+
+from Wasserstein_Distance import (Wasserstein_Retriever,
+                                  clean_corpus_using_embeddings_vocabulary,
+                                  load_embeddings)
+
 
 def main(args):
-    np.seterr(divide='ignore') # POT has issues with divide by zero errors
+    np.seterr(divide='ignore')  # POT has issues with divide by zero errors
 
     source_lang = args.source_lang
     target_lang = args.target_lang
@@ -25,32 +29,38 @@ def main(args):
     mode = args.mode
 
     runfor = list()
-    if (mode == 'all'):
-        runfor.extend(['wmd','snk'])
+    if mode == 'all':
+        runfor.extend(['wmd', 'snk'])
     else:
         runfor.append(mode)
 
-    defs_source = [line.rstrip('\n') for line in open(source_defs_filename, encoding='utf8')]
-    defs_target = [line.rstrip('\n') for line in open(target_defs_filename, encoding='utf8')]
+    defs_source = [
+        line.rstrip('\n')
+        for line in open(source_defs_filename, encoding='utf8')
+    ]
+    defs_target = [
+        line.rstrip('\n')
+        for line in open(target_defs_filename, encoding='utf8')
+    ]
 
     clean_src_corpus, clean_src_vectors, src_keys = clean_corpus_using_embeddings_vocabulary(
-            set(vectors_source.keys()),
-            defs_source,
-            vectors_source,
-            source_lang,
-            )
+        set(vectors_source.keys()),
+        defs_source,
+        vectors_source,
+        source_lang,
+    )
 
     clean_target_corpus, clean_target_vectors, target_keys = clean_corpus_using_embeddings_vocabulary(
-            set(vectors_target.keys()),
-            defs_target,
-            vectors_target,
-            target_lang,
-            )
+        set(vectors_target.keys()),
+        defs_target,
+        vectors_target,
+        target_lang,
+    )
 
     take = args.instances
 
     common_keys = set(src_keys).intersection(set(target_keys))
-    take = min(len(common_keys), take) # you can't sample more than length
+    take = min(len(common_keys), take)  # you can't sample more than length
     experiment_keys = random.sample(common_keys, take)
 
     instances = len(experiment_keys)
@@ -58,13 +68,18 @@ def main(args):
     clean_src_corpus = list(clean_src_corpus[experiment_keys])
     clean_target_corpus = list(clean_target_corpus[experiment_keys])
 
-    if (not batch):
-        print(f'{source_lang} - {target_lang} : document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}')
+    if not batch:
+        print(
+            f'{source_lang} - {target_lang} : document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}'
+        )
 
     del vectors_source, vectors_target, defs_source, defs_target
 
     vec = CountVectorizer().fit(clean_src_corpus + clean_target_corpus)
-    common = [word for word in vec.get_feature_names() if word in clean_src_vectors or word in clean_target_vectors]
+    common = [
+        word for word in vec.get_feature_names()
+        if word in clean_src_vectors or word in clean_target_vectors
+    ]
     W_common = []
     for w in common:
         if w in clean_src_vectors:
@@ -72,8 +87,10 @@ def main(args):
         else:
             W_common.append(np.array(clean_target_vectors[w]))
 
-    if (not batch):
-        print(f'{source_lang} - {target_lang}: the vocabulary size is {len(W_common)}')
+    if not batch:
+        print(
+            f'{source_lang} - {target_lang}: the vocabulary size is {len(W_common)}'
+        )
 
     W_common = np.array(W_common)
     W_common = normalize(W_common)
@@ -82,26 +99,28 @@ def main(args):
     X_train_idf = vect.transform(clean_src_corpus)
     X_test_idf = vect.transform(clean_target_corpus)
 
-    vect_tf = CountVectorizer(vocabulary=common, dtype=np.double)
-    vect_tf.fit(clean_src_corpus + clean_target_corpus)
-    X_train_tf = vect_tf.transform(clean_src_corpus)
-    X_test_tf = vect_tf.transform(clean_target_corpus)
-
     for metric in runfor:
-        if (not batch):
+        if not batch:
             print(f'{metric} - tfidf: {source_lang} - {target_lang}')
 
-        clf = Wasserstein_Retriever(W_embed=W_common, n_neighbors=5, n_jobs=14, sinkhorn=(metric == 'snk'))
+        clf = Wasserstein_Retriever(W_embed=W_common,
+                                    n_neighbors=5,
+                                    n_jobs=14,
+                                    sinkhorn=(metric == 'snk'))
         clf.fit(X_train_idf[:instances], np.ones(instances))
         # dist, preds = clf.kneighbors(X_test_idf[:instances], n_neighbors=instances)
         # mrr, p_at_one = mrr_precision_at_k(list(range(len(preds))), preds)
         # percentage = p_at_one * 100
-        p_at_one, percentage = clf.align(X_test_idf[:instances], n_neighbors=instances)
+        p_at_one, percentage = clf.align(X_test_idf[:instances],
+                                         n_neighbors=instances)
 
-        if (not batch):
+        if not batch:
             print(f'P @ 1: {p_at_one}\ninstances: {instances}\n{percentage}%')
         else:
-            fields = [f'{source_lang}', f'{target_lang}', f'{instances}', f'{p_at_one}', f'{percentage}']
+            fields = [
+                f'{source_lang}', f'{target_lang}', f'{instances}',
+                f'{p_at_one}', f'{percentage}'
+            ]
             with open(f'{metric}_retrieval_result.csv', 'a') as f:
                 writer = csv.writer(f)
                 writer.writerow(fields)
@@ -109,16 +128,31 @@ def main(args):
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='run retrieval using wmd or snk')
+    parser = argparse.ArgumentParser(
+        description='run retrieval using wmd or snk')
     parser.add_argument('source_lang', help='source language short name')
     parser.add_argument('target_lang', help='target language short name')
     parser.add_argument('source_vector', help='path of the source vector')
     parser.add_argument('target_vector', help='path of the target vector')
     parser.add_argument('source_defs', help='path of the source definitions')
     parser.add_argument('target_defs', help='path of the target definitions')
-    parser.add_argument('-b', '--batch', action='store_true', help='running in batch (store results in csv) or running a single instance (output the results)')
-    parser.add_argument('mode', choices=['all', 'wmd', 'snk'], default='all', help='which methods to run')
-    parser.add_argument('-n', '--instances', help='number of instances in each language to retrieve', default=1000, type=int)
+    parser.add_argument(
+        '-b',
+        '--batch',
+        action='store_true',
+        help=
+        'running in batch (store results in csv) or running a single instance (output the results)'
+    )
+    parser.add_argument('mode',
+                        choices=['all', 'wmd', 'snk'],
+                        default='all',
+                        help='which methods to run')
+    parser.add_argument(
+        '-n',
+        '--instances',
+        help='number of instances in each language to retrieve',
+        default=1000,
+        type=int)
 
     args = parser.parse_args()
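
Both scripts share the same vectorization recipe: a CountVectorizer fixes
the joint vocabulary, words without an embedding are dropped, and a
TfidfVectorizer restricted to that vocabulary yields the rows handed to the
matcher/retriever. A rough, self-contained sketch of that pipeline; the
TfidfVectorizer construction itself lies outside the hunks shown, so its
exact arguments here are an assumption, and the corpora and embeddings are
toy data:

    import numpy as np
    from sklearn.feature_extraction.text import (CountVectorizer,
                                                 TfidfVectorizer)

    clean_src_corpus = ['dog__en bark__en', 'cat__en sleep__en']
    clean_target_corpus = ['hund__de bellen__de', 'katze__de schlafen__de']
    words = ('dog__en bark__en cat__en sleep__en '
             'hund__de bellen__de katze__de schlafen__de').split()
    embeddings = {w: np.random.rand(300) for w in words}

    vec = CountVectorizer().fit(clean_src_corpus + clean_target_corpus)
    # scikit-learn >= 1.2 renames this to get_feature_names_out()
    common = [w for w in vec.get_feature_names() if w in embeddings]
    W_common = np.array([embeddings[w] for w in common])

    # Restricting tf-idf to the common vocabulary keeps its columns
    # aligned with the rows of the embedding matrix W_common.
    vect = TfidfVectorizer(vocabulary=common, dtype=np.double)
    vect.fit(clean_src_corpus + clean_target_corpus)
    X_train_idf = vect.transform(clean_src_corpus)
    X_test_idf = vect.transform(clean_target_corpus)
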
diff --git a/Wasserstein_Distance.py b/Wasserstein_Distance.py
index 08439d2..161c13c 100644
--- a/Wasserstein_Distance.py
+++ b/Wasserstein_Distance.py
@@ -1,15 +1,14 @@
-import ot
-from sklearn.preprocessing import normalize
-from lapjv import lapjv
-from sklearn.neighbors import KNeighborsClassifier
+import numpy as np
 from sklearn.metrics import euclidean_distances
-from sklearn.externals.joblib import Parallel, delayed
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.preprocessing import normalize
 from sklearn.utils import check_array
-from sklearn.metrics.scorer import check_scoring
-from pathos.multiprocessing import ProcessingPool as Pool
-from sklearn.metrics import euclidean_distances
-import numpy as np
+
+import ot
+from lapjv import lapjv
 from mosestokenizer import MosesTokenizer
+from pathos.multiprocessing import ProcessingPool as Pool
+
 
 class Wasserstein_Matcher(KNeighborsClassifier):
     """
@@ -17,7 +16,13 @@ class Wasserstein_Matcher(KNeighborsClassifier):
     Source and target distributions are l_1 normalized before computing the Wasserstein distance.
     Wasserstein is parametrized by the distances between the individual points of the distributions.
     """
-    def __init__(self, W_embed, n_neighbors=1, n_jobs=1, verbose=False, sinkhorn= False, sinkhorn_reg=0.1):
+    def __init__(self,
+                 W_embed,
+                 n_neighbors=1,
+                 n_jobs=1,
+                 verbose=False,
+                 sinkhorn=False,
+                 sinkhorn_reg=0.1):
         """
         Initialization of the class.
         Arguments
@@ -29,7 +34,10 @@ class Wasserstein_Matcher(KNeighborsClassifier):
         self.sinkhorn_reg = sinkhorn_reg
         self.W_embed = W_embed
         self.verbose = verbose
-        super(Wasserstein_Matcher, self).__init__(n_neighbors=n_neighbors, n_jobs=n_jobs, metric='precomputed', algorithm='brute')
+        super(Wasserstein_Matcher, self).__init__(n_neighbors=n_neighbors,
+                                                  n_jobs=n_jobs,
+                                                  metric='precomputed',
+                                                  algorithm='brute')
 
     def _wmd(self, i, row, X_train):
         union_idx = np.union1d(X_train[i].indices, row.indices)
@@ -38,9 +46,16 @@ class Wasserstein_Matcher(KNeighborsClassifier):
         bow_i = X_train[i, union_idx].A.ravel()
         bow_j = row[:, union_idx].A.ravel()
         if self.sinkhorn:
-            return ot.sinkhorn2(bow_i, bow_j, W_dist, self.sinkhorn_reg, numItermax=50, method='sinkhorn_stabilized',)[0]
+            return ot.sinkhorn2(
+                bow_i,
+                bow_j,
+                W_dist,
+                self.sinkhorn_reg,
+                numItermax=50,
+                method='sinkhorn_stabilized',
+            )[0]
         else:
-            return  ot.emd2(bow_i, bow_j, W_dist)
+            return ot.emd2(bow_i, bow_j, W_dist)
 
     def _wmd_row(self, row):
         X_train = self._fit_X
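
_wmd above restricts both bags-of-words to the union of their supports
before calling into POT, which keeps the ground-distance matrix small. A
self-contained sketch of that computation on toy data (assuming the pot
and scikit-learn packages; recent POT releases return a plain float from
sinkhorn2, whereas the [0] above reflects the older array-returning API):

    import numpy as np
    import ot
    from sklearn.metrics import euclidean_distances

    rng = np.random.RandomState(0)
    W_common = rng.rand(6, 300)  # one embedding row per vocabulary word

    # l_1-normalized bag-of-words weights over the union of supports,
    # mirroring what _wmd builds via union_idx.
    bow_i = np.array([0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
    bow_j = np.array([0.0, 0.0, 0.25, 0.25, 0.25, 0.25])

    W_dist = euclidean_distances(W_common)  # ground metric between words

    wmd = ot.emd2(bow_i, bow_j, W_dist)  # exact Wasserstein distance
    snk = ot.sinkhorn2(bow_i, bow_j, W_dist, 0.1,
                       numItermax=50, method='sinkhorn_stabilized')
    print(wmd, snk)
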
@@ -52,28 +67,31 @@ class Wasserstein_Matcher(KNeighborsClassifier):
         if X_train is None:
             X_train = self._fit_X
-        pool = Pool(nodes=self.n_jobs) # Parallelization of the calculation of the distances
-        dist  = pool.map(self._wmd_row, X_test)
+        pool = Pool(nodes=self.n_jobs
+                    )  # Parallelization of the calculation of the distances
+        dist = pool.map(self._wmd_row, X_test)
         return np.array(dist)
 
-    def fit(self, X, y): # X_train_idf
-        X = check_array(X, accept_sparse='csr', copy=True) # check if array is sparse
+    def fit(self, X, y):  # X_train_idf
+        X = check_array(X, accept_sparse='csr',
+                        copy=True)  # check if array is sparse
         X = normalize(X, norm='l1', copy=False)
-        return super(Wasserstein_Matcher, self).fit(X, y) # X_train_idf, np_ones(document collection size)
+        return super(Wasserstein_Matcher, self).fit(
+            X, y)  # X_train_idf, np_ones(document collection size)
 
     def predict(self, X):
         X = check_array(X, accept_sparse='csr', copy=True)
         X = normalize(X, norm='l1', copy=False)
         dist = self._pairwise_wmd(X)
-        dist = dist * 1000 # for lapjv, small floating point numbers are evil
+        dist = dist * 1000  # for lapjv, small floating point numbers are evil
         return super(Wasserstein_Matcher, self).predict(dist)
 
-    def kneighbors(self, X, n_neighbors=1): # X : X_train_idf
+    def kneighbors(self, X, n_neighbors=1):  # X : X_train_idf
         X = check_array(X, accept_sparse='csr', copy=True)
         X = normalize(X, norm='l1', copy=False)
         dist = self._pairwise_wmd(X)
-        dist = dist * 1000 # for lapjv, small floating point numbers are evil
-        return lapjv(dist) # and here is the matching part
+        dist = dist * 1000  # for lapjv, small floating point numbers are evil
+        return lapjv(dist)  # and here is the matching part
 
 
 class Wasserstein_Retriever(KNeighborsClassifier):
     """
@@ -82,7 +100,13 @@ class Wasserstein_Retriever(KNeighborsClassifier):
     Source and target distributions are l_1 normalized before computing the Wasserstein distance.
     Wasserstein is parametrized by the distances between the individual points of the distributions.
     """
-    def __init__(self, W_embed, n_neighbors=1, n_jobs=1, verbose=False, sinkhorn= False, sinkhorn_reg=0.1):
+    def __init__(self,
+                 W_embed,
+                 n_neighbors=1,
+                 n_jobs=1,
+                 verbose=False,
+                 sinkhorn=False,
+                 sinkhorn_reg=0.1):
         """
         Initialization of the class.
         Arguments
@@ -94,7 +118,10 @@ class Wasserstein_Retriever(KNeighborsClassifier):
         self.sinkhorn_reg = sinkhorn_reg
         self.W_embed = W_embed
         self.verbose = verbose
-        super(Wasserstein_Retriever, self).__init__(n_neighbors=n_neighbors, n_jobs=n_jobs, metric='precomputed', algorithm='brute')
+        super(Wasserstein_Retriever, self).__init__(n_neighbors=n_neighbors,
+                                                    n_jobs=n_jobs,
+                                                    metric='precomputed',
+                                                    algorithm='brute')
 
     def _wmd(self, i, row, X_train):
         union_idx = np.union1d(X_train[i].indices, row.indices)
@@ -103,9 +130,16 @@ class Wasserstein_Retriever(KNeighborsClassifier):
         bow_i = X_train[i, union_idx].A.ravel()
         bow_j = row[:, union_idx].A.ravel()
         if self.sinkhorn:
-            return ot.sinkhorn2(bow_i, bow_j, W_dist, self.sinkhorn_reg, numItermax=50, method='sinkhorn_stabilized',)[0]
+            return ot.sinkhorn2(
+                bow_i,
+                bow_j,
+                W_dist,
+                self.sinkhorn_reg,
+                numItermax=50,
+                method='sinkhorn_stabilized',
+            )[0]
         else:
-            return  ot.emd2(bow_i, bow_j, W_dist)
+            return ot.emd2(bow_i, bow_j, W_dist)
 
     def _wmd_row(self, row):
         X_train = self._fit_X
@@ -117,8 +151,8 @@ class Wasserstein_Retriever(KNeighborsClassifier):
         if X_train is None:
             X_train = self._fit_X
-        pool = Pool(nodes=self.n_jobs) # Parallelization of the calculation of the distances
-        dist = pool.map(self._wmd_row, X_test)
+        pool = Pool(nodes=self.n_jobs)
+        dist = pool.map(self._wmd_row, X_test)
         return np.array(dist)
 
     def fit(self, X, y):
@@ -144,8 +178,8 @@ class Wasserstein_Retriever(KNeighborsClassifier):
         precision at one and percentage values
         """
 
-        dist, preds = self.kneighbors(X, n_neighbors)
-        mrr, p_at_one = mrr_precision_at_k(list(range(len(preds))), preds)
+        _, preds = self.kneighbors(X, n_neighbors)
+        _, p_at_one = mrr_precision_at_k(list(range(len(preds))), preds)
         percentage = p_at_one * 100
         return (p_at_one, percentage)
 
@@ -168,7 +202,8 @@ def load_embeddings(path, dimension=300):
         fp.seek(0)
         for line in fp:
             elems = line.split()
-            vectors[" ".join(elems[:-dimension])] = " ".join(elems[-dimension:])
+            vectors[" ".join(elems[:-dimension])] = " ".join(
+                elems[-dimension:])
     return vectors
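
The " ".join(elems[:-dimension]) idiom in load_embeddings exists because an
embedding key may itself contain spaces (multi-word entries); only the last
dimension fields of a line are vector components. A toy illustration with a
hypothetical 3-dimensional entry:

    import numpy as np

    line = "new york -0.1 0.25 0.33"
    dimension = 3

    elems = line.split()
    key = " ".join(elems[:-dimension])  # 'new york'
    vector = np.array(elems[-dimension:], dtype=float)
    print(key, vector)  # new york [-0.1   0.25  0.33]
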
@@ -177,7 +212,7 @@ def clean_corpus_using_embeddings_vocabulary(
         corpus,
         vectors,
         language,
-        ):
+):
     '''
     Cleans corpus using the dictionary of embeddings.
     Any word without an associated embedding in the dictionary is ignored.
@@ -192,7 +227,8 @@ def clean_corpus_using_embeddings_vocabulary(
         for word in words:
             if word in words_we_want:
                 clean_doc.append(word + '__%s' % language)
-                clean_vectors[word + '__%s' % language] = np.array(vectors[word].split()).astype(np.float)
+                clean_vectors[word + '__%s' % language] = np.array(
+                    vectors[word].split()).astype(np.float)
         if len(clean_doc) > 3 and len(clean_doc) < 25:
             keys.append(key)
             clean_corpus.append(' '.join(clean_doc))
@@ -208,10 +244,9 @@ def mrr_precision_at_k(golden, preds, k_list=[1,]):
     precision_at = np.zeros(len(k_list))
     for key, elem in enumerate(golden):
         if elem in preds[key]:
-            location = np.where(preds[key]==elem)[0][0]
-            my_score += 1/(1+ location)
+            location = np.where(preds[key] == elem)[0][0]
+            my_score += 1 / (1 + location)
             for k_index, k_value in enumerate(k_list):
                 if location < k_value:
                     precision_at[k_index] += 1
-    return my_score/len(golden), (precision_at/len(golden))[0]
-
+    return my_score / len(golden), (precision_at / len(golden))[0]
--
cgit v1.2.3-70-g09d2
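
For completeness, a quick toy check of what mrr_precision_at_k computes;
golden is range(n) because both corpora are sampled from the same keys, and
each row of preds is a ranked list of neighbour indices (toy values below):

    import numpy as np
    from Wasserstein_Distance import mrr_precision_at_k

    preds = np.array([[0, 2, 1],
                      [2, 1, 0],
                      [1, 0, 2]])
    mrr, p_at_one = mrr_precision_at_k(list(range(3)), preds)
    print(mrr)       # (1 + 1/2 + 1/3) / 3 = 0.611...
    print(p_at_one)  # only query 0 ranks its gold item first -> 0.333...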