| author | Yigit Sever | 2019-09-25 16:09:33 +0300 |
|---|---|---|
| committer | Yigit Sever | 2019-09-25 16:09:33 +0300 |
| commit | 2226f211247be783846073b99d70b673b7cfa592 (patch) | |
| tree | d125c18b19042cd3e3c5edb95106e0cc6eca8977 /sentence_emb_matching.py | |
| parent | c74318070ad85d5d7943e96d343aa961db305316 (diff) | |
| download | Evaluating-Dictionary-Alignment-2226f211247be783846073b99d70b673b7cfa592.tar.gz Evaluating-Dictionary-Alignment-2226f211247be783846073b99d70b673b7cfa592.tar.bz2 Evaluating-Dictionary-Alignment-2226f211247be783846073b99d70b673b7cfa592.zip | |
Clean up sentence embeddings
Diffstat (limited to 'sentence_emb_matching.py')
| -rw-r--r-- | sentence_emb_matching.py | 153 |
1 file changed, 0 insertions, 153 deletions
diff --git a/sentence_emb_matching.py b/sentence_emb_matching.py
deleted file mode 100644
index 38812d7..0000000
--- a/sentence_emb_matching.py
+++ /dev/null
@@ -1,153 +0,0 @@
import argparse

parser = argparse.ArgumentParser(description='run matching using sentence embeddings and cosine similarity')
parser.add_argument('source_lang', help='source language short name')
parser.add_argument('target_lang', help='target language short name')
parser.add_argument('source_vector', help='path of the source vector')
parser.add_argument('target_vector', help='path of the target vector')
parser.add_argument('source_defs', help='path of the source definitions')
parser.add_argument('target_defs', help='path of the target definitions')
parser.add_argument('-n', '--instances', help='number of instances in each language to retrieve', default=2000, type=int)

args = parser.parse_args()

source_lang = args.source_lang
target_lang = args.target_lang

def load_embeddings(path, dimension=300):
    """
    Loads the embeddings from a word2vec formatted file.
    The first line may or may not include the word count and dimension.
    """
    vectors = {}
    with open(path, mode='r', encoding='utf8') as fp:
        first_line = fp.readline().rstrip('\n')
        if first_line.count(' ') == 1:  # includes the "word_count dimension" information
            (word_count, dimension) = map(int, first_line.split())
        else:  # assume the file only contains vectors
            fp.seek(0)
        for line in fp:
            elems = line.split()
            vectors[" ".join(elems[:-dimension])] = " ".join(elems[-dimension:])
    return vectors

source_vectors_filename = args.source_vector
target_vectors_filename = args.target_vector
vectors_source = load_embeddings(source_vectors_filename)
vectors_target = load_embeddings(target_vectors_filename)

source_defs_filename = args.source_defs
target_defs_filename = args.target_defs
defs_source = [line.rstrip('\n') for line in open(source_defs_filename, encoding='utf8')]
defs_target = [line.rstrip('\n') for line in open(target_defs_filename, encoding='utf8')]

import numpy as np
from mosestokenizer import MosesTokenizer

def clean_corpus_using_embeddings_vocabulary(
        embeddings_dictionary,
        corpus,
        vectors,
        language,
        ):
    '''
    Cleans corpus using the dictionary of embeddings.
    Any word without an associated embedding in the dictionary is ignored.
    '''
    clean_corpus, clean_vectors, keys = [], {}, []
    words_we_want = set(embeddings_dictionary)
    tokenize = MosesTokenizer(language)
    for key, doc in enumerate(corpus):
        clean_doc = []
        words = tokenize(doc)
        for word in words:
            if word in words_we_want:
                clean_doc.append(word)
                clean_vectors[word] = np.array(vectors[word].split()).astype(float)
        # keep only definitions that are between 4 and 24 tokens long after cleaning
        if len(clean_doc) > 3 and len(clean_doc) < 25:
            keys.append(key)
            clean_corpus.append(' '.join(clean_doc))
    tokenize.close()
    return np.array(clean_corpus), clean_vectors, keys

clean_src_corpus, clean_src_vectors, src_keys = clean_corpus_using_embeddings_vocabulary(
    set(vectors_source.keys()),
    defs_source,
    vectors_source,
    source_lang,
)

clean_target_corpus, clean_target_vectors, target_keys = clean_corpus_using_embeddings_vocabulary(
    set(vectors_target.keys()),
    defs_target,
    vectors_target,
    target_lang,
)

import random
take = args.instances

# keep only line indices that survived cleaning in both languages
common_keys = set(src_keys).intersection(set(target_keys))
take = min(len(common_keys), take)  # you can't sample more than length
experiment_keys = random.sample(sorted(common_keys), take)

instances = len(experiment_keys)

# map the sampled line numbers back to positions in the filtered corpora
src_positions = {key: idx for idx, key in enumerate(src_keys)}
target_positions = {key: idx for idx, key in enumerate(target_keys)}
clean_src_corpus = [clean_src_corpus[src_positions[key]] for key in experiment_keys]
clean_target_corpus = [clean_target_corpus[target_positions[key]] for key in experiment_keys]

print(f'{source_lang} - {target_lang} : document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}')

del vectors_source, vectors_target, defs_source, defs_target

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

# build the shared vocabulary: words occurring in the corpora that also have an embedding
vocab_counter = CountVectorizer().fit(clean_src_corpus + clean_target_corpus)
common = [w for w in vocab_counter.get_feature_names_out() if w in clean_src_vectors or w in clean_target_vectors]
W_common = []

for w in common:
    if w in clean_src_vectors:
        W_common.append(np.array(clean_src_vectors[w]))
    else:
        W_common.append(np.array(clean_target_vectors[w]))

print(f'{source_lang} - {target_lang}: the vocabulary size is {len(W_common)}')

from sklearn.preprocessing import normalize
W_common = np.array(W_common)
W_common = normalize(W_common)  # default is l2

vect_tfidf = TfidfVectorizer(vocabulary=common, dtype=np.double, norm='l2')
vect_tfidf.fit(clean_src_corpus + clean_target_corpus)
X_idf_source = vect_tfidf.transform(clean_src_corpus)
X_idf_target = vect_tfidf.transform(clean_target_corpus)

print(f'Matrices are {X_idf_source.shape} and {W_common.shape}')
print(f'The dimensions are {X_idf_source.ndim} and {W_common.ndim}')

# sentence embeddings: tf-idf weighted sums of the (l2-normalized) word embeddings
X_idf_source_array = X_idf_source.toarray()
X_idf_target_array = X_idf_target.toarray()
S_emb_source = np.matmul(X_idf_source_array, W_common)
S_emb_target = np.matmul(X_idf_target_array, W_common)

S_emb_target_transpose = np.transpose(S_emb_target)

# pairwise dot-product similarities between source and target sentence embeddings
cost_matrix = np.matmul(S_emb_source, S_emb_target_transpose)

from lapjv import lapjv
# negate (and scale) the similarities so that the assignment solver minimizes cost
cost_matrix = cost_matrix * -1000
row_ind, col_ind, a = lapjv(cost_matrix, verbose=False)

result = zip(row_ind, col_ind)
# count positions where the row and column assignments agree
hit_one = len([x for x, y in result if x == y])
print(f'{hit_one} definitions have been mapped correctly, shape of cost matrix: {str(cost_matrix.shape)}')

import csv
percentage = hit_one / instances * 100
fields = [f'{source_lang}', f'{target_lang}', f'{instances}', f'{hit_one}', f'{percentage}']

with open('semb_matcing.csv', 'a') as f:
    writer = csv.writer(f)
    writer.writerow(fields)

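A usage sketch for context: the deleted file was a standalone argparse script, so a run would have looked roughly like the command below. The language codes and file paths are illustrative placeholders, not names taken from the repository; embeddings are expected in word2vec text format and the definition files are read line by line.

```sh
# hypothetical invocation: match English and Italian definitions with 300-dimensional embeddings
python sentence_emb_matching.py en it embeddings/en.vec embeddings/it.vec defs/en.txt defs/it.txt -n 1000
```

Each run appends one CSV row (source language, target language, number of instances, correct matches, percentage) to the file named at the bottom of the script.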
