coherence_metrics.py
from octis.evaluation_metrics.metrics import AbstractMetric
from octis.dataset.dataset import Dataset
from gensim.corpora.dictionary import Dictionary
from gensim.models import CoherenceModel
from gensim.models import KeyedVectors
import gensim.downloader as api
import octis.configuration.citations as citations
import numpy as np
from scipy import spatial
from sklearn.metrics import pairwise_distances


class Coherence(AbstractMetric):
    def __init__(self, texts=None, topk=10, processes=1, measure='c_npmi'):
        """
        Initialize metric.

        Parameters
        ----------
        texts : list of documents (list of lists of strings)
        topk : how many of the most likely words per topic to consider in
            the evaluation
        processes : number of processes
        measure : coherence measure to use (default 'c_npmi');
            other options: 'u_mass', 'c_v', 'c_uci', 'c_npmi'
        """
        super().__init__()
        if texts is None:
            self._texts = _load_default_texts()
        else:
            self._texts = texts
        self._dictionary = Dictionary(self._texts)
        self.topk = topk
        self.processes = processes
        self.measure = measure

    def info(self):
        return {
            "citation": citations.em_coherence,
            "name": "Coherence"
        }

    def score(self, model_output):
        """
        Retrieve the score of the metric.

        Parameters
        ----------
        model_output : dictionary, output of the model;
            key 'topics' required

        Returns
        -------
        score : coherence score
        """
        topics = model_output["topics"]
        if topics is None:
            return -1
        if self.topk > len(topics[0]):
            raise Exception('Each topic contains fewer words than topk')
        npmi = CoherenceModel(
            topics=topics,
            texts=self._texts,
            dictionary=self._dictionary,
            coherence=self.measure,
            processes=self.processes,
            topn=self.topk)
        return npmi.get_coherence()
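

# A minimal usage sketch (not part of the original module): builds a toy
# corpus and scores two hand-made topics with NPMI coherence. The token
# lists below are invented for illustration; any tokenized corpus works.
def _example_coherence():
    texts = [
        ["graph", "node", "edge", "path"],
        ["node", "edge", "weight", "graph"],
        ["music", "guitar", "chord", "song"],
        ["chord", "song", "melody", "music"],
    ]
    metric = Coherence(texts=texts, topk=3, measure='c_npmi')
    return metric.score({"topics": [["graph", "node", "edge"],
                                    ["music", "song", "chord"]]})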


class WECoherencePairwise(AbstractMetric):
    def __init__(self, word2vec_path=None, binary=False, topk=10):
        """
        Initialize metric.

        Parameters
        ----------
        topk : how many of the most likely words per topic to consider
        word2vec_path : if specified, path to the word embeddings file
            (in word2vec format) used to compute similarities; otherwise
            'word2vec-google-news-300' is downloaded
        binary : True if the word2vec file is binary, False otherwise
            (default False)
        """
        super().__init__()
        self.binary = binary
        self.topk = topk
        self.word2vec_path = word2vec_path
        if word2vec_path is None:
            self._wv = api.load('word2vec-google-news-300')
        else:
            self._wv = KeyedVectors.load_word2vec_format(
                word2vec_path, binary=self.binary)

    def info(self):
        return {
            "citation": citations.em_coherence_we,
            "name": "Coherence word embeddings pairwise cosine"
        }

    def score(self, model_output):
        """
        Retrieve the score of the metric.

        Parameters
        ----------
        model_output : dictionary, output of the model;
            key 'topics' required

        Returns
        -------
        score : topic coherence computed on the word embedding
            similarities
        """
        topics = model_output["topics"]
        result = 0.0
        for topic in topics:
            E = []
            # Build matrix E from the L2-normalized embeddings of the
            # topic's top-k words that appear in the embedding vocabulary
            for word in topic[0:self.topk]:
                if word in self._wv.key_to_index:
                    word_embedding = self._wv[word]
                    normalized_we = word_embedding / np.linalg.norm(word_embedding)
                    E.append(normalized_we)
            if len(E) > 0:
                E = np.array(E)
                # Sum the pairwise cosine similarities between distinct
                # rows of E (subtracting the identity diagonal removes each
                # word's similarity with itself), then average over the
                # topk * (topk - 1) ordered pairs. Note that the divisor
                # uses topk even when some words were out of vocabulary.
                similarity_sum = np.sum(
                    1 - pairwise_distances(E, metric='cosine')
                    - np.diag(np.ones(len(E))))
                topic_coherence = similarity_sum / (self.topk * (self.topk - 1))
            else:
                topic_coherence = -1
            # Update result with the computed coherence of the topic
            result += topic_coherence
        result = result / len(topics)
        return result
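

# Usage sketch (not part of the original module): scores one toy topic with
# a local embedding file. The path below is hypothetical; with
# word2vec_path=None the metric instead downloads the roughly 1.6 GB
# 'word2vec-google-news-300' model through gensim's downloader.
def _example_we_pairwise():
    metric = WECoherencePairwise(
        word2vec_path="/path/to/embeddings.bin",  # hypothetical path
        binary=True,
        topk=3)
    return metric.score({"topics": [["cat", "dog", "bird"]]})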


class WECoherenceCentroid(AbstractMetric):
    def __init__(self, topk=10, word2vec_path=None, binary=True):
        """
        Initialize metric.

        Parameters
        ----------
        topk : how many of the most likely words per topic to consider
        word2vec_path : path to a word2vec model; if not provided,
            'word2vec-google-news-300' is used instead
        binary : True if the word2vec file is binary, False otherwise
            (default True)
        """
        super().__init__()
        self.topk = topk
        self.binary = binary
        self.word2vec_path = word2vec_path
        if self.word2vec_path is None:
            self._wv = api.load('word2vec-google-news-300')
        else:
            self._wv = KeyedVectors.load_word2vec_format(
                self.word2vec_path, binary=self.binary)

    @staticmethod
    def info():
        return {
            "citation": citations.em_word_embeddings_pc,
            "name": "Coherence word embeddings centroid"
        }

    def score(self, model_output):
        """
        Retrieve the score of the metric.

        Parameters
        ----------
        model_output : dictionary, output of the model;
            key 'topics' required

        Returns
        -------
        score : topic coherence computed on the word embeddings
        """
        topics = model_output["topics"]
        if self.topk > len(topics[0]):
            raise Exception('Each topic contains fewer words than topk')
        result = 0
        for topic in topics:
            E = []
            # t accumulates the sum of the topic's word vectors; the topic
            # centroid is its mean
            t = np.zeros(self._wv.vector_size)
            # Build matrix E from the L2-normalized embeddings of the
            # words in the topic, and accumulate the centroid
            for word in topic[0:self.topk]:
                if word in self._wv.key_to_index:
                    word_embedding = self._wv[word]
                    normalized_we = word_embedding / np.linalg.norm(word_embedding)
                    E.append(normalized_we)
                    t += word_embedding
            if len(E) > 0:
                t = t / len(E)
                topic_coherence = 0
                # Accumulate the cosine distance between each word
                # embedding in E and the centroid t. Since these are
                # distances, lower values indicate words closer to the
                # centroid; the divisor uses topk even when some words
                # were out of vocabulary.
                for word_embedding in E:
                    distance = spatial.distance.cosine(word_embedding, t)
                    topic_coherence += distance
                topic_coherence = topic_coherence / self.topk
            else:
                topic_coherence = -1
            # Update result with the computed coherence of the topic
            result += topic_coherence
        result /= len(topics)
        return result
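

# Usage sketch (not part of the original module): same setup as the pairwise
# variant, but each word is compared against its topic's centroid. Note that
# the score accumulates cosine distances, so lower values mean the topic
# words sit closer to their centroid.
def _example_we_centroid():
    # Downloads 'word2vec-google-news-300' on first use
    metric = WECoherenceCentroid(topk=3)
    return metric.score({"topics": [["cat", "dog", "bird"]]})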


def _load_default_texts():
    """
    Load the default texts (the preprocessed 20NewsGroup corpus
    provided by OCTIS).

    Returns
    -------
    result : default 20NewsGroup texts
    """
    dataset = Dataset()
    dataset.fetch_dataset("20NewsGroup")
    return dataset.get_corpus()
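

# Smoke test (a sketch, not part of the original module): runs the
# corpus-based metric on the toy example above, which needs no embedding
# download.
if __name__ == "__main__":
    print("c_npmi on toy corpus:", _example_coherence())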