Skip to content

Commit

Permalink
Transformed camelCase to snake_case test names (#3033)
Browse files Browse the repository at this point in the history
  • Loading branch information
sezanzeb authored Jan 30, 2021
1 parent 01cffc4 commit 2d17ddf
Show file tree
Hide file tree
Showing 30 changed files with 259 additions and 259 deletions.
2 changes: 1 addition & 1 deletion gensim/test/test_aggregation.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class TestAggregation(unittest.TestCase):
def setUp(self):
    # Fixture: a fixed list of confirmation-measure values shared by the
    # aggregation tests in this TestCase.
    self.confirmed_measures = [1.1, 2.2, 3.3, 4.4]

def testArithmeticMean(self):
def test_arithmetic_mean(self):
"""Test arithmetic_mean()"""
obtained = aggregation.arithmetic_mean(self.confirmed_measures)
expected = 2.75
Expand Down
52 changes: 26 additions & 26 deletions gensim/test/test_atmodel.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def setUp(self):
self.class_ = atmodel.AuthorTopicModel
self.model = self.class_(corpus, id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)

def testTransform(self):
def test_transform(self):
passed = False
# sometimes, training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
Expand Down Expand Up @@ -99,7 +99,7 @@ def testTransform(self):
)
self.assertTrue(passed)

def testBasic(self):
def test_basic(self):
# Check that training the model produces a positive topic vector for some author
# Otherwise, many of the other tests are invalid.

Expand All @@ -109,7 +109,7 @@ def testBasic(self):
jill_topics = matutils.sparse2full(jill_topics, model.num_topics)
self.assertTrue(all(jill_topics > 0))

def testEmptyDocument(self):
def test_empty_document(self):
local_texts = common_texts + [['only_occurs_once_in_corpus_and_alone_in_doc']]
dictionary = Dictionary(local_texts)
dictionary.filter_extremes(no_below=2)
Expand All @@ -119,7 +119,7 @@ def testEmptyDocument(self):

self.class_(corpus, author2doc=a2d, id2word=dictionary, num_topics=2)

def testAuthor2docMissing(self):
def test_author2doc_missing(self):
# Check that the results are the same if author2doc is constructed automatically from doc2author.
model = self.class_(
corpus, author2doc=author2doc, doc2author=doc2author,
Expand All @@ -137,7 +137,7 @@ def testAuthor2docMissing(self):
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))

def testDoc2authorMissing(self):
def test_doc2author_missing(self):
# Check that the results are the same if doc2author is constructed automatically from author2doc.
model = self.class_(
corpus, author2doc=author2doc, doc2author=doc2author,
Expand All @@ -155,7 +155,7 @@ def testDoc2authorMissing(self):
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))

def testUpdate(self):
def test_update(self):
# Check that calling update after the model already has been trained works.
model = self.class_(corpus, author2doc=author2doc, id2word=dictionary, num_topics=2)

Expand All @@ -169,7 +169,7 @@ def testUpdate(self):
# Did we learn something?
self.assertFalse(all(np.equal(jill_topics, jill_topics2)))

def testUpdateNewDataOldAuthor(self):
def test_update_new_data_old_author(self):
# Check that calling update with new documents and/or authors after the model already has
# been trained works.
# Test an author that already existed in the old dataset.
Expand All @@ -185,7 +185,7 @@ def testUpdateNewDataOldAuthor(self):
# Did we learn more about Jill?
self.assertFalse(all(np.equal(jill_topics, jill_topics2)))

def testUpdateNewDataNewAuthor(self):
def test_update_new_data_new_author(self):
# Check that calling update with new documents and/or authors after the model already has
# been trained works.
# Test a new author, that didn't exist in the old dataset.
Expand All @@ -198,7 +198,7 @@ def testUpdateNewDataNewAuthor(self):
sally_topics = matutils.sparse2full(sally_topics, model.num_topics)
self.assertTrue(all(sally_topics > 0))

def testSerialized(self):
def test_serialized(self):
# Test the model using serialized corpora. Basic tests, plus test of update functionality.

model = self.class_(
Expand Down Expand Up @@ -227,7 +227,7 @@ def testSerialized(self):
# Delete the MmCorpus used for serialization inside the author-topic model.
remove(datapath('testcorpus_serialization.mm'))

def testTransformSerialized(self):
def test_transform_serialized(self):
# Same as testTransform, using serialized corpora.
passed = False
# sometimes, training gets stuck at a local minimum
Expand Down Expand Up @@ -263,7 +263,7 @@ def testTransformSerialized(self):
)
self.assertTrue(passed)

def testAlphaAuto(self):
def test_alpha_auto(self):
model1 = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
alpha='symmetric', passes=10, num_topics=2
Expand All @@ -276,7 +276,7 @@ def testAlphaAuto(self):
# did we learn something?
self.assertFalse(all(np.equal(model1.alpha, modelauto.alpha)))

def testAlpha(self):
def test_alpha(self):
kwargs = dict(
author2doc=author2doc,
id2word=dictionary,
Expand Down Expand Up @@ -331,7 +331,7 @@ def testAlpha(self):
kwargs['alpha'] = "gensim is cool"
self.assertRaises(ValueError, self.class_, **kwargs)

def testEtaAuto(self):
def test_eta_auto(self):
model1 = self.class_(
corpus, author2doc=author2doc, id2word=dictionary,
eta='symmetric', passes=10, num_topics=2
Expand All @@ -344,7 +344,7 @@ def testEtaAuto(self):
# did we learn something?
self.assertFalse(all(np.equal(model1.eta, modelauto.eta)))

def testEta(self):
def test_eta(self):
kwargs = dict(
author2doc=author2doc,
id2word=dictionary,
Expand Down Expand Up @@ -405,7 +405,7 @@ def testEta(self):
kwargs['eta'] = "asymmetric"
self.assertRaises(ValueError, self.class_, **kwargs)

def testTopTopics(self):
def test_top_topics(self):
top_topics = self.model.top_topics(corpus)

for topic, score in top_topics:
Expand All @@ -416,14 +416,14 @@ def testTopTopics(self):
self.assertTrue(isinstance(k, str))
self.assertTrue(isinstance(v, float))

def testGetTopicTerms(self):
def test_get_topic_terms(self):
topic_terms = self.model.get_topic_terms(1)

for k, v in topic_terms:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(isinstance(v, float))

def testGetAuthorTopics(self):
def test_get_author_topics(self):

model = self.class_(
corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
Expand All @@ -440,7 +440,7 @@ def testGetAuthorTopics(self):
self.assertTrue(isinstance(k, int))
self.assertTrue(isinstance(v, float))

def testTermTopics(self):
def test_term_topics(self):

model = self.class_(
corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
Expand All @@ -459,7 +459,7 @@ def testTermTopics(self):
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(isinstance(probability, float))

def testNewAuthorTopics(self):
def test_new_author_topics(self):

model = self.class_(
corpus, author2doc=author2doc, id2word=dictionary, num_topics=2,
Expand Down Expand Up @@ -498,7 +498,7 @@ def testNewAuthorTopics(self):
self.assertEqual(id2author_len, len(model.id2author))
self.assertEqual(doc2author_len, len(model.doc2author))

def testPasses(self):
def test_passes(self):
# long message includes the original error message with a custom one
self.longMessage = True
# construct what we expect when passes aren't involved
Expand Down Expand Up @@ -526,7 +526,7 @@ def final_rhot(model):
self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))

def testPersistence(self):
def test_persistence(self):
fname = get_tmpfile('gensim_models_atmodel.tst')
model = self.model
model.save(fname)
Expand All @@ -535,7 +535,7 @@ def testPersistence(self):
self.assertTrue(np.allclose(model.expElogbeta, model2.expElogbeta))
self.assertTrue(np.allclose(model.state.gamma, model2.state.gamma))

def testPersistenceIgnore(self):
def test_persistence_ignore(self):
fname = get_tmpfile('gensim_models_atmodel_testPersistenceIgnore.tst')
model = atmodel.AuthorTopicModel(corpus, author2doc=author2doc, num_topics=2)
model.save(fname, ignore='id2word')
Expand All @@ -546,7 +546,7 @@ def testPersistenceIgnore(self):
model2 = atmodel.AuthorTopicModel.load(fname)
self.assertTrue(model2.id2word is None)

def testPersistenceCompressed(self):
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models_atmodel.tst.gz')
model = self.model
model.save(fname)
Expand All @@ -561,7 +561,7 @@ def testPersistenceCompressed(self):
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))

def testLargeMmap(self):
def test_large_mmap(self):
fname = get_tmpfile('gensim_models_atmodel.tst')
model = self.model

Expand All @@ -581,7 +581,7 @@ def testLargeMmap(self):
jill_topics2 = matutils.sparse2full(jill_topics2, model.num_topics)
self.assertTrue(np.allclose(jill_topics, jill_topics2))

def testLargeMmapCompressed(self):
def test_large_mmap_compressed(self):
fname = get_tmpfile('gensim_models_atmodel.tst.gz')
model = self.model

Expand All @@ -591,7 +591,7 @@ def testLargeMmapCompressed(self):
# test loading the large model arrays with mmap
self.assertRaises(IOError, self.class_.load, fname, mmap='r')

def testDtypeBackwardCompatibility(self):
def test_dtype_backward_compatibility(self):
atmodel_3_0_1_fname = datapath('atmodel_3_0_1_model')
expected_topics = [(0, 0.068200842977296727), (1, 0.93179915702270333)]

Expand Down
6 changes: 3 additions & 3 deletions gensim/test/test_big.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,23 +43,23 @@ def __iter__(self):
class TestLargeData(unittest.TestCase):
"""Try common operations, using large models. You'll need ~8GB RAM to run these tests"""

def test_word2vec(self):
    """Smoke-test Word2Vec training, save, and reload on a large synthetic corpus."""
    big_corpus = BigCorpus(words_only=True, num_docs=100000, num_terms=3000000, doc_len=200)
    tmp_path = get_tmpfile('gensim_big.tst')
    w2v = gensim.models.Word2Vec(big_corpus, vector_size=300, workers=4)
    # Drop the hidden-layer weights ('syn1') when persisting to keep the file small.
    w2v.save(tmp_path, ignore=['syn1'])
    del w2v
    gensim.models.Word2Vec.load(tmp_path)

def test_lsi_model(self):
    """Smoke-test LsiModel training, save, and reload on a large synthetic corpus."""
    big_corpus = BigCorpus(num_docs=50000)
    tmp_path = get_tmpfile('gensim_big.tst')
    lsi = gensim.models.LsiModel(big_corpus, num_topics=500, id2word=big_corpus.dictionary)
    lsi.save(tmp_path)
    del lsi
    gensim.models.LsiModel.load(tmp_path)

def testLdaModel(self):
def test_lda_model(self):
corpus = BigCorpus(num_docs=5000)
tmpf = get_tmpfile('gensim_big.tst')
model = gensim.models.LdaModel(corpus, num_topics=500, id2word=corpus.dictionary)
Expand Down
18 changes: 9 additions & 9 deletions gensim/test/test_corpora_dictionary.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,13 @@ class TestDictionary(unittest.TestCase):
def setUp(self):
    # Fixture: the small shared corpus of tokenized documents (common_texts)
    # used by all dictionary tests below.
    self.texts = common_texts

def test_doc_freq_one_doc(self):
    """With a single document, every token's document frequency is 1."""
    corpus_texts = [['human', 'interface', 'computer']]
    dictionary = Dictionary(corpus_texts)
    # Three distinct tokens get ids 0..2, each seen in exactly one document.
    self.assertEqual(dictionary.dfs, {token_id: 1 for token_id in range(3)})

def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
def test_doc_freq_and_token2id_for_several_docs_with_one_word(self):
# two docs
texts = [['human'], ['human']]
d = Dictionary(texts)
Expand Down Expand Up @@ -60,7 +60,7 @@ def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
expected = {'human': 0}
self.assertEqual(d.token2id, expected)

def testDocFreqForOneDocWithSeveralWord(self):
def test_doc_freq_for_one_doc_with_several_word(self):
# two words
texts = [['human', 'cat']]
d = Dictionary(texts)
Expand All @@ -73,7 +73,7 @@ def testDocFreqForOneDocWithSeveralWord(self):
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)

def testDocFreqAndCollectionFreq(self):
def test_doc_freq_and_collection_freq(self):
# one doc
texts = [['human', 'human', 'human']]
d = Dictionary(texts)
Expand All @@ -92,7 +92,7 @@ def testDocFreqAndCollectionFreq(self):
self.assertEqual(d.cfs, {0: 3})
self.assertEqual(d.dfs, {0: 3})

def testBuild(self):
def test_build(self):
d = Dictionary(self.texts)

# Since we don't specify the order in which dictionaries are built,
Expand All @@ -110,15 +110,15 @@ def testBuild(self):
self.assertEqual(sorted(d.token2id.keys()), expected_keys)
self.assertEqual(sorted(d.token2id.values()), expected_values)

def test_merge(self):
    """Merging dictionaries built from two halves of the corpus yields the
    same vocabulary as a dictionary built from the whole corpus."""
    full = Dictionary(self.texts)
    first_half = Dictionary(self.texts[:3])
    second_half = Dictionary(self.texts[3:])
    first_half.merge_with(second_half)
    self.assertEqual(sorted(full.token2id.keys()), sorted(first_half.token2id.keys()))

def testFilter(self):
def test_filter(self):
d = Dictionary(self.texts)
d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
dfs_expected = {0: 3, 1: 3, 2: 3, 3: 3}
Expand Down Expand Up @@ -161,13 +161,13 @@ def testFilterKeepTokens_keepn(self):
expected = {'graph', 'trees', 'system', 'user', 'worda'}
self.assertEqual(set(d.token2id.keys()), expected)

def test_filter_most_frequent(self):
    """filter_n_most_frequent(4) drops the four most frequent tokens."""
    dictionary = Dictionary(self.texts)
    dictionary.filter_n_most_frequent(4)
    # Eight tokens remain, each with a document frequency of 2.
    self.assertEqual(dictionary.dfs, {token_id: 2 for token_id in range(8)})

def testFilterTokens(self):
def test_filter_tokens(self):
self.maxDiff = 10000
d = Dictionary(self.texts)

Expand Down
Loading

0 comments on commit 2d17ddf

Please sign in to comment.