SYMBOL INDEX (207 symbols across 32 files) FILE: classEval.py function getScores (line 5) | def getScores( true_classes, pred_classes, average): FILE: corpusLoader.py function extractSentenceWords (line 10) | def extractSentenceWords(doc, remove_url=True, remove_punc="utf-8", min_... function load_20news (line 51) | def load_20news(setName): function load_reuters (line 98) | def load_reuters(setName): FILE: csv2topic.py function usage (line 45) | def usage(): function getOptions (line 60) | def getOptions(): function main (line 108) | def main(): FILE: file2topic.py function usage (line 44) | def usage(): function getOptions (line 59) | def getOptions(): function main (line 106) | def main(): FILE: psdvec/analogy.py function pred_ana (line 5) | def pred_ana( model, a, a2, b, maxcands = 10 ): FILE: psdvec/benchspeed.py class Timer (line 4) | class Timer(object): method __init__ (line 5) | def __init__(self, name=None): method getElapseTime (line 11) | def getElapseTime(self, isStr=True): method printElapseTime (line 32) | def printElapseTime(self): function timeToStr (line 35) | def timeToStr(timeNum, fmt="%H:%M:%S"): function block_factorize (line 39) | def block_factorize( core_size, noncore_size, N0, tikhonovCoeff ): FILE: psdvec/corpus2liblinear.py function usage (line 11) | def usage(): function parseConfigFile (line 25) | def parseConfigFile(configFilename): function getFileFeatures (line 34) | def getFileFeatures(filename, V, word2id, sentword2id, remove_stop=False): function processDir (line 86) | def processDir( outFilename, docDir, label, appendToOutput, V, word2ID, ... 
function main (line 123) | def main(): FILE: psdvec/evaluate-toefl.py function loadToeflTestset (line 12) | def loadToeflTestset(toeflTestsetFilename): function usage (line 31) | def usage(): FILE: psdvec/evaluate.py function usage (line 45) | def usage(): FILE: psdvec/factorize.py function uniwe_factorize (line 16) | def uniwe_factorize(G, u, N0, MAXITERS=0, tikhonovCoeff=0, testenv=None): function nowe_factorize (line 86) | def nowe_factorize(G, N, tikhonovCoeff=0): function we_factorize_GD (line 144) | def we_factorize_GD(G, Weight, N0, MAXITERS=5000, tikhonovCoeff=0, teste... function we_factorize_EM (line 239) | def we_factorize_EM(G, Weight, N0, MAXITERS=5, tikhonovCoeff=0, testenv=... function we_factorize_FW (line 303) | def we_factorize_FW(G, Weight, N0, MAXITERS=6, tikhonovCoeff=0, testenv=... function normalizeWeight (line 490) | def normalizeWeight( RawCounts, do_weight_cutoff, cutQuantile=0.0002, ze... function block_factorize (line 529) | def block_factorize( G, F, V1, N0, tikhonovCoeff, do_weight_cutoff ): function factorize (line 591) | def factorize( alg, algName, G, Weight, N0, MAX_ITERS, tikhonovCoeff, vo... function usage (line 606) | def usage(): function main (line 629) | def main(): FILE: psdvec/patch to gensim.py function get_texts (line 5) | def get_texts(self): function tokenize (line 15) | def tokenize(content): function get_texts (line 19) | def get_texts(self): function tokenize (line 31) | def tokenize(text, lowercase=False, deacc=False, errors="strict", to_low... 
FILE: psdvec/perlxs.h function XS_pack_charPtrPtr (line 58) | void XS_pack_charPtrPtr( SV * st, char ** s ) function XS_release_charPtrPtr (line 76) | void XS_release_charPtrPtr(char** s) FILE: psdvec/topwordsInList.py function usage (line 10) | def usage(): function parseConfigFile (line 21) | def parseConfigFile(configFilename): function getListWordCount (line 30) | def getListWordCount( docPath, word2freq ): function processDir (line 45) | def processDir( docDir, word2freq ): function main (line 65) | def main(): FILE: psdvec/utils.py function str2dict (line 26) | def str2dict(s): function initConsoleLogger (line 53) | def initConsoleLogger(loggerName): function initFileLogger (line 59) | def initFileLogger(loggerName, isAppending=False): function warning (line 78) | def warning(*objs): class Timer (line 81) | class Timer(object): method __init__ (line 82) | def __init__(self, name=None): method getElapseTime (line 88) | def getElapseTime(self, isStr=True): method printElapseTime (line 109) | def printElapseTime(self): function timeToStr (line 112) | def timeToStr(timeNum, fmt="%H:%M:%S"): function norm1 (line 117) | def norm1(M, Weight=None): function normF (line 140) | def normF(M, Weight=None): function normalize (line 167) | def normalize(data, axis=1): function normalizeF (line 182) | def normalizeF(data, axis=1): function cosine (line 203) | def cosine(x, y): function matSizes (line 209) | def matSizes( norm, Ms, Weight=None ): function sym (line 216) | def sym(M): function skew (line 219) | def skew(M): function getQuantileCut (line 227) | def getQuantileCut(A, fraction): function power_iter (line 247) | def power_iter(M): function lowrank_fact (line 277) | def lowrank_fact(VV, N0): function save_embeddings (line 289) | def save_embeddings( filename, vocab, V, matrixName ): function save_matrix_as_text (line 307) | def save_matrix_as_text( filename, rowTypeName, T, *extraCols, **kwargs ): function load_matrix_from_text (line 333) | def 
load_matrix_from_text( filename, rowTypeName, colSep=" " ): function load_embeddings (line 401) | def load_embeddings( filename, maxWordCount=-1, extraWords={}, record_sk... function load_embeddings_bin (line 498) | def load_embeddings_bin( filename, maxWordCount=-1, extraWords={}, recor... function load_embeddings_hyper (line 587) | def load_embeddings_hyper(modelPath, vecType): function load_residuals (line 609) | def load_residuals( filename, word2id={}, maxRowCount=-1, maxColCount=-1 ): function loadBigramFile (line 679) | def loadBigramFile( bigram_filename, topWordNum, extraWords, kappa=0.01 ): function loadBigramFileInBlock (line 894) | def loadBigramFileInBlock( bigram_filename, core_size, noncore_size=-1, ... function loadUnigramFile (line 1368) | def loadUnigramFile(filename): function loadExtraWordFile (line 1384) | def loadExtraWordFile(filename): function loadSimTestset (line 1396) | def loadSimTestset(path, extraArgs=None): function loadAnaTestset (line 1405) | def loadAnaTestset(path, extraArgs=None): function loadTestsets (line 1430) | def loadTestsets(loader, testsetDir, testsetNames, extraArgs=None): function predict_ana (line 1458) | def predict_ana( model, a, a2, b, realb2 ): function evaluate_sim (line 1522) | def evaluate_sim(model, testsets, testsetNames, getAbsentWords=False, vo... function evaluate_ana (line 1572) | def evaluate_ana(model, testsets, testsetNames, getAbsentWords=False, vo... function bench (line 1648) | def bench(func, N, topEigenNum=0): function isMemEnoughGramian (line 1665) | def isMemEnoughGramian(D, extraVarsRatio=0): function isMemEnoughEigen (line 1692) | def isMemEnoughEigen(D, extraVarsRatio=5): function extractSentenceWords (line 1711) | def extractSentenceWords(doc, remove_url=True, remove_punc="utf-8", min_... 
function randomsample (line 1752) | def randomsample( X, n ): function relu (line 1759) | def relu(v, bias): function maxpool (line 1764) | def maxpool(vs): function avgpool (line 1771) | def avgpool(vs): class VecModel (line 1777) | class VecModel: method __init__ (line 1778) | def __init__(self, V, vocab, word2id, vecNormalize=True, precompute_gr... method __contains__ (line 1799) | def __contains__(self, w): method __getitem__ (line 1802) | def __getitem__(self, w): method orig (line 1811) | def orig(self, w): method precomputeGramian (line 1817) | def precomputeGramian(self): method similarity (line 1822) | def similarity(self, x, y): method sim_row (line 1842) | def sim_row(self, x): method most_similar (line 1859) | def most_similar(self, vx, top_num=1): FILE: psdvec/vecnorms.py function usage (line 12) | def usage(): function expectation (line 15) | def expectation(value_probs): function var_div (line 24) | def var_div(value_probs): FILE: snippet2topic.py function usage (line 44) | def usage(): function getOptions (line 59) | def getOptions(): function main (line 107) | def main(): FILE: topic-competitors/LDA/classEval.py function getScores (line 6) | def getScores( true_classes, pred_classes, average): FILE: topic-competitors/LDA/corpusLoader.py function extractSentenceWords (line 17) | def extractSentenceWords(doc, remove_url=True, remove_punc="utf-8", min_... function load_20news (line 62) | def load_20news(setName): function load_reuters (line 109) | def load_reuters(setName): FILE: topic-competitors/LDA/ldaExp.py function usage (line 14) | def usage(): FILE: topic-competitors/kmeans.py function kmeans (line 18) | def kmeans( X, centres, delta=.001, maxiter=10, metric="euclidean", p=2,... 
function randomsample (line 72) | def randomsample( X, n ): FILE: topic-competitors/labelEval.py function getScores (line 4) | def getScores( true_classes, pred_classes, average): FILE: topic-competitors/liu-doc2vec.py function genDocEmbedding (line 4) | def genDocEmbedding( setName, words_file, topics_file, label_file, V, wo... FILE: topic-competitors/slda/corpus.h class document (line 29) | class document class corpus (line 67) | class corpus FILE: topic-competitors/slda/main.cpp function help (line 28) | void help( void ) { function main (line 33) | int main(int argc, char* argv[]) FILE: topic-competitors/slda/opt.cpp function softmax_f (line 30) | double softmax_f(const gsl_vector * x, void * opt_param) function softmax_df (line 85) | void softmax_df(const gsl_vector * x, void * opt_param, gsl_vector * df) function softmax_fdf (line 159) | void softmax_fdf(const gsl_vector * x, void * opt_param, double * f, gsl... FILE: topic-competitors/slda/opt.h type opt_parameter (line 31) | struct opt_parameter FILE: topic-competitors/slda/slda.cpp method new_suffstats (line 214) | suffstats * slda::new_suffstats(int num_docs) FILE: topic-competitors/slda/slda.h type z_stat (line 27) | typedef struct { type suffstats (line 32) | typedef struct { class slda (line 41) | class slda FILE: topic-competitors/slda/utils.cpp function log_sum (line 8) | double log_sum(double log_a, double log_b) function trigamma (line 29) | double trigamma(double x) function digamma (line 51) | double digamma(double x) function make_directory (line 114) | void make_directory(char* name) function argmax (line 125) | int argmax(double* x, int n) function map_idx (line 148) | int map_idx(int row, int col, int dim)
method calcEm (line 192) | def calcEm(self, docs_Pi): method calcLoglikelihood (line 199) | def calcLoglikelihood(self): method updateTheta (line 220) | def updateTheta(self): method updatePi (line 224) | def updatePi(self, docs_theta): method calcTopicResiduals (line 256) | def calcTopicResiduals(self, T): method updateTopicEmbeddings (line 269) | def updateTopicEmbeddings(self): method calcSum_pi_v (line 326) | def calcSum_pi_v(self): method genOutputter (line 349) | def genOutputter(self, screenVerboseThres=1): method genProgressor (line 356) | def genProgressor(self): method printTopWordsInTopics (line 366) | def printTopWordsInTopics( self, docs_theta, outputToScreen=False ): method docSentences2wids (line 486) | def docSentences2wids( self, docs_wordsInSentences ): method setDocs (line 537) | def setDocs( self, docs_wordsInSentences, docs_name ): method kmeans (line 579) | def kmeans( self, maxiter=10 ): method inferTopicProps (line 647) | def inferTopicProps( self, T, MAX_ITERS=5 ): method inference (line 683) | def inference(self): FILE: utils.py function str2dict (line 26) | def str2dict(s): function initConsoleLogger (line 53) | def initConsoleLogger(loggerName): function initFileLogger (line 59) | def initFileLogger(loggerName, isAppending=False): function warning (line 78) | def warning(*objs): class Timer (line 81) | class Timer(object): method __init__ (line 82) | def __init__(self, name=None): method getElapseTime (line 88) | def getElapseTime(self, isStr=True): method printElapseTime (line 109) | def printElapseTime(self): function timeToStr (line 112) | def timeToStr(timeNum, fmt="%H:%M:%S"): function norm1 (line 117) | def norm1(M, Weight=None): function normF (line 140) | def normF(M, Weight=None): function normalize (line 167) | def normalize(data, axis=1): function normalizeF (line 182) | def normalizeF(data, axis=1): function cosine (line 203) | def cosine(x, y): function matSizes (line 209) | def matSizes( norm, Ms, Weight=None ): function 
sym (line 216) | def sym(M): function skew (line 219) | def skew(M): function getQuantileCut (line 227) | def getQuantileCut(A, fraction): function power_iter (line 247) | def power_iter(M): function lowrank_fact (line 277) | def lowrank_fact(VV, N0): function save_embeddings (line 289) | def save_embeddings( filename, vocab, V, matrixName ): function save_matrix_as_text (line 307) | def save_matrix_as_text( filename, rowTypeName, T, *extraCols, **kwargs ): function load_matrix_from_text (line 333) | def load_matrix_from_text( filename, rowTypeName, colSep=" " ): function load_embeddings (line 401) | def load_embeddings( filename, maxWordCount=-1, extraWords={}, record_sk... function load_embeddings_bin (line 498) | def load_embeddings_bin( filename, maxWordCount=-1, extraWords={}, recor... function load_embeddings_hyper (line 587) | def load_embeddings_hyper(modelPath, vecType): function load_residuals (line 609) | def load_residuals( filename, word2id={}, maxRowCount=-1, maxColCount=-1 ): function loadBigramFile (line 679) | def loadBigramFile( bigram_filename, topWordNum, extraWords, kappa=0.01 ): function loadBigramFileInBlock (line 894) | def loadBigramFileInBlock( bigram_filename, core_size, noncore_size=-1, ... function loadUnigramFile (line 1368) | def loadUnigramFile(filename): function loadExtraWordFile (line 1384) | def loadExtraWordFile(filename): function loadSimTestset (line 1396) | def loadSimTestset(path, extraArgs=None): function loadAnaTestset (line 1405) | def loadAnaTestset(path, extraArgs=None): function loadTestsets (line 1430) | def loadTestsets(loader, testsetDir, testsetNames, extraArgs=None): function predict_ana (line 1458) | def predict_ana( model, a, a2, b, realb2 ): function evaluate_sim (line 1522) | def evaluate_sim(model, testsets, testsetNames, getAbsentWords=False, vo... function evaluate_ana (line 1572) | def evaluate_ana(model, testsets, testsetNames, getAbsentWords=False, vo... 
function bench (line 1648) | def bench(func, N, topEigenNum=0): function isMemEnoughGramian (line 1665) | def isMemEnoughGramian(D, extraVarsRatio=0): function isMemEnoughEigen (line 1692) | def isMemEnoughEigen(D, extraVarsRatio=5): function extractSentenceWords (line 1711) | def extractSentenceWords(doc, remove_url=True, remove_punc="utf-8", min_... function randomsample (line 1752) | def randomsample( X, n ): function relu (line 1759) | def relu(v, bias): function maxpool (line 1764) | def maxpool(vs): function avgpool (line 1771) | def avgpool(vs): class VecModel (line 1777) | class VecModel: method __init__ (line 1778) | def __init__(self, V, vocab, word2id, vecNormalize=True, precompute_gr... method __contains__ (line 1799) | def __contains__(self, w): method __getitem__ (line 1802) | def __getitem__(self, w): method orig (line 1811) | def orig(self, w): method precomputeGramian (line 1817) | def precomputeGramian(self): method similarity (line 1822) | def similarity(self, x, y): method sim_row (line 1842) | def sim_row(self, x): method most_similar (line 1859) | def most_similar(self, vx, top_num=1):