forked from weilinear/PyRPCA
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtopic_extraction.py
More file actions
45 lines (37 loc) · 1.67 KB
/
topic_extraction.py
File metadata and controls
45 lines (37 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
from time import time

import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.feature_extraction import text

from robustpca import *
# Corpus size, vocabulary size, and how many keywords to keep per document.
n_samples = 5000
n_features = 2000
n_topics = 10
n_top_words = 5

# Load the 20 newsgroups dataset and vectorize it using the most common word
# frequency with TF-IDF weighting (without top 5% stop words)
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = datasets.fetch_20newsgroups(shuffle=True, random_state=1)
vectorizer = text.CountVectorizer(max_df=0.95, max_features=n_features)
counts = vectorizer.fit_transform(dataset.data[:n_samples])
tfidf = text.TfidfTransformer().fit_transform(counts)
print("done in %0.3fs." % (time() - t0))

# Decompose the (features x documents) TF-IDF matrix with Robust PCA:
# A is the low-rank component, E the sparse component whose large entries
# mark document-specific keywords.
# NMF-based alternative: decomposition.NMF(n_components=n_topics).fit(tfidf)
print("Fitting the Robust PCA model with n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
A, E = augmented_largrange_multiplier(
    np.array(tfidf.todense().T), lmbda=.1, maxiter=20, inexact=True)
print("done in %0.3fs." % (time() - t0))

# Invert the vectorizer vocabulary so column indices map back to words.
# get_feature_names() was removed in scikit-learn 1.2; fall back for old versions.
try:
    feature_names = vectorizer.get_feature_names_out()
except AttributeError:
    feature_names = vectorizer.get_feature_names()

# For each document: write its top keywords (largest |E| entries) to
# keywords.txt and every word with nonzero TF-IDF weight to original.txt.
# `with` guarantees both files are flushed and closed even on error.
with open("original.txt", "w") as original_text, \
     open("keywords.txt", "w") as subtract_text:
    for topic_idx, topic in enumerate(np.abs(E.T)):
        print("Topic #%d:" % topic_idx)
        # Indices of the n_top_words largest coordinates, zeros excluded.
        subtract_text.write(" ".join([feature_names[i]
            for i in topic.argsort()[:-n_top_words - 1:-1] if topic[i] != 0]))
        # range, not xrange: xrange does not exist on Python 3.
        original_text.write(" ".join([feature_names[i]
            for i in range(n_features) if tfidf[topic_idx, i] != 0]))
        subtract_text.write("\n")
        original_text.write("\n")
        print()