Extracting Document Topics: the sklearn Implementation
Example code:
import pandas as pd
# Read the novel line by line into a single text column; the source file is GBK-encoded
raw = pd.read_table('../data/金庸-射雕英雄传txt精校版.txt', names=['txt'], encoding="GBK")
# Helper columns for chapter-heading detection
def m_head(tmpstr):
    return tmpstr[:1]          # first character of the line

def m_mid(tmpstr):
    return tmpstr.find("回 ")  # position of "回 " in the line, -1 if absent

raw['head'] = raw.txt.apply(m_head)
raw['mid'] = raw.txt.apply(m_mid)
raw['len'] = raw.txt.apply(len)
# Chapter detection: a chapter heading starts with "第", contains "回 ", and is a short line
chapnum = 0
for i in range(len(raw)):
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    if chapnum >= 40 and raw['txt'][i] == "附录一:成吉思汗家族":
        chapnum = 0   # everything from the appendix onward is marked as chapter 0 and dropped later
    raw.loc[i, 'chap'] = chapnum
# Drop the temporary helper columns
del raw['head']
del raw['mid']
del raw['len']
rawgrp = raw.groupby('chap')
chapter = rawgrp.agg(sum)  # with string columns only, sum concatenates the lines of each chapter
chapter = chapter[chapter.index != 0]
chapter
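As a quick sanity check of the split, a minimal sketch (the first row of chapter corresponds to chapter 1, since chapter 0 was dropped above):
# Sanity check: how many chapters were extracted, and how chapter 1 begins
print(len(chapter))
print(chapter.txt.iloc[0][:50])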
# Define the tokenization and stop-word removal function
# sep='aaa' is a dummy separator that never occurs, so each line is read whole as one stop word
stop_list = list(pd.read_csv('../data/停用词.txt', names=['w'], sep='aaa', encoding='utf-8').w)
stop_list
import jieba

def m_cut(intxt):
    # keep tokens that are not stop words and are longer than one character
    return [w for w in jieba.cut(intxt) if w not in stop_list and len(w) > 1]
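As a quick illustration, m_cut can be called on a single sentence; the sample string below is made up, and the exact tokens depend on jieba's dictionary and the stop list:
# Illustrative only: actual segmentation depends on jieba and the stop list
m_cut('郭靖和黄蓉来到桃花岛')  # e.g. something like ['郭靖', '黄蓉', '来到', '桃花岛']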
# Build the tokenized, cleaned text of each chapter
clean_chap = [' '.join(m_cut(w)) for w in chapter.txt]
print(len(clean_chap))
clean_chap
# Convert the words in the text into a term-frequency matrix
from sklearn.feature_extraction.text import CountVectorizer
countvec = CountVectorizer(min_df=5)  # ignore terms that appear in fewer than 5 chapters
wordmtx = countvec.fit_transform(clean_chap)
wordmtx
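The result is a sparse chapter-by-term count matrix. A brief sketch of how to inspect it (get_feature_names_out is available in scikit-learn 1.0 and later):
# Inspect the dimensions and part of the vocabulary kept by min_df=5
print(wordmtx.shape)                          # (number of chapters, vocabulary size)
print(countvec.get_feature_names_out()[:10])  # first ten terms in the vocabulary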
# Compute TF-IDF values from the term-frequency matrix wordmtx
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(wordmtx)
tfidf
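As a side note, the two steps above can be collapsed into a single TfidfVectorizer; a minimal sketch, not part of the original workflow:
# Equivalent one-step alternative: TfidfVectorizer = CountVectorizer + TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
tfidfvec = TfidfVectorizer(min_df=5)
tfidf_alt = tfidfvec.fit_transform(clean_chap)  # same shape as tfidf above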
# Set up the LDA model
from sklearn.decomposition import LatentDirichletAllocation
n_topics = 10  # number of topics to extract
ldamodel = LatentDirichletAllocation(n_components=n_topics)
ldamodel
# Fit the LDA model
ldamodel.fit(tfidf)
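After fitting, transform() returns each chapter's topic mixture; a short sketch:
# Per-chapter topic distribution: one row per chapter, one column per topic
chap_topic = ldamodel.transform(tfidf)
print(chap_topic.shape)         # (number of chapters, n_topics)
print(chap_topic[0].round(3))   # topic weights of the first chapter, summing to 1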
# What the fitted model boils down to: the topic-word matrix components_
print(ldamodel.components_.shape)
# Inspect the first two topics (rows of components_)
ldamodel.components_[:2]
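components_ holds unnormalized pseudo-counts; dividing each row by its row sum turns it into the word distribution of each topic, as noted in the scikit-learn documentation. A short sketch:
# Normalize each row of components_ to obtain P(word | topic)
topic_word = ldamodel.components_ / ldamodel.components_.sum(axis=1)[:, None]
print(topic_word[0].sum())  # each row now sums to 1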
# Function that prints the top words of each topic
def print_top_words(model, feature_names, n_top_words):
    for topic_idx, topic in enumerate(model.components_):
        print('Topic #%d:' % topic_idx)
        # argsort is ascending, so take the last n_top_words indices in reverse order
        print(' '.join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
        print()
n_top_words = 12
tf_feature_names = countvec.get_feature_names_out()  # get_feature_names() was removed in scikit-learn 1.2
print_top_words(ldamodel, tf_feature_names, n_top_words)
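The vectorization and modelling steps can also be chained into a single scikit-learn Pipeline; a hedged sketch using the same settings as above:
# Optional: wrap the three steps into one Pipeline object
from sklearn.pipeline import Pipeline
lda_pipe = Pipeline([
    ('count', CountVectorizer(min_df=5)),
    ('tfidf', TfidfTransformer()),
    ('lda', LatentDirichletAllocation(n_components=n_topics)),
])
lda_pipe.fit(clean_chap)
# The fitted sub-steps are available via named_steps, e.g. lda_pipe.named_steps['lda'].components_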