{"id":5654,"date":"2024-09-06T21:01:01","date_gmt":"2024-09-06T13:01:01","guid":{"rendered":""},"modified":"2024-09-06T21:01:01","modified_gmt":"2024-09-06T13:01:01","slug":"word2vec\u8bcd\u8bed\u76f8\u4f3c\u5ea6_\u5982\u4f55\u8bad\u7ec3\u6a21\u578b","status":"publish","type":"post","link":"https:\/\/mushiming.com\/5654.html","title":{"rendered":"word2vec\u8bcd\u8bed\u76f8\u4f3c\u5ea6_\u5982\u4f55\u8bad\u7ec3\u6a21\u578b"},"content":{"rendered":"

\n <\/path> \n<\/svg> <\/p>\n

\u4e00\u3001\u9700\u6c42\u63cf\u8ff0<\/h3>\n

     \u4e1a\u52a1\u9700\u6c42\u7684\u76ee\u6807\u662f\u8bc6\u522b\u51fa\u76ee\u6807\u8bcd\u6c47\u7684\u540c\u4e49\u8bcd\u548c\u76f8\u5173\u8bcd\u6c47\uff0c\u5982\u4e0b\u4e3a\u90e8\u5206\u76ee\u6807\u8bcd\u6c47(\u4e3b\u8981\u7528\u4e8e\u533b\u7597\u95ee\u8bca)\uff1a<\/p>\n

\n

\u5c3f
\u75d8\u75d8
\u53d1\u51b7
\u547c\u5438\u56f0\u96be
\u6076\u5fc3<\/p>\n<\/blockquote>\n

\u6570\u636e\u6e90\u662f\u82e5\u5e72im\u6570\u636e\uff0c\u90a3\u4e48\u8fd9\u91cc\u6211\u4eec\u9009\u62e9google \u7684word2vec\u6a21\u578b\u6765\u8bad\u7ec3\u540c\u4e49\u8bcd\u548c\u76f8\u5173\u8bcd\u3002<\/p>\n

\u4e8c\u3001\u6570\u636e\u5904\u7406<\/h3>\n

    \u6570\u636e\u5904\u7406\u8003\u8651\u4ee5\u4e0b\u51e0\u4e2a\u65b9\u9762\uff1a
1. \u4ecehive\u4e2d\u5bfc\u51fa\u4e0d\u540c\u6570\u636e\u91cf\u7684\u6570\u636e
2. \u8fc7\u6ee4\u65e0\u7528\u7684\u8bad\u7ec3\u6837\u672c\uff08\u4f8b\u5982\u5b57\u6570\u5c11\u4e8e5\uff09
3. \u51c6\u5907\u81ea\u5b9a\u4e49\u7684\u8bcd\u6c47\u8868
4. \u51c6\u5907\u505c\u7528\u8bcd\u8868<\/p>\n

\u4e09\u3001\u5de5\u5177\u9009\u62e9<\/h3>\n

    \u9009\u62e9python \u7684gensim\u5e93\uff0c\u7531\u4e8e\u5148\u505a\u9884\u7814\uff0c\u6570\u636e\u91cf\u4e0d\u662f\u5f88\u5927,\u9009\u62e9\u5355\u673a\u5c31\u597d\uff0c\u6682\u65f6\u4e0d\u8003\u8651spark\u8bad\u7ec3\u3002\u540e\u7eed\u751f\u4ea7\u73af\u5883\u8ba1\u5212\u4e0aspark\u3002<\/p>\n

\n

\u8be6\u7ec6\u7684gensim\u4e2dword2vec\u6587\u6863<\/p>\n<\/blockquote>\n

\u4e0a\u8ff0\u6587\u6863\u6709\u5173\u5de5\u5177\u7684\u7528\u6cd5\u5df2\u7ecf\u5f88\u8be6\u7ec6\u4e86\uff0c\u5c31\u4e0d\u591a\u8bf4\u3002<\/p>\n

\u5206\u8bcd\u91c7\u7528jieba\u3002<\/p>\n

\u56db\u3001\u6a21\u578b\u8bad\u7ec3\u6b65\u9aa4\u7b80\u8ff0<\/h3>\n

1.\u5148\u505a\u5206\u8bcd\u3001\u53bb\u505c\u7528\u8bcd\u5904\u7406<\/p>\n

seg_word_line = jieba.cut(line, cut_all = True<\/span>)<\/code><\/pre>\n

2.\u5c06\u5206\u8bcd\u7684\u7ed3\u679c\u4f5c\u4e3a\u6a21\u578b\u7684\u8f93\u5165<\/p>\n

model = gensim.models.Word2Vec(LineSentence(source_separated_words_file), size=200<\/span>, window=5<\/span>, min_count=5<\/span>, alpha=0.02<\/span>, workers=4<\/span>)<\/code><\/pre>\n

3.\u4fdd\u5b58\u6a21\u578b\uff0c\u65b9\u4fbf\u4ee5\u540e\u8c03\u7528\uff0c\u83b7\u5f97\u76ee\u6807\u8bcd\u7684\u540c\u4e49\u8bcd<\/p>\n

similary_words = model.most_similar(w, topn=10<\/span>)<\/code><\/pre>\n

\u4e94\u3001\u91cd\u8981\u8c03\u53c2\u76ee\u6807<\/h3>\n

     \u6bd4\u8f83\u91cd\u8981\u7684\u53c2\u6570\uff1a
1. \u8bad\u7ec3\u6570\u636e\u7684\u5927\u5c0f\uff0c\u5f53\u521d\u53ea\u7528\u4e8610\u4e07\u6570\u636e\uff0c\u8bad\u7ec3\u51fa\u6765\u7684\u6a21\u578b\u5f88\u4e0d\u597d\uff0c\u540e\u8fb9\u4e0d\u65ad\u5730\u5c06\u8bad\u7ec3\u8bed\u6599\u589e\u52a0\u5230800\u4e07\uff0c\u6548\u679c\u5f97\u5230\u4e86\u660e\u663e\u7684\u63d0\u5347
2. \u5411\u91cf\u7684\u7ef4\u5ea6\uff0c\u8fd9\u662f\u8bcd\u6c47\u5411\u91cf\u7684\u7ef4\u6570\uff0c\u8fd9\u4e2a\u4f1a\u5f71\u54cd\u5230\u8ba1\u7b97\uff0c\u7406\u8bba\u4e0a\u6765\u8bf4\u7ef4\u6570\u5927\u4e00\u70b9\u4f1a\u597d\u3002
3. \u5b66\u4e60\u901f\u7387
4. \u7a97\u53e3\u5927\u5c0f<\/p>\n

\u5728\u8c03\u53c2\u4e0a\uff0c\u5e76\u6ca1\u6709\u82b1\u592a\u591a\u7cbe\u529b\uff0c\u56e0\u4e3a\u76ee\u6d4b\u7ed3\u679c\u8fd8\u597d\uff0c\u5230\u65f6\u4e0a\u7ebf\u4f7f\u7528\u524d\u518d\u4ed4\u7ec6\u8c03\u6574\u3002<\/p>\n

\u516d\u3001\u6a21\u578b\u7684\u5b9e\u9645\u6548\u679c<\/h3>\n\n\n\n\n\n\n\n\n\n
\u76ee\u6807\u8bcd<\/th>\n\u540c\u4e49\u8bcd\u76f8\u5173\u8bcd<\/th>\n<\/tr>\n<\/thead>\n
\u5c3f<\/td>\n\u5c3f\u6db2,\u6492\u5c3f,\u5c3f\u6025,\u5c3f\u5c3f\u6709,\u5c3f\u5230,\u5185\u88e4,\u5c3f\u610f,\u5c0f\u89e3,\u524d\u5217\u817a\u708e,\u5c0f\u4fbf<\/td>\n<\/tr>\n
\u75d8\u75d8<\/td>\n\u9017\u9017,\u8c46\u8c46,\u75d8\u5b50,\u5c0f\u75d8,\u9752\u6625\u75d8,\u7ea2\u75d8,\u957f\u75d8\u75d8,\u7c89\u523a,\u8bbd\u523a,\u767d\u5934<\/td>\n<\/tr>\n
\u53d1\u51b7<\/td>\n\u53d1\u70eb,\u6ca1\u529b,\u5ffd\u51b7\u5ffd\u70ed,\u65f6\u51b7\u65f6\u70ed,\u5c0f\u67f4\u80e1,\u5934\u660f,\u55dc\u7761,38.9,\u5934\u6655,\u53d1\u5bd2<\/td>\n<\/tr>\n
\u547c\u5438\u56f0\u96be<\/td>\n\u6c14\u6765,\u6c14\u7d27,\u7a92\u606f,\u5927\u6c14,\u900f\u4e0d\u8fc7\u6c14,\u51fa\u4e0d\u4e0a,\u6fd2\u6b7b,\u7c97\u6c14,\u538b\u6c14,\u5fc3\u5f8b\u4e0d\u9f50<\/td>\n<\/tr>\n
\u6076\u5fc3<\/td>\n\u95f7,\u529b\u6c14,\u5455\u5fc3,\u80c0\u6c14,\u6da8,\u4e0d\u597d\u53d7,\u4e0d\u8fdb,\u6655\u8f66,\u95f7\u95f7,\u7cbe\u795e<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n

\u4e03\u3001\u53ef\u4ee5\u8dd1\u7684CODE<\/h3>\n
import<\/span> codecs import<\/span> jieba import<\/span> gensim from<\/span> gensim.models.word2vec import<\/span> LineSentence def<\/span> read_source_file<\/span>(source_file_name)<\/span>:<\/span> try<\/span>: file_reader = codecs.open(source_file_name, 'r'<\/span>, 'utf-8'<\/span>,errors=\"ignore\"<\/span>) lines = file_reader.readlines() print(\"Read complete!\"<\/span>) file_reader.close() return<\/span> lines except<\/span>: print(\"There are some errors while reading.\"<\/span>) def<\/span> write_file<\/span>(target_file_name, content)<\/span>:<\/span> file_write = codecs.open(target_file_name, 'w+'<\/span>, 'utf-8'<\/span>) file_write.writelines(content) print(\"Write successfully!\"<\/span>) file_write.close() def<\/span> separate_word<\/span>(filename,user_dic_file, separated_file)<\/span>:<\/span> print(\"separate_word\"<\/span>) lines = read_source_file(filename) #jieba.load_userdict(user_dic_file)<\/span> stopkey=[line.strip() for<\/span> line in<\/span> codecs.open('stopword_zh.txt'<\/span>,'r'<\/span>,'utf-8'<\/span>).readlines()] output = codecs.open(separated_file, 'w'<\/span>, 'utf-8'<\/span>) num = 0<\/span> for<\/span> line in<\/span> lines: num = num + 1<\/span> if<\/span> num% 10000<\/span> == 0<\/span>: print(\"Processing line number: \"<\/span> + str(num)) seg_word_line = jieba.cut(line, cut_all = True<\/span>) wordls = list(set(seg_word_line)-set(stopkey)) if<\/span> len(wordls)>0<\/span>: word_line = ' '<\/span>.join(wordls) + '\\n'<\/span> output.write(word_line) output.close() return<\/span> separated_file def<\/span> build_model<\/span>(source_separated_words_file,model_path)<\/span>:<\/span> print(\"start building...\"<\/span>,source_separated_words_file) model = gensim.models.Word2Vec(LineSentence(source_separated_words_file), size=200<\/span>, window=5<\/span>, min_count=5<\/span>, alpha=0.02<\/span>, workers=4<\/span>) model.save(model_path) print(\"build successful!\"<\/span>, model_path) return<\/span> model def<\/span> 
get_similar_words_str<\/span>(w, model, topn = 10<\/span>)<\/span>:<\/span> result_words = get_similar_words_list(w, model) return<\/span> str(result_words) def<\/span> get_similar_words_list<\/span>(w, model, topn = 10<\/span>)<\/span>:<\/span> result_words = [] try<\/span>: similary_words = model.most_similar(w, topn=10<\/span>) print(similary_words) for<\/span> (word, similarity) in<\/span> similary_words: result_words.append(word) print(result_words) except<\/span>: print(\"There are some errors!\"<\/span> + w) return<\/span> result_words def<\/span> load_models<\/span>(model_path)<\/span>:<\/span> return<\/span> gensim.models.Word2Vec.load(model_path) if<\/span> \"__name__ == __main__()\"<\/span>: filename = \"d:\\\\data\\\\dk_mainsuit_800w.txt\"<\/span> #source file<\/span> user_dic_file = \"new_dict.txt\"<\/span> # user dic file<\/span> separated_file = \"d:\\\\data\\\\dk_spe_file_.txt\"<\/span> # separated words file<\/span> model_path = \"information_model0830\"<\/span> # model file<\/span> #source_separated_words_file = separate_word(filename, user_dic_file, separated_file)<\/span> source_separated_words_file = separated_file # if separated word file exist, don't separate_word again<\/span> build_model(source_separated_words_file, model_path)# if model file exists, don't build model <\/span> model = load_models(model_path) words = get_similar_words_str('\u5934\u75db'<\/span>, model) print(words) 
<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"word2vec\u8bcd\u8bed\u76f8\u4f3c\u5ea6_\u5982\u4f55\u8bad\u7ec3\u6a21\u578b\u4e00\u3001\u9700\u6c42\u63cf\u8ff0\u4e1a\u52a1\u9700\u6c42\u7684\u76ee\u6807\u662f\u8bc6\u522b\u51fa\u76ee\u6807\u8bcd\u6c47\u7684\u540c\u4e49\u8bcd\u548c\u76f8\u5173\u8bcd\u6c47\uff0c\u5982\u4e0b\u4e3a\u90e8\u5206\u76ee\u6807\u8bcd\u6c47(\u4e3b\u8981...","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[],"tags":[],"_links":{"self":[{"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/posts\/5654"}],"collection":[{"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/comments?post=5654"}],"version-history":[{"count":0,"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/posts\/5654\/revisions"}],"wp:attachment":[{"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/media?parent=5654"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/categories?post=5654"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/mushiming.com\/wp-json\/wp\/v2\/tags?post=5654"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}