您好,登錄后才能下訂單哦!
本文實例為大家分享了python下載微信公眾號相關文章的具體代碼,供大家參考,具體內容如下
目的:從零開始學自動化測試公眾號中下載“pytest”一系列文檔
1、按關鍵字搜索微信公眾號文章
2、對搜索結果前N頁進行解析,獲取文章標題和對應URL
主要使用的是requests和bs4中的BeautifulSoup
Weixin.py
import re
from urllib.parse import quote

import requests
from bs4 import BeautifulSoup

from WeixinSpider.HTML2doc import MyHTMLParser


class WeixinSpider(object):
    """Collect article titles and URLs for a WeChat official account via Sogou Weixin search."""

    def __init__(self, gzh_name, pageno, keyword):
        # gzh_name: official-account name, used as the search query
        # pageno: number of search-result pages to walk
        # keyword: case-insensitive substring an article title must contain
        self.GZH_Name = gzh_name
        self.pageno = pageno
        self.keyword = keyword.lower()
        self.page_url = []      # search-result page URLs built by get_page_url()
        self.article_list = []  # one {title: url} dict per result page
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5
        # Characters illegal in Windows file names -- stripped from titles
        # so the title can be used directly as a .doc file name.
        self.pattern = r'[\\/:*?"<>|\r\n]+'

    def get_page_url(self):
        """Build the Sogou Weixin search URL for each of the first `pageno` result pages."""
        for i in range(1, self.pageno + 1):
            url = "https://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=2&page=%s&ie=utf8" \
                  % (quote(self.GZH_Name), i)
            self.page_url.append(url)

    def get_article_url(self):
        """Fetch every search-result page and record matching article titles and URLs.

        Bug fix: the original created ONE dict before the loop and appended
        that same object once per page, so article_list held N references to
        a single dict and the download loop below re-fetched every article
        N times.  A fresh dict is now created for each page.
        """
        for url in self.page_url:
            article = {}
            response = requests.get(url, headers=self.headers, timeout=self.timeout)
            result = BeautifulSoup(response.text, 'html.parser')
            articles = result.select('ul[class="news-list"] > li > div[class="txt-box"] > h4 > a ')
            for a in articles:
                if self.keyword in a.text.lower():
                    # Sanitize the title so it is a valid file name.
                    new_text = re.sub(self.pattern, "", a.text)
                    article[new_text] = a["href"]
            self.article_list.append(article)


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers the crawl.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    timeout = 5
    gzh_name = 'pytest文檔'
    My_GZH = WeixinSpider(gzh_name, 5, 'pytest')
    My_GZH.get_page_url()
    My_GZH.get_article_url()
    for article in My_GZH.article_list:
        for (key, value) in article.items():
            # Download each matching article and convert it to a .doc file.
            html_response = requests.get(value, headers=headers, timeout=timeout)
            myHTMLParser = MyHTMLParser(key)
            myHTMLParser.feed(html_response.text)
            myHTMLParser.doc.save(myHTMLParser.docfile)
HTML2doc.py
import os
import re
from html.parser import HTMLParser

import docx
import requests
from docx import Document
from docx.shared import RGBColor


class MyHTMLParser(HTMLParser):
    """Convert a WeChat article's HTML into a Word document.

    Headings become level-2 headings, <p> text becomes paragraphs, <code>
    text becomes grey-colored runs, and <img> tags are downloaded and
    embedded.
    """

    def __init__(self, docname, savedir=r"D:\pytest"):
        # docname: base name for the .doc file and downloaded images
        # savedir: output directory; default keeps the original hard-coded path
        HTMLParser.__init__(self)
        self.docname = docname
        self.savedir = savedir
        self.docfile = os.path.join(savedir, "%s.doc" % self.docname)
        self.doc = Document()
        self.title = False          # currently inside an <h*> tag
        self.code = False           # currently inside a <code> tag
        self.text = ''              # accumulated text of the open paragraph
        self.processing = None      # tag name of the open paragraph, if any
        self.codeprocessing = None  # tag name of the open code block, if any
        self.picindex = 1           # sequence number for downloaded images
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5

    def handle_startendtag(self, tag, attrs):
        # Images: locate the image URL, download it, and embed it in the doc.
        if tag != "img" or not attrs:
            return
        # Bug fix: the original iterated attrs assuming "data-type" always
        # preceded "data-src"; otherwise `picname` was referenced before
        # assignment.  Reading the attributes into a dict removes the
        # ordering dependency.
        attr = dict(attrs)
        src = attr.get("data-src")
        if src is None:
            return
        ext = attr.get("data-type", "png")  # fall back to png when the type is missing
        picname = os.path.join(self.savedir, "%s%s.%s" % (self.docname, self.picindex, ext))
        picdata = requests.get(src, headers=self.headers, timeout=self.timeout)
        self.picindex = self.picindex + 1
        with open(picname, "wb") as pic:
            pic.write(picdata.content)
        try:
            self.doc.add_picture(picname)
        except docx.image.exceptions.UnexpectedEndOfFileError as e:
            # Truncated/corrupt image -- skip it but keep converting the rest.
            print(e)

    def handle_starttag(self, tag, attrs):
        if re.match(r"h(\d)", tag):
            self.title = True
        if tag == "p":
            self.processing = tag
        if tag == "code":
            self.code = True
            self.codeprocessing = tag

    def handle_data(self, data):
        if self.title:
            # All headings are flattened to level 2 in the output document.
            self.doc.add_heading(data, level=2)
        if self.processing:
            # Paragraph text may arrive in several chunks; buffer until </p>.
            self.text = self.text + data
        if self.code:
            # Code is rendered immediately as a grey run in its own paragraph.
            p = self.doc.add_paragraph()
            run = p.add_run(data)
            run.font.color.rgb = RGBColor(111, 111, 111)

    def handle_endtag(self, tag):
        # Any end tag closes a heading (headings contain a single data chunk).
        self.title = False
        if tag == self.processing:
            self.doc.add_paragraph(self.text)
            self.processing = None
            self.text = ''
        if tag == self.codeprocessing:
            self.code = False
運行結果:
缺少部分文檔,如pytest文檔4,是因為搜狗微信文章搜索結果中就沒有
以上就是本文的全部內容,希望對大家的學習有所幫助,也希望大家多多支持億速云。
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場,如果涉及侵權(quán)請(qǐng)聯(lián)系站長郵箱:is@yisu.com進(jìn)行舉報(bào),并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。