In this article we will look at a simple RSS reading tool written in Python. It downloads an RSS or Atom feed, uses BeautifulSoup to pull out each entry's title, link, publication date and description, and prints the results to the console. Hopefully you will find something useful in it.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A simple RSS/Atom reader: fetch a feed, parse its entries and print them."""
import re
import sys
import requests
import urllib2
from bs4 import BeautifulSoup as sp

reload(sys)
sys.setdefaultencoding("utf-8")

headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}


def urlread(url):
    """Download the feed, trying requests first and falling back to urllib2."""
    try:
        req = requests.get(url, headers=headers)
        req.encoding = "utf-8"
        return req.text.encode("utf-8")
    except Exception:
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req)
        return response.read().encode("utf-8")


class Item:
    """One feed entry: title, link, publication date and a trimmed description."""

    def __init__(self, title, link, date, description):
        self.title = title.strip()
        self.link = link.strip()
        self.pubDate = date.strip()
        self.description = self.filter(description).strip()

    def filter(self, description):
        """Strip HTML tags and line breaks, then truncate to 240 characters."""
        description = re.sub("<.*?>", '', description)
        description = re.sub("\r", '', description)
        description = re.sub("\n", '', description)
        description = re.sub(u"\xa0", " ", description)  # non-breaking spaces -> normal spaces
        if len(description) > 240:
            description = description[:240] + '...'
        return description

    def __str__(self):
        return "%s\n%s\n%s\n<%s>\n" % (
            self.title,
            self.link,
            self.description,
            self.pubDate
        )

    __repr__ = __str__


class BSParser(object):
    """Parse an RSS or Atom feed with BeautifulSoup and yield Item objects."""

    def __init__(self, url):
        xml = urlread(url)
        self.reset(xml)

    def reset(self, xml=None):
        if xml is None:
            self.soup = sp("<xml> </xml>", "xml")
        else:
            self.soup = sp(xml, "xml")

    def callback(self, method, obj, tags):
        """Call obj.find()/obj.find_all() with the first tag name that yields a result."""
        rst = None
        attr = method.lower()
        for tag in tags:
            try:
                rst = getattr(obj, attr)(tag)
            except Exception:
                continue
            if rst:
                break
        return rst

    def getfields(self, tags=["item", "entry"]):
        # RSS uses <item>, Atom uses <entry>
        return self.callback(method="FIND_ALL", obj=self.soup, tags=tags)

    def gettitle(self, obj, tags=["title"]):
        return self.callback("FIND", obj, tags).text

    def getlink(self, obj, tags=["link"]):
        # Atom feeds put the URL in the href attribute instead of the tag text
        rst = self.callback("FIND", obj, tags).text
        if not rst:
            rst = self.callback("FIND", obj, tags).get("href")
        return rst

    def getdate(self, obj, tags=["pubDate", "published"]):
        return self.callback("FIND", obj, tags).text

    def getdescription(self, obj, tags=["description", "content"]):
        return self.callback("FIND", obj, tags).text

    def run(self):
        """Yield an Item for every entry in the feed."""
        for item in self.getfields():
            title = self.gettitle(item)
            link = self.getlink(item)
            date = self.getdate(item)
            description = self.getdescription(item)
            yield Item(title, link, date, description)


def test(url):
    parser = BSParser(url)
    for item in parser.run():
        print item


if __name__ == "__main__":
    # Pass the feed address on the command line, e.g. python rssreader.py <feed-url>
    test(sys.argv[1])
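To try it out, save the script and point it at any RSS or Atom feed. A minimal usage sketch follows; the filename rssreader.py and the feed URL are only placeholder assumptions, not something given in the article.

# Minimal usage sketch; "rssreader.py" and the feed URL below are placeholders.
# From the command line:
#     python rssreader.py https://example.com/feed.xml
#
# Or programmatically, assuming the code above is saved as rssreader.py:
from rssreader import BSParser

parser = BSParser("https://example.com/feed.xml")  # placeholder feed address
for entry in parser.run():
    print entry  # each Item prints its title, link, description and pubDate

Since run() is a generator, entries are fetched and printed one at a time, which keeps memory use low even for long feeds.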
That is the simple RSS reading tool shared above. If you have a similar need, the analysis above can serve as a reference. For more related content, follow the 億速云 industry news channel.