您好,登录后才能下订单哦!
#coding=utf-8
import urllib2
import re
import xlwt
import smtplib
import random
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
# 1.先获取整个页面信息
# urllib 模块提供了读取web页面数据的接口,我们可以像读取本地文件一样读取www和ftp上的数据。
# 首先,我们定义了一个getHtml()函数:
# urllib.urlopen()方法用于打开一个URL地址。
# read()方法用于读取URL上的数据,向getHtml()函数传递一个网址,并把整个页面下载下来。执行程序就会把整个网页打印输出。
#
# #添加头部header
# Agent_list = [ ]
# user_agent = random.choice(Agent_list)
# page.add_header('User-Agent',user_agent)
#
#
# #定义opener,设置代理IP
# ip_list = []
# httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
# opener = urllib2.build_opener(httpproxy_handler)
# urllib2.install_opener(opener)
def getHtml(url):
    """Download the HTML of *url* and return it as a byte string.

    A proxy (picked at random from ``ip_list``) is installed as the global
    urllib2 opener, and a random User-Agent header is attached so repeated
    requests look less like a single bot.

    :param url: absolute URL of the page to fetch.
    :return: raw page body as returned by ``response.read()``.
    :raises urllib2.URLError: on connection/proxy failure or timeout.
    """
    Agent_list = ['Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
                  "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
                  "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11"
                  ]
    ip_list = ['223.198.16.58:9999','110.244.12.96:9999','61.145.8.103:9999','223.198.19.214:9999','112.85.125.111:9999']
    user_agent = random.choice(Agent_list)
    httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
    opener = urllib2.build_opener(httpproxy_handler)
    urllib2.install_opener(opener)
    page = urllib2.Request(url)
    page.add_header('User-Agent',user_agent)
    # timeout so a dead proxy cannot hang the script forever;
    # try/finally so the socket is always released (original leaked it)
    response = urllib2.urlopen(page, timeout=30)
    try:
        return response.read()
    finally:
        response.close()
# 2.筛选页面中想要的数据
# 我们又创建了geturl()函数,用于在获取的整个页面中筛选需要的视频连接。
# re模块主要包含了正则表达式:
# r"<a href='(/html/gndy/+.+html)'",表示查找类似/html/gndy/jddy/20190607/58704.html地址的超链接。
# 正则表达式输出的是括号()里面的内容。
#
# 正则表达式示例:r'(.*) are (.*?) .*'
# 首先,这是一个字符串,前面的一个 r 表示字符串为非转义的原始字符串,让编译器忽略反斜杠,也就是忽略转义字符。
# 但是这个字符串里没有反斜杠,所以这个 r 可有可无。
# . :匹配一个除了换行符任意一个字符
# ^ :只有后面跟的字符串在开头,才能匹配上
# * :它控制它前面那个字符,它前面那个字符出现0到多个都可以匹配上
# + :匹配前面那个字符1到多次
# ?:匹配前面那个字符0到1个,多余的只匹配一个
# (.*) 第一个匹配分组,.* 代表匹配除换行符之外的所有字符。
# (.*?) 第二个匹配分组,.*? 后面多个问号,代表非贪婪模式,也就是说只匹配符合条件的最少字符
# 后面的一个 .* 没有括号包围,所以不是分组,匹配效果和第一个一样,但是不计入匹配结果中。
#
# re.search("com","COM",re.I).group()
# re.I 使匹配对大小写不敏感
# re.L 做本地化识别(locale-aware)匹配
# re.M 多行匹配,影响^和$
# re.S 使.匹配包括换行在内的所有字符
#
# <a href="">..</a>表示超链接
# re.compile() 可以把正则表达式编译成一个正则表达式对象.
# re.findall() 方法读取html 中包含 urlre(正则表达式)的数据。
# 运行脚本将得到整个页面中包含图片的URL地址。
#
# reg = r"<a href='(/html/gndy/.*)</a><br/>"
# 匹配类似<a href='/html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部</a><br/> 并输出括号里面的内容
# 得到 /html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部
# 可以用 reg = r"<a href='(/html/gndy/.*?)</a><br/>"
# 可以用 reg = r"<a href='(/html/gndy/.+)</a><br/>"
# 可以用 reg = r"<a href='(/html/gndy/.+?)</a><br/>"
#
# re.split(r"'>+",resource_url) 将得到的结果'/html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部' 按照'>切割成两部分。
# down_addr = '' + down_page[j] down_page无法显示汉字,所以做了下转化
#
# for i in range (1,20) 查询需要的条记录,从1开始的原因是因为第1个记录不是需要的数据。
def geturl(html):
reg = r"<a href='(/html/gndy/.*)</a><br/>"
urlre = re.compile(reg)
urllist = re.findall(urlre,html)
wbk = xlwt.Workbook(encoding='gbk')
worksheet = wbk.add_sheet('My worksheet')
list1=('Name','Page','Url')
for i in range(1,20):
resource_url = "https://dytt8.net" + urllist[i]
result = re.split(r"'>+",resource_url)
Agent_list = ['Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
ip_list = ['223.198.16.58:9999','110.244.12.96:9999','61.145.8.103:9999','223.198.19.214:9999','112.85.125.111:9999']
user_agent = random.choice(Agent_list)
httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
opener = urllib2.build_opener(httpproxy_handler)
urllib2.install_opener(opener)
user_agent = random.choice(Agent_list)
down_page = urllib2.Request(result[0])
down_page.add_header('User-Agent',user_agent)
print down_page.get_header('User-agent')
response_page = urllib2.urlopen(down_page)
down_html = response_page.read()
addr_code = r'<a href="(ftp://.*)">'
addr_re = re.compile(addr_code)
down_url = re.findall(addr_re,down_html)
down_addr = '' + down_url[0]
if i == 1:
for list in range(0,len(list1)):
worksheet.write(i-1,list,list1[list])
else:
worksheet.write(i-1,0,result[1])
worksheet.write(i-1,1,result[0])
worksheet.write(i-1,2,down_addr)
time.sleep(5)
wbk.save('renew.xls')
#3. 发送邮件
def send_mail():
    """Email the generated ``renew.xls`` as an attachment via smtp.139.com.

    Builds a multipart message (plain-text body + xls attachment) and sends
    it from *user* to *to*.  Credentials are placeholders and must be filled
    in before use.

    :raises IOError: if the xls file cannot be read.
    :raises smtplib.SMTPException: on login/send failure.
    """
    user = 'xxxx.com'
    pwd = 'xxxxs'
    to = 'xxxx'
    msg = MIMEMultipart()
    msg["Subject"] = '電影記錄'
    msg["From"] = user
    msg["To"] = to
    part1 = MIMEText("你好,\n\n 電影記錄見(jiàn)附件。")
    msg.attach(part1)
    # 'with' closes the attachment file; the original leaked the handle.
    with open(r'E:\2xx3\python腳本\html\renew.xls','rb') as xls_file:
        part2 = MIMEApplication(xls_file.read())
    part2.add_header('Content-Disposition','attachment',filename='renew.xls')
    msg.attach(part2)
    s = smtplib.SMTP("smtp.139.com",timeout=30)
    try:
        s.login(user,pwd)
        s.sendmail(user,to,msg.as_string())
    finally:
        # Always drop the SMTP connection, even if login/sendmail raises;
        # the original left it open on failure.
        s.close()
def main():
    """Scrape the dytt8 index page, write renew.xls, then mail it out."""
    html = getHtml("https://www.dytt8.net/index0.html")
    geturl(html)
    send_mail()

# Guard the entry point so importing this module does not trigger the
# scrape/mail side effects.
if __name__ == '__main__':
    main()
免责声明:本站发布的内容(图片、视频和文字)以原创、转载和分享为主,文章观点不代表本网站立场,如果涉及侵权请联系站长邮箱:is@yisu.com进行举报,并提供相关证据,一经查实,将立刻删除涉嫌侵权内容。