# ● Target site: https://aspx.sc.chinaz.com/query.aspx?keyword=%E5%85%8D%E8%B4%B9&issale=&classID=864&page=1
# ● Crawl requirements:
#   ○ 1. Page through the listing and fetch each page's source.
#   ○ 2. Parse with XPath to get every template's name and download link.
#   ○ 3. Save the data to a CSV file.
"""Scrape free website templates from sc.chinaz.com.

For each listing page: collect every template card, follow its detail
page to find the "福建电信下载" (Fujian Telecom download) mirror link,
and write (download URL, template name) rows to ``vt.csv``.
"""
import csv

import requests
from lxml import etree

# Listing URL with the page number parameterized — the original
# hard-coded page=1 even though the task requires paginating.
BASE_URL = (
    "https://aspx.sc.chinaz.com/query.aspx"
    "?keyword=%E5%85%8D%E8%B4%B9&issale=&classID=864&page={page}"
)

# NOTE(review): the original also sent a Referer and Cookie belonging to
# movie.douban.com — an unrelated site. Those copy-paste leftovers are
# dropped; only a browser User-Agent is needed here.
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"
    ),
}

PAGES = range(1, 4)  # how many listing pages to crawl; adjust as needed

data_list = []
for page in PAGES:
    response = requests.get(
        BASE_URL.format(page=page), headers=HEADERS, timeout=10
    )
    html = etree.HTML(response.content)
    # One <div> per template card under the main results container.
    for card in html.xpath('//div[@id="main"]/div[1]/div'):
        hrefs = card.xpath('./a[1]/@href')
        names = card.xpath('./p[1]/a[1]/text()')
        if not hrefs or not names:
            continue  # skip malformed cards instead of raising IndexError

        # Detail page holds the actual download mirrors.
        detail_url = "https:" + hrefs[0]
        detail_resp = requests.get(detail_url, headers=HEADERS, timeout=10)
        detail_html = etree.HTML(detail_resp.content)
        downloads = detail_html.xpath('//a[text()="福建电信下载"]/@href')
        if not downloads:
            continue  # no Fujian Telecom mirror on this detail page

        data_list.append({
            "url": downloads[0],
            # Strip the boilerplate "免费"/"下载" words from the title.
            "名字": names[0].replace("免费", "").replace("下载", ""),
        })

# utf-8-sig so Excel detects the encoding; newline="" per csv docs.
with open("vt.csv", "w", encoding="utf-8-sig", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["url", "名字"])
    writer.writeheader()
    writer.writerows(data_list)
