    from fake_useragent import UserAgent
    from lxml import etree
    from threading import Thread
    import requests
    import csv

    headers = {
        'User-Agent': UserAgent().chrome
    }

    def pa_page(n):
        url = f"https://aspx.sc.chinaz.com/query.aspx?keyword=%E5%85%8D%E8%B4%B9&issale=&classID=864&page={n}"
        resp = requests.get(url, headers=headers)

        html = etree.HTML(resp.text)
        muban = html.xpath('//*[@id="container"]/div/p/a')
        data = []
        for i in muban:
            data.append({"模板名": i.text, "链接": i.xpath('@href')[0]})
        with open('模板.csv', 'a', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=['模板名', '链接'])  # declare the column names up front; each row dict written below is matched to them field by field
            writer.writeheader()  # write the header row (repeated once per page, since the file is opened in append mode)
            writer.writerows(data)  # write the data rows
            print("数据已经写入成功!!!")

    if __name__ == '__main__':
        for i in range(1, 100):
            Thread(target=pa_page, args=(i,)).start()
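
One caveat with the script as written: up to 99 threads append to 模板.csv concurrently, so the header row is repeated once per page and rows from different pages may interleave. Below is a minimal sketch of a lock-guarded writer, assuming a hypothetical helper name `save_rows` and a module-level `csv_lock` (neither appears in the original code):

    import csv
    from threading import Lock

    csv_lock = Lock()  # hypothetical: serializes appends to the shared CSV file

    def save_rows(data):
        # Only one thread at a time may open and append to 模板.csv.
        with csv_lock:
            with open('模板.csv', 'a', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=['模板名', '链接'])
                writer.writerows(data)  # rows only; write the header once before starting the threads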