'''
Target URL: https://www.fabiaoqing.com/biaoqing/lists/page/1.html
Goal: scrape the first 10 pages of memes and save the images (45 memes per page).
Analysis of the URL pattern:
Page 1: https://www.fabiaoqing.com/biaoqing/lists/page/1.html
Page 2: https://www.fabiaoqing.com/biaoqing/lists/page/2.html
Page 3: https://www.fabiaoqing.com/biaoqing/lists/page/3.html
'''

import os

import requests
from lxml import etree

# Collect the image URLs from the first 10 listing pages.
imgs_url = []
for i in range(1, 11):
    url = f'https://www.fabiaoqing.com/biaoqing/lists/page/{i}.html'
    html = requests.get(url)
    html_list = etree.HTML(html.text)
    links = html_list.xpath('.//div[@class="tagbqppdiv"]/a/img')
    for link in links:
        # The real image URL is lazy-loaded via the data-original attribute.
        img_url = link.xpath('.//@data-original')[0]
        imgs_url.append(img_url)

# Set up the directory the images will be saved into first.
path = os.path.join(os.getcwd(), 'imgs1')
os.makedirs(path, exist_ok=True)  # create it if it does not exist yet

for img_url in imgs_url:
    img_name = os.path.join(path, img_url[31:])  # join the directory and the image name into one path
    r = requests.get(img_url, stream=True)
    # if r.status_code == 200:  # with this line in place last night, only 405 were scraped; after removing it today, all 450 downloaded
    with open(img_name, 'wb') as f:
        f.write(r.content)
    del r

print('Done saving!')
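A side note on the slice img_url[31:]: it assumes every image URL shares the same 31-character prefix, which breaks if the CDN host or path length changes. As a minimal sketch of a more robust alternative (filename_from_url is a hypothetical helper, not part of the original script), the file name can be taken from the URL's last path segment instead:

import os
from urllib.parse import urlparse

def filename_from_url(img_url):
    # Hypothetical helper: take the last path segment as the file name,
    # e.g. 'https://example.com/bmiddle/abc.jpg' -> 'abc.jpg',
    # instead of relying on a hard-coded slice offset of 31.
    return os.path.basename(urlparse(img_url).path)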

To sum up, one open question:
if r.status_code == 200:  # with this line in place last night, only 405 were scraped; after removing it today, all 450 downloaded. Why?
A likely explanation: some image requests come back with a non-200 status (for example a 404 or an anti-hotlinking response), so the check skips them; without the check, the response body is written to disk regardless, which yields 450 files, but the ones from failed requests may not be valid images.
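Rather than guessing, one way to answer the question is to log the non-200 responses. A minimal diagnostic sketch, reusing the imgs_url list collected above (the bad list and its printout are additions for illustration):

import requests

# Re-request every image URL and tally the non-200 responses,
# to see exactly which downloads the status-code check was skipping.
bad = []
for img_url in imgs_url:
    r = requests.get(img_url)
    if r.status_code != 200:
        bad.append((img_url, r.status_code))

print(f'{len(bad)} of {len(imgs_url)} requests did not return 200')
for u, code in bad:
    print(code, u)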