import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360S'
}
url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
page_text = requests.get(url=url, headers=headers).text
# encode: re-encode the mis-decoded ISO-8859-1 text back into raw bytes
page_text = page_text.encode("ISO-8859-1")
# decode: decode those bytes as UTF-8 to recover the correct Chinese text
page_text = page_text.decode("utf-8")
# print(page_text)

# Parse the table of contents and grab every chapter entry
soup = BeautifulSoup(page_text, 'lxml')
li_list = soup.select('.book-mulu > ul > li')

fp = open('./sanguo.txt', 'w', encoding='utf-8')
for li in li_list:
    title = li.a.string
    detail_url = 'https://www.shicimingju.com' + li.a['href']
    detail_page_text = requests.get(url=detail_url, headers=headers).text
    # Same ISO-8859-1 -> UTF-8 fix for the chapter detail page
    detail_page_text = detail_page_text.encode("ISO-8859-1").decode("utf-8")
    detail_soup = BeautifulSoup(detail_page_text, 'lxml')
    div_tag = detail_soup.find('div', class_='chapter_content')
    content = div_tag.getText()
    fp.write(title + ':' + content + '\n')
    print(title + ' scraped successfully')
fp.close()
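# Note (a minimal alternative sketch, not part of the original): the
# encode("ISO-8859-1").decode("utf-8") round-trip can also be avoided by
# telling requests the correct encoding before reading .text, assuming the
# site actually serves UTF-8 pages:
#
#     resp = requests.get(url=url, headers=headers)
#     resp.encoding = 'utf-8'
#     page_text = resp.text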