爬取小说 (Scraping a web novel with Scrapy: spider, items, and pipeline)

spider

  1. import scrapy
  2. from xiaoshuo.items import XiaoshuoItem
  3. class XiaoshuoSpiderSpider(scrapy.Spider):
  4. name = 'xiaoshuo_spider'
  5. allowed_domains = ['zy200.com']
  6. url = 'http://www.zy200.com/5/5943/'
  7. start_urls = [url + '11667352.html']
  8. def parse(self, response):
  9. info = response.xpath("/html/body/div[@id='content']/text()").extract()
  10. href = response.xpath("//div[@class='zfootbar']/a[3]/@href").extract_first()
  11. xs_item = XiaoshuoItem()
  12. xs_item['content'] = info
  13. yield xs_item
  14. if href != 'index.html':
  15. new_url = self.url + href
  16. yield scrapy.Request(new_url, callback=self.parse)

items

  1. import scrapy
  2. class XiaoshuoItem(scrapy.Item):
  3. # define the fields for your item here like:
  4. content = scrapy.Field()
  5. href = scrapy.Field()

pipeline

  1. class XiaoshuoPipeline(object):
  2. def __init__(self):
  3. self.filename = open("dp1.txt", "w", encoding="utf-8")
  4. def process_item(self, item, spider):
  5. content = item["title"] + item["content"] + '\n'
  6. self.filename.write(content)
  7. self.filename.flush()
  8. return item
  9. def close_spider(self, spider):
  10. self.filename.close()