class Spider(object_ref):
    """Base class for all Scrapy spiders.

    A spider must have a unique ``name`` string: it is how Scrapy locates
    (and instantiates) the spider, so it is the spider's most important —
    and mandatory — attribute. The common convention is to name the spider
    after the site (domain) it crawls, with or without the TLD suffix;
    e.g. a spider that crawls mywebsite.com is usually named ``mywebsite``.
    """

    # Unique identifier for this spider (required; see class docstring).
    name = None
    # Optional dict of per-spider settings overrides, merged into the
    # project settings by update_settings() at 'spider' priority.
    custom_settings = None

    def __init__(self, name=None, **kwargs):
        """Initialize the spider's name and extra attributes.

        :param name: spider name; falls back to the class-level ``name``.
        :param kwargs: arbitrary keyword arguments, stored as instance
            attributes via ``__dict__``.
        :raises ValueError: if no name is supplied here or on the class.
        """
        if name is not None:
            self.name = name
        elif not getattr(self, 'name', None):
            raise ValueError("%s must have a name" % type(self).__name__)
        # Objects store their members in the built-in __dict__, so any
        # keyword argument becomes an instance attribute.
        self.__dict__.update(kwargs)
        # start_urls: the list of URLs the crawl starts from when no
        # explicit requests are specified. The first pages fetched come
        # from this list; follow-up URLs are extracted from the responses.
        if not hasattr(self, 'start_urls'):
            self.start_urls = []

    @property
    def logger(self):
        # Logger named after the spider; the adapter injects the spider
        # object into every record under the 'spider' key.
        logger = logging.getLogger(self.name)
        return logging.LoggerAdapter(logger, {'spider': self})

    def log(self, message, level=logging.DEBUG, **kw):
        """Log the given message at the given log level

        This helper wraps a log call to the logger within the spider, but you
        can use it directly (e.g. Spider.logger.info('msg')) or use any other
        Python logger too.
        """
        self.logger.log(level, message, **kw)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Alternate constructor used by Scrapy: build the spider and bind
        it to the running crawler."""
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        return spider

    def _set_crawler(self, crawler):
        # Keep references to the crawler and its settings, and arrange for
        # close() to be called when the spider_closed signal fires.
        self.crawler = crawler
        self.settings = crawler.settings
        crawler.signals.connect(self.close, signals.spider_closed)

    def start_requests(self):
        """Yield one Request per URL in start_urls, for Scrapy to download.

        Called only once per crawl. Falls back to the deprecated
        make_requests_from_url() when a subclass still overrides it,
        emitting a deprecation warning in that case.
        """
        cls = self.__class__
        if method_is_overridden(cls, Spider, 'make_requests_from_url'):
            warnings.warn(
                "Spider.make_requests_from_url method is deprecated; it "
                "won't be called in future Scrapy releases. Please "
                "override Spider.start_requests method instead (see %s.%s)." % (
                    cls.__module__, cls.__name__
                ),
            )
            for url in self.start_urls:
                yield self.make_requests_from_url(url)
        else:
            for url in self.start_urls:
                yield Request(url, dont_filter=True)

    def make_requests_from_url(self, url):
        """ This method is deprecated. """
        return Request(url, dont_filter=True)

    def parse(self, response):
        """Default callback for downloaded responses; it should produce
        Items and/or further Requests. Subclasses must override it.

        :raises NotImplementedError: always, unless overridden.
        """
        raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))

    @classmethod
    def update_settings(cls, settings):
        # Merge this spider's custom_settings into the given settings
        # object at 'spider' priority.
        settings.setdict(cls.custom_settings or {}, priority='spider')

    @classmethod
    def handles_request(cls, request):
        # True when the request's URL belongs to this spider's domain(s).
        return url_is_from_spider(request.url, cls)

    @staticmethod
    def close(spider, reason):
        # Invoke the spider's optional closed() hook, if one is defined.
        closed = getattr(spider, 'closed', None)
        if callable(closed):
            return closed(reason)

    def __str__(self):
        return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))

    __repr__ = __str__
    83. # Top-level imports
    84. from scrapy.spiders.crawl import CrawlSpider, Rule
    85. from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
    86. from scrapy.spiders.sitemap import SitemapSpider