1. The spider file
import scrapy
from scrapy_redis.spiders import RedisSpider

from ..items import NewsItem


class CbsnewsSpiderSpider(RedisSpider):
    name = "abc_spider"
    # The spider blocks on this Redis list and pops its start URLs from it
    redis_key = 'abc:start_urls'

    def parse(self, response):
        """
        Layer 1: parse the initial URL that scrapy-redis requested automatically via GET.
        :param response:
        :return:
        """
        abc_list = response.xpath('//nav[@class="header__nav"]//a/@href').extract()
        if not abc_list:
            return
        for column_url in abc_list:
            # urljoin() turns relative hrefs into absolute URLs before requesting them
            yield scrapy.Request(response.urljoin(column_url),
                                 callback=self.abc_source,
                                 meta={'column_url': column_url})

    def abc_source(self, response):
        """
        Layer 2: parse the column pages requested from the URLs collected in layer 1.
        :param response:
        :return:
        """
        column_url = response.meta['column_url']
        print(column_url)
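Because the class inherits from RedisSpider, it does not define start_urls; it idles until a URL is pushed onto the abc:start_urls list in Redis. Below is a minimal sketch of seeding that key with redis-py; the host, port, and the example news URL are assumptions for illustration, not values from the original project.

# Sketch: push a start URL onto the Redis list the spider is watching.
# Host/port and the URL are assumptions; adjust them to your environment.
import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
r.lpush('abc:start_urls', 'https://www.abc.net.au/news/')  # hypothetical start URL

The same can be done from the command line with redis-cli (lpush abc:start_urls <url>), after which the crawl is started as usual with scrapy crawl abc_spider.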
2. settings.py configuration
# Redis connection used by scrapy-redis
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379

# Use the scrapy-redis scheduler and duplicate filter so requests are
# queued and de-duplicated in Redis and shared by every spider instance
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Keep the request queue and dupefilter in Redis when the spider closes,
# so an interrupted crawl can be resumed
SCHEDULER_PERSIST = True
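The spider file imports NewsItem from items.py, which the article does not show. A minimal sketch is given below so the example is self-contained; the field names are assumptions chosen for illustration, not the original item definition.

# items.py - sketch of the item class the spider imports.
# The field names here are assumptions, not the original project's fields.
import scrapy


class NewsItem(scrapy.Item):
    title = scrapy.Field()     # headline of the article
    url = scrapy.Field()       # absolute URL of the article page
    content = scrapy.Field()   # article body text

If scraped items should also be written back to Redis, scrapy-redis additionally provides scrapy_redis.pipelines.RedisPipeline, which can be enabled through ITEM_PIPELINES in settings.py.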