Scrapy: Downloading Dynamically Loaded Data

清冷的蓝天天 · 2022-04-02
python

Configuration: in settings.py, enable the item pipeline, the downloader middleware, and related settings.
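For example, a minimal sketch of settings.py (the class names below follow Scrapy's default project template for a project named middlePro; adjust them to match your generated files):

# settings.py -- sketch, assuming Scrapy's default generated class names
DOWNLOADER_MIDDLEWARES = {
    'middlePro.middlewares.MiddleproDownloaderMiddleware': 543,
}
ITEM_PIPELINES = {
    'middlePro.pipelines.MiddleproPipeline': 300,
}
# Commonly set as well when scraping:
ROBOTSTXT_OBEY = False
USER_AGENT = 'Mozilla/5.0'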

Changes in the downloader middleware (middlewares.py):

from scrapy.http import HtmlResponse


    # Inside the downloader middleware class generated by Scrapy's project template
    def process_response(self, request, response, spider):
        # Reuse the browser instance created in the spider
        bro = spider.bro
        if request.url in spider.model_urls:
            # Let Selenium render the page so the JS-loaded data is present
            bro.get(request.url)
            # Scroll down to trigger lazy loading of more data, if needed
            # bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')

            page_text = bro.page_source
            # Wrap the rendered HTML in a new response and hand it back to the engine
            new_res = HtmlResponse(url=request.url, body=page_text, encoding='utf-8', request=request)
            return new_res
        else:
            # All other requests keep their original response
            return response
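One caveat: page_source is read right after bro.get(), so content that loads slowly may still be missing. A hedged sketch of an explicit wait you could add before reading page_source (the div.ndi_main selector mirrors the one the spider parses below; the 10-second timeout is arbitrary):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Block until the news list container has rendered (up to 10 seconds)
WebDriverWait(bro, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'div.ndi_main'))
)
page_text = bro.page_source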

The spider file:

import scrapy
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from middlePro.items import MiddleproItem


class MiddleSpider(scrapy.Spider):
    name = 'middle'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://news.163.com/']
    model_urls = []  # section pages whose content is loaded dynamically

    def __init__(self):
        # Start an Edge browser that the downloader middleware will reuse
        self.ser = Service(r'/Users/xiaodunmeng/Desktop/xiaoproject/shixun/xinscrapy/middlePro/middlePro/msedgedriver')
        self.bro = webdriver.Edge(service=self.ser)

    def parse(self, response):
        # Collect the section links from the homepage navigation bar
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        alist = [2, 3]  # indexes of the sections to crawl

        for index in alist:
            model_url = li_list[index].xpath('./a/@href').extract_first()
            self.model_urls.append(model_url)

        for url in self.model_urls:
            yield scrapy.Request(url, callback=self.parse_model)

    def parse_model(self, response):
        # This response was rebuilt from Selenium's page_source by the middleware
        div_list = response.xpath('//div[@class="ndi_main"]/div')
        for div in div_list:
            title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
            new_detail_url = div.xpath('./div/div[1]/h3/a/@href').extract_first()
            if not title or not new_detail_url:
                # Skip placeholder/ad nodes that have no headline link
                continue

            item = MiddleproItem()
            item['title'] = title

            # Hand the item to the detail-page callback via meta
            yield scrapy.Request(url=new_detail_url, callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        # Extract the full article body text
        content = response.xpath('//*[@id="content"]/div[2]//text()').extract()
        content = ''.join(content)
        item = response.meta['item']
        item['content'] = content

        yield item

    def closed(self, spider):
        # Shut down the browser when the spider finishes
        self.bro.quit()
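The MiddleproItem imported above only needs the two fields the spider fills in; a minimal items.py consistent with that:

import scrapy


class MiddleproItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()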

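The pipeline itself isn't shown here; a hypothetical minimal version that just appends each article to a text file (class name assumed from Scrapy's template, output file name made up for illustration):

class MiddleproPipeline:
    def open_spider(self, spider):
        # Hypothetical output target -- one UTF-8 text file for all items
        self.fp = open('./news.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        self.fp.write(item['title'] + '\n' + item['content'] + '\n\n')
        return item

    def close_spider(self, spider):
        self.fp.close()

With the settings, middleware, spider, item, and pipeline in place, start the crawl with scrapy crawl middle.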