Selenium AJAX dynamic pagination base spider

Posted 2019-06-07 00:42

I am trying to run my base spider for dynamic pagination, but the crawl is not succeeding. I am using Selenium to handle the AJAX-based pagination. The URL I am using is: http://www.demo.com. Here is my code:

# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from scrapy.spider import BaseSpider
from demo.items import demoItem
from selenium import webdriver

def removeUnicodes(strData):
    if(strData):
        #strData = strData.decode('unicode_escape').encode('ascii','ignore')
        strData = strData.encode('utf-8').strip()
        strData = re.sub(r'[\n\r\t]', r' ', strData.strip())
        #print 'Output:', strData
    return strData


class demoSpider(scrapy.Spider):
    name = "demourls"
    allowed_domains = ["demo.com"]
    start_urls = ['http://www.demo.com']

    def __init__(self):
        self.driver = webdriver.Firefox()

    def parse(self, response):
        print "*****************************************************"
        self.driver.get(response.url)
        print response.url
        print "______________________________"

        hxs = Selector(response)
        item = demoItem()
        finalurls = []
        while True:
            next = self.driver.find_element_by_xpath('//div[@class="showMoreCars hide"]/a')

            try:
                next.click()
                # get the data and write it to scrapy items
                item['pageurl'] = response.url
                item['title'] = removeUnicodes(hxs.xpath('.//h1[@class="page-heading"]/text()').extract()[0])
                urls = hxs.xpath('.//a[@id="linkToDetails"]/@href').extract()
                print '**********url**********', urls

                for url in urls:
                    print '---------url-------', url
                    finalurls.append(url)

                item['urls'] = finalurls

            except:
                break

        self.driver.close()
        return item

My items.py is:

from scrapy.item import Item, Field


class demoItem(Item):
    page = Field()
    urls = Field()
    pageurl = Field()
    title = Field()

When I crawl it and export the result to JSON, I get this JSON file:

[{"pageurl": "http://www.demo.com", "urls": [], "title": "demo"}]

I am not able to crawl all the URLs because they are loaded dynamically.

2 Answers

叛逆 · 2019-06-07 01:09

First of all, you don't need to click the showMoreCars button, as it is clicked automatically after the page loads. Instead, waiting a few seconds is enough.

As for your Scrapy code, Selenium can capture all the hrefs for you. Here is an example of what you need to do in Selenium:

from selenium import webdriver

driver = webdriver.Firefox()

driver.get("http://www.carwale.com/used/cars-in-trichy/#city=194&kms=0-&year=0-&budget=0-&pn=2")
driver.implicitly_wait(5)  # wait up to 5 seconds when locating elements, so the AJAX-loaded links can appear
urls = driver.find_elements_by_xpath('.//a[@id="linkToDetails"]')
for url in urls:
    print url.get_attribute("href")
driver.close()
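
As an aside (not part of the original answer): if a fixed implicit wait turns out to be unreliable, an explicit wait on the link elements is a standard Selenium alternative:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get("http://www.carwale.com/used/cars-in-trichy/#city=194&kms=0-&year=0-&budget=0-&pn=2")
# block until at least one detail link is present, for at most 10 seconds
WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located((By.XPATH, '//a[@id="linkToDetails"]'))
)
for link in driver.find_elements_by_xpath('//a[@id="linkToDetails"]'):
    print link.get_attribute("href")
driver.close()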

All you need is to merge this Selenium logic with your Scrapy part; a sketch of that merge follows the output below.

Output of the first snippet:

http://www.carwale.com/used/cars-in-trichy/renault-pulse-s586981/
http://www.carwale.com/used/cars-in-trichy/marutisuzuki-ritz-2009-2012-s598266/
http://www.carwale.com/used/cars-in-trichy/mahindrarenault-logan-2007-2009-s607757/
http://www.carwale.com/used/cars-in-trichy/marutisuzuki-ritz-2009-2012-s589835/
http://www.carwale.com/used/cars-in-trichy/hyundai-santro-xing-2003-2008-s605866/
http://www.carwale.com/used/cars-in-trichy/chevrolet-captiva-s599023/
http://www.carwale.com/used/cars-in-trichy/chevrolet-enjoy-s595824/
http://www.carwale.com/used/cars-in-trichy/tata-indicav2-s606823/
http://www.carwale.com/used/cars-in-trichy/tata-indicav2-s606617/
http://www.carwale.com/used/cars-in-trichy/marutisuzuki-estilo-2009-2014-s592745/
http://www.carwale.com/used/cars-in-trichy/toyota-etios-2013-2014-s605950/
http://www.carwale.com/used/cars-in-trichy/tata-indica-vista-2008-2011-s599001/
http://www.carwale.com/used/cars-in-trichy/opel-corsa-s591616/
http://www.carwale.com/used/cars-in-trichy/hyundai-i20-2008-2010-s596173/
http://www.carwale.com/used/cars-in-trichy/tata-indica-vista-2012-2014-s600753/
http://www.carwale.com/used/cars-in-trichy/fiat-punto-2009-2011-s606934/
http://www.carwale.com/used/cars-in-trichy/mitsubishi-pajero-s597849/
http://www.carwale.com/used/cars-in-trichy/fiat-linea20082014-s596079/
http://www.carwale.com/used/cars-in-trichy/tata-indicav2-s597390/
http://www.carwale.com/used/cars-in-trichy/mahindra-xylo-2009-2012-s603434/
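
For reference, a minimal sketch of what that merge could look like inside your spider's parse method (assuming the demoItem fields from the question; untested):

    def parse(self, response):
        self.driver.get(response.url)
        self.driver.implicitly_wait(5)  # let the AJAX-loaded results appear
        item = demoItem()
        item['pageurl'] = response.url
        # read the hrefs from the Selenium-rendered page, not from the Scrapy response
        item['urls'] = [a.get_attribute("href")
                        for a in self.driver.find_elements_by_xpath('.//a[@id="linkToDetails"]')]
        self.driver.close()
        return item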
Emotional °昔 · 2019-06-07 01:20

I hope the code below helps.

somespider.py

# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.selector import Selector
from demo.items import DemoItem
from selenium import webdriver

def removeUnicodes(strData):
    if(strData):
        strData = strData.encode('utf-8').strip()
        strData = re.sub(r'[\n\r\t]', r' ', strData.strip())
    return strData

class demoSpider(scrapy.Spider):
    name = "domainurls"
    allowed_domains = ["domain.com"]
    start_urls = ['http://www.domain.com/used/cars-in-trichy/']

    def __init__(self):
        self.driver = webdriver.Remote("http://127.0.0.1:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNITWITHJS)

    def parse(self, response):
        self.driver.get(response.url)
        self.driver.implicitly_wait(5)
        hxs = Selector(response)
        item = DemoItem()
        finalurls = []
        while True:
            try:
                # locate the button inside the try block so the loop exits
                # cleanly once no "show more" button remains
                next = self.driver.find_element_by_xpath('//div[@class="showMoreCars hide"]/a')
                next.click()
                # get the data and write it to scrapy items
                item['pageurl'] = response.url
                item['title'] = removeUnicodes(hxs.xpath('.//h1[@class="page-heading"]/text()').extract()[0])
                urls = self.driver.find_elements_by_xpath('.//a[@id="linkToDetails"]')

                # find_elements returns every link loaded so far, so rebuild
                # the list on each pass instead of appending duplicates
                finalurls = [removeUnicodes(url.get_attribute("href")) for url in urls]
                item['urls'] = finalurls

            except:
                break

        self.driver.close()
        return item

items.py

from scrapy.item import Item, Field

class DemoItem(Item):
    page = Field()
    urls = Field()
    pageurl = Field()
    title = Field()

Note: You need to have the Selenium RC server running, because HTMLUNITWITHJS works only with Selenium RC when using Python.

Start the Selenium RC server by issuing the command:

java -jar selenium-server-standalone-2.44.0.jar

Run your spider with the command:

scrapy crawl domainurls -o someoutput.json