Spider file:
# -*- coding: utf-8 -*-
import datetime
import urlparse
import socket

import scrapy
from scrapy.loader.processors import MapCompose
from scrapy.loader import ItemLoader
from scrapy.http import Request

from ..items import ArticlecollectItem


class BasicSpider(scrapy.Spider):
    name = "manual"
    allowed_domains = [""]

    # Start on the first index page
    start_urls = (
        '',
    )

    def parse(self, response):
        # Get the next index URLs and yield Requests
        next_selector = response.xpath('/html/body/div[4]/div[1]/div[5]/div[2]/div/a[last()]/@href')
        for url in next_selector.extract():
            yield Request(urlparse.urljoin(response.url, url))

        # Get item URLs and yield Requests
        item_selector = response.xpath('/html/body/div[4]/div[1]/ul/li/div[1]/div[1]/a/@href')
        for url in item_selector.extract():
            yield Request(urlparse.urljoin(response.url, url), callback=self.parse_item)

    def parse_item(self, response):
        # Create the loader using the response
        l = ItemLoader(item=ArticlecollectItem(), response=response)

        # Load fields using XPath expressions
        l.add_xpath('sellpoint', '//*[@id="introduction"]/div/div[2]/div/div[2]/div[2]/text()',
                    MapCompose(unicode.strip))
        l.add_xpath('decorDes', '//*[@id="introduction"]/div/div[2]/div/div[4]/div[2]/text()',
                    MapCompose(unicode.strip))
        l.add_xpath('description', '//*[@id="introduction"]/div/div[2]/div/div[3]/div[2]/text()',
                    MapCompose(unicode.strip))
        l.add_xpath('areaDes', '//*[@id="introduction"]/div/div[2]/div/div[6]/div[2]/text()',
                    MapCompose(lambda i: urlparse.urljoin(response.url, i)))

        # Housekeeping fields
        l.add_value('url', response.url)
        l.add_value('project', self.settings.get('BOT_NAME'))
        l.add_value('spider', self.name)
        l.add_value('server', socket.gethostname())
        l.add_value('date', datetime.datetime.now())

        return l.load_item()
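For reference, the ItemLoader above requires ArticlecollectItem to declare every field it loads; an undeclared field raises KeyError when load_item() runs. A minimal items.py sketch, with the field names taken from the add_xpath/add_value calls above (the original items.py is not shown, so treat the rest as an assumption):

# -*- coding: utf-8 -*-
import scrapy


class ArticlecollectItem(scrapy.Item):
    # Content fields filled by add_xpath() in parse_item()
    sellpoint = scrapy.Field()
    decorDes = scrapy.Field()
    description = scrapy.Field()
    areaDes = scrapy.Field()

    # Housekeeping fields filled by add_value()
    url = scrapy.Field()
    project = scrapy.Field()
    spider = scrapy.Field()
    server = scrapy.Field()
    date = scrapy.Field()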
Output:
2016-09-01 14:17:59 [scrapy] INFO: Spider opened
2016-09-01 14:17:59 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-09-01 14:17:59 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-09-01 14:17:59 [scrapy] DEBUG: Crawled (200)
2016-09-01 14:18:00 [scrapy] DEBUG: Crawled (200)
2016-09-01 14:18:00 [scrapy] DEBUG: Filtered offsite request to 'bj.lianjia.com':
2016-09-01 14:18:00 [scrapy] INFO: Closing spider (finished)
2016-09-01 14:18:00 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 620,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 30392,
'downloader/response_count': 2,
'downloader/response_status_count/200': 2,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 9, 1, 6, 18, 0, 173000),
'log_count/DEBUG': 4,
'log_count/INFO': 7,
'offsite/domains': 1,
'offsite/filtered': 30,
'request_depth_max': 1,
'response_received_count': 2,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'start_time': datetime.datetime(2016, 9, 1, 6, 17, 59, 774000)}
2016-09-01 14:18:00 [scrapy] INFO: Spider closed (finished)
Why does the crawl only reach the first link of the first index page, without even capturing the content there? It did fetch a robots.txt, though, whose content is:
User-Agent: *
Disallow: /solr
Disallow: /test
Disallow: /tools
Disallow: /exchange
Disallow: /agent
Disallow: /owner
Disallow: /center
Disallow: /cmsmanage
Disallow: /compatible
Disallow: /admin
Disallow: /Admin
Disallow: /install
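The robots.txt above does not block the index or detail pages, so it is not what stopped the crawl; Scrapy downloads robots.txt on its own, presumably because ROBOTSTXT_OBEY is enabled in settings.py. The real clue is the log line "Filtered offsite request to 'bj.lianjia.com'" together with 'offsite/filtered': 30 in the stats: the OffsiteMiddleware dropped all 30 extracted links because allowed_domains = [""] matches no host at all. A minimal sketch of the likely fix, with the domain taken from that log line (the redacted start URL stays whatever it was):

class BasicSpider(scrapy.Spider):
    name = "manual"
    # An empty string matches no host, so OffsiteMiddleware filters every
    # extracted link; list the domain that actually appears in the log.
    allowed_domains = ["bj.lianjia.com"]

With that change, the 30 requests that were being filtered should be scheduled instead of dropped, and parse_item() should start receiving responses.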