Scrapy is an application framework written for crawling websites and extracting structured data. It can be used in a wide range of programs, from data mining to information processing or archiving historical data.
Architecture
(figure: Scrapy architecture diagram)
See also: the Scrapy manual (Chinese translation, "scrapy中文手册").
1. Install Scrapy
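Scrapy is normally installed with pip (this assumes a working Python and pip; on Linux the lxml dependency may additionally require the libxml2/libxslt development headers):

pip install Scrapy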
2. Create a Scrapy project
scrapy startproject tutorial
This generates the following project structure:
tutorial/
    scrapy.cfg            # deploy configuration file
    tutorial/             # the project's Python module
        __init__.py
        items.py          # item definitions go here
        pipelines.py      # item pipelines go here
        settings.py       # project settings
        spiders/          # directory for your spiders
            __init__.py
            ...
3. Create the spider file spiders/baike_spider.py with the following code
# -*- coding: utf-8 -*-
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from tutorial.items import BaikeItem

i = 0         # running count of scraped items
not_data = 0  # running count of items where no title was extracted

class BaikeSpider(CrawlSpider):
    name = "baike"
    # slow the crawl down to one request per second
    # download_delay = 1
    allowed_domains = ['baike.baidu.com']
    start_urls = [
        # NOTE: only the link text "互联网" (Internet) survived in the
        # original post; replace it with the actual Baidu Baike listing
        # URL the crawl should start from.
        "互联网"
    ]
    rules = [
        # follow the paginated listing pages
        Rule(SgmlLinkExtractor(allow=('/?limit=30&index=([\w]+)&offset=([\d]+)\#gotoList',))),
        # extract entry pages from the list and hand them to parse_item
        Rule(SgmlLinkExtractor(allow=('/view/',),
                               restrict_xpaths=('//div[@class="list"]')),
             callback='parse_item'),
    ]

    def parse_item(self, response):
        global i, not_data
        i += 1  # count scraped items
        print i
        item = BaikeItem()
        sel = HtmlXPathSelector(response)
        baike_url = str(response.url)
        baike_name = sel.xpath('//div[@id="sec-content0"]/h1/span[@class="lemmaTitleH1"]/text()').extract()
        baike_desc = sel.xpath('//div[@class="card-summary-content"]/div[@class="para"]/text()').extract()
        if not baike_name:
            not_data += 1  # count items where the title was not captured
            print not_data
        if not baike_desc:
            baike_desc = [u'not captured']
        item['title'] = [n.encode('utf-8') for n in baike_name]
        item['link'] = baike_url.encode('utf-8')
        item['desc'] = baike_desc[0].encode('utf-8')
        yield item
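Note: this spider targets the old Scrapy 0.x API. In Scrapy 1.0 and later, scrapy.contrib is deprecated and SgmlLinkExtractor was removed; the modern equivalents are scrapy.spiders.CrawlSpider/Rule, scrapy.linkextractors.LinkExtractor, and response.xpath() in place of HtmlXPathSelector.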
4. Write the item definition (items.py)
import scrapy

class BaikeItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()
5. Save the data to MySQL (pipelines.py)
# -*- coding: utf-8 -*-
# Define your item pipelines here.
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# (see the Scrapy documentation on item pipelines).
from scrapy import log
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors

class TutorialPipeline(object):
    def __init__(self):
        # asynchronous connection pool, so database writes
        # do not block the crawl
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
            db='test',
            user='root',
            passwd='',
            cursorclass=MySQLdb.cursors.DictCursor,
            charset='utf8',
            use_unicode=False
        )

    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item  # a pipeline must return the item for later stages

    def _conditional_insert(self, tx, item):
        # only insert rows where a title was actually extracted
        if item.get('title'):
            # title is a list of strings, so join it into a single value;
            # the column is named "descc" because DESC is reserved in MySQL
            tx.execute(
                "insert into test (id, title, link, descc) "
                "values (null, %s, %s, %s)",
                (''.join(item['title']), item['link'], item['desc'])
            )

    def handle_error(self, failure):
        # log failed inserts instead of swallowing them silently
        log.err(failure)
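As the header comment says, the pipeline only runs once it is registered in settings.py. A minimal sketch (the priority 300 is an arbitrary choice; very old Scrapy releases used a plain list here instead of a dict):

ITEM_PIPELINES = {
    'tutorial.pipelines.TutorialPipeline': 300,
}

The insert also assumes the test table already exists. A possible schema matching the columns used above (the exact column types are an assumption):

CREATE TABLE test (
    id INT AUTO_INCREMENT PRIMARY KEY,
    title VARCHAR(255),
    link VARCHAR(255),
    descc TEXT
) DEFAULT CHARSET=utf8;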
6. Run the code
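From the project root (the directory containing scrapy.cfg), run the spider by the name declared in its class, baike:

scrapy crawl baike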
7. Results