一、介绍
本例子用scrapy-splash通过搜狗搜索引擎,输入给定关键字抓取资讯信息。
给定关键字:数字;融合;电视
抓取信息内容如下:
1、资讯标题
2、资讯链接
3、资讯时间
4、资讯来源
二、网站信息
三、数据抓取
针对上面的网站信息,来进行抓取
1、首先抓取信息列表
抓取代码:sels = site.xpath('//div[@class="vrwrap"]')
2、抓取标题
抓取代码:titles = sel.xpath('.//h3[@class="vrTitle"]/a/text()|.//h3[@class="vrTitle"]/a/em/text()')
3、抓取链接
抓取代码:it['url'] = sel.xpath('.//h3[@class="vrTitle"]/a/@href')[0].extract()
4、抓取日期
抓取代码:strdate = sel.xpath('.//p[@class="news-from"]/text()')
5、抓取来源
抓取代码:sources = sel.xpath('.//p[@class="news-from"]/text()')
四、完整代码
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.spiders import Spider
from scrapy_splash import SplashRequest
from scrapy_splash import SplashMiddleware
from scrapy.http import Request, HtmlResponse
from scrapy.selector import Selector
from scrapy_splash import SplashRequest
from scrapy_ott.items import SplashTestItem
from scrapy_ott.mongoDB import mongoDbBase
import scrapy_ott.IniFile
import sys
import os
import re
import timereload(sys)
sys.setdefaultencoding(&#39;utf-8&#39;)# sys.stdout &#61; open(&#39;output.txt&#39;, &#39;w&#39;)class sougouSpider(Spider):name &#61; &#39;sougou&#39;db &#61; mongoDbBase()configfile &#61; os.path.join(os.getcwd(), &#39;scrapy_ott\setting.conf&#39;)cf &#61; scrapy_ott.IniFile.ConfigFile(configfile)information_keywords &#61; cf.GetValue("section", "information_keywords")information_wordlist &#61; information_keywords.split(&#39;;&#39;)# websearchurl &#61; cf.GetValue("sougou", "websearchurl")start_urls &#61; []for word in information_wordlist:start_urls.append(&#39;http://news.sogou.com/news?page&#61;1&query&#61;&#39; &#43; word)start_urls.append(&#39;http://news.sogou.com/news?page&#61;2&query&#61;&#39; &#43; word)start_urls.append(&#39;http://news.sogou.com/news?page&#61;3&query&#61;&#39; &#43; word)# request需要封装成SplashRequestdef start_requests(self):for url in self.start_urls:index &#61; url.rfind(&#39;&#61;&#39;)yield SplashRequest(url, self.parse, args&#61;{&#39;wait&#39;: &#39;2&#39;},meta&#61;{&#39;keyword&#39;: url[index &#43; 1:]})def Comapre_to_days(self,leftdate, rightdate):&#39;&#39;&#39;比较连个字符串日期&#xff0c;左边日期大于右边日期多少天:param leftdate: 格式&#xff1a;2017-04-15:param rightdate: 格式&#xff1a;2017-04-15:return: 天数&#39;&#39;&#39;l_time &#61; time.mktime(time.strptime(leftdate, &#39;%Y-%m-%d&#39;))r_time &#61; time.mktime(time.strptime(rightdate, &#39;%Y-%m-%d&#39;))result &#61; int(l_time - r_time) / 86400return resultdef date_isValid(self, strDateText):&#39;&#39;&#39;判断日期时间字符串是否合法&#xff1a;如果给定时间大于当前时间是合法&#xff0c;或者说当前时间给定的范围内:param strDateText: 四种格式 &#39;慧聪网 7小时前&#39;; &#39;新浪游戏 29分钟前&#39; ; &#39;中国行业研究网 2017-6-13&#39;:return: True:合法&#xff1b;False:不合法&#39;&#39;&#39;currentDate &#61; time.strftime(&#39;%Y-%m-%d&#39;)source &#61; strDateText.split(&#39; &#39;)[0]if strDateText.find(&#39;分钟前&#39;) > 0 :return True,source,currentDateelif strDateText.find(&#39;小时前&#39;) > 0:datePattern &#61; re.compile(r&#39;\d{1,2}&#39;)ch &#61; int(time.strftime(&#39;%H&#39;)) # 
当前小时数strDate &#61; re.findall(datePattern, strDateText)if len(strDate) &#61;&#61; 1:if int(strDate[0]) <&#61; ch: # 只有小于当前小时数&#xff0c;才认为是今天return True,source,currentDateelse:datePattern &#61; re.compile(r&#39;\d{4}-\d{1,2}-\d{1,2}&#39;)strDate &#61; re.findall(datePattern, strDateText)if len(strDate) &#61;&#61; 1:if self.Comapre_to_days(currentDate, strDate[0]) &#61;&#61; 0:return True,source,currentDatereturn False, &#39;&#39;,&#39;&#39;def parse(self, response):keyword &#61; response.meta[&#39;keyword&#39;]site &#61; Selector(response)sels &#61; site.xpath(&#39;//div[&#64;class&#61;"vrwrap"]&#39;)item_list &#61; []for sel in sels:strdate &#61; sel.xpath(&#39;.//p[&#64;class&#61;"news-from"]/text()&#39;)if len(strdate)>0:flag, source, date &#61; self.date_isValid(strdate[0].extract())if flag:titles &#61; sel.xpath(&#39;.//h3[&#64;class&#61;"vrTitle"]/a/text()|.//h3[&#64;class&#61;"vrTitle"]/a/em/text()&#39;)title &#61; &#39;&#39;for t in titles:title &#43;&#61; str(t.extract())if title.find(keyword) > -1:it &#61; SplashTestItem()it[&#39;source&#39;] &#61; sourceit[&#39;url&#39;] &#61; sel.xpath(&#39;.//h3[&#64;class&#61;"vrTitle"]/a/&#64;href&#39;)[0].extract()it[&#39;date&#39;] &#61; dateit[&#39;keyword&#39;] &#61; keywordit[&#39;title&#39;] &#61; titleitem_list.append(it)if len(item_list)>0:self.db.SaveInformations(item_list)return item_list