
Python Big Data: A Detailed Guide to Scraping Data from Web Pages


This article walks through a working example of scraping data from a web page with Python. It is shared here for your reference; the details are as follows:
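The example is a complete Scrapy project named jredu, with a spider that collects headline links from the Sohu front page. If you want to reproduce the layout from scratch, the skeleton can be generated with Scrapy's standard CLI (assuming Scrapy is installed; the project and spider names below match the ones used in this article):

scrapy startproject jredu        # creates jredu/ with settings.py, items.py, pipelines.py, ...
cd jredu
scrapy genspider tt sohu.com     # creates a spider stub under jredu/spiders/

The generated spider stub is then filled in as shown in myspider.py below.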

myspider.py  :

#!/usr/bin/python
# -*- coding:utf-8 -*-
from scrapy.spiders import Spider
from lxml import etree
from jredu.items import JreduItem

class JreduSpider(Spider):
    name = 'tt'  # the spider's name; required and must be unique
    allowed_domains = ['sohu.com']
    start_urls = [
        'http://www.sohu.com'
    ]

    def parse(self, response):
        content = response.body.decode('utf-8')
        dom = etree.HTML(content)
        for ul in dom.xpath("//div[@class='focus-news-box']/div[@class='list16']/ul"):
            lis = ul.xpath("./li")
            for li in lis:
                item = JreduItem()  # create an item object
                if ul.index(li) == 0:
                    # the first <li> carries its headline inside <a><strong>
                    strong = li.xpath("./a/strong/text()")
                    item['title'] = strong[0]
                    item['href'] = li.xpath("./a/@href")[0]
                else:
                    la = li.xpath("./a[last()]/text()")
                    item['title'] = la[0]
                    item['href'] = li.xpath("./a[last()]/@href")[0]  # fixed: '@href', not 'href'
                yield item
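A side note: the detour through lxml is not strictly required, since a Scrapy response exposes its own XPath selectors. The sketch below (an alternative, not part of the original project) shows the same extraction written with response.xpath and enumerate; it would replace the parse method inside JreduSpider above:

    # alternative parse() for JreduSpider, using Scrapy's built-in selectors
    def parse(self, response):
        for ul in response.xpath("//div[@class='focus-news-box']/div[@class='list16']/ul"):
            for i, li in enumerate(ul.xpath("./li")):
                item = JreduItem()
                if i == 0:
                    # first entry: headline sits in <a><strong>
                    item['title'] = li.xpath("./a/strong/text()").extract_first()
                    item['href'] = li.xpath("./a/@href").extract_first()
                else:
                    item['title'] = li.xpath("./a[last()]/text()").extract_first()
                    item['href'] = li.xpath("./a[last()]/@href").extract_first()
                yield item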

items.py    :

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class JreduItem(scrapy.Item):  # comparable to an entity class in Java
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()  # create a Field object for each attribute
    href = scrapy.Field()
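Because scrapy.Item behaves like a dict restricted to its declared fields, items are read and written with subscript access. A quick illustration (the values are made up for the example):

from jredu.items import JreduItem

item = JreduItem()
item['title'] = u'some headline'       # ok: 'title' is a declared Field
item['href'] = 'http://www.sohu.com/'  # ok: 'href' is a declared Field
print(dict(item))                      # {'title': u'some headline', 'href': 'http://www.sohu.com/'}
# item['date'] = '2019-11-25'          # would raise KeyError: 'date' is not a declared field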

middlewares.py  :

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

class JreduSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

pipelines.py  :

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import codecs
import json

class JreduPipeline(object):
    def __init__(self):
        # open the UTF-8 output file once, when the pipeline is created
        self.fill = codecs.open("data.txt", encoding="utf-8", mode="w")

    def process_item(self, item, spider):
        # serialize each item as one JSON object per line
        # (fixed: the newline is "\n", not "/n"; ensure_ascii=False keeps Chinese readable)
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.fill.write(line)
        return item

    def close_spider(self, spider):
        # close the file when the spider finishes
        self.fill.close()
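As an aside, for plain JSON output like this, Scrapy's built-in feed exports can do the job without any custom pipeline; running the spider as

scrapy crawl tt -o data.json

writes all yielded items to data.json (the -o option is standard Scrapy). The hand-written pipeline above remains useful as a template for custom post-processing.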

settings.py   :

# -*- coding: utf-8 -*-

# Scrapy settings for jredu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#   http://doc.scrapy.org/en/latest/topics/settings.html
#   http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#   http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'jredu'

SPIDER_MODULES = ['jredu.spiders']
NEWSPIDER_MODULE = 'jredu.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jredu (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#  'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#  'jredu.middlewares.JreduSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#  'jredu.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#  'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
  'jredu.pipelines.JreduPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
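One practical caveat (our note, not from the original article): large portals sometimes serve reduced markup to Scrapy's default user agent, and ROBOTSTXT_OBEY = True can cause some pages to be skipped. If the spider runs but yields nothing, a common first step is to set a browser-like user agent and a polite delay in settings.py; the values below are illustrative:

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'  # illustrative browser-like UA
DOWNLOAD_DELAY = 1  # one request per second per domain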

Finally, the project needs an entry point to launch the crawl:

main.py     :

#!/usr/bin/python
# -*- coding:utf-8 -*-
# entry point for running the spider
from scrapy import cmdline

cmdline.execute("scrapy crawl tt".split())
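With all the files in place, start the crawl either by running this script (python main.py) from the project root, i.e. the directory containing scrapy.cfg, or by invoking scrapy crawl tt directly on the command line; either way, the pipeline writes one JSON object per line to data.txt.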


We hope this article proves helpful for your Python programming.
