Crawling Zhihu user information with Scrapy and storing it in MongoDB

I. First, in settings.py, change ROBOTSTXT_OBEY from True to False

# Obey robots.txt rules
# Set to False so the spider may also fetch API resources that robots.txt disallows
ROBOTSTXT_OBEY = False

# Zhihu has anti-scraping measures, so a User-Agent and an authorization header must be added
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
    'authorization': 'Bearer 2|1:0|10:1521707334|4:z_c0|80:MS4xS1NYS0FnQUFBQUFtQUFBQVlBSlZUVWEzb0Z0ZDl0MGhaa0VJOWM0ODhiejhRenVUd0tURnFnPT0=|98bb6f4900c1b4b52ece0282393ede5e31046c9a90216e69dec1feece8086ee0'
}
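
Before launching the full crawl, it helps to confirm that the authorization token is still accepted. The snippet below is a minimal sketch, not part of the Scrapy project: it assumes the requests package is installed and that you substitute your own token, and it simply calls the same members API the spider uses.

# Quick sanity check of the headers against the Zhihu v4 members API.
# Assumes `requests` is installed; replace the token with your own.
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
    'authorization': 'Bearer <your z_c0 token>',
}
url = 'https://www.zhihu.com/api/v4/members/excited-vczh'
resp = requests.get(url, headers=headers, params={'include': 'answer_count,follower_count'})
print(resp.status_code)  # 200 means the headers were accepted; 401 usually means the token expired
if resp.ok:
    data = resp.json()
    print(data.get('name'), data.get('follower_count'))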

II. Write the spider project files

1. zhihu_user.py

# -*- coding: utf-8 -*-
import json

from scrapy import Spider, Request

from ..items import UserItem


class ZhihuUserSpider(Spider):
    name = 'zhihu_user'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    # seed user the crawl starts from
    start_user = 'excited-vczh'
    # v4 API endpoints for a user's profile, followees and followers
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    user_query = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'
    follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
    follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
    followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'

    def start_requests(self):
        # fetch the seed user's profile and the first page of their followees
        yield Request(self.user_url.format(user=self.start_user, include=self.user_query), self.parse_user)
        yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, offset=0, limit=20), callback=self.parse_query)

    def parse_user(self, response):
        # copy the fields declared in UserItem from the profile JSON,
        # then branch out to this user's followees and followers
        result = json.loads(response.text)
        item = UserItem()
        for field in item.fields:
            if field in result.keys():
                item[field] = result.get(field)
        yield item
        yield Request(self.follows_url.format(user=result.get('url_token'), include=self.follows_query, offset=0, limit=20), callback=self.parse_query)
        yield Request(self.followers_url.format(user=result.get('url_token'), include=self.followers_query, offset=0, limit=20), callback=self.parse_querys)

    def parse_query(self, response):
        # followees page: request every listed user's profile, then follow pagination
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), callback=self.parse_user)
        if 'paging' in results.keys() and results.get('paging').get('is_end') is False:
            next_page = results.get('paging').get('next')
            yield Request(next_page, self.parse_query)

    def parse_querys(self, response):
        # followers page: same logic as parse_query, but paginates the followers endpoint
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), callback=self.parse_user)
        if 'paging' in results.keys() and results.get('paging').get('is_end') is False:
            next_page = results.get('paging').get('next')
            yield Request(next_page, self.parse_querys)
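
For reference, parse_query and parse_querys both consume a followees/followers page shaped roughly like the illustration below. This is a trimmed sketch inferred from the fields the spider actually reads (data[*].url_token, paging.is_end, paging.next); the real API payload contains many more keys and the values here are made up.

# Trimmed illustration of one followees/followers page (made-up values);
# only the fields the spider touches are shown.
sample_page = {
    'data': [
        {'url_token': 'some-user', 'answer_count': 10, 'follower_count': 200},
        # ... up to `limit` entries per page
    ],
    'paging': {
        'is_end': False,
        'next': 'https://www.zhihu.com/api/v4/members/excited-vczh/followees?offset=20&limit=20',
    },
}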

2. items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy
from scrapy import Item, Field


class UserItem(Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    headline = Field()
    avatar_url = Field()
    name = Field()
    type = Field()
    url_token = Field()
    user_type = Field()

3. pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


import pymongo

class MongoPipeline(object):

    collection_name = 'scrapy_items'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # upsert keyed on url_token so each user is stored only once,
        # even when reached through several followee/follower lists
        self.db['user'].update_one({'url_token': item['url_token']}, {'$set': dict(item)}, upsert=True)
        # self.db[self.collection_name].insert_one(dict(item))
        return item
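
The upsert keyed on url_token is deliberate: the same user is typically reached through many different followee/follower lists, and update_one(..., upsert=True) stores each user once and simply refreshes the document instead of piling up duplicates.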

4. settings.py

BOT_NAME = 'zhihu'

SPIDER_MODULES = ['zhihu.spiders']
NEWSPIDER_MODULE = 'zhihu.spiders'
MONGO_URI='localhost'
MONGO_DATABASE='zhihu'

III. Finally, uncomment the ITEM_PIPELINES setting in settings.py so the MongoPipeline is enabled

ITEM_PIPELINES = {
    'zhihu.pipelines.MongoPipeline': 300,
}

IV. Results

(Screenshot: the crawled user documents stored in the MongoDB user collection.)
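
If you prefer to verify the data without a GUI client, a minimal check like the one below (assuming MongoDB is running locally and pymongo 3.7+ is installed) prints how many users have been stored and what one document looks like.

# Minimal check of the crawled data; assumes a local MongoDB and pymongo 3.7+.
import pymongo

client = pymongo.MongoClient('localhost')
db = client['zhihu']
print(db['user'].count_documents({}))  # number of distinct users stored so far
print(db['user'].find_one({}, {'_id': 0, 'name': 1, 'url_token': 1, 'headline': 1}))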
