Study notes on Cui Qingcai's (崔庆才) web-scraping course.

I. Scraping in Practice

1. Scraping Zhihu User Information

Suppose a user has three followers, each of those followers has three followers of their own, and so on: the number of reachable users grows quickly. We can therefore crawl recursively, and this works for any user who has at least some followers or followees. Users with zero followers and zero followees can simply be skipped.

2. Approach

(1) Choose a starting user

Pick a well-known user (a "big V") with a large number of followees or followers as the starting point of the crawl.

(2) Fetch the follower and followee lists

Use the Zhihu API to fetch that user's follower list and followee list.

(3) Fetch the details of the listed users

Use the Zhihu API to fetch detailed information about every user in those lists.

(4) Fetch each user's followers and followees

Then, for every user in those lists, fetch their own follower and followee lists in turn, so the crawl expands recursively (see the sketch below).
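To make the idea concrete, here is a minimal, framework-free sketch of the traversal. It is only an illustration, not the course's code: get_related_users is a stand-in for the real follower/followee API calls, and the toy graph replaces real data. The seen set plays the role that Scrapy's built-in duplicate filter plays in the actual spider.

from collections import deque

# Toy "social graph": user -> users reachable through followers/followees.
GRAPH = {
    'A': ['B', 'C'],
    'B': ['A', 'D'],
    'C': ['D'],
    'D': [],
}

def get_related_users(user):
    # Stand-in for the real Zhihu follower/followee API calls.
    return GRAPH.get(user, [])

def crawl(start_user):
    seen = set()                 # users already visited (deduplication)
    queue = deque([start_user])
    while queue:
        user = queue.popleft()
        if user in seen:
            continue
        seen.add(user)
        print('crawling', user)  # here the real spider fetches and stores the profile
        queue.extend(get_related_users(user))
    return seen

if __name__ == '__main__':
    crawl('A')  # visits A, B, C, D exactly once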

3. Implementation

Use the well-known user 轮子哥 (excited-vczh) as the starting point: https://www.zhihu.com/people/excited-vczh/activities

From each user, recurse along two directions, the followee list and the follower list, to collect every user's information.
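For orientation, the followee/follower endpoints return JSON roughly shaped like the Python dict below. This is a trimmed, hypothetical example: only the fields the spider actually reads (data[*].url_token, paging.is_end, paging.next) are shown, and the concrete values are invented.

# Hypothetical, trimmed shape of a followees/followers API response;
# only the fields the spider reads are shown, values are made up.
example_response = {
    'data': [
        {'url_token': 'some-user'},       # one entry per followee/follower
        {'url_token': 'another-user'},
    ],
    'paging': {
        'is_end': False,                  # False means there are more pages
        'next': 'https://www.zhihu.com/api/v4/members/excited-vczh/followees?offset=20&limit=20',
    },
}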

zhihuuser / scrapy.cfg

[settings]
default = zhihuuser.settings

[deploy]
#url = http://localhost:6800/
project = zhihuuser

zhihuuser / zhihuuser / items.py

# -*- coding: utf-8 -*-

from scrapy import Item, Field


class UserItem(Item):  # data structure that holds the scraped user fields
    # define the fields for your item here like:
    id = Field()
    name = Field()
    avatar_url = Field()
    headline = Field()
    description = Field()
    url = Field()
    url_token = Field()
    gender = Field()
    cover_url = Field()
    type = Field()
    badge = Field()

    answer_count = Field()
    articles_count = Field()
    commercial_question_count = Field()
    favorite_count = Field()
    favorited_count = Field()
    follower_count = Field()
    following_columns_count = Field()
    following_count = Field()
    pins_count = Field()
    question_count = Field()
    thank_from_count = Field()
    thank_to_count = Field()
    thanked_count = Field()
    vote_from_count = Field()
    vote_to_count = Field()
    voteup_count = Field()
    following_favlists_count = Field()
    following_question_count = Field()
    following_topic_count = Field()
    marked_answers_count = Field()
    mutual_followees_count = Field()
    hosted_live_count = Field()
    participated_live_count = Field()

    locations = Field()
    educations = Field()
    employments = Field()

zhihuuser / zhihuuser / middlewares.py

# -*- coding: utf-8 -*-

from scrapy import signals


class ZhihuSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # Scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

zhihuuser / zhihuuser / pipelines.py

# -*- coding: utf-8 -*-

import pymongo


class ZhihuPipeline(object):
    def process_item(self, item, spider):
        return item


class MongoPipeline(object):  # store the scraped items in MongoDB
    collection_name = 'users'  # name of the MongoDB collection

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):  # read MONGO_URI and MONGO_DATABASE from settings
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):  # connect to MongoDB when the spider starts
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):  # close the connection when the spider finishes
        self.client.close()

    def process_item(self, item, spider):
        # update() acts as deduplicate/insert/update in one call: the first argument
        # is the query, the second is the document to write, and the third (True)
        # enables upsert, i.e. update the matching document or insert a new one.
        self.db[self.collection_name].update({'url_token': item['url_token']}, dict(item), True)
        return item
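Note that Collection.update() is the legacy pymongo API; it was deprecated in pymongo 3.x and removed in 4.x. A minimal sketch of process_item with the current API, assuming the same url_token-based upsert behaviour, would look like this:

    def process_item(self, item, spider):
        # Same upsert semantics with the modern pymongo API: replace the matching
        # document, or insert it if no document with this url_token exists yet.
        self.db[self.collection_name].replace_one(
            {'url_token': item['url_token']}, dict(item), upsert=True
        )
        return item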

zhihuuser / zhihuuser / settings.py

# -*- coding: utf-8 -*-

BOT_NAME = 'zhihuuser'

SPIDER_MODULES = ['zhihuuser.spiders']
NEWSPIDER_MODULE = 'zhihuuser.spiders'

ROBOTSTXT_OBEY = False  # do not obey robots.txt, otherwise some pages cannot be crawled

DEFAULT_REQUEST_HEADERS = {  # Zhihu requires both of these headers, otherwise requests are rejected
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
    'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}

ITEM_PIPELINES = {
    'zhihuuser.pipelines.MongoPipeline': 300,  # enable MongoPipeline
    # 'scrapy_redis.pipelines.RedisPipeline': 301
}

MONGO_URI = 'localhost'  # MongoDB host
MONGO_DATABASE = 'zhihu'  # MongoDB database name

zhihuuser / zhihuuser / spiders / zhihu.py

# -*- coding: utf-8 -*-

import json

from scrapy import Spider, Request
from zhihuuser.items import UserItem


class ZhihuSpider(Spider):
    name = "zhihu"
    allowed_domains = ["www.zhihu.com"]
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
    start_user = 'excited-vczh'
    user_query = 'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics'
    follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'

    def start_requests(self):  # initial requests
        # request the starting user's profile, parsed by parse_user
        yield Request(self.user_url.format(user=self.start_user, include=self.user_query), self.parse_user)

        # request the starting user's followee list, parsed by parse_follows
        yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, limit=20, offset=0),
                      self.parse_follows)

        # request the starting user's follower list, parsed by parse_followers
        yield Request(self.followers_url.format(user=self.start_user, include=self.followers_query, limit=20, offset=0),
                      self.parse_followers)

    def parse_user(self, response):  # parse one user's profile
        result = json.loads(response.text)
        item = UserItem()

        for field in item.fields:  # copy matching fields from the JSON response into the item
            if field in result.keys():
                item[field] = result.get(field)
        yield item

        # request this user's followee list, parsed by parse_follows
        yield Request(
            self.follows_url.format(user=result.get('url_token'), include=self.follows_query, limit=20, offset=0),
            self.parse_follows)

        # request this user's follower list, parsed by parse_followers
        yield Request(
            self.followers_url.format(user=result.get('url_token'), include=self.followers_query, limit=20, offset=0),
            self.parse_followers)

    def parse_follows(self, response):  # parse a followee list
        results = json.loads(response.text)

        if 'data' in results.keys():
            for result in results.get('data'):
                # request each followee's profile, parsed by parse_user
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              self.parse_user)

        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            # request the next page of followees, parsed by parse_follows
            yield Request(next_page,
                          self.parse_follows)

    def parse_followers(self, response):  # parse a follower list
        results = json.loads(response.text)

        if 'data' in results.keys():
            for result in results.get('data'):
                # request each follower's profile, parsed by parse_user
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              self.parse_user)

        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            # request the next page of followers, parsed by parse_followers
            yield Request(next_page,
                          self.parse_followers)
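To run the crawl, execute scrapy crawl zhihu from the project root. Once some items have been written, the stored users can be inspected with a small pymongo snippet; this is only a quick check, assuming the MONGO_URI and MONGO_DATABASE values configured in settings.py above:

import pymongo

client = pymongo.MongoClient('localhost')  # MONGO_URI from settings.py
db = client['zhihu']                       # MONGO_DATABASE from settings.py
users = db['users']                        # collection_name from MongoPipeline

print(users.count_documents({}))           # number of users stored so far
print(users.find_one({}, {'name': 1, 'url_token': 1, 'follower_count': 1}))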

To be continued…
