# Disclaimer: This code is for learning and research purposes only. Users must observe the following principles:
# 1. Do not use it for any commercial purpose.
# 2. Comply with the target platform's terms of service and robots.txt rules.
# 3. Do not perform large-scale crawling or disrupt the platform's normal operation.
# 4. Keep the request rate reasonable to avoid placing unnecessary load on the target platform.
# 5. Do not use it for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the full license terms.
# By using this code you agree to abide by the principles above and all terms in the LICENSE.

# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/23 15:41
# @Desc : Weibo crawler main flow

import asyncio
import os
import random
from asyncio import Task
from typing import Dict, List, Optional, Tuple

from playwright.async_api import (BrowserContext, BrowserType, Page,
                                  async_playwright)

import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import weibo as weibo_store
from tools import utils
from var import crawler_type_var, source_keyword_var

from .client import WeiboClient
from .exception import DataFetchError
from .field import SearchType
from .help import filter_search_result_card
from .login import WeiboLogin


class WeiboCrawler(AbstractCrawler):
    context_page: Page
    wb_client: WeiboClient
    browser_context: BrowserContext

    def __init__(self):
        self.index_url = "https://www.weibo.com"
        self.mobile_index_url = "https://m.weibo.cn"
        self.user_agent = utils.get_user_agent()
        self.mobile_user_agent = utils.get_mobile_user_agent()

    async def start(self):
        playwright_proxy_format, httpx_proxy_format = None, None
        if config.ENABLE_IP_PROXY:
            ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
            ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
            playwright_proxy_format, httpx_proxy_format = self.format_proxy_info(ip_proxy_info)
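            # Note: the proxy IP is fetched once here and reused, in both the
            # Playwright and the httpx format, for the rest of the session.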

        async with async_playwright() as playwright:
            # Launch a browser context.
            chromium = playwright.chromium
            self.browser_context = await self.launch_browser(
                chromium,
                None,
                self.mobile_user_agent,
                headless=config.HEADLESS
            )
            # stealth.min.js is a js script to prevent the website from detecting the crawler.
            await self.browser_context.add_init_script(path="libs/stealth.min.js")
            self.context_page = await self.browser_context.new_page()
            await self.context_page.goto(self.mobile_index_url)

            # Create a client to interact with the weibo website.
            self.wb_client = await self.create_weibo_client(httpx_proxy_format)
            if not await self.wb_client.pong():
                login_obj = WeiboLogin(
                    login_type=config.LOGIN_TYPE,
                    login_phone="",  # your phone number
                    browser_context=self.browser_context,
                    context_page=self.context_page,
                    cookie_str=config.COOKIES
                )
                await login_obj.begin()

                # After a successful login, redirect to the mobile site and refresh the cookies for the mobile platform.
                utils.logger.info("[WeiboCrawler.start] redirect weibo mobile homepage and update cookies on mobile platform")
                await self.context_page.goto(self.mobile_index_url)
                await asyncio.sleep(2)
                await self.wb_client.update_cookies(browser_context=self.browser_context)

            crawler_type_var.set(config.CRAWLER_TYPE)
            if config.CRAWLER_TYPE == "search":
                # Search for notes and retrieve their comment information.
                await self.search()
            elif config.CRAWLER_TYPE == "detail":
                # Get the information and comments of the specified posts.
                await self.get_specified_notes()
            elif config.CRAWLER_TYPE == "creator":
                # Get creators' information and their notes and comments.
                await self.get_creators_and_notes()
            else:
                pass

            utils.logger.info("[WeiboCrawler.start] Weibo Crawler finished ...")

    async def search(self):
        """
        search weibo notes with keywords
        :return:
        """
        utils.logger.info("[WeiboCrawler.search] Begin searching weibo keywords")
        weibo_limit_count = 10  # weibo's search API returns a fixed 10 notes per page
        if config.CRAWLER_MAX_NOTES_COUNT < weibo_limit_count:
            config.CRAWLER_MAX_NOTES_COUNT = weibo_limit_count
        start_page = config.START_PAGE
        for keyword in config.KEYWORDS.split(","):
            source_keyword_var.set(keyword)
            utils.logger.info(f"[WeiboCrawler.search] Current search keyword: {keyword}")
            page = 1
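            # Pagination cap: each page yields weibo_limit_count (10) notes, so with
            # e.g. START_PAGE=1 and CRAWLER_MAX_NOTES_COUNT=100 the loop below visits
            # pages 1 through 10. (Illustrative numbers, not project defaults.)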
            while (page - start_page + 1) * weibo_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
                if page < start_page:
                    utils.logger.info(f"[WeiboCrawler.search] Skip page: {page}")
                    page += 1
                    continue
                utils.logger.info(f"[WeiboCrawler.search] search weibo keyword: {keyword}, page: {page}")
                search_res = await self.wb_client.get_note_by_keyword(
                    keyword=keyword,
                    page=page,
                    search_type=SearchType.DEFAULT
                )
                note_id_list: List[str] = []
                note_list = filter_search_result_card(search_res.get("cards"))
                for note_item in note_list:
                    if note_item:
                        mblog: Dict = note_item.get("mblog")
                        if mblog:
                            note_id_list.append(mblog.get("id"))
                            await weibo_store.update_weibo_note(note_item)
                            await self.get_note_images(mblog)
                page += 1
                await self.batch_get_notes_comments(note_id_list)

    async def get_specified_notes(self):
        """
        get specified notes info
        :return:
        """
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        task_list = [
            self.get_note_info_task(note_id=note_id, semaphore=semaphore) for note_id in
            config.WEIBO_SPECIFIED_ID_LIST
        ]
        note_details = await asyncio.gather(*task_list)
        for note_item in note_details:
            if note_item:
                await weibo_store.update_weibo_note(note_item)
        await self.batch_get_notes_comments(config.WEIBO_SPECIFIED_ID_LIST)

    async def get_note_info_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
        """
        Get note detail task
        :param note_id:
        :param semaphore:
        :return:
        """
        async with semaphore:
            try:
                result = await self.wb_client.get_note_info_by_id(note_id)
                return result
            except DataFetchError as ex:
                utils.logger.error(f"[WeiboCrawler.get_note_info_task] Get note detail error: {ex}")
                return None
            except KeyError as ex:
                utils.logger.error(
                    f"[WeiboCrawler.get_note_info_task] could not find note detail, note_id:{note_id}, err: {ex}")
                return None

    async def batch_get_notes_comments(self, note_id_list: List[str]):
        """
        batch get notes comments
        :param note_id_list:
        :return:
        """
        if not config.ENABLE_GET_COMMENTS:
            utils.logger.info("[WeiboCrawler.batch_get_notes_comments] Crawling comment mode is not enabled")
            return

        utils.logger.info(f"[WeiboCrawler.batch_get_notes_comments] note ids:{note_id_list}")
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
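        # The semaphore bounds concurrency: at most MAX_CONCURRENCY_NUM comment
        # tasks run at once, even though all tasks are created up front below.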
        task_list: List[Task] = []
        for note_id in note_id_list:
            task = asyncio.create_task(self.get_note_comments(note_id, semaphore), name=note_id)
            task_list.append(task)
        await asyncio.gather(*task_list)

    async def get_note_comments(self, note_id: str, semaphore: asyncio.Semaphore):
        """
        get comments for a note id
        :param note_id:
        :param semaphore:
        :return:
        """
        async with semaphore:
            try:
                utils.logger.info(f"[WeiboCrawler.get_note_comments] begin get note_id: {note_id} comments ...")
                await self.wb_client.get_note_all_comments(
                    note_id=note_id,
                    crawl_interval=random.randint(1, 3),  # weibo rate-limits its API aggressively, so use a longer delay
                    callback=weibo_store.batch_update_weibo_note_comments,
                    max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES
                )
            except DataFetchError as ex:
                utils.logger.error(f"[WeiboCrawler.get_note_comments] get note_id: {note_id} comment error: {ex}")
            except Exception as e:
                utils.logger.error(f"[WeiboCrawler.get_note_comments] may have been blocked, err:{e}")

    async def get_note_images(self, mblog: Dict):
        """
        get note images
        :param mblog:
        :return:
        """
        if not config.ENABLE_GET_IMAGES:
            utils.logger.info("[WeiboCrawler.get_note_images] Crawling image mode is not enabled")
            return

        pics: Dict = mblog.get("pics")
        if not pics:
            return
        for pic in pics:
            url = pic.get("url")
            if not url:
                continue
            content = await self.wb_client.get_note_image(url)
            if content is not None:
                extension_file_name = url.split(".")[-1]
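                # Note: this assumes the image URL ends with its file extension
                # (e.g. ".../abc.jpg" -> "jpg"); any query string would be included too.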
                await weibo_store.update_weibo_note_image(pic["pid"], content, extension_file_name)

    async def get_creators_and_notes(self) -> None:
        """
        Get creators' information and their notes and comments
        Returns:

        """
        utils.logger.info("[WeiboCrawler.get_creators_and_notes] Begin get weibo creators")
        for user_id in config.WEIBO_CREATOR_ID_LIST:
            creator_info_res: Dict = await self.wb_client.get_creator_info_by_id(creator_id=user_id)
            if creator_info_res:
                creator_info: Dict = creator_info_res.get("userInfo", {})
                utils.logger.info(f"[WeiboCrawler.get_creators_and_notes] creator info: {creator_info}")
                if not creator_info:
                    raise DataFetchError("Get creator info error")
                await weibo_store.save_creator(user_id, user_info=creator_info)

                # Get all note information of the creator
                all_notes_list = await self.wb_client.get_all_notes_by_creator_id(
                    creator_id=user_id,
                    container_id=creator_info_res.get("lfid_container_id"),
                    crawl_interval=0,
                    callback=weibo_store.batch_update_weibo_notes
                )

                note_ids = [note_item.get("mblog", {}).get("id") for note_item in all_notes_list if
                            note_item.get("mblog", {}).get("id")]
                await self.batch_get_notes_comments(note_ids)

            else:
                utils.logger.error(
                    f"[WeiboCrawler.get_creators_and_notes] get creator info error, creator_id:{user_id}")

    async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
        """Create weibo client"""
        utils.logger.info("[WeiboCrawler.create_weibo_client] Begin create weibo API client ...")
        cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
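        # The client talks to the m.weibo.cn API, so it reuses the logged-in
        # browser cookies and a mobile User-Agent to look like the mobile site.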
        weibo_client_obj = WeiboClient(
            proxies=httpx_proxy,
            headers={
                "User-Agent": utils.get_mobile_user_agent(),
                "Cookie": cookie_str,
                "Origin": "https://m.weibo.cn",
                "Referer": "https://m.weibo.cn",
                "Content-Type": "application/json;charset=UTF-8"
            },
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
        )
        return weibo_client_obj

    @staticmethod
    def format_proxy_info(ip_proxy_info: IpInfoModel) -> Tuple[Optional[Dict], Optional[Dict]]:
        """format proxy info for playwright and httpx"""
        playwright_proxy = {
            "server": f"{ip_proxy_info.protocol}{ip_proxy_info.ip}:{ip_proxy_info.port}",
            "username": ip_proxy_info.user,
            "password": ip_proxy_info.password,
        }
        httpx_proxy = {
            f"{ip_proxy_info.protocol}": f"http://{ip_proxy_info.user}:{ip_proxy_info.password}@{ip_proxy_info.ip}:{ip_proxy_info.port}"
        }
        return playwright_proxy, httpx_proxy
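        # Illustrative shapes (hypothetical values), assuming protocol is e.g. "http://":
        #   playwright_proxy -> {"server": "http://1.2.3.4:8000", "username": "u", "password": "p"}
        #   httpx_proxy      -> {"http://": "http://u:p@1.2.3.4:8000"}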

    async def launch_browser(
            self,
            chromium: BrowserType,
            playwright_proxy: Optional[Dict],
            user_agent: Optional[str],
            headless: bool = True
    ) -> BrowserContext:
        """Launch browser and create browser context"""
        utils.logger.info("[WeiboCrawler.launch_browser] Begin create browser context ...")
        if config.SAVE_LOGIN_STATE:
            user_data_dir = os.path.join(os.getcwd(), "browser_data",
                                         config.USER_DATA_DIR % config.PLATFORM)  # type: ignore
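            # A persistent context stores cookies/local storage under user_data_dir,
            # so the login state survives across crawler restarts.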
            browser_context = await chromium.launch_persistent_context(
                user_data_dir=user_data_dir,
                accept_downloads=True,
                headless=headless,
                proxy=playwright_proxy,  # type: ignore
                viewport={"width": 1920, "height": 1080},
                user_agent=user_agent
            )
            return browser_context
        else:
            browser = await chromium.launch(headless=headless, proxy=playwright_proxy)  # type: ignore
            browser_context = await browser.new_context(
                viewport={"width": 1920, "height": 1080},
                user_agent=user_agent
            )
            return browser_context
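

# A minimal usage sketch (assumption: mirrors how this project's entry point
# drives its crawlers; not part of this module):
#
#   import asyncio
#   crawler = WeiboCrawler()
#   asyncio.run(crawler.start())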