feat: Baidu Tieba first-level comments done

Relakkes 2024-08-07 02:34:56 +08:00
parent 3c98808409
commit 026d81e131
8 changed files with 1042 additions and 91 deletions

View File

@@ -46,7 +46,7 @@ MAX_CONCURRENCY_NUM = 1
ENABLE_GET_IMAGES = False
# Whether to enable comment crawling mode; disabled by default
ENABLE_GET_COMMENTS = False
ENABLE_GET_COMMENTS = True
# Whether to enable second-level (sub-)comment crawling mode; disabled by default
# If an older version of the project already uses the db, refer to schema/tables.sql line 287 to add the new table fields
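For reference, a minimal config sketch (not part of this commit; the file path and exact values are assumptions) showing how the flags touched here interact:

    # config/base_config.py -- illustrative values only
    MAX_CONCURRENCY_NUM = 1          # cap on concurrent crawl tasks
    ENABLE_GET_IMAGES = False        # image downloading stays off
    ENABLE_GET_COMMENTS = True       # crawl first-level comments (what this commit enables)
    ENABLE_GET_SUB_COMMENTS = False  # sub-comment crawling is still a TODO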

View File

@@ -9,8 +9,9 @@ from playwright.async_api import BrowserContext
from tenacity import (RetryError, retry, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractApiClient
from model.m_baidu_tieba import TiebaNote
from model.m_baidu_tieba import TiebaNote, TiebaComment
from proxy.proxy_ip_pool import ProxyIpPool
from tools import utils
@@ -195,41 +196,38 @@ class BaiduTieBaClient(AbstractApiClient):
page_content = await self.get(uri, return_ori_content=True)
return self._page_extractor.extract_note_detail(page_content)
async def get_note_all_comments(self, note_id: str, crawl_interval: float = 1.0,
callback: Optional[Callable] = None) -> List[Dict]:
async def get_note_all_comments(self, note_detail: TiebaNote, crawl_interval: float = 1.0,
callback: Optional[Callable] = None) -> List[TiebaComment]:
"""
Get all first-level comments under the given note; this method keeps paging until every comment of the note has been fetched
Args:
note_id: note ID
note_detail: note detail object
crawl_interval: delay between page requests, in seconds
callback: callback invoked after each page of comments has been crawled
Returns:
"""
uri = f"/p/{note_id}"
result = []
comments_has_more = True
comments_cursor = 1
while comments_has_more:
comments_res = await self.get(uri, params={"pn": comments_cursor})
comments_has_more = comments_res.get("has_more", False)
comments_cursor = comments_res.get("cursor", "")
if "comments" not in comments_res:
utils.logger.info(
f"[XiaoHongShuClient.get_note_all_comments] No 'comments' key found in response: {comments_res}")
uri = f"/p/{note_detail.note_id}"
result: List[TiebaComment] = []
current_page = 1
while note_detail.total_replay_page >= current_page:
params = {
"pn": current_page
}
page_content = await self.get(uri, params=params, return_ori_content=True)
comments = self._page_extractor.extract_tieba_note_parment_comments(page_content, note_id=note_detail.note_id)
if not comments:
break
comments = comments_res["comments"]
if callback:
await callback(note_id, comments)
await asyncio.sleep(crawl_interval)
await callback(note_detail.note_id, comments)
result.extend(comments)
sub_comments = await self.get_comments_all_sub_comments(comments, crawl_interval, callback)
result.extend(sub_comments)
await asyncio.sleep(crawl_interval)
current_page += 1
return result
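Below is a minimal usage sketch of the new paging loop, assuming an already-initialized BaiduTieBaClient and a TiebaNote whose total_replay_page was filled in by extract_note_detail; the helper name is illustrative:

    async def crawl_comments_for_note(client, note_detail):
        # the callback receives (note_id, comments) once per crawled page
        async def on_page(note_id, comments):
            print(f"note {note_id}: {len(comments)} first-level comments on this page")

        # pages through pn=1..note_detail.total_replay_page, sleeping crawl_interval between pages
        return await client.get_note_all_comments(
            note_detail=note_detail,
            crawl_interval=1.0,
            callback=on_page,
        )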
async def get_comments_all_sub_comments(self, comments: List[Dict], crawl_interval: float = 1.0,
callback: Optional[Callable] = None) -> List[Dict]:
callback: Optional[Callable] = None) -> List[TiebaComment]:
"""
Get all sub-comments under the given comments
Args:
@@ -240,12 +238,7 @@ class BaiduTieBaClient(AbstractApiClient):
Returns:
"""
result = []
for comment in comments:
sub_comments = comment.get("comments")
if sub_comments:
if callback:
await callback(comment.get("id"), sub_comments)
await asyncio.sleep(crawl_interval)
result.extend(sub_comments)
return result
if not config.ENABLE_GET_SUB_COMMENTS:
return []
# TODO: sub-comment crawling is not implemented yet

View File

@@ -114,10 +114,12 @@ class TieBaCrawler(AbstractCrawler):
self.get_note_detail_async_task(note_id=note_id, semaphore=semaphore) for note_id in note_id_list
]
note_details = await asyncio.gather(*task_list)
note_details_model: List[TiebaNote] = []
for note_detail in note_details:
if note_detail is not None:
note_details_model.append(note_detail)
await tieba_store.update_tieba_note(note_detail)
await self.batch_get_note_comments(config.TIEBA_SPECIFIED_ID_LIST)
await self.batch_get_note_comments(note_details_model)
async def get_note_detail_async_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[TiebaNote]:
"""
@@ -146,42 +148,39 @@ class TieBaCrawler(AbstractCrawler):
f"[BaiduTieBaCrawler.get_note_detail] have not fund note detail note_id:{note_id}, err: {ex}")
return None
async def batch_get_note_comments(self, note_id_list: List[str]):
async def batch_get_note_comments(self, note_detail_list: List[TiebaNote]):
"""
Batch get note comments
Args:
note_id_list:
note_detail_list:
Returns:
"""
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[BaiduTieBaCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
utils.logger.info(
f"[BaiduTieBaCrawler.batch_get_note_comments] Begin batch get note comments, note list: {note_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for note_id in note_id_list:
task = asyncio.create_task(self.get_comments_async_task(note_id, semaphore), name=note_id)
for note_detail in note_detail_list:
task = asyncio.create_task(self.get_comments_async_task(note_detail, semaphore), name=note_detail.note_id)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_comments_async_task(self, note_id: str, semaphore: asyncio.Semaphore):
async def get_comments_async_task(self, note_detail: TiebaNote, semaphore: asyncio.Semaphore):
"""
Get comments async task
Args:
note_id:
note_detail:
semaphore:
Returns:
"""
async with semaphore:
utils.logger.info(f"[BaiduTieBaCrawler.get_comments] Begin get note id comments {note_id}")
utils.logger.info(f"[BaiduTieBaCrawler.get_comments] Begin get note id comments {note_detail.note_id}")
await self.tieba_client.get_note_all_comments(
note_id=note_id,
note_detail=note_detail,
crawl_interval=random.random(),
callback=tieba_store.batch_update_tieba_note_comments
)
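batch_get_note_comments fans out one task per note and relies on the shared asyncio.Semaphore to cap concurrency at MAX_CONCURRENCY_NUM. A stripped-down sketch of that pattern (function names here are illustrative, not the crawler's API):

    import asyncio
    import random

    async def bounded_crawl(note_id: str, semaphore: asyncio.Semaphore) -> str:
        async with semaphore:                     # at most N coroutines enter this block at once
            await asyncio.sleep(random.random())  # stand-in for the real comment crawl
            return note_id

    async def run_batch(note_ids: list, max_concurrency: int = 1) -> list:
        semaphore = asyncio.Semaphore(max_concurrency)
        tasks = [asyncio.create_task(bounded_crawl(nid, semaphore), name=nid) for nid in note_ids]
        return await asyncio.gather(*tasks)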

View File

@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import re
import json
import html
from typing import List, Dict, Tuple
from parsel import Selector
from model.m_baidu_tieba import TiebaNote
from model.m_baidu_tieba import TiebaNote, TiebaComment
from constant import baidu_tieba as const
@@ -40,7 +42,6 @@ class TieBaExtractor:
result.append(tieba_note)
return result
def extract_note_detail(self, page_content: str) -> TiebaNote:
"""
Extract the detail of a Tieba note
@@ -66,8 +67,10 @@ class TieBaExtractor:
title=content_selector.xpath("//title/text()").get(default='').strip(),
desc=content_selector.xpath("//meta[@name='description']/@content").get(default='').strip(),
note_url=const.TIEBA_URL + f"/p/{note_id}",
user_link=const.TIEBA_URL + first_floor_selector.xpath(".//a[@class='p_author_face ']/@href").get(default='').strip(),
user_nickname=first_floor_selector.xpath(".//a[@class='p_author_name j_user_card']/text()").get(default='').strip(),
user_link=const.TIEBA_URL + first_floor_selector.xpath(".//a[@class='p_author_face ']/@href").get(
default='').strip(),
user_nickname=first_floor_selector.xpath(".//a[@class='p_author_name j_user_card']/text()").get(
default='').strip(),
user_avatar=first_floor_selector.xpath(".//a[@class='p_author_face ']/img/@src").get(default='').strip(),
tieba_name=content_selector.xpath("//a[@class='card_title_fname']/text()").get(default='').strip(),
tieba_link=const.TIEBA_URL + content_selector.xpath("//a[@class='card_title_fname']/@href").get(default=''),
@@ -79,33 +82,44 @@ class TieBaExtractor:
note.title = note.title.replace(f"{note.tieba_name}】_百度贴吧", "")
return note
@staticmethod
def extract_tieba_note_comments(page_content: str) -> List[Dict]:
def extract_tieba_note_parment_comments(self, page_content: str, note_id: str) -> List[TiebaComment]:
"""
Extract Tieba note comments
Extract first-level comments of a Tieba note
Args:
page_content:
note_id:
Returns:
"""
xpath_selector = "//div[@id='j_p_postlist']/div[@class='l_post l_post_bright j_l_post clearfix']"
xpath_selector = "//div[@class='l_post l_post_bright j_l_post clearfix ']"
comment_list = Selector(text=page_content).xpath(xpath_selector)
result = []
for comment in comment_list:
comment_id = comment.xpath(".//@data-pid").get(default='').strip()
author = comment.xpath(".//a[@data-field]/text()").get(default='').strip()
author_link = comment.xpath(".//a[@data-field]/@href").get(default='')
content = comment.xpath(".//div[@class='d_post_content j_d_post_content ']/text()").get(default='').strip()
date = comment.xpath(".//span[@class='tail-info']/text()").get(default='').strip()
result: List[TiebaComment] = []
for comment_selector in comment_list:
comment_field_value: Dict = self.extract_data_field_value(comment_selector)
if not comment_field_value:
continue
result.append({
"comment_id": comment_id,
"author": author,
"author_link": author_link,
"content": content,
"time": date,
})
other_info_content = comment_selector.xpath(".//div[@class='post-tail-wrap']").get(default="").strip()
ip_location, publish_time = self.extract_ip_and_pub_time(other_info_content)
tieba_comment = TiebaComment(
comment_id=str(comment_field_value.get("content").get("post_id")),
sub_comment_count=comment_field_value.get("content").get("comment_num"),
content=comment_field_value.get("content").get("content"),
note_url=const.TIEBA_URL + f"/p/{note_id}",
user_link=const.TIEBA_URL + comment_selector.xpath(".//a[@class='p_author_face ']/@href").get(default='').strip(),
user_nickname=comment_selector.xpath(".//a[@class='p_author_name j_user_card']/text()").get(
default='').strip(),
user_avatar=comment_selector.xpath(".//a[@class='p_author_face ']/img/@src").get(
default='').strip(),
tieba_name=comment_selector.xpath("//a[@class='card_title_fname']/text()").get(default='').strip(),
ip_location=ip_location,
publish_time=publish_time,
note_id=note_id,
)
print(tieba_comment.model_dump())
result.append(tieba_comment)
return result
@staticmethod
def extract_ip_and_pub_time(html_content: str) -> Tuple[str, str]:
@@ -125,6 +139,31 @@ class TieBaExtractor:
pub_time = time_match.group(1) if time_match else ""
return ip, pub_time
@staticmethod
def extract_data_field_value(selector: Selector) -> Dict:
"""
Extract the value of the data-field attribute
Args:
selector:
Returns:
"""
data_field_value = selector.xpath("./@data-field").get(default='').strip()
if not data_field_value or data_field_value == "{}":
return {}
try:
# First use html.unescape to decode the escaped characters, then json.loads to turn the JSON string into a Python dict
unescaped_json_str = html.unescape(data_field_value)
data_field_dict_value = json.loads(unescaped_json_str)
except Exception as ex:
print(f"extract_data_field_value错误信息{ex}, 尝试使用其他方式解析")
data_field_dict_value = {}
return data_field_dict_value
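To illustrate what extract_data_field_value handles, here is a standalone sketch with a made-up, HTML-escaped data-field value (the real Tieba markup may differ):

    import html
    import json

    # hypothetical attribute value as it appears in the raw HTML
    raw = '{&quot;content&quot;:{&quot;post_id&quot;:123,&quot;comment_num&quot;:2}}'

    unescaped = html.unescape(raw)     # '{"content":{"post_id":123,"comment_num":2}}'
    data = json.loads(unescaped)       # plain Python dict
    print(data["content"]["post_id"])  # 123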
def test_extract_search_note_list():
with open("test_data/search_keyword_notes.html", "r", encoding="utf-8") as f:
content = f.read()
@@ -140,7 +179,14 @@ def test_extract_note_detail():
result = extractor.extract_note_detail(content)
print(result.model_dump())
def test_extract_tieba_note_parment_comments():
with open("test_data/note_comments.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_tieba_note_parment_comments(content, "123456")
print(result)
if __name__ == '__main__':
test_extract_search_note_list()
test_extract_note_detail()
# test_extract_search_note_list()
# test_extract_note_detail()
test_extract_tieba_note_parment_comments()

File diff suppressed because one or more lines are too long

View File

@@ -4,6 +4,9 @@ from pydantic import BaseModel, Field
class TiebaNote(BaseModel):
"""
Baidu Tieba note
"""
note_id: str = Field(..., description="帖子ID")
title: str = Field(..., description="帖子标题")
desc: str = Field(default="", description="帖子描述")
@@ -17,3 +20,23 @@ class TiebaNote(BaseModel):
total_replay_num: int = Field(default=0, description="回复总数")
total_replay_page: int = Field(default=0, description="回复总页数")
ip_location: Optional[str] = Field(default="", description="IP地理位置")
class TiebaComment(BaseModel):
"""
Baidu Tieba comment
"""
comment_id: str = Field(..., description="评论ID")
parment_comment_id: str = Field(default="", description="父评论ID")
content: str = Field(..., description="评论内容")
user_link: str = Field(default="", description="用户主页链接")
user_nickname: str = Field(default="", description="用户昵称")
user_avatar: str = Field(default="", description="用户头像地址")
publish_time: str = Field(default="", description="发布时间")
ip_location: Optional[str] = Field(default="", description="IP地理位置")
sub_comment_count: int = Field(default=0, description="子评论数")
note_id: str = Field(..., description="帖子ID")
note_url: str = Field(..., description="帖子链接")
tieba_name: str = Field(..., description="所属的贴吧名称")
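A quick sketch of constructing the new model and serializing it the way the extractor and the store layer do (all field values below are placeholders):

    from model.m_baidu_tieba import TiebaComment

    comment = TiebaComment(
        comment_id="140000000001",
        content="placeholder content",
        note_id="123456",
        note_url="https://tieba.baidu.com/p/123456",
        tieba_name="example",
    )
    print(comment.model_dump())  # dict with defaults filled in, ready for the store layer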

View File

@@ -372,3 +372,26 @@ CREATE TABLE tieba_note
KEY `idx_tieba_note_note_id` (`note_id`),
KEY `idx_tieba_note_publish_time` (`publish_time`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='贴吧帖子表';
DROP TABLE IF EXISTS `tieba_comment`;
CREATE TABLE tieba_comment
(
id BIGINT AUTO_INCREMENT PRIMARY KEY,
comment_id VARCHAR(255) NOT NULL COMMENT '评论ID',
parment_comment_id VARCHAR(255) DEFAULT '' COMMENT '父评论ID',
content TEXT NOT NULL COMMENT '评论内容',
user_link VARCHAR(255) DEFAULT '' COMMENT '用户主页链接',
user_nickname VARCHAR(255) DEFAULT '' COMMENT '用户昵称',
user_avatar VARCHAR(255) DEFAULT '' COMMENT '用户头像地址',
publish_time VARCHAR(255) DEFAULT '' COMMENT '发布时间',
ip_location VARCHAR(255) DEFAULT '' COMMENT 'IP地理位置',
sub_comment_count INT DEFAULT 0 COMMENT '子评论数',
note_id VARCHAR(255) NOT NULL COMMENT '帖子ID',
note_url VARCHAR(255) NOT NULL COMMENT '帖子链接',
tieba_name VARCHAR(255) NOT NULL COMMENT '所属的贴吧名称',
add_ts BIGINT NOT NULL COMMENT '添加时间戳',
last_modify_ts BIGINT NOT NULL COMMENT '最后修改时间戳',
KEY `idx_tieba_comment_comment_id` (`comment_id`),
KEY `idx_tieba_comment_note_id` (`note_id`),
KEY `idx_tieba_comment_publish_time` (`publish_time`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='贴吧评论表';

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from typing import List
from model.m_baidu_tieba import TiebaNote
from model.m_baidu_tieba import TiebaNote, TiebaComment
from . import tieba_store_impl
from .tieba_store_impl import *
@@ -38,14 +38,23 @@ async def update_tieba_note(note_item: TiebaNote):
await TieBaStoreFactory.create_store().store_content(save_note_item)
async def batch_update_tieba_note_comments(note_id: str, comments: List[Dict]):
async def batch_update_tieba_note_comments(note_id: str, comments: List[TiebaComment]):
"""
Batch update tieba note comments
Args:
note_id:
comments:
Returns:
"""
if not comments:
return
for comment_item in comments:
await update_tieba_note_comment(note_id, comment_item)
async def update_tieba_note_comment(note_id: str, comment_item: Dict):
async def update_tieba_note_comment(note_id: str, comment_item: TiebaComment):
"""
Update tieba note comment
Args:
@@ -55,23 +64,7 @@ async def update_tieba_note_comment(note_id: str, comment_item: Dict):
Returns:
"""
user_info = comment_item.get("user_info", {})
comment_id = comment_item.get("id")
comment_pictures = [item.get("url_default", "") for item in comment_item.get("pictures", [])]
target_comment = comment_item.get("target_comment", {})
local_db_item = {
"comment_id": comment_id,
"create_time": comment_item.get("create_time"),
"ip_location": comment_item.get("ip_location"),
"note_id": note_id,
"content": comment_item.get("content"),
"user_id": user_info.get("user_id"),
"nickname": user_info.get("nickname"),
"avatar": user_info.get("image"),
"sub_comment_count": comment_item.get("sub_comment_count", 0),
"pictures": ",".join(comment_pictures),
"parent_comment_id": target_comment.get("id", 0),
"last_modify_ts": utils.get_current_timestamp(),
}
utils.logger.info(f"[store.tieba.update_tieba_note_comment] tieba note comment:{local_db_item}")
await TieBaStoreFactory.create_store().store_comment(local_db_item)
save_comment_item = comment_item.model_dump()
save_comment_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.tieba.update_tieba_note_comment] tieba note id: {note_id} comment:{save_comment_item}")
await TieBaStoreFactory.create_store().store_comment(save_comment_item)
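For clarity, the shape of the dict that store_comment now receives per comment, as a standalone sketch (the millisecond timestamp is only an assumption about what utils.get_current_timestamp() returns):

    import time
    from model.m_baidu_tieba import TiebaComment

    def to_store_row(comment_item: TiebaComment) -> dict:
        # mirrors update_tieba_note_comment: dump the pydantic model, then add the timestamp
        row = comment_item.model_dump()
        row.update({"last_modify_ts": int(time.time() * 1000)})  # analogue of utils.get_current_timestamp()
        return row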