Merge pull request #223 from Ermeng98/main

新增对微博博客内照片获取的支持 文件存放路径data/weibo/images
This commit is contained in:
程序员阿江-Relakkes 2024-04-11 00:11:04 +08:00 committed by GitHub
commit bba9841c26
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 118 additions and 10 deletions

View File

@ -56,6 +56,12 @@ class AbstractStore(ABC):
async def store_creator(self, creator: Dict):
pass
class AbstractStoreImage(ABC):
    """Interface for persisting downloaded note images.

    TODO: extend support to every platform — weibo is the only
    implementation so far, which is why the abstractmethod decorator
    is still commented out.
    """
    # @abstractmethod
    async def store_image(self, image_content_item: Dict):
        """Persist one image item (id, raw content, file extension)."""
        pass
class AbstactApiClient(ABC):
@abstractmethod

View File

@ -33,6 +33,9 @@ CRAWLER_MAX_NOTES_COUNT = 20
# Maximum number of concurrent crawler tasks
MAX_CONCURRENCY_NUM = 4
# Whether to enable image crawling mode; disabled by default
ENABLE_GET_IMAGES = False
# Whether to enable comment crawling mode; disabled by default
ENABLE_GET_COMMENTS = False

View File

@ -12,7 +12,6 @@ from urllib.parse import urlencode
import httpx
from playwright.async_api import BrowserContext, Page
from tools import utils
from .exception import DataFetchError
@ -35,6 +34,7 @@ class WeiboClient:
self._host = "https://m.weibo.cn"
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
self._image_agent_host = "https://i1.wp.com/"
async def request(self, method, url, **kwargs) -> Any:
async with httpx.AsyncClient(proxies=self.proxies) as client:
@ -181,3 +181,25 @@ class WeiboClient:
else:
utils.logger.info(f"[WeiboClient.get_note_info_by_id] 未找到$render_data的值")
return dict()
async def get_note_image(self, image_url: str) -> bytes:
    """
    Download a note image through the image proxy host.

    Weibo's image CDN blocks hotlinked requests, so the original URL is
    rewritten to go through self._image_agent_host (i1.wp.com), and the
    size segment of the path is forced to "large" to fetch the
    high-resolution version.

    :param image_url: original image URL, expected to start with "https://"
    :return: raw image bytes on success, None when the request fails
    """
    # Drop the "https://" scheme and replace the size segment (path
    # index 1) with "large" so we always fetch the high-res image.
    parts = image_url[len("https://"):].split("/")
    if len(parts) > 1:
        parts[1] = "large"
    # The weibo image host enforces anti-hotlinking, so the request must
    # go through the i1.wp.com proxy host instead of hitting it directly.
    final_uri = f"{self._image_agent_host}{'/'.join(parts)}"
    async with httpx.AsyncClient(proxies=self.proxies) as client:
        response = await client.request("GET", final_uri, timeout=self.timeout)
    # Check the numeric status code. The previous check compared
    # response.reason_phrase to "OK", which is empty under HTTP/2 and
    # therefore misclassified every successful response as a failure.
    if response.status_code != 200:
        utils.logger.error(f"[WeiboClient.get_note_image] request {final_uri} err, res:{response.text}")
        return None
    return response.content

View File

@ -10,11 +10,10 @@ import random
from asyncio import Task
from typing import Dict, List, Optional, Tuple
from playwright.async_api import (BrowserContext, BrowserType, Page,
async_playwright)
import config
from base.base_crawler import AbstractCrawler
from playwright.async_api import (BrowserContext, BrowserType, Page,
async_playwright)
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import weibo as weibo_store
from tools import utils
@ -121,8 +120,10 @@ class WeiboCrawler(AbstractCrawler):
for note_item in note_list:
if note_item:
mblog: Dict = note_item.get("mblog")
note_id_list.append(mblog.get("id"))
await weibo_store.update_weibo_note(note_item)
if mblog:
note_id_list.append(mblog.get("id"))
await weibo_store.update_weibo_note(note_item)
await self.get_note_images(mblog)
page += 1
await self.batch_get_notes_comments(note_id_list)
@ -200,6 +201,28 @@ class WeiboCrawler(AbstractCrawler):
except Exception as e:
utils.logger.error(f"[WeiboCrawler.get_note_comments] may be been blocked, err:{e}")
async def get_note_images(self, mblog: Dict):
    """
    Download and store all images attached to one weibo note.

    Does nothing unless config.ENABLE_GET_IMAGES is turned on, or when
    the note carries no "pics" payload.

    :param mblog: raw note payload; images are listed under the "pics" key
    :return: None
    """
    if not config.ENABLE_GET_IMAGES:
        utils.logger.info(f"[WeiboCrawler.get_note_images] Crawling image mode is not enabled")
        return
    # "pics" is a list of per-image dicts (each with "url" and "pid"),
    # not a single dict — the previous Dict annotation was misleading.
    pics: List[Dict] = mblog.get("pics")
    if not pics:
        return
    for pic in pics:
        url = pic.get("url")
        if not url:
            continue
        content = await self.wb_client.get_note_image(url)
        # identity comparison with None (PEP 8) instead of `!= None`
        if content is not None:
            extension_file_name = url.split(".")[-1]
            await weibo_store.update_weibo_note_image(pic["pid"], content, extension_file_name)
async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
"""Create xhs client"""
utils.logger.info("[WeiboCrawler.create_weibo_client] Begin create weibo API client ...")

View File

@ -8,11 +8,10 @@ import functools
import sys
from typing import Optional
from base.base_crawler import AbstractLogin
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
from base.base_crawler import AbstractLogin
from tools import utils

View File

@ -7,6 +7,7 @@ from typing import List
import config
from .weibo_store_image import *
from .weibo_store_impl import *
@ -14,7 +15,7 @@ class WeibostoreFactory:
STORES = {
"csv": WeiboCsvStoreImplement,
"db": WeiboDbStoreImplement,
"json": WeiboJsonStoreImplement
"json": WeiboJsonStoreImplement,
}
@staticmethod
@ -86,3 +87,6 @@ async def update_weibo_note_comment(note_id: str, comment_item: Dict):
utils.logger.info(
f"[store.weibo.update_weibo_note_comment] Weibo note comment: {comment_id}, content: {save_comment_item.get('content', '')[:24]} ...")
await WeibostoreFactory.create_store().store_comment(comment_item=save_comment_item)
async def update_weibo_note_image(picid: str, pic_content, extension_file_name):
    """Forward one downloaded image to the weibo image store."""
    image_item = {
        "pic_id": picid,
        "pic_content": pic_content,
        "extension_file_name": extension_file_name,
    }
    await WeiboStoreImage().store_image(image_item)

View File

@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
# @Author : Erm
# @Time : 2024/4/9 17:35
# @Desc : 微博保存图片类
import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStoreImage
from tools import utils
class WeiboStoreImage(AbstractStoreImage):
    """Store weibo note images on the local filesystem."""

    # relative directory where downloaded images are written
    image_store_path: str = "data/weibo/images"

    async def store_image(self, image_content_item: Dict):
        """
        store one image item
        Args:
            image_content_item: dict with keys "pic_id", "pic_content"
                (raw bytes) and "extension_file_name"
        Returns:

        """
        await self.save_image(
            image_content_item.get("pic_id"),
            image_content_item.get("pic_content"),
            image_content_item.get("extension_file_name"),
        )

    def make_save_file_name(self, picid: str, extension_file_name: str) -> str:
        """
        build the target file path for an image
        Args:
            picid: image id
            extension_file_name: file extension without the dot, e.g. "jpg"
        Returns:
            relative path "<image_store_path>/<picid>.<extension>"
        """
        return f"{self.image_store_path}/{picid}.{extension_file_name}"

    async def save_image(self, picid: str, pic_content: bytes, extension_file_name: str = "jpg"):
        """
        save image to local disk
        Args:
            picid: image id
            pic_content: raw image bytes (file is opened in binary mode,
                so the previous `str` annotation was wrong)
            extension_file_name: file extension, defaults to "jpg"
        Returns:

        """
        pathlib.Path(self.image_store_path).mkdir(parents=True, exist_ok=True)
        save_file_name = self.make_save_file_name(picid, extension_file_name)
        async with aiofiles.open(save_file_name, 'wb') as f:
            await f.write(pic_content)
            # log tag fixed to match the actual class name
            utils.logger.info(f"[WeiboStoreImage.save_image] save image {save_file_name} success ...")

View File

@ -10,7 +10,6 @@ import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStore
from tools import utils
from var import crawler_type_var