Merge remote-tracking branch 'upstream/main'

This commit is contained in:
xiaohuozi
2024-12-16 23:41:52 +08:00
88 changed files with 26762 additions and 685 deletions

View File

@@ -38,7 +38,7 @@ class AutoSignIn(_PluginBase):
# 插件图标
plugin_icon = "signin.png"
# 插件版本
plugin_version = "2.4"
plugin_version = "2.4.2"
# 插件作者
plugin_author = "thsrite"
# 作者主页

View File

@@ -39,7 +39,15 @@ class HaiDan(_ISiteSigninHandler):
render = site_info.get("render")
# 签到
html_text = self.get_page_source(url='https://www.haidan.video/signin.php',
# 签到页会重定向到index.php由于302重定向特性导致index.php没有携带cookie
self.get_page_source(url='https://www.haidan.video/signin.php',
cookie=site_cookie,
ua=ua,
proxy=proxy,
render=render)
# 重新携带cookie获取index.php查看签到结果
html_text = self.get_page_source(url='https://www.haidan.video/index.php',
cookie=site_cookie,
ua=ua,
proxy=proxy,

View File

@@ -0,0 +1,64 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class PTTime(_ISiteSigninHandler):
    """
    Signin handler for the PT Time site (pttime.org).
    """
    # Site URL this handler is responsible for; every handler class sets its own.
    site_url = "pttime.org"
    # Regex fragments whose presence in the response marks a successful signin.
    _succeed_regex = ['签到成功']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler matches the given site URL; the default
        comparison is sufficient for most sites.
        :param url: site URL
        :return: True when the URL belongs to this site, in which case
                 signin() will be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin request and interpret the returned page.
        :param site_info: site information (URL, cookie, UA, proxy, render flag)
        :return: (success flag, human-readable result message)
        """
        site_name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        use_proxy = site_info.get("proxy")
        use_render = site_info.get("render")
        # The attendance endpoint answers with:
        # <html><head></head><body>签到成功</body></html>
        page = self.get_page_source(url='https://www.pttime.org/attendance.php',
                                    cookie=cookie,
                                    ua=user_agent,
                                    proxy=use_proxy,
                                    render=use_render)
        # Guard: no response at all means the site is unreachable.
        if not page:
            logger.error(f"{site_name} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        # Guard: a redirect to the login page means the cookie has expired.
        if "login.php" in page:
            logger.error(f"{site_name} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        if self.sign_in_result(html_res=page, regexs=self._succeed_regex):
            logger.info(f"{site_name} 签到成功")
            return True, '签到成功'
        logger.error(f"{site_name} 签到失败,签到接口返回 {page}")
        return False, '签到失败'

View File

@@ -0,0 +1,434 @@
# 基础库
import datetime
import json
from typing import Any, Dict, List, Optional, Type
# 第三方库
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import pytz
from sqlalchemy import JSON
from sqlalchemy.orm import Session
# 项目库
from app.chain.subscribe import SubscribeChain, Subscribe
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.db.models.subscribehistory import SubscribeHistory
from app.db.site_oper import SiteOper
from app.db.subscribe_oper import SubscribeOper
from app.db import db_query
from app.helper.subscribe import SubscribeHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
class BangumiColl(_PluginBase):
    """Mirror a Bangumi user's public collection into MoviePilot subscriptions."""
    # Plugin display name
    plugin_name = "Bangumi收藏订阅"
    # Plugin description
    plugin_desc = "将Bangumi用户收藏添加到订阅"
    # Plugin icon
    plugin_icon = "bangumi_b.png"
    # Plugin version
    plugin_version = "1.5.2"
    # Plugin author
    plugin_author = "Attente"
    # Author homepage
    author_url = "https://github.com/wikrin"
    # Prefix for the plugin's configuration keys
    plugin_config_prefix = "bangumicoll_"
    # Load order
    plugin_order = 23
    # Minimum user auth level allowed to use the plugin
    auth_level = 1
    # Private attributes
    _scheduler = None
    siteoper: SiteOper = None
    subscribehelper: SubscribeHelper = None
    subscribeoper: SubscribeOper = None
    # Configuration attributes (filled from the saved config in load_config)
    _enabled: bool = False
    _total_change: bool = False
    _cron: str = ""
    _notify: bool = False
    _onlyonce: bool = False
    _include: str = ""
    _exclude: str = ""
    _uid: str = ""
    _collection_type = []
    _save_path: str = ""
    _sites: list = []

    def init_plugin(self, config: dict = None):
        """Initialize the plugin: build helper objects, stop any running task,
        load the saved configuration and optionally schedule a one-off run."""
        self.subscribechain = SubscribeChain()
        self.siteoper = SiteOper()
        self.subscribehelper = SubscribeHelper()
        self.subscribeoper = SubscribeOper()
        # Stop any existing task before (re)configuring
        self.stop_service()
        self.load_config(config)
        if self._onlyonce:
            self.schedule_once()

    def load_config(self, config: dict):
        """Load the saved configuration onto the matching `_`-prefixed attributes."""
        if config:
            # Copy known keys from the config dict; keep the current value
            # as the default when a key is absent
            for key in (
                "enabled",
                "total_change",
                "cron",
                "notify",
                "onlyonce",
                "uid",
                "collection_type",
                "save_path",
                "sites",
            ):
                setattr(self, f"_{key}", config.get(key, getattr(self, f"_{key}")))
            # Collect all currently known site ids
            site_ids = {site.id for site in self.siteoper.list_order_by_pri()}
            # Drop sites that have since been deleted
            self._sites = [site_id for site_id in self._sites if site_id in site_ids]
            # Persist the (possibly pruned) configuration
            self.__update_config()

    def schedule_once(self):
        """Schedule a single immediate run (3 seconds from now) and reset the
        run-once switch."""
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        logger.info("Bangumi收藏订阅立即运行一次")
        self._scheduler.add_job(
            func=self.bangumi_coll,
            trigger='date',
            run_date=datetime.datetime.now(tz=pytz.timezone(settings.TZ))
            + datetime.timedelta(seconds=3),
        )
        self._scheduler.start()
        # Turn the run-once switch back off so it does not fire again
        self._onlyonce = False
        self.__update_config()

    def __update_config(self):
        """Persist the current attribute values back to the plugin config store."""
        self.update_config(
            {
                "enabled": self._enabled,
                "notify": self._notify,
                "total_change": self._total_change,
                "onlyonce": self._onlyonce,
                "cron": self._cron,
                "uid": self._uid,
                "collection_type": self._collection_type,
                "include": self._include,
                "exclude": self._exclude,
                "save_path": self._save_path,
                "sites": self._sites,
            }
        )

    def get_form(self):
        """Return the plugin's settings form definition (component tree + defaults)."""
        from .page_components import form
        # List all sites as select options for the form
        sites_options = [
            {"title": site.name, "value": site.id}
            for site in self.siteoper.list_order_by_pri()
        ]
        return form(sites_options)

    def get_service(self) -> List[Dict[str, Any]]:
        """
        Register the plugin's background service.
        Uses the configured cron expression when present, otherwise falls back
        to a 6-hour interval trigger.
        """
        if self._enabled or self._cron:
            trigger = CronTrigger.from_crontab(self._cron) if self._cron else "interval"
            kwargs = {"hours": 6} if not self._cron else {}
            return [
                {
                    "id": "BangumiColl",
                    "name": "Bangumi收藏订阅",
                    "trigger": trigger,
                    "func": self.bangumi_coll,
                    "kwargs": kwargs,
                }
            ]
        return []

    def stop_service(self):
        """Shut down the scheduler and drop all pending jobs on plugin exit."""
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            logger.error(f"退出插件失败:{str(e)}")

    @eventmanager.register(EventType.SiteDeleted)
    def site_deleted(self, event: Event):
        """
        Remove a deleted site from the configured site list.
        """
        site_id = event.event_data.get("site_id")
        if site_id in self._sites:
            self._sites.remove(site_id)
            self.__update_config()

    def get_api(self):
        # No API endpoints exposed
        pass

    def get_command(self):
        # No commands exposed
        pass

    def get_page(self):
        # No detail page provided
        pass

    def get_state(self):
        # Plugin enabled state
        return self._enabled

    def bangumi_coll(self):
        """Main job: fetch the Bangumi user's collection and sync subscriptions."""
        if not self._uid:
            logger.error("请设置UID")
            return
        try:
            res = self.get_bgm_res(addr="UserCollections", id=self._uid)
            items = self.parse_collection_items(res)
            # Add new entries and remove vanished ones
            self.manage_subscriptions(items)
        except Exception as e:
            logger.error(f"执行失败: {str(e)}")

    def parse_collection_items(self, response) -> Dict[int, Dict[str, Any]]:
        """Parse the fetched collection response into {subject_id: info} entries,
        keeping only the configured collection types."""
        data = response.json().get("data", [])
        if not data:
            logger.error(f"Bangumi用户{self._uid} ,没有任何收藏")
            return {}
        logger.info("解析Bangumi条目信息...")
        return {
            item.get("subject_id"): {
                "name": item['subject'].get('name'),
                "name_cn": item['subject'].get('name_cn'),
                "date": item['subject'].get('date'),
                "eps": item['subject'].get('eps'),
            }
            for item in data
            if item.get("type") in self._collection_type
        }

    def manage_subscriptions(self, items: Dict[int, Dict[str, Any]]):
        """Diff the collection against existing subscriptions and history, then
        add the new entries and (optionally) remove the vanished ones."""
        # Current subscriptions keyed by Bangumi id -> subscription id
        db_sub = {
            i.bangumiid: i.id
            for i in self.subscribechain.subscribeoper.list()
            if i.bangumiid
        }
        # Bangumi ids of already-completed subscriptions
        db_hist = self.get_subscribe_history()
        new_sub = items.keys() - db_sub.keys() - db_hist
        del_sub = db_sub.keys() - items.keys()
        logger.debug(f"待新增条目:{new_sub}")
        logger.debug(f"待移除条目:{del_sub}")
        # Removal only happens when the notify switch is on
        if del_sub and self._notify:
            del_items = {db_sub[i]: i for i in del_sub}
            logger.info("开始移除订阅...")
            self.delete_subscribe(del_items)
            logger.info("移除完成")
        if new_sub:
            logger.info("开始添加订阅...")
            msg = self.add_subscribe({i: items[i] for i in new_sub})
            logger.info("添加完成")
            if msg:
                logger.info("\n".ljust(49, ' ').join(list(msg.values())))

    # Add subscriptions
    def add_subscribe(self, items: Dict[int, Dict[str, Any]]) -> Dict:
        """Add subscriptions for the given items; return {bangumi_id: reason}
        for the ones that failed."""
        fail_items = {}
        # NOTE(review): the loop variable is stored on the instance because
        # get_eps() later reads self._subid for the current entry
        for self._subid, item in items.items():
            meta = MetaInfo(item.get("name_cn"))
            if not meta.name:
                fail_items[self._subid] = f"{item.get('name_cn')} 未识别到有效数据"
                logger.warn(f"{item.get('name_cn')} 未识别到有效数据")
                continue
            meta.year = item.get("date")[:4] if item.get("date") else None
            mediainfo = self.chain.recognize_media(meta=meta)
            meta.total_episode = item.get("eps", 0)
            if not mediainfo:
                fail_items[self._subid] = f"{item.get('name_cn')} 媒体信息识别失败"
                continue
            self.update_media_info(item, mediainfo)
            sid = self.subscribeoper.list_by_tmdbid(
                mediainfo.tmdb_id, mediainfo.number_of_seasons
            )
            if sid:
                # Already subscribed: backfill the Bangumi id when unambiguous
                logger.info(f"{mediainfo.title_year} 正在订阅中")
                if len(sid) == 1:
                    self.subscribeoper.update(
                        sid=sid[0].id, payload={"bangumiid": self._subid}
                    )
                    logger.info(f"{mediainfo.title_year} Bangumi条目id更新成功")
                continue
            sid, msg = self.subscribechain.add(
                title=mediainfo.title,
                year=mediainfo.year,
                season=mediainfo.number_of_seasons,
                bangumiid=self._subid,
                exist_ok=True,
                username="Bangumi订阅",
                **self.prepare_kwargs(meta, mediainfo),
            )
            if not sid:
                fail_items[self._subid] = f"{item.get('name_cn')} {msg}"
        return fail_items

    def prepare_kwargs(self, meta: MetaBase, mediainfo: MediaInfo) -> Dict:
        """Build the extra keyword arguments for SubscribeChain.add, adjusting
        episode numbers when the Bangumi season/episode count differs from TMDB."""
        kwargs = {
            "save_path": self._save_path,
            # Serialize to JSON text when the DB column is not a JSON type
            "sites": (
                self._sites
                if self.are_types_equal(attribute_name='sites')
                else json.dumps(self._sites)
            ),
        }
        total_episode = len(mediainfo.seasons.get(mediainfo.number_of_seasons) or [])
        if (
            meta.begin_season
            and mediainfo.number_of_seasons != meta.begin_season
            or total_episode != meta.total_episode
        ):
            # Episode numbering disagrees with TMDB: query Bangumi for offsets
            meta = self.get_eps(meta)
            total_ep: int = meta.end_episode if meta.end_episode else total_episode
            lock_eps: int = total_ep - meta.begin_episode + 1
            # Episodes before the start are recorded so they are not treated as lacking
            prev_eps: list = [i for i in range(1, meta.begin_episode)]
            kwargs.update(
                {
                    "total_episode": total_ep,
                    "start_episode": meta.begin_episode,
                    "lack_episode": lock_eps,
                    "manual_total_episode": (
                        1 if meta.total_episode and self._total_change else 0
                    ),  # Marks the total episode count as manually overridden
                    "note": (
                        prev_eps
                        if self.are_types_equal("note")
                        else json.dumps(prev_eps)
                    ),
                }
            )
            logger.info(
                f"{mediainfo.title_year} 更新总集数为: {total_ep},开始集数为: {meta.begin_episode}"
            )
        return kwargs

    def update_media_info(self, item: dict, mediainfo: MediaInfo):
        """Pick the TMDB season whose air date is closest to the Bangumi date and
        copy its season/episode numbers onto the media info."""
        for info in mediainfo.season_info:
            if self.are_dates(item.get("date"), info.get("air_date")):
                mediainfo.number_of_seasons = info.get("season_number")
                mediainfo.number_of_episodes = info.get("episode_count")
                break

    def get_eps(self, meta: MetaBase) -> MetaBase:
        """Fetch episode numbering for the current Bangumi entry (self._subid)
        and set begin/end episode on the meta object."""
        try:
            res = self.get_bgm_res(addr="getEpisodes", id=self._subid)
            data = res.json().get("data", [{}])[0]
            # Offset between the absolute sort number and the in-season episode number
            prev = data.get("sort", 1) - data.get("ep", 1)
            total = res.json().get("total", None)
            meta.begin_episode = prev + 1
            meta.end_episode = prev + total if total else None
        except Exception as e:
            logger.error(f"获取集数信息失败: {str(e)}")
        finally:
            # NOTE(review): return in finally swallows any exception raised above;
            # meta is always returned, possibly unmodified
            return meta

    # Remove subscriptions
    def delete_subscribe(self, del_items: Dict[int, int]):
        """Delete the given subscriptions (keyed by subscription id) and send a
        notification for each removed one."""
        for subscribe_id in del_items.keys():
            try:
                subscribe = self.subscribeoper.get(subscribe_id)
                if subscribe:
                    self.subscribeoper.delete(subscribe_id)
                    # Report completion statistics asynchronously
                    self.subscribehelper.sub_done_async(
                        {"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
                    )
                    self.post_message(
                        mtype=NotificationType.Subscribe,
                        title=f"{subscribe.name}({subscribe.year}) 第{subscribe.season}季 已取消订阅",
                        text=f"原因: 未在Bangumi收藏中找到该条目\n订阅用户: {subscribe.username}\n创建时间: {subscribe.date}",
                        image=subscribe.backdrop,
                    )
            except Exception as e:
                logger.error(f"删除订阅失败 {subscribe_id}: {str(e)}")

    @staticmethod
    def get_bgm_res(addr: str, id: int | str):
        """Issue a GET request against the Bangumi v0 API.
        :param addr: which endpoint to call ("UserCollections" or "getEpisodes")
        :param id: the user id/name or subject id for the endpoint
        """
        url = {
            "UserCollections": f"https://api.bgm.tv/v0/users/{str(id)}/collections?subject_type=2",
            "getEpisodes": f"https://api.bgm.tv/v0/episodes?subject_id={str(id)}&type=0&limit=1",
        }
        headers = {
            "User-Agent": "wikrin/MoviePilot-Plugins (https://github.com/wikrin/MoviePilot-Plugins)"
        }
        return RequestUtils(headers=headers).get_res(url=url[addr])

    @staticmethod
    def are_dates(date_str1, date_str2, threshold_days: int = 7) -> bool:
        """Return True when the two 'YYYY-MM-DD' date strings are within
        threshold_days of each other."""
        date1 = datetime.datetime.strptime(date_str1, '%Y-%m-%d')
        date2 = datetime.datetime.strptime(date_str2, '%Y-%m-%d')
        return abs((date1 - date2).days) <= threshold_days

    @db_query
    def get_subscribe_history(self, db: Session = None) -> set:
        """Return the Bangumi ids of all completed (historical) subscriptions."""
        try:
            result = (
                db.query(SubscribeHistory)
                .filter(SubscribeHistory.bangumiid.isnot(None))
                .all()
            )
            return {i.bangumiid for i in result}
        except Exception as e:
            logger.error(f"获取订阅历史失败: {str(e)}")
            return set()

    @staticmethod
    def are_types_equal(
        attribute_name: str, expected_type: Type[Any] = JSON(), class_=Subscribe
    ) -> bool:
        """Return True when the DB column type of class_.attribute_name matches
        the type of expected_type.
        NOTE(review): the default is a JSON *instance* despite the Type[Any]
        annotation; the comparison uses type(expected_type), so this works."""
        column = class_.__table__.columns.get(attribute_name)
        if column is None:
            raise AttributeError(
                f"Class: {class_.__name__} 没有属性: '{attribute_name}'"
            )
        return isinstance(column.type, type(expected_type))

View File

@@ -0,0 +1,318 @@
from bs4 import BeautifulSoup
def form(sites_options) -> tuple:
    """
    Build the plugin's settings form.
    :param sites_options: site select options as [{'title': ..., 'value': ...}, ...]
    :return: (component tree, default config values) — consumed by the plugin's
             get_form(); note the last VRow (the "不跟随TMDB变动" alert) sits
             outside the VForm, as a sibling top-level component
    """
    return [
        {
            'component': 'VForm',
            'content': [
                # Row 1: feature switches
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'notify',
                                        'label': '自动取消订阅并通知',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'total_change',
                                        'label': '不跟随TMDB变动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'onlyonce',
                                        'label': '立即运行一次',
                                    },
                                }
                            ],
                        },
                    ],
                },
                # Row 2: schedule, Bangumi user, and collection types
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '执行周期',
                                        'placeholder': '5位cron表达式留空自动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'uid',
                                        'label': 'UID/用户名',
                                        'placeholder': '设置了用户名填写用户名否则填写UID',
                                    },
                                },
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'collection_type',
                                        'label': '收藏类型',
                                        'chips': True,
                                        'multiple': True,
                                        # Values follow the Bangumi collection-type codes
                                        'items': [
                                            {'title': '在看', 'value': 3},
                                            {'title': '想看', 'value': 1},
                                        ],
                                    },
                                }
                            ],
                        },
                    ],
                },
                # Row 3: include/exclude filters (placeholders say "not implemented yet")
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'include',
                                        'label': '包含',
                                        'placeholder': '暂未实现',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'exclude',
                                        'label': '排除',
                                        'placeholder': '暂未实现',
                                    },
                                }
                            ],
                        },
                    ],
                },
                # Row 4: save path and site selection
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'save_path',
                                        'label': '保存目录',
                                        'placeholder': '留空自动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'sites',
                                        'label': '选择站点',
                                        'chips': True,
                                        'multiple': True,
                                        'items': sites_options,
                                    },
                                }
                            ],
                        },
                    ],
                },
                # Row 5: informational alert (only public collections are subscribed)
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                    },
                                    'content': parse_html(
                                        '<p>注意: 该插件仅会将<strong>公开</strong>的收藏添加到<strong>订阅</strong>。</p>'
                                    ),
                                }
                            ],
                        }
                    ],
                },
                # Row 6: informational alert about the auto-unsubscribe switch
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                    },
                                    'content': parse_html(
                                        '<p>注意: 开启<strong>自动取消订阅并通知</strong>后,已添加的订阅在下一次执行时若不在已选择的<strong>收藏类型</strong>中,将会被取消订阅。</p>'
                                    ),
                                }
                            ],
                        }
                    ],
                },
            ],
        },
        # Top-level sibling row (outside the VForm): alert for the
        # "不跟随TMDB变动" switch, with a link to the Bangumi API docs
        {
            'component': 'VRow',
            'content': [
                {
                    'component': 'VCol',
                    'props': {
                        'cols': 12,
                    },
                    'content': [
                        {
                            'component': 'VAlert',
                            'props': {
                                'type': 'info',
                                'variant': 'tonal',
                            },
                            'content': parse_html(
                                '<p>注意: 开启<strong>不跟随TMDB变动</strong>后,从<a href="https://bangumi.github.io/api/#/%E7%AB%A0%E8%8A%82/getEpisodes" target="_blank"><u>Bangumi API</u></a>获取的总集数将不再跟随TMDB的集数变动。</p>'
                            ),
                        },
                    ],
                },
            ],
        },
    ], {
        # Default configuration values
        "enabled": False,
        "total_change": False,
        "notify": False,
        "onlyonce": False,
        "cron": "",
        "uid": "",
        "collection_type": [3],
        "include": "",
        "exclude": "",
        "save_path": "",
        "sites": [],
    }
def parse_html(html_string: str) -> list:
    """
    Convert an HTML snippet into the nested component structure used by the
    form renderer ({'component': ..., 'props': ..., 'content'/'text': ...}).

    :param html_string: HTML fragment, e.g. '<p>note <strong>bold</strong></p>'
    :return: list of component dicts, one per non-empty top-level node
    """
    soup = BeautifulSoup(html_string, 'html.parser')
    result: list = []
    # Tags whose children are flattened into a single 'text' value
    # instead of nested components
    inline_text_tags = {'strong', 'u', 'em', 'b', 'i'}

    def process_element(element):
        """Recursively convert one node; returns "" for whitespace-only text,
        a str for text nodes, or a component dict for tags."""
        # Plain text node (NavigableString has no tag name)
        if element.name is None:
            text = element.strip()
            return text if text else ""
        # HTML tag node
        component = element.name
        props = {attr: element[attr] for attr in element.attrs}
        content = []
        # Recurse into children
        for child in element.children:
            child_content = process_element(child)
            if isinstance(child_content, str):
                # Bug fix: skip whitespace-only text nodes instead of
                # emitting empty {'component': 'span', 'text': ''} entries
                if child_content:
                    content.append({'component': 'span', 'text': child_content})
            elif child_content:  # only add non-empty child components
                content.append(child_content)
        # Build the component object for this tag
        tag_data = {
            'component': component,
            'props': props,
            'content': content if component not in inline_text_tags else [],
        }
        if content and component in inline_text_tags:
            # Inline tags carry their children as joined text, not components
            tag_data['text'] = ' '.join(
                item['text'] for item in content if 'text' in item
            )
        return tag_data

    # Walk all top-level nodes, skipping empty results
    for element in soup.children:
        element_content = process_element(element)
        if element_content:
            result.append(element_content)
    return result

View File

@@ -25,6 +25,7 @@ from app.modules.qbittorrent import Qbittorrent
from app.modules.transmission import Transmission
from app.plugins import _PluginBase
from app.schemas import NotificationType, TorrentInfo, MediaType
from app.schemas.types import EventType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
@@ -63,15 +64,15 @@ class BrushConfig:
self.delete_size_range = config.get("delete_size_range")
self.up_speed = self.__parse_number(config.get("up_speed"))
self.dl_speed = self.__parse_number(config.get("dl_speed"))
self.auto_archive_days = self.__parse_number(config.get("auto_archive_days"))
self.save_path = config.get("save_path")
self.clear_task = config.get("clear_task", False)
self.archive_task = config.get("archive_task", False)
self.except_tags = config.get("except_tags", True)
self.delete_except_tags = config.get("delete_except_tags")
self.except_subscribe = config.get("except_subscribe", True)
self.brush_sequential = config.get("brush_sequential", False)
self.proxy_download = config.get("proxy_download", False)
self.proxy_delete = config.get("proxy_delete", False)
self.log_more = config.get("log_more", False)
self.active_time_range = config.get("active_time_range")
self.downloader_monitor = config.get("downloader_monitor")
self.qb_category = config.get("qb_category")
@@ -257,7 +258,7 @@ class BrushFlow(_PluginBase):
# 插件图标
plugin_icon = "brush.jpg"
# 插件版本
plugin_version = "3.3"
plugin_version = "3.8"
# 插件作者
plugin_author = "jxxghp,InfinityPacer"
# 作者主页
@@ -295,7 +296,6 @@ class BrushFlow(_PluginBase):
# endregion
def init_plugin(self, config: dict = None):
logger.info(f"站点刷流服务初始化")
self.siteshelper = SitesHelper()
self.siteoper = SiteOper()
self.torrents = TorrentsChain()
@@ -340,11 +340,10 @@ class BrushFlow(_PluginBase):
brush_config.archive_task = False
self.__update_config()
if brush_config.log_more:
if brush_config.enable_site_config:
logger.info(f"已开启站点独立配置,配置信息:{brush_config}")
else:
logger.info(f"没有开启站点独立配置,配置信息:{brush_config}")
if brush_config.enable_site_config:
logger.debug(f"已开启站点独立配置,配置信息:{brush_config}")
else:
logger.debug(f"没有开启站点独立配置,配置信息:{brush_config}")
# 停止现有任务
self.stop_service()
@@ -366,8 +365,6 @@ class BrushFlow(_PluginBase):
# 如果开启&存在站点时,才需要启用后台任务
self._task_brush_enable = brush_config.enabled and brush_config.brushsites
# brush_config.onlyonce = True
# 检查是否启用了一次性任务
if brush_config.onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
@@ -974,11 +971,6 @@ class BrushFlow(_PluginBase):
'component': 'VWindow',
'props': {
'model': '_tabs'
# VWindow设置paddnig会导致切换Tab时页面高度变动调整为修改VRow的方案
# 'style': {
# 'padding-top': '24px',
# 'padding-bottom': '24px',
# },
},
'content': [
{
@@ -1140,6 +1132,25 @@ class BrushFlow(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'auto_archive_days',
'label': '自动归档记录天数',
'placeholder': '超过此天数后自动归档',
'type': 'number',
"min": "0"
}
}
]
}
]
}
@@ -1426,11 +1437,28 @@ class BrushFlow(_PluginBase):
'component': 'VTextField',
'props': {
'model': 'seed_inactivetime',
'label': '未活动时间(分钟) ',
'label': '未活动时间(分钟)',
'placeholder': '超过时删除任务'
}
}
]
},
{
'component': 'VCol',
'props': {
"cols": 12,
"md": 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'delete_except_tags',
'label': '删除排除标签',
'placeholder': 'MOVIEPILOT,H&R'
}
}
]
}
]
}
@@ -1476,8 +1504,8 @@ class BrushFlow(_PluginBase):
{
'component': 'VSwitch',
'props': {
'model': 'except_tags',
'label': '删种排除MoviePilot任务',
'model': 'except_subscribe',
'label': '排除订阅(实验性功能)',
}
}
]
@@ -1492,8 +1520,8 @@ class BrushFlow(_PluginBase):
{
'component': 'VSwitch',
'props': {
'model': 'except_subscribe',
'label': '排除订阅(实验性功能)',
'model': 'qb_first_last_piece',
'label': '优先下载首尾文件块',
}
}
]
@@ -1640,43 +1668,6 @@ class BrushFlow(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'qb_first_last_piece',
'label': '优先下载首尾文件块',
}
}
]
}
]
},
{
'component': 'VRow',
"content": [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'log_more',
'label': '记录更多日志',
}
}
]
}
]
}
@@ -1742,7 +1733,7 @@ class BrushFlow(_PluginBase):
'props': {
'type': 'error',
'variant': 'tonal',
'text': '注意排除H&R并不保证能完全适配所有站点部分站点在列表页不显示H&R标志但实际上是有H&R的请注意核对使用'
'text': '注意排除H&R并不保证能完全适配所有站点部分站点在列表页不显示H&R标志但实际上是有H&R的请注意核对使用'
}
}
]
@@ -1849,7 +1840,7 @@ class BrushFlow(_PluginBase):
"onlyonce": False,
"clear_task": False,
"archive_task": False,
"except_tags": True,
"delete_except_tags": f"{settings.TORRENT_TAG},H&R" if settings.TORRENT_TAG else "H&R",
"except_subscribe": True,
"brush_sequential": False,
"proxy_download": False,
@@ -1857,7 +1848,6 @@ class BrushFlow(_PluginBase):
"freeleech": "free",
"hr": "yes",
"enable_site_config": False,
"log_more": False,
"downloader_monitor": False,
"auto_qb_category": False,
"qb_first_last_piece": False,
@@ -2055,9 +2045,6 @@ class BrushFlow(_PluginBase):
if brush_config.site_hr_active:
logger.info(f"站点 {siteinfo.name} 已开启全站H&R选项所有种子设置为H&R种子")
# 由于缓存原因这里不能直接改torrents在后续加入任务中调整
# for torrent in torrents:
# torrent.hit_and_run = True
# 排除包含订阅的种子
if brush_config.except_subscribe:
@@ -2068,7 +2055,7 @@ class BrushFlow(_PluginBase):
torrents_size = self.__calculate_seeding_torrents_size(torrent_tasks=torrent_tasks)
logger.info(f"正在准备种子刷流,数量{len(torrents)}")
logger.info(f"正在准备种子刷流,数量 {len(torrents)}")
# 过滤种子
for torrent in torrents:
@@ -2078,6 +2065,8 @@ class BrushFlow(_PluginBase):
if not pre_condition_passed:
return False
logger.debug(f"种子详情:{torrent}")
# 判断能否通过保种体积刷流条件
size_condition_passed, reason = self.__evaluate_size_condition_for_brush(torrents_size=torrents_size,
add_torrent_size=torrent.size)
@@ -2098,8 +2087,8 @@ class BrushFlow(_PluginBase):
logger.warn(f"{torrent.title} 添加刷流任务失败!")
continue
# 保存任务信息
torrent_tasks[hash_string] = {
# 触发刷流下载时间并保存任务信息
torrent_task = {
"site": siteinfo.id,
"site_name": siteinfo.name,
"title": torrent.title,
@@ -2134,6 +2123,13 @@ class BrushFlow(_PluginBase):
"time": time.time()
}
self.eventmanager.send_event(etype=EventType.PluginAction, data={
"action": "brushflow_download_added",
"hash": hash_string,
"data": torrent_task
})
torrent_tasks[hash_string] = torrent_task
# 统计数据
torrents_size += torrent.size
statistic_info["count"] += 1
@@ -2306,7 +2302,8 @@ class BrushFlow(_PluginBase):
return True, None
def __log_brush_conditions(self, passed: bool, reason: str, torrent: Any = None):
@staticmethod
def __log_brush_conditions(passed: bool, reason: str, torrent: Any = None):
"""
记录刷流日志
"""
@@ -2314,9 +2311,7 @@ class BrushFlow(_PluginBase):
if not torrent:
logger.warn(f"没有通过前置刷流条件校验,原因:{reason}")
else:
brush_config = self.__get_brush_config()
if brush_config.log_more:
logger.warn(f"种子没有通过刷流条件校验,原因:{reason} 种子:{torrent.title}|{torrent.description}")
logger.debug(f"种子没有通过刷流条件校验,原因:{reason} 种子:{torrent.title}|{torrent.description}")
# endregion
@@ -2331,10 +2326,6 @@ class BrushFlow(_PluginBase):
if not brush_config.downloader:
return
if not self.__is_current_time_in_range():
logger.info(f"当前不在指定的刷流时间区间内,检查操作将暂时暂停")
return
with lock:
logger.info("开始检查刷流下载任务 ...")
torrent_tasks: Dict[str, dict] = self.get_data("torrents") or {}
@@ -2372,34 +2363,58 @@ class BrushFlow(_PluginBase):
# 更新刷流任务列表中在下载器中删除的种子为删除状态
self.__update_undeleted_torrents_missing_in_downloader(torrent_tasks, torrent_check_hashes, check_torrents)
# 排除MoviePilot种子
if check_torrents and brush_config.except_tags:
check_torrents = self.__filter_torrents_by_tag(torrents=check_torrents,
exclude_tag=settings.TORRENT_TAG)
# 根据配置的标签进行种子排除
if check_torrents:
logger.info(f"当前刷流任务共 {len(check_torrents)} 个有效种子,正在准备按设定的种子标签进行排除")
# 初始化一个空的列表来存储需要排除的标签
tags_to_exclude = set()
# 如果 delete_except_tags 非空且不是纯空白,则添加到排除列表中
if brush_config.delete_except_tags and brush_config.delete_except_tags.strip():
tags_to_exclude.update(tag.strip() for tag in brush_config.delete_except_tags.split(','))
# 将所有需要排除的标签组合成一个字符串,每个标签之间用逗号分隔
combined_tags = ",".join(tags_to_exclude)
if combined_tags: # 确保有标签需要排除
pre_filter_count = len(check_torrents) # 获取过滤前的任务数量
check_torrents = self.__filter_torrents_by_tag(torrents=check_torrents, exclude_tag=combined_tags)
post_filter_count = len(check_torrents) # 获取过滤后的任务数量
excluded_count = pre_filter_count - post_filter_count # 计算被排除的任务数量
logger.info(
f"有效种子数 {pre_filter_count},排除标签 '{combined_tags}' 后,"
f"剩余种子数 {post_filter_count},排除种子数 {excluded_count}")
else:
logger.info("没有配置有效的排除标签,所有种子均参与后续处理")
need_delete_hashes = []
# 如果配置了动态删除以及删种阈值,则根据动态删种进行分组处理
if brush_config.proxy_delete and brush_config.delete_size_range:
logger.info("已开启动态删种,按系统默认动态删种条件开始检查任务")
proxy_delete_hashes = self.__delete_torrent_for_proxy(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(proxy_delete_hashes)
# 否则均认为是没有开启动态删种
# 种子删除检查
if not check_torrents:
logger.info("没有需要检查的任务,跳过")
else:
logger.info("没有开启动态删种,按用户设置删种条件开始检查任务")
not_proxy_delete_hashes = self.__delete_torrent_for_evaluate_conditions(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(not_proxy_delete_hashes)
need_delete_hashes = []
if need_delete_hashes:
# 如果是QB则重新汇报Tracker
if brush_config.downloader == "qbittorrent":
self.__qb_torrents_reannounce(torrent_hashes=need_delete_hashes)
# 删除种子
if downloader.delete_torrents(ids=need_delete_hashes, delete_file=True):
for torrent_hash in need_delete_hashes:
torrent_tasks[torrent_hash]["deleted"] = True
# 如果配置了动态删除以及删种阈值,则根据动态删种进行分组处理
if brush_config.proxy_delete and brush_config.delete_size_range:
logger.info("已开启动态删种,按系统默认动态删种条件开始检查任务")
proxy_delete_hashes = self.__delete_torrent_for_proxy(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(proxy_delete_hashes)
# 否则均认为是没有开启动态删种
else:
logger.info("没有开启动态删种,按用户设置删种条件开始检查任务")
not_proxy_delete_hashes = self.__delete_torrent_for_evaluate_conditions(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(not_proxy_delete_hashes)
if need_delete_hashes:
# 如果是QB则重新汇报Tracker
if brush_config.downloader == "qbittorrent":
self.__qb_torrents_reannounce(torrent_hashes=need_delete_hashes)
# 删除种子
if downloader.delete_torrents(ids=need_delete_hashes, delete_file=True):
for torrent_hash in need_delete_hashes:
torrent_tasks[torrent_hash]["deleted"] = True
torrent_tasks[torrent_hash]["deleted_time"] = time.time()
# 归档数据
self.__auto_archive_tasks(torrent_tasks=torrent_tasks)
self.__update_and_save_statistic_info(torrent_tasks)
@@ -2618,8 +2633,7 @@ class BrushFlow(_PluginBase):
reason=reason)
logger.info(f"站点:{site_name}{reason},删除种子:{torrent_title}|{torrent_desc}")
else:
if brush_config.log_more:
logger.info(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
logger.debug(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
return delete_hashes
@@ -2657,8 +2671,7 @@ class BrushFlow(_PluginBase):
reason=reason)
logger.info(f"站点:{site_name}{reason},删除种子:{torrent_title}|{torrent_desc}")
else:
if brush_config.log_more:
logger.info(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
logger.debug(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
return delete_hashes
@@ -2829,6 +2842,7 @@ class BrushFlow(_PluginBase):
torrent_task = torrent_tasks[hash_value]
# 标记为已删除
torrent_task["deleted"] = True
torrent_task["deleted_time"] = time.time()
# 处理日志相关内容
delete_tasks.append(torrent_task)
site_name = torrent_task.get("site_name", "")
@@ -2914,7 +2928,7 @@ class BrushFlow(_PluginBase):
"active_downloaded": active_downloaded
})
logger.info(f"刷流任务统计数据总任务数:{total_count},活跃任务数:{active_count},已删除:{total_deleted}"
logger.info(f"刷流任务统计数据总任务数:{total_count},活跃任务数:{active_count},已删除:{total_deleted}"
f"待归档:{total_unarchived}"
f"活跃上传量:{StringUtils.str_filesize(active_uploaded)}"
f"活跃下载量:{StringUtils.str_filesize(active_downloaded)}"
@@ -2954,7 +2968,8 @@ class BrushFlow(_PluginBase):
"seed_avgspeed": "平均上传速度",
"seed_inactivetime": "未活动时间",
"up_speed": "单任务上传限速",
"dl_speed": "单任务下载限速"
"dl_speed": "单任务下载限速",
"auto_archive_days": "自动清理记录天数"
}
config_range_number_attr_to_desc = {
@@ -3026,15 +3041,15 @@ class BrushFlow(_PluginBase):
"delete_size_range": brush_config.delete_size_range,
"up_speed": brush_config.up_speed,
"dl_speed": brush_config.dl_speed,
"auto_archive_days": brush_config.auto_archive_days,
"save_path": brush_config.save_path,
"clear_task": brush_config.clear_task,
"archive_task": brush_config.archive_task,
"except_tags": brush_config.except_tags,
"delete_except_tags": brush_config.delete_except_tags,
"except_subscribe": brush_config.except_subscribe,
"brush_sequential": brush_config.brush_sequential,
"proxy_download": brush_config.proxy_download,
"proxy_delete": brush_config.proxy_delete,
"log_more": brush_config.log_more,
"active_time_range": brush_config.active_time_range,
"downloader_monitor": brush_config.downloader_monitor,
"qb_category": brush_config.qb_category,
@@ -3131,7 +3146,7 @@ class BrushFlow(_PluginBase):
data = data.get(key)
if not data:
return None
logger.info(f"获取到下载地址:{data}")
logger.debug(f"获取到下载地址:{data}")
return data
return None
@@ -3201,8 +3216,7 @@ class BrushFlow(_PluginBase):
# 获取种子Hash
torrent_hash = self.qb.get_torrent_id_by_tag(tags=tag)
if not torrent_hash:
logger.error(f"{brush_config.downloader} 获取种子Hash失败"
f"{',请尝试启用「代理下载种子」配置项' if not brush_config.proxy_download else ''}")
logger.error(f"{brush_config.downloader} 获取种子Hash失败,详细信息请查看 README")
return None
return torrent_hash
return None
@@ -3654,12 +3668,21 @@ class BrushFlow(_PluginBase):
"""
获取正在下载的任务数量
"""
brush_config = self.__get_brush_config()
downloader = self.__get_downloader(brush_config.downloader)
if not downloader:
try:
brush_config = self.__get_brush_config()
downloader = self.__get_downloader(brush_config.downloader)
if not downloader:
return 0
torrents = downloader.get_downloading_torrents(tags=brush_config.brush_tag)
if torrents is None:
logger.warn("获取下载数量失败,可能是下载器连接发生异常")
return 0
return len(torrents)
except Exception as e:
logger.error(f"获取下载数量发生异常: {e}")
return 0
torrents = downloader.get_downloading_torrents()
return len(torrents) or 0
@staticmethod
def __get_pubminutes(pubdate: str) -> float:
@@ -3705,14 +3728,21 @@ class BrushFlow(_PluginBase):
def __filter_torrents_by_tag(self, torrents: List[Any], exclude_tag: str) -> List[Any]:
"""
根据标签过滤torrents
根据标签过滤torrents,排除标签格式为逗号分隔的字符串,例如 "MOVIEPILOT, H&R"
"""
# 如果排除标签字符串为空,则返回原始列表
if not exclude_tag:
return torrents
# 将 exclude_tag 字符串分割成一个集合,并去除每个标签两端的空白,忽略空白标签并自动去重
exclude_tags = set(tag.strip() for tag in exclude_tag.split(',') if tag.strip())
filter_torrents = []
for torrent in torrents:
# 使用 __get_label 方法获取每个 torrent 的标签列表
labels = self.__get_label(torrent)
# 如果排除标签不在这个列表中,则添加到过滤后的列表
if exclude_tag not in labels:
# 检查是否有任何一个排除标签存在于标签列表中
if not any(exclude in labels for exclude in exclude_tags):
filter_torrents.append(torrent)
return filter_torrents
@@ -3752,7 +3782,8 @@ class BrushFlow(_PluginBase):
doubanid=subscribe.doubanid,
cache=True)
if mediainfo:
logger.info(f"subscribe {subscribe.name} {mediainfo.to_dict()}")
logger.info(f"订阅 {subscribe.name} 已识别到媒体信息")
logger.debug(f"subscribe {subscribe.name} {mediainfo.to_dict()}")
subscribe_titles.extend(mediainfo.names)
subscribe_titles = [title.strip() for title in subscribe_titles if title and title.strip()]
self._subscribe_infos[subscribe_key] = subscribe_titles
@@ -3766,7 +3797,8 @@ class BrushFlow(_PluginBase):
for key in set(self._subscribe_infos) - current_keys:
del self._subscribe_infos[key]
logger.info(f"订阅标题匹配完成,当前订阅的标题集合为:{self._subscribe_infos}")
logger.info("订阅标题匹配完成")
logger.debug(f"当前订阅的标题集合为:{self._subscribe_infos}")
unique_titles = {title for titles in self._subscribe_infos.values() for title in titles}
return unique_titles
@@ -3833,6 +3865,45 @@ class BrushFlow(_PluginBase):
"""
return sum(task.get("size", 0) for task in torrent_tasks.values() if not task.get("deleted", False))
def __auto_archive_tasks(self, torrent_tasks: Dict[str, dict]) -> None:
"""
自动归档已经删除的种子数据
"""
if not self._brush_config.auto_archive_days or self._brush_config.auto_archive_days <= 0:
logger.info("自动归档记录天数小于等于0取消自动归档")
return
# 用于存储已删除的数据
archived_tasks: Dict[str, dict] = self.get_data("archived") or {}
current_time = time.time()
archive_threshold_seconds = self._brush_config.auto_archive_days * 86400 # 将天数转换为秒数
# 准备一个列表,记录所有需要从原始数据中删除的键
keys_to_delete = set()
# 遍历所有 torrent 条目
for key, value in torrent_tasks.items():
deleted_time = value.get("deleted_time")
# 场景 1: 检查任务是否已被标记为删除且超出保留天数
if (value.get("deleted") and isinstance(deleted_time, (int, float)) and
current_time - deleted_time > archive_threshold_seconds):
keys_to_delete.add(key)
archived_tasks[key] = value
continue
# 场景 2: 检查没有明确删除时间的历史数据
if value.get("deleted") and deleted_time is None:
keys_to_delete.add(key)
archived_tasks[key] = value
continue
# 从原始字典中移除已删除的条目
for key in keys_to_delete:
del torrent_tasks[key]
self.save_data("archived", archived_tasks)
def __archive_tasks(self):
"""
归档已经删除的种子数据
@@ -3843,7 +3914,7 @@ class BrushFlow(_PluginBase):
archived_tasks: Dict[str, dict] = self.get_data("archived") or {}
# 准备一个列表,记录所有需要从原始数据中删除的键
keys_to_delete = []
keys_to_delete = set()
# 遍历所有 torrent 条目
for key, value in torrent_tasks.items():
@@ -3852,7 +3923,7 @@ class BrushFlow(_PluginBase):
# 如果是,加入到归档字典中
archived_tasks[key] = value
# 记录键,稍后删除
keys_to_delete.append(key)
keys_to_delete.add(key)
# 从原始字典中移除已删除的条目
for key in keys_to_delete:

View File

@@ -18,7 +18,7 @@ class CustomHosts(_PluginBase):
# 插件图标
plugin_icon = "hosts.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.2"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -235,6 +235,12 @@ class CustomHosts(_PluginBase):
for host in hosts:
if not host:
continue
host = host.strip()
if host.startswith('#'): # 检查是否为注释行
host_entry = HostsEntry(entry_type='comment', comment=host)
new_entrys.append(host_entry)
continue
host_arr = str(host).split()
try:
host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6',

View File

@@ -0,0 +1,269 @@
import re
import time
import hmac
import hashlib
import base64
import urllib.parse
from app.plugins import _PluginBase
from app.core.event import eventmanager, Event
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class DingdingMsg(_PluginBase):
    """
    Forward MoviePilot notifications to a DingTalk (钉钉) custom robot.

    The robot is addressed by its access token and authenticated with the
    "加签" (HMAC-SHA256 signature) scheme, so both a token and a secret are
    required for the plugin to be considered active.
    """

    # Plugin display name
    plugin_name = "钉钉机器人"
    # Plugin description
    plugin_desc = "支持使用钉钉机器人发送消息通知。"
    # Plugin icon
    plugin_icon = "Dingding_A.png"
    # Plugin version
    plugin_version = "1.12"
    # Plugin author
    plugin_author = "nnlegenda"
    # Author homepage
    author_url = "https://github.com/nnlegenda"
    # Config key prefix for this plugin
    plugin_config_prefix = "dingdingmsg_"
    # Load order
    plugin_order = 25
    # Required user auth level
    auth_level = 1

    # Private state (populated from config in init_plugin)
    _enabled = False
    _token = None
    _secret = None
    _msgtypes = []

    def init_plugin(self, config: dict = None):
        """Load plugin configuration into private attributes."""
        if config:
            self._enabled = config.get("enabled")
            self._token = config.get("token")
            self._secret = config.get("secret")
            self._msgtypes = config.get("msgtypes") or []

    def get_state(self) -> bool:
        """The plugin is active only when enabled AND both token and secret are set."""
        return bool(self._enabled) and bool(self._token) and bool(self._secret)

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构
        """
        # Build the message-type options from the NotificationType enum
        MsgTypeOptions = []
        for item in NotificationType:
            MsgTypeOptions.append({
                "title": item.value,
                "value": item.name
            })
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'token',
                                            'label': '钉钉机器人token',
                                            'placeholder': 'xxxxxx',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'secret',
                                            'label': '加签',
                                            'placeholder': 'SECxxx',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VSelect',
                                        'props': {
                                            'multiple': True,
                                            'chips': True,
                                            'model': 'msgtypes',
                                            'label': '消息类型',
                                            'items': MsgTypeOptions
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                ]
            }
        ], {
            # Fix: the form binds a 'secret' model, so the defaults must
            # include it, otherwise the field is undefined on first load.
            "enabled": False,
            "token": '',
            "secret": '',
            "msgtypes": []
        }

    def get_page(self) -> List[dict]:
        pass

    @eventmanager.register(EventType.NoticeMessage)
    def send(self, event: Event):
        """
        Handle a NoticeMessage event and forward it to the DingTalk robot.

        Skips messages addressed to a specific channel, empty messages, and
        message types not enabled in the plugin configuration.
        """
        if not self.get_state():
            return
        if not event.event_data:
            return
        msg_body = event.event_data
        # Channel-targeted messages are handled elsewhere, not by this plugin
        channel = msg_body.get("channel")
        if channel:
            return
        # Message type
        msg_type: NotificationType = msg_body.get("type")
        # Title
        title = msg_body.get("title")
        # Body text
        text = msg_body.get("text")
        # Cover image
        cover = msg_body.get("image")
        if not title and not text:
            logger.warn("标题和内容不能同时为空")
            return
        if (msg_type and self._msgtypes
                and msg_type.name not in self._msgtypes):
            logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
            return
        sc_url = self.url_sign(self._token, self._secret)
        # Fix: msg_type may be absent from the event body; guard against
        # AttributeError when building the markdown footer.
        type_desc = msg_type.value if msg_type else ""
        try:
            if text:
                # Escape Markdown special characters in the body text
                text = re.sub(r"([_`])", r"\\\1", text)
            else:
                text = ""
            if cover:
                data = {
                    "msgtype": "markdown",
                    "markdown": {
                        "title": title,
                        "text": "### %s\n\n"
                                "![Cover](%s)\n\n"
                                "> %s\n\n > MoviePilot %s\n" % (title, cover, text, type_desc)
                    }
                }
            else:
                data = {
                    "msgtype": "markdown",
                    "markdown": {
                        "title": title,
                        "text": "### %s\n\n"
                                "> %s\n\n > MoviePilot %s\n" % (title, text, type_desc)
                    }
                }
            res = RequestUtils(content_type="application/json").post_res(sc_url, json=data)
            if res and res.status_code == 200:
                ret_json = res.json()
                errno = ret_json.get('errcode')
                error = ret_json.get('errmsg')
                if errno == 0:
                    logger.info("钉钉机器人消息发送成功")
                else:
                    logger.warn(f"钉钉机器人消息发送失败,错误码:{errno},错误原因:{error}")
            elif res is not None:
                logger.warn(f"钉钉机器人消息发送失败,错误码:{res.status_code},错误原因:{res.reason}")
            else:
                logger.warn("钉钉机器人消息发送失败,未获取到返回信息")
        except Exception as msg_e:
            logger.error(f"钉钉机器人消息发送失败,{str(msg_e)}")

    def stop_service(self):
        """
        退出插件
        """
        pass

    def url_sign(self, access_token: str, secret: str) -> str:
        """
        Build the signed webhook URL for the DingTalk robot ("加签" scheme).

        :param access_token: the robot's access token
        :param secret: the robot's signing secret (starts with "SEC")
        :return: the full webhook URL including timestamp and signature
        """
        # Millisecond timestamp + HMAC-SHA256 signature over "timestamp\nsecret"
        timestamp = str(round(time.time() * 1000))
        secret_enc = secret.encode('utf-8')
        string_to_sign = '{}\n{}'.format(timestamp, secret)
        string_to_sign_enc = string_to_sign.encode('utf-8')
        hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
        sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
        # Assemble the complete request URL
        full_url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}&timestamp={timestamp}&sign={sign}'
        return full_url

View File

@@ -330,7 +330,7 @@ class DirMonitor(_PluginBase):
return
# 不是媒体文件不处理
if file_path.suffix not in settings.RMT_MEDIAEXT:
if file_path.suffix.casefold() not in map(str.casefold, settings.RMT_MEDIAEXT):
logger.debug(f"{event_path} 不是媒体文件")
return

View File

@@ -34,7 +34,7 @@ class DoubanSync(_PluginBase):
# 插件图标
plugin_icon = "douban.png"
# 插件版本
plugin_version = "1.8"
plugin_version = "1.9.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -498,6 +498,11 @@ class DoubanSync(_PluginBase):
"""
if not self._users:
return
# 版本
if hasattr(settings, 'VERSION_FLAG'):
version = settings.VERSION_FLAG # V2
else:
version = "v1"
# 读取历史记录
if self._clearflag:
history = []
@@ -509,7 +514,12 @@ class DoubanSync(_PluginBase):
continue
logger.info(f"开始同步用户 {user_id} 的豆瓣想看数据 ...")
url = self._interests_url % user_id
results = self.rsshelper.parse(url)
if version == "v2":
results = self.rsshelper.parse(url, headers={
"User-Agent": settings.USER_AGENT
})
else:
results = self.rsshelper.parse(url)
if not results:
logger.warn(f"未获取到用户 {user_id} 豆瓣RSS数据{url}")
continue

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,296 @@
import re
import requests
from app.modules.wechat import WeChat
from app.schemas.types import NotificationType,MessageChannel
import os
import json
import requests
import base64
import hashlib
from typing import Dict, Any
from Crypto import Random
from Crypto.Cipher import AES
def bytes_to_key(data: bytes, salt: bytes, output=48) -> bytes:
    """Derive *output* bytes of key material from a passphrase and salt.

    Implements the OpenSSL EVP_BytesToKey KDF with MD5 (the scheme used by
    CryptoJS), kept here for compatibility with the V2 helpers: MD5 digests
    are chained, each round hashing the previous digest plus passphrase+salt,
    until enough material has accumulated.
    """
    assert len(salt) == 8, len(salt)
    seed = data + salt
    blocks = [hashlib.md5(seed).digest()]
    while sum(len(b) for b in blocks) < output:
        blocks.append(hashlib.md5(blocks[-1] + seed).digest())
    return b"".join(blocks)[:output]
def encrypt(message: bytes, passphrase: bytes) -> bytes:
    """
    Encrypt *message* in the CryptoJS / OpenSSL "Salted__" envelope format.

    A fresh 8-byte salt is drawn, a 32-byte AES key and 16-byte IV are derived
    via bytes_to_key, the message is PKCS#7-padded to the 16-byte block size,
    and the result is returned base64-encoded as b"Salted__" + salt + ciphertext.
    Adapted from https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
    """
    salt = Random.new().read(8)
    derived = bytes_to_key(passphrase, salt, 32 + 16)
    cipher = AES.new(derived[:32], AES.MODE_CBC, derived[32:])
    fill = 16 - (len(message) % 16)
    padded = message + bytes([fill]) * fill
    return base64.b64encode(b"Salted__" + salt + cipher.encrypt(padded))
class PyCookieCloud:
    """Minimal CookieCloud client used to push cookies to a CookieCloud server.

    Payloads are AES-encrypted with a key derived from uuid and password
    (see get_the_key) before upload.
    """

    def __init__(self, url: str, uuid: str, password: str):
        # Server base URL, user key (uuid) and end-to-end password
        self.url: str = url
        self.uuid: str = uuid
        self.password: str = password

    def check_connection(self) -> bool:
        """
        Test the connection to the CookieCloud server.

        :return: True if the connection is successful, False otherwise.
        """
        try:
            # 3-second timeout so a dead server cannot block the caller
            resp = requests.get(self.url, timeout=3)
            return resp.status_code == 200
        except Exception:
            # Best-effort probe: any network error simply means "not reachable"
            return False

    def update_cookie(self, formatted_cookies: Dict[str, Any]) -> bool:
        """
        Update cookie data to CookieCloud.

        :param formatted_cookies: cookie value to update.
        :return: if update success, return True, else return False.
        """
        # Always include a marker cookie for work.weixin.qq.com so the
        # receiving side can distinguish this upload type.
        if '.work.weixin.qq.com' not in formatted_cookies:
            formatted_cookies['.work.weixin.qq.com'] = []
        formatted_cookies['.work.weixin.qq.com'].append({
            'name': '_upload_type',
            'value': 'A',
            'domain': '.work.weixin.qq.com',
            'path': '/',
            'expires': -1,
            'httpOnly': False,
            'secure': False,
            'sameSite': 'Lax'
        })
        cookie = {'cookie_data': formatted_cookies}
        raw_data = json.dumps(cookie)
        # Encrypt with the derived key; server stores only ciphertext
        encrypted_data = encrypt(raw_data.encode('utf-8'), self.get_the_key().encode('utf-8')).decode('utf-8')
        cookie_cloud_request = requests.post(self.url + '/update',
                                             json={'uuid': self.uuid, 'encrypted': encrypted_data})
        if cookie_cloud_request.status_code == 200:
            if cookie_cloud_request.json().get('action') == 'done':
                return True
        return False

    def get_the_key(self) -> str:
        """
        Get the key used to encrypt and decrypt data.

        :return: the key (first 16 hex chars of md5("<uuid>-<password>")).
        """
        md5 = hashlib.md5()
        md5.update((self.uuid + '-' + self.password).encode('utf-8'))
        return md5.hexdigest()[:16]

    @staticmethod
    def load_cookie_lifetime(settings_file: str = None) -> int:
        """Return the stored cookie lifetime timestamp in seconds (0 if absent)."""
        # Fix: guard against settings_file being None — os.path.exists(None)
        # raises TypeError, which the previous code did not handle.
        if settings_file and os.path.exists(settings_file):
            with open(settings_file, 'r') as file:
                settings = json.load(file)
            return settings.get('_cookie_lifetime', 0)
        return 0

    @staticmethod
    def save_cookie_lifetime(settings_file, cookie_lifetime):
        """Persist the cookie lifetime timestamp (seconds) to settings_file."""
        with open(settings_file, 'w') as file:
            json.dump({'_cookie_lifetime': cookie_lifetime}, file)

    @staticmethod
    def increase_cookie_lifetime(settings_file, seconds: int):
        """Add *seconds* to the stored cookie lifetime, creating the file if needed."""
        if os.path.exists(settings_file):
            with open(settings_file, 'r') as file:
                settings = json.load(file)
            current_lifetime = settings.get('_cookie_lifetime', 0)
        else:
            current_lifetime = 0
        new_lifetime = current_lifetime + seconds
        # Persist the new _cookie_lifetime
        PyCookieCloud.save_cookie_lifetime(settings_file, new_lifetime)
class MySender:
    """Multi-channel notification sender with automatic failover.

    Tokens are supplied as a single string, optionally joined with "||";
    each token is mapped to a channel (WeChat / ServerChan / AnPush /
    PushPlus) by its format. send() tries channels in order, remembering
    the last working index, and rate-limits plain-text messages so only
    the first one goes out unless force_send is set.
    """

    def __init__(self, token=None, func=None):
        # "||" separates multiple tokens; an empty/None token disables the sender
        self.tokens = token.split('||') if token and '||' in token else [token] if token else []
        self.channels = [MySender._detect_channel(t) for t in self.tokens]
        self.current_index = 0  # index of the token/channel currently in use
        self.first_text_sent = False  # whether a plain-text message was already sent
        self.init_success = bool(self.tokens)  # whether initialisation succeeded
        self.post_message_func = func  # post_message callable for V2 WeChat mode

    @staticmethod
    def _detect_channel(token):
        """Infer the notification channel from the token's format."""
        if "WeChat" in token:
            return "WeChat"
        letters_only = ''.join(re.findall(r'[A-Za-z]', token))
        if token.lower().startswith("sct"):
            return "ServerChan"
        elif letters_only.isupper():
            return "AnPush"
        else:
            return "PushPlus"

    def send(self, title, content=None, image=None, force_send=False, diy_channel=None):
        """Send a message, trying each configured channel until one succeeds.

        Returns None on success, or an error string if every channel failed.
        """
        if not self.init_success:
            return

        # Rate-limit plain-text messages: only the first one is sent
        # unless force_send is set
        if not image and not force_send:
            if self.first_text_sent:
                return
            self.first_text_sent = True

        # If a specific channel is requested, try it directly
        if diy_channel:
            return self._try_send(title, content, image, diy_channel)

        # Otherwise try channels in order until one succeeds or all fail
        for i in range(len(self.tokens)):
            token = self.tokens[self.current_index]
            channel = self.channels[self.current_index]
            try:
                result = self._try_send(title, content, image, channel, token)
                if result is None:  # success is signalled by returning None
                    return
            except Exception as e:
                pass  # ignore the individual failure and try the next channel
            self.current_index = (self.current_index + 1) % len(self.tokens)

        return f"所有的通知方式都发送失败"

    def _try_send(self, title, content, image, channel, token=None):
        """Dispatch the message to the handler for the given channel."""
        if channel == "WeChat" and self.post_message_func:
            return self._send_v2_wechat(title, content, image, token)
        elif channel == "WeChat":
            return self._send_wechat(title, content, image, token)
        elif channel == "ServerChan":
            return self._send_serverchan(title, content, image)
        elif channel == "AnPush":
            return self._send_anpush(title, content, image)
        elif channel == "PushPlus":
            return self._send_pushplus(title, content, image)
        else:
            raise ValueError(f"Unknown channel: {channel}")

    @staticmethod
    def _send_wechat(title, content, image, token):
        """Send via enterprise WeChat; returns None on success, error text otherwise."""
        wechat = WeChat()
        # token may carry "channel,userid"; the part after the comma is the target user
        if token and ',' in token:
            channel, actual_userid = token.split(',', 1)
        else:
            actual_userid = None
        if image:
            send_status = wechat.send_msg(title='企业微信登录二维码', image=image, link=image, userid=actual_userid)
        else:
            send_status = wechat.send_msg(title=title, text=content, userid=actual_userid)
        if send_status is None:
            return "微信通知发送错误"
        return None

    def _send_serverchan(self, title, content, image):
        """Send via ServerChan (sct/sctp keys); returns None on success."""
        tmp_tokens = self.tokens[self.current_index]
        # A comma-separated token holds two keys: sctp (Server3) and a
        #公众号 key; images go to the second, text to the first
        if ',' in tmp_tokens:
            before_comma, after_comma = tmp_tokens.split(',', 1)
            if before_comma.startswith('sctp') and image:
                token = after_comma  # images are sent to the 公众号 key
            else:
                token = before_comma  # text goes to Server3
        else:
            token = tmp_tokens
        if token.startswith('sctp'):
            # Server3 keys embed the instance number: sctp<NUM>t...
            match = re.match(r'sctp(\d+)t', token)
            if match:
                num = match.group(1)
                url = f'https://{num}.push.ft07.com/send/{token}.send'
            else:
                return '错误的Server3 Sendkey'
        else:
            url = f'https://sctapi.ftqq.com/{token}.send'
        params = {'title': title, 'desp': f'![img]({image})' if image else content}
        headers = {'Content-Type': 'application/json;charset=utf-8'}
        response = requests.post(url, json=params, headers=headers)
        result = response.json()
        if result.get('code') != 0:
            return f"Server酱通知错误: {result.get('message')}"
        return None

    def _send_anpush(self, title, content, image):
        """Send via AnPush; token must carry "channel_id,key"; returns None on success."""
        token = self.tokens[self.current_index]  # token for the current channel
        if ',' in token:
            channel, token = token.split(',', 1)
        else:
            return "可能AnPush 没有配置消息通道ID"
        url = f"https://api.anpush.com/push/{token}"
        payload = {
            "title": title,
            "content": f"<img src=\"{image}\" width=\"100%\">" if image else content,
            "channel": channel
        }
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        response = requests.post(url, headers=headers, data=payload)
        result = response.json()
        # Check both the response code and the returned msgIds
        if result.get('code') != 200:
            return f"AnPush: {result.get('msg')}"
        elif not result.get('data') or not result['data'].get('msgIds'):
            return "AnPush 消息通道未找到"
        return None

    def _send_pushplus(self, title, content, image):
        """Send via PushPlus; returns None on success, error text otherwise."""
        token = self.tokens[self.current_index]  # token for the current channel
        pushplus_url = f"http://www.pushplus.plus/send/{token}"
        # PushPlus expects HTML content
        data = {
            "title": title,
            "content": f"企业微信登录二维码<br/><img src='{image}' />" if image else content,
            "template": "html"
        }
        response = requests.post(pushplus_url, json=data)
        result = response.json()
        if result.get('code') != 200:
            return f"PushPlus send failed: {result.get('msg')}"
        return None

    def _send_v2_wechat(self, title, content, image, token):
        """Send via the V2 WeChat post_message callback."""
        if token and ',' in token:
            _, actual_userid = token.split(',', 1)
        else:
            actual_userid = None
        self.post_message_func(
            channel=MessageChannel.Wechat,
            mtype=NotificationType.Plugin,
            title=title,
            text=content,
            image=image,
            link=image,
            userid=actual_userid
        )
        return None  # post_message() returns nothing but None, so success cannot be verified; V2 assumes success

    def reset_limit(self):
        """Lift the rate limit so another plain-text message may be sent."""
        self.first_text_sent = False

View File

@@ -0,0 +1,87 @@
import hashlib
from typing import Dict, Any
import json
import requests
from urllib.parse import urljoin
from Cryptodome import Random
from Cryptodome.Cipher import AES
import base64
# AES block size in bytes
BLOCK_SIZE = 16


def pad(data):
    """Apply PKCS#7 padding so len(result) is a multiple of BLOCK_SIZE.

    A full block of padding is appended when the input is already aligned.
    """
    fill = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + bytes([fill] * fill)
def bytes_to_key(data, salt, output=48):
    """Derive *output* bytes of key material (OpenSSL EVP_BytesToKey, MD5).

    extended from https://gist.github.com/gsakkis/4546068
    """
    assert len(salt) == 8, len(salt)
    material = data + salt
    digest = hashlib.md5(material).digest()
    derived = digest
    # Chain MD5 rounds until enough key material has accumulated
    while len(derived) < output:
        digest = hashlib.md5(digest + material).digest()
        derived += digest
    return derived[:output]
def encrypt(message, passphrase):
    """Encrypt *message* in the OpenSSL/CryptoJS "Salted__" envelope.

    A random 8-byte salt is drawn, 48 bytes of key material are derived
    with bytes_to_key (32-byte AES-256 key + 16-byte IV), the message is
    PKCS#7-padded via pad(), and the output is base64 of
    b"Salted__" + salt + AES-CBC ciphertext.
    """
    salt = Random.new().read(8)
    key_iv = bytes_to_key(passphrase, salt, 32 + 16)
    # First 32 bytes are the key, the remaining 16 the IV
    key = key_iv[:32]
    iv = key_iv[32:]
    aes = AES.new(key, AES.MODE_CBC, iv)
    return base64.b64encode(b"Salted__" + salt + aes.encrypt(pad(message)))
class PyCookieCloud:
    """Minimal CookieCloud client: connectivity check and encrypted cookie upload."""

    def __init__(self, url: str, uuid: str, password: str):
        # Server base URL, user key (uuid) and end-to-end password
        self.url: str = url
        self.uuid: str = uuid
        self.password: str = password

    def check_connection(self) -> bool:
        """
        Test the connection to the CookieCloud server.

        :return: True if the connection is successful, False otherwise.
        """
        try:
            # Fix: add a timeout (consistent with the other client copy)
            # so an unreachable server cannot block the caller indefinitely.
            resp = requests.get(self.url, timeout=3)
            return resp.status_code == 200
        except Exception as e:
            print(str(e))
            return False

    def update_cookie(self, cookie: Dict[str, Any]) -> bool:
        """
        Update cookie data to CookieCloud.

        :param cookie: cookie value to update, if this cookie does not contain 'cookie_data' key, it will be added into 'cookie_data'.
        :return: if update success, return True, else return False.
        """
        if 'cookie_data' not in cookie:
            cookie = {'cookie_data': cookie}
        raw_data = json.dumps(cookie)
        # Encrypt with the derived key; the server stores only ciphertext
        encrypted_data = encrypt(raw_data.encode('utf-8'), self.get_the_key().encode('utf-8')).decode('utf-8')
        cookie_cloud_request = requests.post(urljoin(self.url, '/update'),
                                             data={'uuid': self.uuid, 'encrypted': encrypted_data})
        if cookie_cloud_request.status_code == 200:
            # Fix: use .get() so an unexpected response body without an
            # 'action' key yields False instead of raising KeyError.
            if cookie_cloud_request.json().get('action') == 'done':
                return True
        return False

    def get_the_key(self) -> str:
        """
        Get the key used to encrypt and decrypt data.

        :return: the key (first 16 hex chars of md5("<uuid>-<password>")).
        """
        md5 = hashlib.md5()
        md5.update((self.uuid + '-' + self.password).encode('utf-8'))
        return md5.hexdigest()[:16]

File diff suppressed because it is too large Load Diff

View File

@@ -34,7 +34,7 @@ class IYUUAutoSeed(_PluginBase):
# 插件图标
plugin_icon = "IYUU.png"
# 插件版本
plugin_version = "1.9.3"
plugin_version = "1.9.6"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -957,6 +957,10 @@ class IYUUAutoSeed(_PluginBase):
if self._skipverify:
# 跳过校验
logger.info(f"{download_id} 跳过校验,请自行检查...")
# 请注意这里是故意不自动开始的
# 跳过校验存在直接失败、种子目录相同文件不同等异常情况
# 必须要用户自行二次确认之后才能开始做种
# 否则会出现反复下载刷掉分享率、做假种的情况
else:
# 追加校验任务
logger.info(f"添加校验检查任务:{download_id} ...")

View File

@@ -11,7 +11,7 @@ class IyuuHelper(object):
适配新版本IYUU开发版
"""
_version = "8.2.0"
_api_base = "https://dev.iyuu.cn"
_api_base = "https://2025.iyuu.cn"
_sites = {}
_token = None
_sid_sha1 = None

View File

@@ -1,11 +1,14 @@
import threading
from queue import Queue
from time import time, sleep
from typing import Any, List, Dict, Tuple
from urllib.parse import urlencode
from app.plugins import _PluginBase
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class IyuuMsg(_PluginBase):
@@ -16,7 +19,7 @@ class IyuuMsg(_PluginBase):
# 插件图标
plugin_icon = "Iyuu_A.png"
# 插件版本
plugin_version = "1.2"
plugin_version = "1.3"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -33,12 +36,30 @@ class IyuuMsg(_PluginBase):
_token = None
_msgtypes = []
# 消息处理线程
processing_thread = None
# 上次发送时间
last_send_time = 0
# 消息队列
message_queue = Queue()
# 消息发送间隔(秒)
send_interval = 5
# 退出事件
__event = threading.Event()
def init_plugin(self, config: dict = None):
self.__event.clear()
if config:
self._enabled = config.get("enabled")
self._token = config.get("token")
self._msgtypes = config.get("msgtypes") or []
if self._enabled and self._token:
# 启动处理队列的后台线程
self.processing_thread = threading.Thread(target=self.process_queue)
self.processing_thread.daemon = True
self.processing_thread.start()
def get_state(self) -> bool:
return self._enabled and (True if self._token else False)
@@ -143,55 +164,77 @@ class IyuuMsg(_PluginBase):
@eventmanager.register(EventType.NoticeMessage)
def send(self, event: Event):
"""
消息发送事件
消息发送事件,将消息加入队列
"""
if not self.get_state():
return
if not event.event_data:
if not self.get_state() or not event.event_data:
return
msg_body = event.event_data
# 渠道
channel = msg_body.get("channel")
if channel:
return
# 类型
msg_type: NotificationType = msg_body.get("type")
# 标题
title = msg_body.get("title")
# 文本
text = msg_body.get("text")
if not title and not text:
# 验证消息的有效性
if not msg_body.get("title") and not msg_body.get("text"):
logger.warn("标题和内容不能同时为空")
return
if (msg_type and self._msgtypes
and msg_type.name not in self._msgtypes):
logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
return
# 将消息加入队列
self.message_queue.put(msg_body)
logger.info("消息已加入队列等待发送")
try:
sc_url = "https://iyuu.cn/%s.send?%s" % (self._token, urlencode({"text": title, "desp": text}))
res = RequestUtils().get_res(sc_url)
if res and res.status_code == 200:
ret_json = res.json()
errno = ret_json.get('errcode')
error = ret_json.get('errmsg')
if errno == 0:
logger.info("IYUU消息发送成功")
def process_queue(self):
"""
处理队列中的消息,按间隔时间发送
"""
while True:
if self.__event.is_set():
logger.info("消息发送线程正在退出...")
break
# 获取队列中的下一条消息
msg_body = self.message_queue.get()
# 检查是否满足发送间隔时间
current_time = time()
time_since_last_send = current_time - self.last_send_time
if time_since_last_send < self.send_interval:
sleep(self.send_interval - time_since_last_send)
# 处理消息内容
channel = msg_body.get("channel")
if channel:
continue
msg_type: NotificationType = msg_body.get("type")
title = msg_body.get("title")
text = msg_body.get("text")
# 检查消息类型是否已启用
if msg_type and self._msgtypes and msg_type.name not in self._msgtypes:
logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
continue
# 尝试发送消息
try:
sc_url = "https://iyuu.cn/%s.send?%s" % (self._token, urlencode({"text": title, "desp": text}))
res = RequestUtils().get_res(sc_url)
if res and res.status_code == 200:
ret_json = res.json()
errno = ret_json.get('errcode')
error = ret_json.get('errmsg')
if errno == 0:
logger.info("IYUU消息发送成功")
# 更新上次发送时间
self.last_send_time = time()
else:
logger.warn(f"IYUU消息发送失败错误码{errno},错误原因:{error}")
elif res is not None:
logger.warn(f"IYUU消息发送失败错误码{res.status_code},错误原因:{res.reason}")
else:
logger.warn(f"IYUU消息发送失败错误码:{errno},错误原因:{error}")
elif res is not None:
logger.warn(f"IYUU消息发送失败错误码:{res.status_code},错误原因:{res.reason}")
else:
logger.warn("IYUU消息发送失败未获取到返回信息")
except Exception as msg_e:
logger.error(f"IYUU消息发送失败{str(msg_e)}")
logger.warn("IYUU消息发送失败未获取到返回信息")
except Exception as msg_e:
logger.error(f"IYUU消息发送失败{str(msg_e)}")
# 标记任务完成
self.message_queue.task_done()
def stop_service(self):
"""
退出插件
"""
pass
self.__event.set()

View File

@@ -20,7 +20,7 @@ class MediaServerMsg(_PluginBase):
# 插件图标
plugin_icon = "mediaplay.png"
# 插件版本
plugin_version = "1.2"
plugin_version = "1.3"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -40,6 +40,7 @@ class MediaServerMsg(_PluginBase):
# 私有属性
_enabled = False
_types = []
_webhook_msg_keys = {}
# 拼装消息内容
_webhook_actions = {
@@ -198,6 +199,13 @@ class MediaServerMsg(_PluginBase):
logger.info(f"未开启 {event_info.event} 类型的消息通知")
return
expiring_key = f"{event_info.item_id}-{event_info.client}-{event_info.user_name}"
# 过滤停止播放重复消息
if str(event_info.event) == "playback.stop" and expiring_key in self._webhook_msg_keys.keys():
# 刷新过期时间
self.__add_element(expiring_key)
return
# 消息标题
if event_info.item_type in ["TV", "SHOW"]:
message_title = f"{self._webhook_actions.get(event_info.event)}剧集 {event_info.item_name}"
@@ -255,10 +263,31 @@ class MediaServerMsg(_PluginBase):
else:
play_link = None
if str(event_info.event) == "playback.stop":
# 停止播放消息,添加到过期字典
self.__add_element(expiring_key)
if str(event_info.event) == "playback.start":
# 开始播放消息,删除过期字典
self.__remove_element(expiring_key)
# 发送消息
self.post_message(mtype=NotificationType.MediaServer,
title=message_title, text=message_content, image=image_url, link=play_link)
def __add_element(self, key, duration=600):
expiration_time = time.time() + duration
# 如果元素已经存在,更新其过期时间
self._webhook_msg_keys[key] = expiration_time
def __remove_element(self, key):
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if k != key}
def __get_elements(self):
current_time = time.time()
# 过滤掉过期的元素
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if v > current_time}
return list(self._webhook_msg_keys.keys())
def stop_service(self):
"""
退出插件

View File

@@ -29,7 +29,7 @@ class MediaSyncDel(_PluginBase):
# 插件图标
plugin_icon = "mediasyncdel.png"
# 插件版本
plugin_version = "1.7"
plugin_version = "1.7.1"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -1324,7 +1324,7 @@ class MediaSyncDel(_PluginBase):
downloader=downloader)
# 暂停辅种
else:
self.chain.stop_torrents(hashs=torrent, download=downloader)
self.chain.stop_torrents(hashs=torrent, downloader=downloader)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
# 处理辅种的辅种

View File

@@ -15,7 +15,7 @@ class MPServerStatus(_PluginBase):
# 插件图标
plugin_icon = "Duplicati_A.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -73,7 +73,21 @@ class MPServerStatus(_PluginBase):
}
def get_page(self) -> List[dict]:
pass
"""
获取插件页面
"""
if not self._enable:
return [
{
'component': 'div',
'text': '插件未启用',
'props': {
'class': 'text-center',
}
}
]
_, _, elements = self.get_dashboard()
return elements
def get_dashboard(self) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]:
"""

View File

@@ -11,11 +11,11 @@ class PushPlusMsg(_PluginBase):
# 插件名称
plugin_name = "PushPlus消息推送"
# 插件描述
plugin_desc = "支持使用PushPlus发送消息通知。"
plugin_desc = "支持使用PushPlus发送消息通知(需实名认证)"
# 插件图标
plugin_icon = "Pushplus_A.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "cheng"
# 作者主页
@@ -128,6 +128,27 @@ class PushPlusMsg(_PluginBase):
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '由于pushplus规则更新没有实名认证的用户无法发送消息所以需要用户自己去官网进行认证。官网地址:https://www.pushplus.plus'
}
}
]
}
]
}
]
}
], {

View File

@@ -497,7 +497,6 @@ class RemoveLink(_PluginBase):
self._transferhistory.delete(transfer_history.id)
logger.info(f"删除历史记录:{transfer_history.id}")
def delete_empty_folders(self, path):
"""
从指定路径开始,逐级向上层目录检测并删除空目录,直到遇到非空目录或到达指定监控目录为止
@@ -589,7 +588,7 @@ class RemoveLink(_PluginBase):
mtype=NotificationType.SiteMessage,
title=f"【清理硬链接】",
text=f"监控到删除源文件:[{file_path}]\n"
f"同步删除硬链接文件:[{path}]",
f"同步删除硬链接文件:[{path}]",
)
except Exception as e:
logger.error(

View File

@@ -14,8 +14,7 @@ from ruamel.yaml import CommentedMap
from app import schemas
from app.core.config import settings
from app.core.event import Event
from app.core.event import eventmanager
from app.core.event import Event, eventmanager
from app.db.models import PluginData
from app.db.site_oper import SiteOper
from app.helper.browser import PlaywrightHelper
@@ -43,7 +42,7 @@ class SiteStatistic(_PluginBase):
# 插件图标
plugin_icon = "statistic.png"
# 插件版本
plugin_version = "3.9.1"
plugin_version = "4.0.1"
# 插件作者
plugin_author = "lightolly"
# 作者主页
@@ -931,6 +930,12 @@ class SiteStatistic(_PluginBase):
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
def format_bonus(bonus):
try:
return f'{float(bonus):,.1f}'
except ValueError:
return '0.0'
# 获取数据
today, stattistic_data, yesterday_sites_data = self.__get_data()
if not stattistic_data:
@@ -995,7 +1000,7 @@ class SiteStatistic(_PluginBase):
},
{
'component': 'td',
'text': '{:,.1f}'.format(data.get('bonus') or 0)
'text': format_bonus(data.get('bonus') or 0)
},
{
'component': 'td',

View File

@@ -118,7 +118,7 @@ class NexusPhpSiteUserInfo(ISiteUserInfo):
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
return
bonus_match = re.search(r"mybonus.[\[\]:<>/a-zA-Z_\-=\"'\s#;.(使用魔力值豆]+\s*([\d,.]+)[<()&\s]", html_text)
bonus_match = re.search(r"mybonus.[\[\]:<>/a-zA-Z_\-=\"'\s#;.(使用&说明魔力值豆]+\s*([\d,.]+)[\[<()&\s]", html_text)
try:
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
@@ -340,6 +340,12 @@ class NexusPhpSiteUserInfo(ISiteUserInfo):
self.user_level = user_levels_text[0].xpath("string(.)").strip()
return
# 适配PTT用户等级
user_levels_text = html.xpath('//tr/td[text()="用户等级"]/following-sibling::td[1]/b/@title')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
return
user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
if not self.user_level and user_levels_text:
for user_level_text in user_levels_text:

View File

@@ -62,8 +62,8 @@ class TYemaSiteUserInfo(ISiteUserInfo):
self.user_level = user_info.get("level")
self.join_at = StringUtils.unify_datetime_str(user_info.get("registerTime"))
self.upload = user_info.get('uploadSize')
self.download = user_info.get('downloadSize')
self.upload = user_info.get('promotionUploadSize')
self.download = user_info.get('promotionDownloadSize')
self.ratio = round(self.upload / (self.download or 1), 2)
self.bonus = user_info.get("bonus")
self.message_unread = 0

View File

@@ -23,7 +23,7 @@ class SpeedLimiter(_PluginBase):
# 插件图标
plugin_icon = "Librespeed_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.3"
# 插件作者
plugin_author = "Shurelol"
# 作者主页
@@ -48,6 +48,7 @@ class SpeedLimiter(_PluginBase):
_noplay_up_speed: float = 0
_noplay_down_speed: float = 0
_bandwidth: float = 0
_reserved_bandwidth: float = 0
_allocation_ratio: str = ""
_auto_limit: bool = False
_limit_enabled: bool = False
@@ -55,6 +56,7 @@ class SpeedLimiter(_PluginBase):
_unlimited_ips = {}
# 当前限速状态
_current_state = ""
_exclude_path = ""
def init_plugin(self, config: dict = None):
# 读取配置
@@ -66,9 +68,15 @@ class SpeedLimiter(_PluginBase):
self._noplay_up_speed = float(config.get("noplay_up_speed")) if config.get("noplay_up_speed") else 0
self._noplay_down_speed = float(config.get("noplay_down_speed")) if config.get("noplay_down_speed") else 0
self._current_state = f"U:{self._noplay_up_speed},D:{self._noplay_down_speed}"
self._exclude_path = config.get("exclude_path")
try:
# 总带宽
self._bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000
self._reserved_bandwidth = int(float(config.get("reserved_bandwidth") or 0)) * 1000000
# 减去预留带宽
if self._reserved_bandwidth:
self._bandwidth -= self._reserved_bandwidth
# 自动限速开关
if self._bandwidth > 0:
self._auto_limit = True
@@ -316,6 +324,23 @@ class SpeedLimiter(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'reserved_bandwidth',
'label': '预留带宽(应对突发流量和额外开销)',
'placeholder': 'Mbps'
}
}
]
}
]
},
@@ -355,6 +380,23 @@ class SpeedLimiter(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'exclude_path',
'label': '不限速路径',
'placeholder': '包含该路径的媒体不限速,多个请换行'
}
}
]
}
]
}
@@ -371,7 +413,8 @@ class SpeedLimiter(_PluginBase):
"bandwidth": None,
"allocation_ratio": "",
"ipv4": "",
"ipv6": ""
"ipv6": "",
"exclude_path": ""
}
def get_page(self) -> List[dict]:
@@ -415,7 +458,9 @@ class SpeedLimiter(_PluginBase):
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Emby播放会话失败{str(e)}")
continue
@@ -429,6 +474,8 @@ class SpeedLimiter(_PluginBase):
# 未设置不限速范围则默认不限速内网ip
elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
logger.debug(f"当前播放内容:{session.get('NowPlayingItem').get('FileName')}"
f"比特率:{int(session.get('NowPlayingItem', {}).get('Bitrate') or 0)}")
total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
elif media_server == "jellyfin":
req_url = "[HOST]Sessions?api_key=[APIKEY]"
@@ -438,7 +485,8 @@ class SpeedLimiter(_PluginBase):
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Jellyfin播放会话失败{str(e)}")
continue
@@ -481,6 +529,7 @@ class SpeedLimiter(_PluginBase):
total_bit_rate += int(session.get("bitrate") or 0)
if total_bit_rate:
logger.debug(f"比特率总计:{total_bit_rate}")
# 开启智能限速计算上传限速
if self._auto_limit:
play_up_speed = self.__calc_limit(total_bit_rate)
@@ -488,6 +537,7 @@ class SpeedLimiter(_PluginBase):
play_up_speed = self._play_up_speed
# 当前正在播放,开始限速
logger.debug(f"上传限速:{play_up_speed} KB/s")
self.__set_limiter(limit_type="播放", upload_limit=play_up_speed,
download_limit=self._play_down_speed)
else:
@@ -495,11 +545,24 @@ class SpeedLimiter(_PluginBase):
self.__set_limiter(limit_type="未播放", upload_limit=self._noplay_up_speed,
download_limit=self._noplay_down_speed)
def __path_execluded(self, path: str) -> bool:
    """
    Check whether a playing media path falls inside one of the configured
    no-limit paths (one entry per line in self._exclude_path).

    :param path: file path of the currently playing item; may be None/empty
                 when the media server does not report a path
    :return: True if the path matches an exclusion entry, otherwise False
    """
    # No configured exclusions or no usable path -> never excluded.
    # Guarding path also avoids a TypeError on `exclude_path in None`.
    if not self._exclude_path or not path:
        return False
    for exclude_path in self._exclude_path.split("\n"):
        # Skip blank lines: "" is a substring of every path and would
        # otherwise exclude everything.
        if not exclude_path.strip():
            continue
        if exclude_path in path:
            logger.info(f"{path} 在不限速路径:{exclude_path} 内,跳过限速")
            return True
    return False
def __calc_limit(self, total_bit_rate: float) -> float:
    """
    Compute the smart upload limit (KB/s) from the remaining bandwidth.

    :param total_bit_rate: sum of the bitrates (bps) of all active playback sessions
    :return: upload limit in KB/s; falls back to a 10 KB/s floor when no
             bandwidth is configured or playback already saturates it
    """
    # 当前总比特率大于总带宽,则设置为最低限速
    if not self._bandwidth or total_bit_rate > self._bandwidth:
        return 10
    # Remaining bandwidth (bps) converted to KB/s, rounded to 2 decimals
    return round((self._bandwidth - total_bit_rate) / 8 / 1024, 2)
@@ -518,71 +581,67 @@ class SpeedLimiter(_PluginBase):
try:
cnt = 0
text = ""
for download in self._downloader:
if cnt != 0:
text = f"{text}\n===================="
text = f"{text}\n下载器:{download}"
upload_limit_final = upload_limit
if self._auto_limit and limit_type == "播放":
# 开启了播放智能限速
if len(self._downloader) == 1:
# 只有一个下载器
upload_limit = int(upload_limit)
upload_limit_final = int(upload_limit)
else:
# 多个下载器
if not self._allocation_ratio:
# 平均
upload_limit = int(upload_limit / len(self._downloader))
upload_limit_final = int(upload_limit / len(self._downloader))
else:
# 按比例
allocation_count = sum([int(i) for i in self._allocation_ratio.split(":")])
upload_limit = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count)
upload_limit_final = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count)
logger.debug(f"下载器:{download} 分配比例:{self._allocation_ratio.split(':')[cnt]}/{allocation_count} 分配上传限速:{upload_limit_final} KB/s")
cnt += 1
if upload_limit:
text = f"上传:{upload_limit} KB/s"
if upload_limit_final:
text = f"{text}\n上传:{upload_limit_final} KB/s"
else:
text = f"上传:未限速"
text = f"{text}\n上传:未限速"
if download_limit:
text = f"{text}\n下载:{download_limit} KB/s"
else:
text = f"{text}\n下载:未限速"
if str(download) == 'qbittorrent':
if self._qb:
self._qb.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Qbittorrent 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Qbittorrent 已取消限速"
)
self._qb.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit_final)
else:
if self._tr:
self._tr.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Transmission 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Transmission 已取消限速"
)
self._tr.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit_final)
# 发送通知
self._notify_message(text, bool(upload_limit or download_limit), limit_type)
except Exception as e:
logger.error(f"设置限速失败:{str(e)}")
def _notify_message(self, text: str, is_limit: bool, limit_type: str):
    """
    Send a speed-limit notification to the media-server channel.

    :param text: detail body describing the applied limits
    :param is_limit: True when a limit was applied, False when it was lifted
    :param limit_type: human-readable reason/type of the limit event
    """
    # Notifications disabled -> nothing to do
    if not self._notify:
        return
    title = "【播放限速】"
    if is_limit:
        subtitle = f"{limit_type},开始限速"
        body = f"{subtitle}\n{text}"
    else:
        body = f"{limit_type},取消限速"
    self.post_message(
        mtype=NotificationType.MediaServer,
        title=title,
        text=body
    )
@staticmethod
def __allow_access(allow_ips: dict, ip: str) -> bool:
"""

View File

@@ -0,0 +1,299 @@
import json
from datetime import datetime, timedelta
from hashlib import md5
from urllib.parse import urlparse
import pytz
from app.core.config import settings
from app.db.site_oper import SiteOper
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.utils.common import encrypt, decrypt
class SyncCookieCloud(_PluginBase):
# 插件名称
plugin_name = "同步CookieCloud"
# 插件描述
plugin_desc = "同步MoviePilot站点Cookie到本地CookieCloud。"
# 插件图标
plugin_icon = "Cookiecloud_A.png"
# 插件版本
plugin_version = "1.4"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "synccookiecloud_"
# 加载顺序
plugin_order = 28
# 可使用的用户级别
auth_level = 1
# 私有属性
_enabled: bool = False
_onlyonce: bool = False
_cron: str = ""
siteoper = None
_scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
    """
    Initialise the plugin: read saved config, stop any previous scheduler
    and (re)register the sync job for run-once and/or cron execution.

    :param config: saved plugin configuration (enabled/onlyonce/cron)
    """
    self.siteoper = SiteOper()
    # Stop any scheduler left over from a previous initialisation
    self.stop_service()

    if config:
        self._enabled = config.get("enabled")
        self._onlyonce = config.get("onlyonce")
        self._cron = config.get("cron")

    if self._enabled or self._onlyonce:
        # Scheduler dedicated to this plugin's jobs
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)

        # One-shot run: fire the sync job 3 seconds from now
        if self._onlyonce:
            logger.info(f"同步CookieCloud服务启动立即运行一次")
            self._scheduler.add_job(self.__sync_to_cookiecloud, 'date',
                                    run_date=datetime.now(
                                        tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                    name="同步CookieCloud")
            # Reset the one-shot switch so it does not fire on next init
            self._onlyonce = False
            # Persist the updated configuration
            self.__update_config()

        # Periodic run on the configured cron expression
        if self._cron:
            try:
                self._scheduler.add_job(func=self.__sync_to_cookiecloud,
                                        trigger=CronTrigger.from_crontab(self._cron),
                                        name="同步CookieCloud")
            except Exception as err:
                logger.error(f"定时任务配置错误:{err}")
                # Surface the configuration error to the UI
                self.systemmessage.put(f"执行周期配置错误:{err}")

        # Start the scheduler only if a job was actually registered
        if self._scheduler.get_jobs():
            self._scheduler.print_jobs()
            self._scheduler.start()
def __sync_to_cookiecloud(self):
    """
    Push every site's cookies to the local CookieCloud server.

    Reads all configured sites, converts their cookie strings into
    CookieCloud's per-domain record format, encrypts the payload with the
    shared key and writes it to the local CookieCloud storage file.
    """
    # All configured sites, ordered by priority
    sites = self.siteoper.list_order_by_pri()
    if not sites:
        return

    if not settings.COOKIECLOUD_ENABLE_LOCAL:
        logger.error('本地CookieCloud服务器未启用')
        return

    cookies = {}
    for site in sites:
        domain = urlparse(site.url).netloc
        cookie = site.cookie
        if not cookie:
            logger.error(f"站点 {domain} 无cookie跳过处理...")
            continue
        # Parse the "name=value; name=value" cookie string
        site_cookies = []
        for ck in cookie.split(";"):
            # Split only on the first '=' so values containing '='
            # (e.g. base64 session tokens) are kept intact
            kv = ck.split("=", 1)
            if len(kv) < 2:
                continue
            site_cookies.append({
                "domain": domain,
                # Strip the leading space left by "; " separators
                "name": kv[0].strip(),
                "value": kv[1]
            })
        # Record this site's cookies keyed by domain
        cookies[domain] = site_cookies

    if cookies:
        crypt_key = self._get_crypt_key()
        try:
            cookies = {'cookie_data': cookies}
            encrypted_data = encrypt(json.dumps(cookies).encode('utf-8'), crypt_key).decode('utf-8')
        except Exception as e:
            logger.error(f"CookieCloud加密失败{e}")
            return
        ck = {'encrypted': encrypted_data}
        # The local CookieCloud stores one JSON file per user key
        cookie_path = settings.COOKIE_PATH / f"{settings.COOKIECLOUD_KEY}.json"
        cookie_path.write_bytes(json.dumps(ck).encode('utf-8'))
        logger.info(f"同步站点cookie到本地CookieCloud成功")
    else:
        logger.error(f"同步站点cookie到本地CookieCloud失败未获取到站点cookie")
def __decrypted(self, encrypt_data: dict):
    """
    Decrypt a local CookieCloud payload and return its cookie contents.

    :param encrypt_data: dict containing the "encrypted" ciphertext string
    :return: the decrypted cookie mapping on success; on failure a tuple
             of (empty dict, error message).
             NOTE(review): the success path returns a bare dict while the
             error paths return 2-tuples — callers must handle both shapes;
             TODO confirm the intended contract and unify.
    """
    encrypted = encrypt_data.get("encrypted")
    if not encrypted:
        return {}, "未获取到cookie密文"
    else:
        # Key derived from the CookieCloud UUID + password pair
        crypt_key = self._get_crypt_key()
        try:
            decrypted_data = decrypt(encrypted, crypt_key).decode('utf-8')
            result = json.loads(decrypted_data)
        except Exception as e:
            return {}, "cookie解密失败" + str(e)
        if not result:
            return {}, "cookie解密为空"
    # Newer payloads nest cookies under "cookie_data"; older ones are flat
    if result.get("cookie_data"):
        contents = result.get("cookie_data")
    else:
        contents = result
    return contents
@staticmethod
def _get_crypt_key() -> bytes:
    """
    Derive the CookieCloud AES key from the user key (UUID) and password.

    The key material is "<uuid>-<password>", MD5-hashed; the first 16 hex
    characters of the digest form the key bytes.
    """
    key_part = str(settings.COOKIECLOUD_KEY).strip()
    password_part = str(settings.COOKIECLOUD_PASSWORD).strip()
    digest = md5(f"{key_part}-{password_part}".encode('utf-8')).hexdigest()
    return digest[:16].encode('utf-8')
def __update_config(self):
    """Persist the current plugin switches back to the saved config."""
    config = {
        "enabled": self._enabled,
        "onlyonce": self._onlyonce,
        "cron": self._cron
    }
    self.update_config(config)
def get_state(self) -> bool:
    """Return whether the plugin is enabled."""
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """No remote commands are registered by this plugin."""
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """No REST APIs are exposed by this plugin."""
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin configuration page.

    :return: a tuple of (Vuetify form component tree, default config values)
    """
    return [
        {
            'component': 'VForm',
            'content': [
                {
                    # Row 1: enable switch + run-once switch
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'onlyonce',
                                        'label': '立即运行一次',
                                    }
                                }
                            ]
                        }
                    ]
                },
                {
                    # Row 2: cron expression for the periodic sync
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '执行周期',
                                        'placeholder': '5位cron表达式留空自动'
                                    }
                                }
                            ]
                        },
                    ]
                },
                {
                    # Row 3: informational note about the local CookieCloud requirement
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                        'text': '需要MoviePilot设定-站点启用本地CookieCloud服务器。'
                                    }
                                }
                            ]
                        }
                    ]
                },
            ]
        }
    ], {
        # Default configuration values
        "enabled": False,
        "onlyonce": False,
        "cron": "5 1 * * *",
    }
def get_page(self) -> List[dict]:
    """This plugin has no detail page."""
    pass
def stop_service(self):
    """
    Stop the plugin: remove all scheduled jobs and shut the scheduler down.
    """
    try:
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            # Only shut down a scheduler that was actually started
            if self._scheduler.running:
                self._scheduler.shutdown()
            self._scheduler = None
    except Exception as e:
        # Lazy %-style args: formatting is skipped when the level is filtered
        logger.error("退出插件失败:%s", str(e))

View File

@@ -22,7 +22,7 @@ class SyncDownloadFiles(_PluginBase):
# 插件图标
plugin_icon = "Youtube-dl_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.1.1"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -265,7 +265,7 @@ class SyncDownloadFiles(_PluginBase):
if last_sync_time:
# 获取种子时间
if dl_tpe == "qbittorrent":
torrent_date = time.gmtime(torrent.get("added_on")) # 将时间戳转换为时间元组
torrent_date = time.localtime(torrent.get("added_on")) # 将时间戳转换为时间元组
torrent_date = time.strftime("%Y-%m-%d %H:%M:%S", torrent_date) # 格式化时间
else:
torrent_date = torrent.added_date

View File

@@ -21,7 +21,7 @@ class TmdbWallpaper(_PluginBase):
# 插件图标
plugin_icon = "Macos_Sierra.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.2"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -220,24 +220,30 @@ class TmdbWallpaper(_PluginBase):
"""
下载MoviePilot的登录壁纸到本地
"""
if not self._savepath:
return
if settings.WALLPAPER == "tmdb":
url = TmdbChain().get_random_wallpager()
filename = url.split("/")[-1]
else:
url = WebUtils.get_bing_wallpaper()
filename = f"{datetime.now().strftime('%Y%m%d')}.jpg"
# 下载壁纸
if url:
def __save_file(_url: str, _filename: str):
"""
保存文件
"""
try:
savepath = Path(self._savepath)
logger.info(f"下载壁纸:{url}")
r = RequestUtils().get_res(url)
logger.info(f"下载壁纸:{_url}")
r = RequestUtils().get_res(_url)
if r and r.status_code == 200:
with open(savepath / filename, "wb") as f:
with open(savepath / _filename, "wb") as f:
f.write(r.content)
except Exception as e:
logger.error(f"下载壁纸失败:{str(e)}")
if not self._savepath:
return
if settings.WALLPAPER == "tmdb":
urls = TmdbChain().get_trending_wallpapers() or []
for url in urls:
filename = url.split("/")[-1]
__save_file(url, filename)
else:
logger.error(f"获取壁纸地址失败")
url = WebUtils.get_bing_wallpaper()
if url:
filename = f"{datetime.now().strftime('%Y%m%d')}.jpg"
__save_file(url, filename)

View File

@@ -27,7 +27,7 @@ class TorrentTransfer(_PluginBase):
# 插件图标
plugin_icon = "seed.png"
# 插件版本
plugin_version = "1.4"
plugin_version = "1.6"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -55,19 +55,21 @@ class TorrentTransfer(_PluginBase):
_notify = False
_nolabels = None
_includelabels = None
_includecategory = None
_nopaths = None
_deletesource = False
_deleteduplicate = False
_fromtorrentpath = None
_autostart = False
_transferemptylabel = False
_add_torrent_tags = None
# 退出事件
_event = Event()
# 待检查种子清单
_recheck_torrents = {}
_is_recheck_running = False
# 任务标签
_torrent_tags = ["已整理", "转移做种"]
_torrent_tags = []
def init_plugin(self, config: dict = None):
self.torrent = TorrentHelper()
@@ -79,6 +81,7 @@ class TorrentTransfer(_PluginBase):
self._notify = config.get("notify")
self._nolabels = config.get("nolabels")
self._includelabels = config.get("includelabels")
self._includecategory = config.get("includecategory")
self._frompath = config.get("frompath")
self._topath = config.get("topath")
self._fromdownloader = config.get("fromdownloader")
@@ -89,6 +92,12 @@ class TorrentTransfer(_PluginBase):
self._nopaths = config.get("nopaths")
self._autostart = config.get("autostart")
self._transferemptylabel = config.get("transferemptylabel")
self._add_torrent_tags = config.get("add_torrent_tags")
if self._add_torrent_tags is None:
self._add_torrent_tags = "已整理,转移做种"
config["add_torrent_tags"] = self._add_torrent_tags
self.update_config(config=config)
self._torrent_tags = self._add_torrent_tags.strip().split(",") if self._add_torrent_tags else []
# 停止现有任务
self.stop_service()
@@ -97,14 +106,12 @@ class TorrentTransfer(_PluginBase):
if self.get_state() or self._onlyonce:
self.qb = Qbittorrent()
self.tr = Transmission()
# 检查配置
if self._fromtorrentpath and not Path(self._fromtorrentpath).exists():
logger.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}")
self.systemmessage.put(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}", title="自动转移做种")
return
if self._fromdownloader == self._todownloader:
logger.error(f"源下载器和目的下载器不能相同")
self.systemmessage.put(f"源下载器和目的下载器不能相同", title="自动转移做种")
if not self.__validate_config():
self._enabled = False
self._onlyonce = False
config["enabled"] = self._enabled
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
return
# 定时服务
@@ -121,24 +128,8 @@ class TorrentTransfer(_PluginBase):
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"enabled": self._enabled,
"onlyonce": self._onlyonce,
"cron": self._cron,
"notify": self._notify,
"nolabels": self._nolabels,
"includelabels": self._includelabels,
"frompath": self._frompath,
"topath": self._topath,
"fromdownloader": self._fromdownloader,
"todownloader": self._todownloader,
"deletesource": self._deletesource,
"deleteduplicate": self._deleteduplicate,
"fromtorrentpath": self._fromtorrentpath,
"nopaths": self._nopaths,
"autostart": self._autostart,
"transferemptylabel": self._transferemptylabel
})
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
# 启动服务
if self._scheduler.get_jobs():
@@ -269,6 +260,39 @@ class TorrentTransfer(_PluginBase):
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'add_torrent_tags',
'label': '添加种子标签',
'placeholder': '已整理,转移做种'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'includecategory',
'label': '转移种子分类',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
@@ -282,7 +306,7 @@ class TorrentTransfer(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
'md': 6
},
'content': [
{
@@ -293,7 +317,7 @@ class TorrentTransfer(_PluginBase):
}
}
]
}
},
]
},
{
@@ -494,6 +518,7 @@ class TorrentTransfer(_PluginBase):
"cron": "",
"nolabels": "",
"includelabels": "",
"includecategory": "",
"frompath": "",
"topath": "",
"fromdownloader": "",
@@ -503,7 +528,8 @@ class TorrentTransfer(_PluginBase):
"fromtorrentpath": "",
"nopaths": "",
"autostart": True,
"transferemptylabel": False
"transferemptylabel": False,
"add_torrent_tags": "已整理,转移做种"
}
def get_page(self) -> List[dict]:
@@ -520,6 +546,21 @@ class TorrentTransfer(_PluginBase):
else:
return None
def __validate_config(self) -> bool:
"""
校验配置
"""
# 检查配置
if self._fromtorrentpath and not Path(self._fromtorrentpath).exists():
logger.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}")
self.systemmessage.put(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}", title="自动转移做种")
return False
if self._fromdownloader == self._todownloader:
logger.error(f"源下载器和目的下载器不能相同")
self.systemmessage.put(f"源下载器和目的下载器不能相同", title="自动转移做种")
return False
return True
def __download(self, downloader: str, content: bytes,
save_path: str) -> Optional[str]:
"""
@@ -531,7 +572,7 @@ class TorrentTransfer(_PluginBase):
state = self.qb.add_torrent(content=content,
download_dir=save_path,
is_paused=True,
tag=["已整理", "转移做种", tag])
tag=self._torrent_tags + [tag])
if not state:
return None
else:
@@ -546,7 +587,7 @@ class TorrentTransfer(_PluginBase):
torrent = self.tr.add_torrent(content=content,
download_dir=save_path,
is_paused=True,
labels=["已整理", "转移做种"])
labels=self._torrent_tags)
if not torrent:
return None
else:
@@ -561,6 +602,9 @@ class TorrentTransfer(_PluginBase):
"""
logger.info("开始转移做种任务 ...")
if not self.__validate_config():
return
# 源下载器
downloader = self._fromdownloader
# 目的下载器
@@ -600,13 +644,20 @@ class TorrentTransfer(_PluginBase):
# 获取种子标签
torrent_labels = self.__get_label(torrent, downloader)
# 获取种子分类
torrent_category = self.__get_category(torrent, downloader)
# 种子为无标签,则进行规范化
is_torrent_labels_empty = torrent_labels == [''] or torrent_labels == [] or torrent_labels is None
if is_torrent_labels_empty:
torrent_labels = []
#根据设置决定是否转移无标签的种子
# 如果分类项存在数值,则进行判断
if self._includecategory:
# 排除未标记的分类
if torrent_category not in self._includecategory.split(','):
logger.info(f"种子 {hash_str} 不含有转移分类 {self._includecategory},跳过 ...")
continue
# 根据设置决定是否转移无标签的种子
if is_torrent_labels_empty:
if not self._transferemptylabel:
continue
@@ -724,6 +775,9 @@ class TorrentTransfer(_PluginBase):
and fastresume_trackers[0]:
# 重新赋值
torrent_main['announce'] = fastresume_trackers[0][0]
# 保留其他tracker避免单一tracker无法连接
if len(fastresume_trackers) > 1 or len(fastresume_trackers[0]) > 1:
torrent_main['announce-list'] = fastresume_trackers
# 替换种子文件路径
torrent_file = settings.TEMP_PATH / f"{torrent_item.get('hash')}.torrent"
# 编码并保存到临时文件
@@ -867,6 +921,18 @@ class TorrentTransfer(_PluginBase):
print(str(e))
return []
@staticmethod
def __get_category(torrent: Any, dl_type: str):
    """
    Return the torrent's category (qBittorrent only).

    :param torrent: downloader torrent object (dict-like for qBittorrent)
    :param dl_type: downloader type, "qbittorrent" or "transmission"
    :return: stripped category string; "" for Transmission, missing
             categories, or on any lookup error
    """
    try:
        if dl_type != "qbittorrent":
            # Transmission has no category concept
            return ""
        # category may be absent or None -> normalise to "" instead of
        # raising AttributeError on None.strip()
        return (torrent.get("category") or "").strip()
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __get_save_path(torrent: Any, dl_type: str):
"""

View File

@@ -15,12 +15,11 @@ from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from app import schemas
from app.chain.media import MediaChain
from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfoPath
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.log import logger
@@ -73,11 +72,11 @@ class VCBAnimeMonitor(_PluginBase):
# 插件名称
plugin_name = "整理VCB动漫压制组作品"
# 插件描述
plugin_desc = "提高部分VCB-Studio作品的识别准确率,将VCB-Studio的作品统一转移到指定目录同时进行刮削整理"
plugin_desc = "一款辅助整理&提高识别VCB-Stuido动漫压制组作品的插件"
# 插件图标
plugin_icon = "vcbmonitor.png"
# 插件版本
plugin_version = "1.8"
plugin_version = "1.8.2.2"
# 插件作者
plugin_author = "pixel@qingwa"
# 作者主页
@@ -91,7 +90,6 @@ class VCBAnimeMonitor(_PluginBase):
# 私有属性
_switch_ova = False
_high_mode = False
_torrents_path = None
new_save_path = None
qb = None
@@ -100,6 +98,7 @@ class VCBAnimeMonitor(_PluginBase):
downloadhis = None
transferchian = None
tmdbchain = None
mediaChain = None
_observer = []
_enabled = False
_notify = False
@@ -126,6 +125,7 @@ class VCBAnimeMonitor(_PluginBase):
self.transferhis = TransferHistoryOper()
self.downloadhis = DownloadHistoryOper()
self.transferchian = TransferChain()
self.mediaChain = MediaChain()
self.tmdbchain = TmdbChain()
# 清空配置
self._dirconf = {}
@@ -145,7 +145,6 @@ class VCBAnimeMonitor(_PluginBase):
self._size = config.get("size") or 0
self._scrape = config.get("scrape")
self._switch_ova = config.get("ova")
self._high_mode = config.get("high_mode")
self._torrents_path = config.get("torrents_path") or ""
# 停止现有任务
@@ -164,13 +163,16 @@ class VCBAnimeMonitor(_PluginBase):
return
# 启用种子目录监控
if self._torrents_path is not None and Path(self._torrents_path).exists() and self._enabled:
if self._torrents_path and Path(self._torrents_path).exists() and self._enabled:
# 只取第一个目录作为新的保存
first_path = monitor_dirs[0]
if SystemUtils.is_windows():
self.new_save_path = first_path.split(':')[0] + ":" + first_path.split(':')[1]
else:
self.new_save_path = first_path.split(':')[0]
try:
first_path = monitor_dirs[0]
if SystemUtils.is_windows():
self.new_save_path = first_path.split(':')[0] + ":" + first_path.split(':')[1]
else:
self.new_save_path = first_path.split(':')[0]
except Exception:
logger.error(f"目录保存失败,请检查输入目录是否合法")
# print(self.new_save_path)
try:
observer = Observer()
@@ -181,7 +183,7 @@ class VCBAnimeMonitor(_PluginBase):
observer.start()
logger.info(f"{self._torrents_path} 的种子目录监控服务启动开启监控新增的VCB-Studio种子文件")
except Exception as e:
logger.error(f"{self._torrents_path} 启动种子目录监控失败:{str(e)}")
logger.debug(f"{self._torrents_path} 启动种子目录监控失败:{str(e)}")
else:
logger.info("种子目录为空不转移qb中正在下载的VCB-Studio文件")
@@ -224,7 +226,8 @@ class VCBAnimeMonitor(_PluginBase):
try:
if target_path and target_path.is_relative_to(Path(mon_path)):
logger.warn(f"{target_path} 是监控目录 {mon_path} 的子目录,无法监控")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控", title="整理VCB动漫压制组作品")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控",
title="整理VCB动漫压制组作品")
continue
except Exception as e:
logger.debug(str(e))
@@ -290,7 +293,6 @@ class VCBAnimeMonitor(_PluginBase):
"size": self._size,
"scrape": self._scrape,
"ova": self._switch_ova,
"high_mode": self._high_mode,
"torrents_path": self._torrents_path
})
@@ -376,33 +378,56 @@ class VCBAnimeMonitor(_PluginBase):
logger.debug(f"{event_path} 不是媒体文件")
return
# 判断是不是蓝光目录
bluray_flag = False
if re.search(r"BDMV[/\\]STREAM", event_path, re.IGNORECASE):
bluray_flag = True
# 截取BDMV前面的路径
blurray_dir = event_path[:event_path.find("BDMV")]
file_path = Path(blurray_dir)
logger.info(f"{event_path} 是蓝光目录,更正文件路径为:{str(file_path)}")
# 查询历史记录,已转移的不处理
if self.transferhis.get_by_src(str(file_path)):
logger.info(f"{file_path} 已整理过")
return
# 元数据
if file_path.parent.name == "SPs":
logger.warn("位于SPs目录下,跳过处理")
if file_path.parent.name.lower() in ["sps", "scans", "cds", "previews", "extras"]:
logger.warn("位于特典或其他特殊目录下,跳过处理")
return
remeta = ReMeta(ova_switch=self._switch_ova, high_performance=self._high_mode)
if 'VCB-Studio' not in file_path.stem.strip():
logger.warn("不属于VCB的作品不处理")
return
remeta = ReMeta(ova_switch=self._switch_ova)
file_meta = remeta.handel_file(file_path=file_path)
if file_meta:
if not file_meta.name:
logger.error(f"{file_path.name} 无法识别有效信息")
return
if remeta.is_special and not self._switch_ova:
if remeta.is_ova and not self._switch_ova:
logger.warn(f"{file_path.name} 为OVA资源未开启OVA开关不处理")
return
if remeta.is_special and self._switch_ova:
logger.info(f"{file_path.name} 为OVA资源,开始处理")
if self.get_data(key=f"OVA_{file_meta.title}") is not None:
ova_history_ep = int(self.get_data(key=f"OVA_{file_meta.title}")) + 1
file_meta.begin_episode = ova_history_ep
self.save_data(key=f"OVA_{file_meta.title}", value=ova_history_ep)
if remeta.is_ova and self._switch_ova:
logger.info(f"{file_path.name} 为OVA资源,开始历史记录处理")
ova_history_ep_list = self.get_data(file_meta.title)
if ova_history_ep_list and isinstance(ova_history_ep_list, list):
ep = file_meta.begin_episode
if ep in ova_history_ep_list:
for i in range(1, 100):
if ep + i not in ova_history_ep_list:
ova_history_ep_list.append(ep + i)
file_meta.begin_episode = ep + i
logger.info(
f"{file_path.name} 为OVA资源,历史记录中已存在,自动识别为第{ep + i}")
break
else:
ova_history_ep_list.append(ep)
self.save_data(file_meta.title, ova_history_ep_list)
else:
file_meta.begin_episode = 1
self.save_data(key=f"OVA_{file_meta.title}", value=1)
self.save_data(file_meta.title, [file_meta.begin_episode])
else:
return
@@ -418,14 +443,23 @@ class VCBAnimeMonitor(_PluginBase):
# 根据父路径获取下载历史
download_history = None
# 按文件全路径查询
download_file = self.downloadhis.get_file_by_fullpath(str(file_path))
if download_file:
download_history = self.downloadhis.get_by_hash(download_file.download_hash)
if bluray_flag:
# 蓝光原盘,按目录名查询
# FIXME 理论上DownloadHistory表中的path应该是全路径但实际表中登记的数据只有目录名暂按目录名查询
download_history = self.downloadhis.get_by_path(file_path.name)
else:
# 按文件全路径查询
download_file = self.downloadhis.get_file_by_fullpath(str(file_path))
if download_file:
download_history = self.downloadhis.get_by_hash(download_file.download_hash)
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta,
tmdbid=download_history.tmdbid if download_history else None)
if download_history and download_history.tmdbid:
mediainfo: MediaInfo = self.mediaChain.recognize_media(mtype=MediaType(download_history.type),
tmdbid=download_history.tmdbid,
doubanid=download_history.doubanid)
else:
mediainfo: MediaInfo = self.mediaChain.recognize_by_meta(file_meta)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{file_meta.name}')
@@ -615,13 +649,13 @@ class VCBAnimeMonitor(_PluginBase):
if not torrent_path.exists():
return
# 只处理刚刚添加的种子也就是获取正在下载的种子
logger.info(f"开始转移qb中正在下载的VCB资源,转移目录为:{self.new_save_path}")
# 等待种子文件下载完成
time.sleep(5)
with lock:
torrents = self.qb.get_downloading_torrents()
for torrent in torrents:
if "VCB-Studio" in torrent.name:
logger.info(f"开始转移qb中正在下载的VCB资源,转移目录为:{self.new_save_path}")
# 原本存在的暂停的种子不处理
if torrent.state_enum == qbittorrentapi.TorrentState.PAUSED_DOWNLOAD:
continue
@@ -813,22 +847,6 @@ class VCBAnimeMonitor(_PluginBase):
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'high_mode',
'label': '高性能处理模式',
}
}
]
},
{
'component': 'VCol',
'props': {
@@ -983,7 +1001,7 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'model': 'monitor_dirs',
'label': '监控目录',
'rows': 5,
'rows': 4,
'placeholder': '每一行一个目录,支持以下几种配置方式,转移方式支持 move、copy、link、softlink、rclone_copy、rclone_move\n'
'监控目录\n'
'监控目录#转移方式\n'
@@ -1031,8 +1049,10 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'type': 'info',
'variant': 'tonal',
'text': '核心用法与目录同步插件相同不同点在于只识别处理VCB-Studio资源,\n'
'不处理SPs目录下的文件,OVA/OAD集数根据入库顺序累加命名,不保证与TMDB集数匹配'
'text': '核心用法与目录同步插件相同不同点在于只识别处理VCB-Studio资源'
'默认不处理SPs、CDs、SCans目录下的文件OVA/OAD集数暂时根据入库顺序累加命名'
'因此不保证与TMDB集数匹配。部分季度以罗马音音译为名的作品暂时无法识别出准确季度。'
'有想法有问题欢迎点击插件作者主页提issue'
}
}
]
@@ -1053,9 +1073,9 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'type': 'info',
'variant': 'tonal',
'text': '最佳使用方式监控目录单独设置一个作为保存VCB-Studio资源的目录,\n'
'填入监控种子目录,开启后会将正在QB(仅支持QB)下载器内的VCB-Studio资源转移到监控目录实现自动整理('
'仅支持第一个监控目录),\n'
'text': '最佳使用方式监控目录单独设置一个作为保存VCB-Studio资源的目录'
'填入监控种子目录开启后会将正在QB(仅支持QB)下载器内正在下载的VCB-Studio资源转移到监控目录实现自动整理('
'仅支持第一个监控目录)'
'监控种子目录为空则不转移文件'
}
}
@@ -1077,7 +1097,6 @@ class VCBAnimeMonitor(_PluginBase):
"cron": "",
"size": 0,
"ova": False,
"high_mode": False,
"torrents_path": "",
}

View File

@@ -1,5 +1,6 @@
import concurrent
import re
from dataclasses import dataclass
from pathlib import Path
from typing import List
from app.chain.media import MediaChain
@@ -8,196 +9,276 @@ from app.core.metainfo import MetaInfoPath
from app.log import logger
from app.schemas import MediaType
season_patterns = [
{"pattern": re.compile(r"S(\d+)$", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(\d+)$", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(\d+)(st|nd|rd|th)?\s*season", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(.*) ?\s*season (\d+)", re.IGNORECASE), "group": 2},
{"pattern": re.compile(r"\s(II|III|IV|V|VI|VII|VIII|IX|X)$", re.IGNORECASE), "group": "1"}
]
episode_patterns = [
{"pattern": re.compile(r"(\d+)\((\d+)\)", re.IGNORECASE), "group": 2},
{"pattern": re.compile(r"(\d+)", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r'(\d+)v\d+', re.IGNORECASE), "group": 1},
]
def roman_to_int(s) -> int:
"""
:param s: 罗马数字字符串
罗马数字转整数
"""
roman_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
total = 0
prev_value = 0
ova_patterns = [
re.compile(r".*?(OVA|OAD).*?", re.IGNORECASE),
re.compile(r"\d+\.5"),
re.compile(r"00")
]
for char in reversed(s): # 反向遍历罗马数字字符串
current_value = roman_dict[char]
if current_value >= prev_value:
total += current_value # 如果当前值大于等于前一个值,加上当前值
else:
total -= current_value # 如果当前值小于前一个值,减去当前值
prev_value = current_value
final_season_patterns = [
re.compile('final season', re.IGNORECASE),
re.compile('The Final', re.IGNORECASE),
re.compile(r'\sFinal')
]
return total
movie_patterns = [
re.compile("Movie", re.IGNORECASE),
re.compile("the Movie", re.IGNORECASE),
]
@dataclass
class VCBMetaBase:
    """Parsed metadata extracted from a VCB-Studio release file name."""
    # Original file name (no extension), lower-cased
    original_title: str = ""
    # Parsed title with season and episode markers removed
    title: str = ""
    # Media type: "TV" or "Movie" (defaults to TV)
    type: str = "TV"
    # Title that may still carry a season marker (first-pass parse result)
    season_title: str = ""
    # Candidate strings that may contain the episode number
    ep_title: List[str] = None
    # Detected season number (None when not detected)
    season: int = None
    # Detected episode number (None when not detected)
    ep: int = None
    # Whether the release is an OVA/OAD
    is_ova: bool = False
    # TMDB ID
    tmdb_id: int = None
blocked_words = ["vcb-studio", "360p", "480p", "720p", "1080p", "2160p", "hdr", "x265", "x264", "aac", "flac"]
class ReMeta:
    """Parser for VCB-Studio release file names.

    NOTE(review): the class-level pattern attributes below belong to an
    older revision; module-level season_patterns / episode_patterns
    duplicate them with IGNORECASE flags — verify against version control
    which set is authoritative.
    """
    # Parsed title
    title: str = None
    # Parsed episode number
    ep: int = None
    # Parsed season number
    season: int = None
    # Special-season (season 0) detection flag
    is_special = False
    # OVA/OAD handling switch
    ova_switch: bool = False
    # High-performance (parallel) processing switch
    high_performance = False
    # Season extraction rules (older, case-sensitive variant)
    season_patterns = [
        {"pattern": re.compile(r"S(\d+)$"), "group": 1},
        {"pattern": re.compile(r"(\d+)$"), "group": 1},
        {"pattern": re.compile(r"(\d+)(st|nd|rd|th)?\s*[Ss][Ee][Aa][Ss][Oo][Nn]"), "group": 1},
        {"pattern": re.compile(r"(.*) ?\s*[Ss][Ee][Aa][Ss][Oo][Nn] (\d+)"), "group": 2},
        {"pattern": re.compile(r"\s(II|III|IV|V|VI|VII|VIII|IX|X)$"), "group": "1"}
    ]
    # Episode extraction rules; chunks are expected to still carry brackets here
    episode_patterns = [
        {"pattern": re.compile(r"\[(\d+)\((\d+)\)]"), "group": 2},
        {"pattern": re.compile(r"\[(\d+)]"), "group": 1},
        {"pattern": re.compile(r'\[(\d+)v\d+]'), "group": 1},
    ]
    # OVA/OAD markers, bracketed form
    _ova_patterns = [re.compile(r"\[.*?(OVA|OAD).*?]"),
                     re.compile(r"\[\d+\.5]"),
                     re.compile(r"\[00\]")]
    # "Final season" markers
    final_season_patterns = [re.compile('final season', re.IGNORECASE),
                             re.compile('The Final', re.IGNORECASE),
                             re.compile(r'\sFinal')
                             ]
    # User-supplied custom season regex patterns
    _custom_season_patterns = []
def __init__(self, ova_switch: bool = False, high_performance: bool = False):
def __init__(self, ova_switch: bool = False, custom_season_patterns: list[dict] = None):
self.meta = None
# TODO:自定义季度匹配规则
self.custom_season_patterns = custom_season_patterns
self.season_patterns = season_patterns
self.ova_switch = ova_switch
self.high_performance = high_performance
self.vcb_meta = VCBMetaBase()
self.is_ova = False
def is_tv(self, title: str) -> bool:
"""
判断是否是TV
"""
if title.count("[") != 4 and title.count("]") != 4:
self.vcb_meta.type = "Movie"
self.vcb_meta.title = re.sub(r'\[.*?\]', '', title).strip()
return False
return True
    def handel_file(self, file_path: Path):
        """Entry point: parse a VCB-Studio release file name into metadata.

        NOTE(review): this body appears to interleave two revisions of the
        method (an older MetaInfoPath-driven flow and a newer vcb_meta-driven
        flow) — verify against version control before relying on it.

        :param file_path: path of the media file to identify
        :return: populated MetaInfoPath, or None when the file is not a
                 VCB-Studio release
        """
        file_name = file_path.stem.strip().lower()
        self.vcb_meta.original_title = file_name
        if not self.is_tv(file_name):
            logger.warn(
                "不符合VCB-Studio的剧集命名规范归类为电影,跳过剧集模块处理。注意:年份较为久远的作品可能在此会判断错误")
            self.parse_movie()
        else:
            self.tv_mode()
        self.is_ova = self.vcb_meta.is_ova
        meta = MetaInfoPath(file_path)
        self.title = meta.title
        self.title = Path(self.title).stem.strip()
        if 'VCB-Studio' not in meta.title:
            logger.warn("不属于VCB的作品不处理")
            return None
        if meta.title.count("[") != 4 and meta.title.count("]") != 4:
            # Probably a movie: movies carry only three [] groups, so stripping
            # every [...] leaves just the movie name.
            logger.warn("不符合VCB-Studio的剧集命名规范跳过剧集模块处理交给默认处理逻辑")
            meta.title = re.sub(r'\[.*?\]', '', meta.title).strip()
            meta.en_name = meta.title
            return meta
        split_title: List[str] | None = self.split_season_ep(self.title)
        if split_title:
            self.handle_season_ep(split_title)
        if self.season is not None:
            meta.begin_season = self.season
        else:
            logger.warn("未识别出季度,默认处理逻辑返回第一季")
        if self.ep is not None:
            meta.begin_episode = self.ep
        else:
            logger.warn("未识别出集数,默认处理逻辑返回第一集")
        meta.title = self.title
        meta.en_name = self.title
        logger.info(f"识别出季度为{self.season},集数为{self.ep},标题为:{self.title}")
        # Newer flow: copy everything parsed into vcb_meta onto the meta object.
        meta.title = self.vcb_meta.title
        meta.en_name = self.vcb_meta.title
        if self.vcb_meta.type == "Movie":
            meta.type = MediaType.MOVIE
        else:
            meta.type = MediaType.TV
        if self.vcb_meta.ep is not None:
            meta.begin_episode = self.vcb_meta.ep
        if self.vcb_meta.season is not None:
            meta.begin_season = self.vcb_meta.season
        if self.vcb_meta.tmdb_id is not None:
            meta.tmdbid = self.vcb_meta.tmdb_id
        return meta
    # Split the season-bearing part from the episode-bearing part
    def split_season_ep(self, pre_title: str):
        """Split the title into a season part and an episode part.

        NOTE(review): superseded variant. ``re.findall(...)[1]`` raises
        IndexError (rather than yielding a falsy value) when fewer than two
        bracket groups exist, so the guard below can never fire — confirm
        before reusing.

        :param pre_title: raw lower-cased file title
        :return: [season_part, episode_part], or None when no episode part
        """
        split_ep = re.findall(r"(\[.*?])", pre_title)[1]
        if not split_ep:
            logger.warn("未识别出集数位置信息,结束识别!")
            return None
        split_title = re.sub(r"\[.*?\]", "", pre_title).strip()
        logger.info(f"分离出包含季度的部分:{split_title} \n 分离出包含集数的部分: {split_ep}")
        return [split_title, split_ep]
def split_season_ep(self):
# 把所有的[] 里面的内容获取出来,不需要[]本身
self.vcb_meta.ep_title = re.findall(r'\[(.*?)\]', self.vcb_meta.original_title)
# 去除所有[]后只剩下剧名
self.vcb_meta.season_title = re.sub(r"\[.*?\]", "", self.vcb_meta.original_title).strip()
if self.vcb_meta.ep_title:
self.culling_blocked_words()
logger.info(
f"分离出包含可能季度的内容部分:{self.vcb_meta.season_title} | 可能包含集数的内容部分: {self.vcb_meta.ep_title}")
self.vcb_meta.title = self.vcb_meta.season_title
if not self.vcb_meta.ep_title:
self.vcb_meta.title = self.vcb_meta.season_title
logger.warn("未识别出可能存在集数位置的信息,跳过剩余识别步骤!")
    def handle_season_ep(self, title: List[str]):
        """Run season parsing (title[0]) and episode parsing (title[1]) —
        optionally in parallel — and merge the results into self.

        NOTE(review): superseded variant kept by the merge; errors from the
        executor are only printed, leaving both results as Future objects —
        confirm before reuse.

        :param title: [season_part, episode_part] from split_season_ep
        """
        if self.high_performance:
            with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
                title_season_result = executor.submit(self.handle_season, title[0])
                ep_result = executor.submit(self.re_ep, title[1], )
                try:
                    title_season_result = title_season_result.result()  # Blocks until the task is complete.
                    ep_result = ep_result.result()  # Blocks until the task is complete.
                except Exception as exc:
                    print('Generated an exception: %s' % exc)
        else:
            title_season_result = self.handle_season(title[0])
            ep_result = self.re_ep(title[1])
        self.title = title_season_result["title"]
        is_ova = ep_result["is_ova"]
        if ep_result["ep"] is not None:
            self.ep = ep_result["ep"]
        if title_season_result["season"]:
            self.season = title_season_result["season"]
        if is_ova:
            # Specials are mapped to season 0.
            self.season = 0
            self.is_special = True
def tv_mode(self):
logger.info("开始分离季度和集数部分")
self.split_season_ep()
if not self.vcb_meta.ep_title:
return
self.parse_season()
self.parse_episode()
# 处理季度
def handle_season(self, pre_title: str) -> dict:
title_season = {"title": pre_title, "season": 1}
for season_pattern in self.season_patterns:
pattern = season_pattern["pattern"]
group = season_pattern["group"]
match = pattern.search(pre_title)
def parse_season(self):
"""
从标题中解析季度
"""
flag = False
for pattern in season_patterns:
match = pattern["pattern"].search(self.vcb_meta.season_title)
if match:
if type(group) == str:
title_season["season"] = roman_to_int(match.group(int(group)))
title_season["title"] = re.sub(pattern, "", pre_title).strip()
if isinstance(pattern["group"], int):
self.vcb_meta.season = int(match.group(pattern["group"]))
else:
title_season["season"] = int(match.group(group))
title_season["title"] = re.sub(pattern, "", pre_title).strip()
return title_season
for final_season_pattern in self.final_season_patterns:
match = final_season_pattern.search(pre_title)
if match:
logger.info("识别出最终季度,开始处理!")
title_season["title"] = re.sub(final_season_pattern, "", pre_title).strip()
title_season["season"] = self.handle_final_season(title=pre_title)
break
return title_season
self.vcb_meta.season = self.roman_to_int(match.group(pattern["group"]))
# 匹配成功后,标题中去除季度信息
self.vcb_meta.title = pattern["pattern"].sub("", self.vcb_meta.season_title).strip
logger.info(f"识别出季度为{self.vcb_meta.season}")
return
logger.info(f"正常匹配季度失败开始匹配ova/oad/最终季度")
if not flag:
# 匹配是否为最终季
for pattern in final_season_patterns:
if pattern.search(self.vcb_meta.season_title):
logger.info("命中到最终季匹配规则")
self.vcb_meta.title = pattern.sub("", self.vcb_meta.season_title).strip()
self.handle_final_season()
return
logger.info("未识别出最终季度开始匹配OVA/OAD")
# 匹配是否为OVA/OAD
if "ova" in self.vcb_meta.season_title or "oad" in self.vcb_meta.season_title:
logger.info("季度部分命中到OVA/OAD匹配规则")
if self.ova_switch:
logger.info("开启OVA/OAD处理逻辑")
self.vcb_meta.is_ova = True
for pattern in ova_patterns:
if pattern.search(self.vcb_meta.season_title):
self.vcb_meta.title = pattern.sub("", self.vcb_meta.season_title).strip()
self.vcb_meta.title = re.sub("ova|oad", "", self.vcb_meta.season_title).strip()
self.vcb_meta.season = 0
return
logger.warn("未识别出季度,默认处理逻辑返回第一季")
self.vcb_meta.title = self.vcb_meta.season_title
self.vcb_meta.season = 1
    # Resolve the season number for titles named with a "Final" marker
    def handle_final_season(self, title: str) -> int | None:
        """Look the show up via TMDB and return its season count.

        NOTE(review): superseded variant; assumes search() returns a
        (something, medias) pair — confirm against MediaChain's signature.

        :param title: show title to search for
        :return: season count of the most popular TV match, or None
        """
        medias = MediaChain().search(title=title)[1]
        if not medias:
            logger.warn("没有找到对应的媒体信息!")
            return
        # Filter by type: keep only TV / anime results
        medias = [media for media in medias if media.type == MediaType.TV]
        if not medias:
            logger.warn("没有找到动漫或电视剧的媒体信息!")
            return
        media = sorted(medias, key=lambda x: x.popularity, reverse=True)[0]
        media_tmdb_id = media.tmdb_id
        seasons_info = TmdbChain().tmdb_seasons(tmdbid=media_tmdb_id)
        if seasons_info is None:
            logger.warn("无法获取最终季")
        else:
            logger.info(f"获取到最终季,季度为{len(seasons_info)}")
            return len(seasons_info)
def parse_episode(self):
"""
从标题中解析集数
"""
# 从ep_title中剔除不相关的内容之后只剩下存在集数的字符串
ep = self.vcb_meta.ep_title[0]
for pattern in episode_patterns:
match = pattern["pattern"].search(ep)
if match:
self.vcb_meta.ep = int(match.group(pattern["group"]))
logger.info(f"识别出集数为{self.vcb_meta.ep}")
return
# 直接进入判断是否为OVA/OAD
for pattern in ova_patterns:
if pattern.search(ep):
self.vcb_meta.is_ova = True
# 直接获取数字
self.vcb_meta.ep = int(re.search(r"\d+", ep).group()) or 1
logger.info(f"OVA模式下识别出集数为{self.vcb_meta.ep}")
self.vcb_meta.season = 0
return
def re_ep(self, ep_title: str, ) -> dict:
def culling_blocked_words(self):
"""
# 集数匹配处理模块
:param ep_title: 从title解析出的集数,ep_title固定格式[集数]
1.先判断是否存在OVA/OAD,形如:[OVA],[12(OVA)],[12.5]这种形式都是属于OVA/OAD交给处理OVA模块处理
2.集数通常有两种情况一种:[12]直接性,另一种:[12(24)],这一种应该去括号内的为集数
:return: 集数(int)
从ep_title中剔除不相关的内容
"""
ep_ova = {"ep": None, "is_ova": False}
for ova_pattern in self._ova_patterns:
match = ova_pattern.search(ep_title)
if match:
ep_ova["is_ova"] = True
ep_ova["ep"] = 1
return ep_ova
for ep_pattern in self.episode_patterns:
pattern = ep_pattern["pattern"]
group = ep_pattern["group"]
match = pattern.search(ep_title)
if match:
ep_ova["ep"] = int(match.group(group))
return ep_ova
return ep_ova
blocked_set = set(blocked_words) # 将阻止词列表转换为集合
result = [ep for ep in self.vcb_meta.ep_title if not any(word in ep for word in blocked_set)]
self.vcb_meta.ep_title = result
def handle_final_season(self):
_, medias = MediaChain().search(title=self.vcb_meta.title)
if not medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
filter_medias = [media for media in medias if media.type == MediaType.TV]
if not filter_medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
medias = [media for media in filter_medias if media.popularity or media.vote_average]
if not medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
# 获取欢迎度最高或者评分最高的媒体
medias_sorted = sorted(medias, key=lambda x: x.popularity or x.vote_average, reverse=True)[0]
self.vcb_meta.tmdb_id = medias_sorted.tmdb_id
if medias_sorted.tmdb_id:
seasons_info = TmdbChain().tmdb_seasons(tmdbid=medias_sorted.tmdb_id)
if seasons_info:
self.vcb_meta.season = len(seasons_info)
logger.info(f"获取到最终季度,季度为{self.vcb_meta.season}")
return
logger.warning("无法获取到最终季度信息季度返回默认值1")
self.vcb_meta.season = 1
def parse_movie(self):
logger.info("开始尝试剧场版模式解析")
for pattern in movie_patterns:
if pattern.search(self.vcb_meta.title):
logger.info("命中剧场版匹配规则,加上剧场版标识辅助识别")
self.vcb_meta.type = "Movie"
self.vcb_meta.title = pattern.sub("", self.vcb_meta.title).strip()
self.vcb_meta.title = self.vcb_meta.title
return
    def find_ova_episode(self):
        """
        Search for the episode number of an OVA.
        TODO: fuzzy-match the OVA episode number (not yet implemented)
        """
        pass
@staticmethod
def roman_to_int(s) -> int:
"""
:param s: 罗马数字字符串
罗马数字转整数
"""
roman_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
total = 0
prev_value = 0
for char in reversed(s): # 反向遍历罗马数字字符串
current_value = roman_dict[char]
if current_value >= prev_value:
total += current_value # 如果当前值大于等于前一个值,加上当前值
else:
total -= current_value # 如果当前值小于前一个值,减去当前值
prev_value = current_value
return total
# if __name__ == '__main__':
# ReMeta(
# ova_switch=True,
# ).handel_file(Path(
# r"[Airota&Nekomoe kissaten&VCB-Studio] Yuru Camp [Heya Camp EP00][Ma10p_1080p][x265_flac].mkv"))

View File

@@ -1,9 +1,11 @@
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class WebHook(_PluginBase):
@@ -14,7 +16,7 @@ class WebHook(_PluginBase):
# 插件图标
plugin_icon = "webhook.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -134,6 +136,9 @@ class WebHook(_PluginBase):
if not self._enabled or not self._webhook_url:
return
if not event or not event.event_type:
return
def __to_dict(_event):
"""
递归将对象转换为字典
@@ -159,21 +164,27 @@ class WebHook(_PluginBase):
else:
return str(_event)
version = getattr(settings, "VERSION_FLAG", "v1")
event_type = event.event_type if version == "v1" else event.event_type.value
event_info = {
"type": event.event_type,
"type": event_type,
"data": __to_dict(event.event_data)
}
if self._method == 'POST':
ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info)
else:
ret = RequestUtils().get_res(self._webhook_url, params=event_info)
if ret:
logger.info("发送成功:%s" % self._webhook_url)
elif ret is not None:
logger.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}")
else:
logger.error("发送失败,未获取到返回信息")
try:
if self._method == 'POST':
ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info)
else:
ret = RequestUtils().get_res(self._webhook_url, params=event_info)
if ret:
logger.info(f"发送成功:{self._webhook_url}")
elif ret is not None:
logger.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}")
else:
logger.error("发送失败,未获取到返回信息")
except Exception as e:
logger.error(f"发送请求时发生异常:{e}")
def stop_service(self):
"""

View File

@@ -75,7 +75,7 @@ class DoubanHelper:
response = RequestUtils(headers=self.headers).get_res(url)
if not response.status_code == 200:
logger.error(f"搜索 {title} 失败 状态码:{response.status_code}")
return None
return None, None, None
# self.headers["Cookie"] = response.cookies
soup = BeautifulSoup(response.text.encode('utf-8'), 'lxml')
title_divs = soup.find_all("div", class_="title")

View File

@@ -31,7 +31,7 @@ class ZvideoHelper(_PluginBase):
# 插件图标
plugin_icon = "zvideo.png"
# 插件版本
plugin_version = "1.3"
plugin_version = "1.4"
# 插件作者
plugin_author = "DzAvril"
# 作者主页