Merge branch 'jxxghp:main' into main

This commit is contained in:
TimoYoung
2025-04-21 18:07:19 +08:00
committed by GitHub
13 changed files with 817 additions and 230 deletions

View File

@@ -313,11 +313,12 @@
"name": "IYUU自动辅种",
"description": "基于IYUU官方Api实现自动辅种。",
"labels": "做种,IYUU",
"version": "1.9.10",
"version": "1.9.11",
"icon": "IYUU.png",
"author": "jxxghp",
"level": 2,
"history": {
"v1.9.11": "修复馒头不能辅种的问题",
"v1.9.10": "Revert 辅种结束后,一起开始所有辅种后暂停的种子(排除了出错的种子)",
"v1.9.9": "修复qb辅种结束后自动开始暂停的种子",
"v1.9.8": "辅种结束后,一起开始所有辅种后暂停的种子(排除了出错的种子)",
@@ -738,13 +739,17 @@
},
"TrackerEditor": {
"name": "Tracker替换",
"description": "批量替换种子tracker,支持周期性巡检(如为TR仅支持4.0以上版本)。",
"description": "批量替换修改种子tracker。",
"labels": "做种",
"version": "1.5",
"version": "1.8",
"icon": "trackereditor_A.png",
"author": "honue",
"level": 1,
"v2": true
"v2": true,
"history": {
"v1.8": "修复老版本tr修改出错问题优化log输出",
"v1.7": "支持多个tracker替换配置"
}
},
"ContractCheck": {
"name": "契约检查",
@@ -833,12 +838,13 @@
"name": "MoviePilot服务器监控",
"description": "在仪表板中实时显示MoviePilot公共服务器状态。",
"labels": "仪表板",
"version": "1.1",
"version": "1.2",
"icon": "Duplicati_A.png",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.2": "优化数量显示",
"v1.1": "增加详情界面显示"
}
},
@@ -897,12 +903,14 @@
"name": "极影视助手",
"description": "极影视功能扩展",
"labels": "媒体库",
"version": "1.4",
"version": "1.6",
"icon": "zvideo.png",
"author": "DzAvril",
"level": 1,
"v2": true,
"history": {
"v1.6": "增加定期更新豆瓣评分",
"v1.5": "适配新版极影视",
"v1.4": "修复请求失败后返回值数量不正确的问题",
"v1.3": "降低对豆瓣接口的请求频率",
"v1.2": "修复无法获取豆瓣评分的问题",
@@ -970,12 +978,14 @@
"name": "Bangumi收藏订阅",
"description": "Bangumi用户收藏添加到订阅",
"labels": "订阅",
"version": "1.5.4",
"version": "1.5.6",
"icon": "bangumi_b.png",
"author": "Attente",
"level": 1,
"v2": true,
"history": {
"v1.5.6": "修复远程命令名称, 完善远程命令回执",
"v1.5.5": "添加剧集组(需V2.3.8+), 新增远程命令",
"v1.5.4": "fix: wikrin/MoviePilot-Plugins/issues/2",
"v1.5.3": "增加多语言标题匹配, 去除未实现设置项",
"v1.5.2": "修复定时任务未正确注册的问题",

View File

@@ -224,11 +224,12 @@
"name": "IYUU自动辅种",
"description": "基于IYUU官方Api实现自动辅种。",
"labels": "做种,IYUU",
"version": "2.13",
"version": "2.14",
"icon": "IYUU.png",
"author": "jxxghp,CKun",
"level": 2,
"history": {
"v2.14": "修复馒头不能辅种的问题",
"v2.13": "开启跳过校验后需手动开启自动开始",
"v2.12": "增加qb下载器分类复用配置",
"v2.11": "修复qb跳过校验不自动开始的问题",
@@ -249,11 +250,12 @@
"name": "青蛙辅种助手",
"description": "参考ReseedPuppy和IYUU辅种插件实现自动辅种支持站点青蛙、AGSVPT、麒麟、UBits、聆音、憨憨等。",
"labels": "做种",
"version": "3.0",
"version": "3.0.1",
"icon": "qingwa.png",
"author": "233@qingwa",
"level": 2,
"history": {
"v3.0.1": "遗漏了一个私有属性",
"v3.0": "兼容MoviePilot V2 版本"
}
},
@@ -361,11 +363,12 @@
"name": "豆瓣想看",
"description": "同步豆瓣想看数据,自动添加订阅。",
"labels": "订阅",
"version": "2.0.1",
"version": "2.1.0",
"icon": "douban.png",
"author": "jxxghp",
"author": "jxxghp,dwhmofly",
"level": 2,
"history": {
"v2.1.0": "新增配置项-搜索下载,开启后会优先搜索站点资源进行下载,下载不到才会添加订阅",
"v2.0.1": "支持将豆瓣ID转换为MoviePilot中已有用户在用户个人信息中绑定豆瓣ID需要MoviePilot v2.2.6+",
"v2.0.0": "优化cron表达式输入"
}
@@ -398,13 +401,15 @@
"name": "绕过Trackers",
"description": "提供tracker服务器IP地址列表帮助IPv6连接绕过OpenClash",
"labels": "工具",
"version": "1.1",
"version": "1.3",
"icon": "Clash_A.png",
"author": "wumode",
"level": 2,
"history": {
"v1.0": "支持自定义Trackers",
"v1.1": "更新列表后发送通知"
"v1.1": "更新列表后发送通知",
"v1.2": "修复Trackers加载错误",
"v1.3": "新增一些Trackers"
}
}
}

View File

@@ -179,7 +179,7 @@ class CrossSeed(_PluginBase):
# 插件图标
plugin_icon = "qingwa.png"
# 插件版本
plugin_version = "3.0"
plugin_version = "3.0.1"
# 插件作者
plugin_author = "233@qingwa"
# 作者主页
@@ -197,6 +197,7 @@ class CrossSeed(_PluginBase):
sites = None
siteoper = None
torrent = None
downloader_helper = None
# 开关
_enabled = False
_cron = None

View File

@@ -10,7 +10,7 @@ from apscheduler.triggers.cron import CronTrigger
from app import schemas
from app.chain.media import MediaChain
from app.db.user_oper import UserOper
from app.schemas.types import MediaType
from app.schemas.types import MediaType, EventType, SystemConfigKey
from app.chain.download import DownloadChain
from app.chain.search import SearchChain
@@ -22,7 +22,6 @@ from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
lock = Lock()
@@ -35,9 +34,9 @@ class DoubanSync(_PluginBase):
# 插件图标
plugin_icon = "douban.png"
# 插件版本
plugin_version = "2.0.1"
plugin_version = "2.1.0"
# 插件作者
plugin_author = "jxxghp"
plugin_author = "jxxghp,dwhmofly"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
@@ -67,6 +66,7 @@ class DoubanSync(_PluginBase):
_users: str = ""
_clear: bool = False
_clearflag: bool = False
_search_download = False
def init_plugin(self, config: dict = None):
self.rsshelper = RssHelper()
@@ -88,6 +88,7 @@ class DoubanSync(_PluginBase):
self._users = config.get("users")
self._onlyonce = config.get("onlyonce")
self._clear = config.get("clear")
self._search_download = config.get("search_download")
if self._enabled or self._onlyonce:
if self._onlyonce:
@@ -308,7 +309,7 @@ class DoubanSync(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -319,6 +320,45 @@ class DoubanSync(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4,
'style': 'display:flex;align-items: center;'
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'search_download',
'label': '搜索下载',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '搜索下载开启后,会优先按订阅优先级规则组搜索过滤下载,搜索站点为设置的订'
'阅站点,下载失败/无资源/剧集不完整时仍会添加订阅'
}
}
]
}
]
}
@@ -331,7 +371,8 @@ class DoubanSync(_PluginBase):
"cron": "*/30 * * * *",
"days": 7,
"users": "",
"clear": False
"clear": False,
"search_download": False
}
def get_page(self) -> List[dict]:
@@ -360,6 +401,8 @@ class DoubanSync(_PluginBase):
mtype = history.get("type")
time_str = history.get("time")
doubanid = history.get("doubanid")
action = "下载" if history.get("action") == "download" else "订阅" if history.get("action") == "subscribe" \
else "存在" if history.get("action") == "exist" else history.get("action")
contents.append(
{
'component': 'VCard',
@@ -434,6 +477,13 @@ class DoubanSync(_PluginBase):
'class': 'pa-0 px-2'
},
'text': f'时间:{time_str}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'操作:{action}'
}
]
}
@@ -464,7 +514,8 @@ class DoubanSync(_PluginBase):
"cron": self._cron,
"days": self._days,
"users": self._users,
"clear": self._clear
"clear": self._clear,
"search_download": self._search_download
})
def delete_history(self, doubanid: str, apikey: str):
@@ -590,16 +641,59 @@ class DoubanSync(_PluginBase):
else:
# 用户转换
real_name = self.__get_username_by_douban(user_id)
# 添加订阅
logger.info(f'{mediainfo.title_year} 媒体库中不存在或不完整,添加订阅 ...')
self.subscribechain.add(title=mediainfo.title,
year=mediainfo.year,
mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id,
season=meta.begin_season,
exist_ok=True,
username=real_name or f"豆瓣{nickname}想看")
action = "subscribe"
if self._search_download:
# 先搜索资源
logger.info(f'媒体库中不存在或不完整,开启搜索下载,开始搜索 {mediainfo.title_year} 的资源...')
# 按订阅优先级规则组搜索过滤,站点为设置的订阅站点
filter_results = self.searchchain.process(
mediainfo=mediainfo,
no_exists=no_exists,
sites=self.systemconfig.get(SystemConfigKey.RssSites),
rule_groups=self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
)
if filter_results:
logger.info(f'找到符合条件的资源,开始下载 {mediainfo.title_year} ...')
action = "download"
if mediainfo.type == MediaType.MOVIE:
# 电影类型调用单次下载
download_id = self.downloadchain.download_single(
context=filter_results[0],
username=real_name or f"豆瓣{nickname}想看"
)
if not download_id:
logger.info(f'下载失败,添加订阅 {mediainfo.title_year} ...')
self.add_subscribe(mediainfo, meta, nickname, real_name)
action = "subscribe"
else:
# 电视剧类型调用批量下载
downloaded_list, no_exists = self.downloadchain.batch_download(
contexts=filter_results,
no_exists=no_exists,
username=real_name or f"豆瓣{nickname}想看"
)
if no_exists:
logger.info(f'下载失败或未下载完所有剧集,添加订阅 {mediainfo.title_year} ...')
sub_id, message = self.add_subscribe(mediainfo, meta, nickname, real_name)
action = "subscribe"
# 更新订阅信息
logger.info(f'根据缺失剧集更新订阅信息 {mediainfo.title_year} ...')
subscribe = self.subscribechain.subscribeoper.get(sub_id)
if subscribe:
self.subscribechain.finish_subscribe_or_not(subscribe=subscribe,
meta=meta,
mediainfo=mediainfo,
downloads=downloaded_list,
lefts=no_exists)
else:
logger.info(f'未找到符合条件资源,添加订阅 {mediainfo.title_year} ...')
self.add_subscribe(mediainfo, meta, nickname, real_name)
action = "subscribe"
else:
logger.info(f'媒体库中不存在或不完整,未开启搜索下载,添加订阅 {mediainfo.title_year} ...')
self.add_subscribe(mediainfo, meta, nickname, real_name)
action = "subscribe"
# 存储历史记录
history.append({
"action": action,
@@ -620,6 +714,17 @@ class DoubanSync(_PluginBase):
# 缓存只清理一次
self._clearflag = False
def add_subscribe(self, mediainfo, meta, nickname, real_name):
return self.subscribechain.add(
title=mediainfo.title,
year=mediainfo.year,
mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id,
season=meta.begin_season,
exist_ok=True,
username=real_name or f"豆瓣{nickname}想看"
)
@eventmanager.register(EventType.PluginAction)
def remote_sync(self, event: Event):
"""

View File

@@ -33,7 +33,7 @@ class IYUUAutoSeed(_PluginBase):
# 插件图标
plugin_icon = "IYUU.png"
# 插件版本
plugin_version = "2.13"
plugin_version = "2.14"
# 插件作者
plugin_author = "jxxghp,CKun"
# 作者主页
@@ -1241,11 +1241,11 @@ class IYUUAutoSeed(_PluginBase):
将mteam种子下载链接域名替换为使用API
"""
api_url = re.sub(r'//[^/]+\.m-team', '//api.m-team', site.get('url'))
ua = site.get("ua") or settings.USER_AGENT
res = RequestUtils(
headers={
'Content-Type': 'application/json',
'User-Agent': f'{site.get("ua")}',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': f'{ua}',
'Accept': 'application/json, text/plain, */*',
'x-api-key': apikey
}
@@ -1339,7 +1339,7 @@ class IYUUAutoSeed(_PluginBase):
logger.info(f"正在获取种子下载链接:{page_url} ...")
res = RequestUtils(
cookies=site.get("cookie"),
ua=site.get("ua"),
ua=site.get("ua") or settings.USER_AGENT,
proxies=settings.PROXY if site.get("proxy") else None
).get_res(url=page_url)
if res is not None and res.status_code in (200, 500):

View File

@@ -1,8 +1,9 @@
from typing import Any, List, Dict, Tuple, Optional
from datetime import datetime, timedelta
import pickle
import ipaddress
import socket
import base64
import json
from apscheduler.schedulers.background import BackgroundScheduler
from fastapi import Response
@@ -29,7 +30,7 @@ class ToBypassTrackers(_PluginBase):
# 插件图标
plugin_icon = "Clash_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.3"
# 插件作者
plugin_author = "wumode"
# 作者主页
@@ -74,11 +75,12 @@ class ToBypassTrackers(_PluginBase):
self.stop_service()
self.siteoper = SiteOper()
self.trackers = {}
self.ipv6_txt = self.get_data('ipv6_txt') if self.get_data('ipv6_txt') else ""
self.ipv4_txt = self.get_data('ipv4_txt') if self.get_data('ipv4_txt') else ""
self.ipv6_txt = self.get_data("ipv6_txt") if self.get_data("ipv6_txt") else ""
self.ipv4_txt = self.get_data("ipv4_txt") if self.get_data("ipv4_txt") else ""
try:
with open(f"{settings.ROOT_PATH}/app/plugins/tobypasstrackers/sites/trackers", "rb") as f:
self.trackers = pickle.load(f)
with open(f"{settings.ROOT_PATH}/app/plugins/tobypasstrackers/sites/trackers", "r", encoding="utf-8") as f:
base64_str = f.read()
self.trackers = json.loads(base64.b64decode(base64_str).decode("utf-8"))
except Exception as e:
logger.error(f"插件加载错误:{e}")
# 配置
@@ -103,7 +105,7 @@ class ToBypassTrackers(_PluginBase):
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._onlyonce:
logger.info(f"立即运行一次")
self._scheduler.add_job(self.update_ips, 'date',
self._scheduler.add_job(self.update_ips, "date",
run_date=datetime.now(
tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3)
)
@@ -573,8 +575,8 @@ class ToBypassTrackers(_PluginBase):
return [str(sub_net) for sub_net in remaining_ranges]
# replacing = data.get('replace')
chnroute6_lists_url = 'https://ispip.clang.cn/all_cn_ipv6.txt'
chnroute_lists_url = 'https://ispip.clang.cn/all_cn.txt'
chnroute6_lists_url = "https://ispip.clang.cn/all_cn_ipv6.txt"
chnroute_lists_url = "https://ispip.clang.cn/all_cn.txt"
ipv6_list = []
ip_list = []
domains = []
@@ -623,7 +625,7 @@ class ToBypassTrackers(_PluginBase):
domains.append(custom_tracker)
for domain in domains:
if self._bypass_ipv6:
ipv6_addresses = DnsHelper.query_domain(domain, self._dns_input, 'AAAA')
ipv6_addresses = DnsHelper.query_domain(domain, self._dns_input, "AAAA")
if ipv6_addresses is None:
logger.warn(f"{domain} AAAA 记录查询失败")
failed_msg.append(f"{domain_name_map.get(domain, domain)}{domain}: AAAA记录查询失败")
@@ -685,7 +687,7 @@ class ToBypassTrackers(_PluginBase):
ip_list.pop(index)
length = int(ip_larger.split('/')[1])
if length < 12:
remaining_ip = __exclude_ip_range(ip_larger, f'{ip}/{length + 8}')
remaining_ip = __exclude_ip_range(ip_larger, f"{ip}/{length + 8}")
ip_list.extend(remaining_ip)
for ip in exempted_ipv6:
index = __search_ip(ip, ipv6_list)
@@ -695,7 +697,7 @@ class ToBypassTrackers(_PluginBase):
ipv6_list.pop(index)
length = int(ip_larger.split('/')[1])
if length < 32:
remaining_ip = __exclude_ip_range(ip_larger, f'{ip}/{min(32, length + 8)}')
remaining_ip = __exclude_ip_range(ip_larger, f"{ip}/{min(32, length + 8)}")
ipv6_list.extend(remaining_ip)
self.ipv4_txt = "\n".join(ip_list)
self.ipv6_txt = "\n".join(ipv6_list)

View File

@@ -1,17 +1,17 @@
# 基础库
import datetime
import json
from typing import Any, Dict, List, Optional, Type
from typing import Any, Dict, List
# 第三方库
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import pytz
from sqlalchemy import JSON
from sqlalchemy.orm import Session
# 项目库
from app.chain.subscribe import SubscribeChain, Subscribe
from app.chain.download import DownloadChain
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
@@ -23,8 +23,9 @@ from app.db.subscribe_oper import SubscribeOper
from app.db import db_query
from app.helper.subscribe import SubscribeHelper
from app.log import logger
from app.modules.themoviedb import TmdbApi
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.schemas.types import EventType, MediaType, NotificationType
from app.utils.http import RequestUtils
@@ -36,7 +37,7 @@ class BangumiColl(_PluginBase):
# 插件图标
plugin_icon = "bangumi_b.png"
# 插件版本
plugin_version = "1.5.4"
plugin_version = "1.5.6"
# 插件作者
plugin_author = "Attente"
# 作者主页
@@ -50,9 +51,7 @@ class BangumiColl(_PluginBase):
# 私有属性
_scheduler = None
siteoper: SiteOper = None
subscribehelper: SubscribeHelper = None
subscribeoper: SubscribeOper = None
_is_v2 = True if settings.VERSION_FLAG else False
# 配置属性
_enabled: bool = False
@@ -64,12 +63,16 @@ class BangumiColl(_PluginBase):
_collection_type = []
_save_path: str = ""
_sites: list = []
_match_groups: bool = False
_group_select_order: list = []
def init_plugin(self, config: dict = None):
self.subscribechain = SubscribeChain()
self.downloadchain = DownloadChain()
self.siteoper = SiteOper()
self.subscribechain = SubscribeChain()
self.subscribehelper = SubscribeHelper()
self.subscribeoper = SubscribeOper()
self.tmdbapi = TmdbApi()
# 停止现有任务
self.stop_service()
@@ -92,6 +95,8 @@ class BangumiColl(_PluginBase):
"collection_type",
"save_path",
"sites",
"match_groups",
"group_select_order",
):
setattr(self, f"_{key}", config.get(key, getattr(self, f"_{key}")))
# 获得所有站点
@@ -130,6 +135,8 @@ class BangumiColl(_PluginBase):
"collection_type": self._collection_type,
"save_path": self._save_path,
"sites": self._sites,
"match_groups": self._match_groups,
"group_select_order": self._group_select_order,
}
)
@@ -141,7 +148,7 @@ class BangumiColl(_PluginBase):
{"title": site.name, "value": site.id}
for site in self.siteoper.list_order_by_pri()
]
return form(sites_options)
return form(sites_options, self._is_v2)
def get_service(self) -> List[Dict[str, Any]]:
"""
@@ -185,7 +192,15 @@ class BangumiColl(_PluginBase):
pass
def get_command(self):
pass
return [
{
"cmd": "/bangumi_coll",
"event": EventType.PluginAction,
"desc": "Bangumi收藏订阅",
"category": "",
"data": {"action": "bangumi_coll"}
}
]
def get_page(self):
pass
@@ -193,20 +208,44 @@ class BangumiColl(_PluginBase):
def get_state(self):
return self._enabled
def bangumi_coll(self):
@eventmanager.register(EventType.PluginAction)
def action_event_handler(self, event: Event):
"""
远程命令处理
"""
event_data = event.event_data
if not event_data or event_data.get("action") != "bangumi_coll":
return
self.post_message(channel=event_data.get("channel"),
title=f"开始添加用户: {self._uid} 的收藏 ...",
userid=event_data.get("user"))
# 运行任务
msg = self.bangumi_coll()
self.post_message(channel=event_data.get("channel"),
title="添加完成" if not msg else msg,
userid=event_data.get("user"))
def bangumi_coll(self) -> str:
"""订阅Bangumi用户收藏"""
if not self._uid:
logger.error("设置UID")
return
logger.error("设置UID")
return "未设置UID"
try:
res = self.get_bgm_res(addr="UserCollections", id=self._uid)
items = self.parse_collection_items(res)
# 新增和移除条目
self.manage_subscriptions(items)
if msg := self.manage_subscriptions(items):
msg = "\n".join(list(msg.values()))
logger.info(msg)
except Exception as e:
logger.error(f"执行失败: {str(e)}")
msg = f"执行失败: {str(e)}"
logger.error(msg)
finally:
return msg
def parse_collection_items(self, response) -> Dict[int, Dict[str, Any]]:
"""解析获取的收藏条目"""
@@ -221,111 +260,319 @@ class BangumiColl(_PluginBase):
"name_cn": item['subject'].get('name_cn'),
"date": item['subject'].get('date'),
"eps": item['subject'].get('eps'),
"tags": [tag.get('name') for tag in item['subject'].get('tags', [{}])]
}
for item in data
if item.get("type") in self._collection_type
if item.get("type") in self._collection_type and item['subject'].get('date')\
# 只添加未来30天内放送的条目
and self.is_date_in_range(item['subject'].get('date'), threshold_days=30)[0]
}
def manage_subscriptions(self, items: Dict[int, Dict[str, Any]]):
"""管理订阅的新增和删除"""
# 查询订阅
db_sub = {
i.bangumiid: i.id
for i in self.subscribechain.subscribeoper.list()
if i.bangumiid
}
db_hist = self.get_subscribe_history()
new_sub = items.keys() - db_sub.keys() - db_hist
del_sub = db_sub.keys() - items.keys()
logger.debug(f"待新增条目:{new_sub}")
logger.debug(f"待移除条目:{del_sub}")
# bangumi 条目
_bgm = set(items.keys())
# 订阅记录
_sub = set(db_sub.keys())
# 插件数据
plugin_data: list = self.get_data(key="exclude") or []
# 订阅历史记录
db_hist: set = self.get_subscribe_history()
# 更新插件数据
_tmp = (set(plugin_data) & _bgm) - _sub - db_hist
new_sub = _bgm - _sub - db_hist - _tmp
del_sub = _sub - _bgm
if _tmp:
# 更新排除条目
self.save_data(key="exclude", value=list(_tmp))
if del_sub and self._notify:
del_items = {db_sub[i]: i for i in del_sub}
logger.info("开始移除订阅...")
self.delete_subscribe(del_items)
logger.info(f"开始移除订阅: {del_sub} ...")
self.delete_subscribe({db_sub[i]: i for i in del_sub})
logger.info("移除完成")
if new_sub:
logger.info("开始添加订阅...")
logger.info(f"开始添加订阅: {new_sub} ...")
msg = self.add_subscribe({i: items[i] for i in new_sub})
logger.info("添加完成")
if msg:
logger.info("\n".ljust(49, ' ').join(list(msg.values())))
return msg
# 添加订阅
def add_subscribe(self, items: Dict[int, Dict[str, Any]]) -> Dict:
"""添加订阅"""
fail_items = {}
for self._subid, item in items.items():
for subid, item in items.items():
if item.get("name_cn"):
meta = MetaInfo(item.get("name_cn"))
meta.en_name = item.get("name")
else:
meta = MetaInfo(item.get("name"))
if not meta.name:
fail_items[self._subid] = f"{self._subid} 未识别到有效数据"
logger.warn(f"{self._subid} 未识别到有效数据")
fail_items[subid] = f"{subid} 未识别到有效数据"
logger.warn(f"{subid} 未识别到有效数据")
continue
# 年份信息
sub_air_date = item.get("date")
meta.year = sub_air_date[:4] if sub_air_date else None
# 通过`tags`识别类型
tags = item.get("tags") or []
mtype = MediaType.MOVIE if "剧场版" in tags else MediaType.TV
mediainfo = None
for retry in range(2):
if retry:
meta.cn_name = meta.org_string
meta.en_name = meta.title
if (mediainfo := self.chain.recognize_media(
meta=meta,
mtype=mtype,
cache=False
)) or any(
getattr(meta, attr) == meta.org_string
for attr in ('cn_name', 'en_name')
):
break
meta.year = item.get("date")[:4] if item.get("date") else None
mediainfo = self.chain.recognize_media(meta=meta, cache=False)
meta.total_episode = item.get("eps", 0)
if not mediainfo:
fail_items[self._subid] = f"{item.get('name_cn')} 媒体信息识别失败"
fail_items[subid] = f"{item.get('name_cn')} 媒体信息识别失败"
logger.debug(f"识别失败详情 | subid:{subid} meta:{vars(meta)}")
continue
meta.total_episode = item.get("eps", 0)
mediainfo.bangumi_id = subid
# 根据发行日期判断是不是续作
if mediainfo.type == MediaType.TV \
and not self.is_date_in_range(sub_air_date, mediainfo.release_date)[0]:
# 识别剧集组标志
group_flag: bool = True
if "OVA" in item.get("tags"):
# 季0 处理
if tmdb_info := self.chain.tmdb_info(mediainfo.tmdb_id, mediainfo.type, 0):
for info in tmdb_info.get("episodes", []):
if self.is_date_in_range(sub_air_date, info.get("air_date"), 2)[0]:
mediainfo.season = 0
meta.begin_episode = info.get("episode_number")
else: # 信息不完整, 跳过条目
continue
self.update_media_info(item, mediainfo)
else:
# 过滤信息不完整和第0季
season_info = [info for info in mediainfo.season_info if info.get("season_number") and info.get("air_date") and info.get("episode_count")]
# 获取 bangumi 信息
meta = self.get_eps(meta, subid)
# 先通过season_info处理三季及以上的情况, tmdb存在第二季也不能保证不会被合并
if len(season_info) > 2:
# tmdb不合并季, 更新季信息
mediainfo.season = self.get_best_season_number(sub_air_date, mediainfo.season_info)
group_flag = False
elif len(season_info) == 2:
# 第二季特殊处理, 通过bangumi 'sort'字段判断集号连续性
if meta.begin_episode:
if meta.begin_episode == 1:
# 不合并季
mediainfo.season = self.get_best_season_number(sub_air_date, mediainfo.season_info)
group_flag = False
else:
group_flag = True
if self._match_groups and group_flag and mediainfo.episode_groups:
# tmdb季分割
season_data = self._season_split(mediainfo)
# 总季数传递
meta.total_season = len(season_data)
# 根据bgm 和 tmdb 信息判断
if len(season_data) > 1:
# 转换为方法入参格式
_season = [{"season_number": k, "air_date": v.get('air_date')} for k, v in season_data.items()]
# BGM条目在分割后的季号
_season_num = self.get_best_season_number(sub_air_date, _season)
# 季分割后的播出时间
air_date = season_data[_season_num].get('air_date')
# 季集的可能性
season_list = []
for info in mediainfo.season_info:
if info.get("season_number") == 0:
season_list.append((len(season_info)+1, len(mediainfo.seasons[1])+info.get("episode_count")))
season_list.append((len(season_info), len(mediainfo.seasons[1])))
# 预匹配剧集组
candidate_groups = (
group for group in mediainfo.episode_groups
if any(
group.get("group_count") == s[0] and
group.get("episode_count") == s[1]
for s in season_list
)
)
for group in candidate_groups:
if season_num := self.get_group_season(group.get("id"), air_date, mediainfo):
mediainfo.episode_group = group.get("id")
mediainfo.season = season_num
break
else:
mediainfo = self._match_group(air_date, meta, mediainfo)
# 非续作
elif mediainfo.type == MediaType.TV: mediainfo.season = 1
# 检查本地媒体
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo)
if exist_flag:
# 添加到排除
self.update_data(key="exclude", value=subid)
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
continue
elif not no_exists.get(mediainfo.tmdb_id, {}).get(mediainfo.season):
# 添加到排除
self.update_data(key="exclude", value=subid)
logger.info(f'{mediainfo.title_year} 媒体库中已存在 第 {mediainfo.season}')
continue
sid = self.subscribeoper.list_by_tmdbid(
mediainfo.tmdb_id, mediainfo.number_of_seasons
mediainfo.tmdb_id, mediainfo.season
)
if sid:
logger.info(f"{mediainfo.title_year} 正在订阅中")
if len(sid) == 1:
self.subscribeoper.update(
sid=sid[0].id, payload={"bangumiid": self._subid}
sid=sid[0].id, payload={"bangumiid": subid}
)
logger.info(f"{mediainfo.title_year} Bangumi条目id更新成功")
continue
sid, msg = self.subscribechain.add(
title=mediainfo.title,
year=mediainfo.year,
season=mediainfo.number_of_seasons,
bangumiid=self._subid,
exist_ok=True,
username="Bangumi订阅",
**self.prepare_kwargs(meta, mediainfo),
)
# 添加订阅
sid, msg = self.subscribechain.add(**self.prepare_add_args(meta, mediainfo))
if not sid:
fail_items[self._subid] = f"{item.get('name_cn') or item.get('name')} {msg}"
fail_items[subid] = f"{item.get('name_cn') or item.get('name')} {msg}"
return fail_items
def prepare_kwargs(self, meta: MetaBase, mediainfo: MediaInfo) -> Dict:
"""准备额外参数"""
kwargs = {
def _season_split(self, mediainfo: MediaInfo, season: int = 1) -> Dict[int, dict]:
"""
将tmdb多季合并的季信息进行拆分
"""
if tmdb_info := self.chain.tmdb_info(mediainfo.tmdb_id, mediainfo.type, season):
season = 1
air_date = tmdb_info.get("air_date")
episodes: list[dict] = tmdb_info.get("episodes", [])
season_data = {season: {"air_date": air_date, "count": 0}}
for ep in episodes:
if not air_date:
air_date = ep.get("air_date")
season_data[season] = {"air_date": air_date, "count": 0}
season_data[season]["count"] += 1
if ep.get("episode_type") == "finale":
air_date = None
# 季号递增
season += 1
return season_data
def _match_group(self, air_date: str, meta: MetaBase, mediainfo: MediaInfo) -> MediaInfo:
"""
根据剧集组类型匹配剧集组
:param air_date: 播出日期
:param meta: bangumi 元数据
:param mediainfo: 媒体信息
:return: MediaInfo
"""
if not mediainfo.episode_groups:
return mediainfo
# 处理元数据
begin_ep = meta.begin_episode or 1
total_season = meta.total_season or 2
# 按类型预分组
episode_groups_by_type: dict[int, list[dict]] = {}
for group in mediainfo.episode_groups:
group_type = group.get("type")
if group_type not in episode_groups_by_type:
episode_groups_by_type[group_type] = []
episode_groups_by_type[group_type].append(group)
# 按优先级遍历类型
for group_type in self._group_select_order:
# 获取当前类型的所有剧集组
groups = episode_groups_by_type.get(group_type, [])
for group in groups:
group_count = group.get("group_count", 0)
episode_count = group.get("episode_count", 0)
if (
group_count >= total_season
and episode_count >= begin_ep
):
logger.info(
f"{mediainfo.title_year} 正在匹配 剧集组: "
f"{group.get('name', '未知')}({group.get('id')}) "
f"{group_count}{episode_count}")
if season_num := self.get_group_season(
group.get("id"), air_date, mediainfo
):
mediainfo.episode_group = group.get("id")
mediainfo.season = season_num
return mediainfo
return mediainfo
def get_group_season(self, group_id: str, air_date: str, mediainfo: MediaInfo) -> int:
"""
根据播出日期赋值剧集组季号
:param group_id: 剧集组id
:param air_date: 播出日期
:param mediainfo: MediaInfo
:return: 季号
"""
if group_seasons := self.tmdbapi.get_tv_group_seasons(group_id):
for group_season in group_seasons:
if self.is_date_in_range(air_date, group_season.get("episodes")[0].get("air_date"))[0]:
logger.info(f"{mediainfo.title_year} 剧集组: {group_id}{group_season.get('order')}")
return group_season.get("order")
def prepare_add_args(self, meta: MetaBase, mediainfo: MediaInfo) -> Dict:
"""
订阅参数
"""
add_args = {
"title": mediainfo.title,
"year": mediainfo.year,
"mtype": mediainfo.type,
"tmdbid": mediainfo.tmdb_id,
"season": mediainfo.season or 1,
"bangumiid": mediainfo.bangumi_id,
"exist_ok": True,
"username": "Bangumi订阅",
"save_path": self._save_path,
"sites": (
self._sites
if self.are_types_equal(attribute_name='sites')
if self._is_v2
else json.dumps(self._sites)
),
}
# 仅v2支持剧集组
if self._is_v2:
add_args["episode_group"] = mediainfo.episode_group
total_episode = len(mediainfo.seasons.get(mediainfo.number_of_seasons) or [])
if self._match_groups and mediainfo.episode_group:
return add_args
total_episode = len(mediainfo.seasons.get(mediainfo.season or 1) or [])
if (
meta.begin_season
and mediainfo.number_of_seasons != meta.begin_season
and mediainfo.season != meta.begin_season
or total_episode != meta.total_episode
):
meta = self.get_eps(meta)
meta = self.get_eps(meta, mediainfo.bangumi_id)
total_ep: int = meta.end_episode if meta.end_episode else total_episode
lock_eps: int = total_ep - meta.begin_episode + 1
prev_eps: list = [i for i in range(1, meta.begin_episode)]
kwargs.update(
add_args.update(
{
"total_episode": total_ep,
"start_episode": meta.begin_episode,
@@ -335,7 +582,7 @@ class BangumiColl(_PluginBase):
), # 手动修改过总集数
"note": (
prev_eps
if self.are_types_equal("note")
if self._is_v2
else json.dumps(prev_eps)
),
}
@@ -344,22 +591,31 @@ class BangumiColl(_PluginBase):
f"{mediainfo.title_year} 更新总集数为: {total_ep},开始集数为: {meta.begin_episode}"
)
return kwargs
return add_args
def update_media_info(self, item: dict, mediainfo: MediaInfo):
"""更新媒体信息"""
for info in mediainfo.season_info:
if self.are_dates(item.get("date"), info.get("air_date")):
mediainfo.number_of_seasons = info.get("season_number")
mediainfo.number_of_episodes = info.get("episode_count")
def get_best_season_number(self, air_date: str, season_info: list[dict]) -> int:
"""更新媒体信息"""
best_info = None
min_days = float('inf')
for info in season_info:
result, days = self.is_date_in_range(air_date, info.get("air_date"))
if result:
best_info = info
break
elif 0 < days < min_days:
min_days = days
best_info = info
def get_eps(self, meta: MetaBase) -> MetaBase:
if best_info:
return best_info.get("season_number")
def get_eps(self, meta: MetaBase, sub_id: int) -> MetaBase:
"""获取Bangumi条目的集数信息"""
try:
res = self.get_bgm_res(addr="getEpisodes", id=self._subid)
res = self.get_bgm_res(addr="getEpisodes", id=sub_id)
data = res.json().get("data", [{}])[0]
prev = data.get("sort", 1) - data.get("ep", 1)
prev = data.get("sort", 0) - data.get("ep", 1)
total = res.json().get("total", None)
begin = prev + 1
end = prev + total if total else None
@@ -370,12 +626,11 @@ class BangumiColl(_PluginBase):
return meta
# 移除订阅
def delete_subscribe(self, del_items: Dict[int, int]):
def delete_subscribe(self, del_items: dict[int, int]):
"""删除订阅"""
for subscribe_id in del_items.keys():
try:
subscribe = self.subscribeoper.get(subscribe_id)
if subscribe:
if subscribe := self.subscribeoper.get(subscribe_id):
self.subscribeoper.delete(subscribe_id)
self.subscribehelper.sub_done_async(
{"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
@@ -383,7 +638,10 @@ class BangumiColl(_PluginBase):
self.post_message(
mtype=NotificationType.Subscribe,
title=f"{subscribe.name}({subscribe.year}) 第{subscribe.season}季 已取消订阅",
text=f"原因: 未在Bangumi收藏中找到该条目\n订阅用户: {subscribe.username}\n创建时间: {subscribe.date}",
text=(
f"原因: 已选Bangumi收藏类型中不存在\n"
f"订阅用户: {subscribe.username}\n"
f"创建时间: {subscribe.date}"),
image=subscribe.backdrop,
)
except Exception as e:
@@ -401,17 +659,44 @@ class BangumiColl(_PluginBase):
return RequestUtils(headers=headers).get_res(url=url[addr])
@staticmethod
def are_dates(date_str1: str, date_str2: str, threshold_days: int = 7) -> bool:
"""对比两个日期字符串是否接近"""
if date_str1 is None or date_str2 is None:
return False
def is_date_in_range(air_date: str, reference_date: str = None, threshold_days: int = 8) -> tuple[bool, int]:
"""
两个日期接近或在未来指定天数内, 并返回target_date - reference_date(或当前时间)的天数差
:param air_date: 目标日期
:param reference_date: 参考日期
:param threshold_days: 阈值天数
:return: bool, int
只传入 target_date 时,判断是否在未来 threshold_days 天内
传入 target_date 和 reference_date 时,判断两个日期是否接近
"""
try:
date1 = datetime.datetime.strptime(date_str1, '%Y-%m-%d')
date2 = datetime.datetime.strptime(date_str2, '%Y-%m-%d')
return abs((date1 - date2).days) <= threshold_days
except ValueError as e:
# 解析目标日期
date1 = datetime.datetime.strptime(air_date, '%Y-%m-%d').date()
# 单日期模式是否在未来threshold_days内
if reference_date is None:
today = datetime.datetime.now().date()
delta = (date1 - today).days
return delta <= threshold_days, delta
# 双日期模式:两个日期是否接近
date2 = datetime.datetime.strptime(reference_date, '%Y-%m-%d').date()
# 天数差
delta = (date1 - date2).days
return abs(delta) <= threshold_days, delta
except (ValueError, TypeError) as e:
logger.error(f"日期格式错误: {str(e)}")
return False
return False, 0
def update_data(self, key, value):
# 获取插件数据
data = self.get_data(key=key) or []
if value not in data:
data.append(value)
self.save_data(key=key, value=data)
@db_query
def get_subscribe_history(self, db: Session = None) -> set:
@@ -427,14 +712,3 @@ class BangumiColl(_PluginBase):
logger.error(f"获取订阅历史失败: {str(e)}")
return set()
@staticmethod
def are_types_equal(
attribute_name: str, expected_type: Type[Any] = JSON(), class_=Subscribe
) -> bool:
"""比较类中属性的类型与expected_type是否一致"""
column = class_.__table__.columns.get(attribute_name)
if column is None:
raise AttributeError(
f"Class: {class_.__name__} 没有属性: '{attribute_name}'"
)
return isinstance(column.type, type(expected_type))

View File

@@ -1,7 +1,7 @@
from bs4 import BeautifulSoup
def form(sites_options) -> list:
def form(sites_options: list[dict], is_v2: bool = True) -> list:
return [
{
'component': 'VForm',
@@ -43,7 +43,7 @@ def form(sites_options) -> list:
'component': 'VSwitch',
'props': {
'model': 'total_change',
'label': '跟随TMDB变动',
'label': '更新元数据',
},
}
],
@@ -71,8 +71,8 @@ def form(sites_options) -> list:
'props': {'cols': 8, 'md': 4},
'content': [
{
'component': 'VTextField',
# 'component': 'VCronField', # 暂不支持
# 'component': 'VTextField', # 组件替换为VCronField
'component': 'VCronField',
'props': {
'model': 'cron',
'label': '执行周期',
@@ -116,6 +116,74 @@ def form(sites_options) -> list:
},
],
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
},
'content': parse_html(
'<p>提示: <strong>剧集组优先级</strong>越靠前优先级越高。</p>'
),
},
],
},
],
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {'cols': 8, 'md': 4},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'match_groups',
'disabled': not is_v2,
'label': '剧集组填充(实验性)',
}
}
]
},
{
'component': 'VCol',
'props': {'cols': 8},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'group_select_order',
'label': '剧集组优先级',
'disabled': not is_v2,
'chips': True,
'multiple': True,
'clearable': True,
'items': [
{"title": "初始播出日期", "value": 1},
{"title": "绝对", "value": 2},
{"title": "DVD", "value": 3},
{"title": "数字", "value": 4},
{"title": "故事线", "value": 5},
{"title": "制片", "value": 6},
{"title": "电视", "value": 7},
],
},
}
]
},
]
},
{
'component': 'VRow',
'content': [
@@ -144,6 +212,7 @@ def form(sites_options) -> list:
'label': '选择站点',
'chips': True,
'multiple': True,
'clearable': True,
'items': sites_options,
},
}
@@ -215,7 +284,7 @@ def form(sites_options) -> list:
'variant': 'tonal',
},
'content': parse_html(
'<p>注意: 开启<strong>不跟随TMDB变动</strong>后,从<a href="https://bangumi.github.io/api/#/%E7%AB%A0%E8%8A%82/getEpisodes" target="_blank"><u>Bangumi API</u></a>获取总集数将不再跟随TMDB的集数变动。</p>'
'<p>注意: 开启<strong>不更新元数据</strong>后,从<a href="https://bangumi.github.io/api/#/%E7%AB%A0%E8%8A%82/getEpisodes" target="_blank"><u>Bangumi API</u></a>获取总集数将不会因<strong>订阅元数据更新</strong>改变。</p>'
),
},
],
@@ -232,6 +301,8 @@ def form(sites_options) -> list:
"collection_type": [3],
"save_path": "",
"sites": [],
"match_groups": False,
"group_select_order": [],
}

View File

@@ -34,7 +34,7 @@ class IYUUAutoSeed(_PluginBase):
# 插件图标
plugin_icon = "IYUU.png"
# 插件版本
plugin_version = "1.9.10"
plugin_version = "1.9.11"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -1064,11 +1064,11 @@ class IYUUAutoSeed(_PluginBase):
将mteam种子下载链接域名替换为使用API
"""
api_url = re.sub(r'//[^/]+\.m-team', '//api.m-team', site.get('url'))
ua = site.get("ua") or settings.USER_AGENT
res = RequestUtils(
headers={
'Content-Type': 'application/json',
'User-Agent': f'{site.get("ua")}',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': f'{ua}',
'Accept': 'application/json, text/plain, */*',
'x-api-key': apikey
}
@@ -1162,7 +1162,7 @@ class IYUUAutoSeed(_PluginBase):
logger.info(f"正在获取种子下载链接:{page_url} ...")
res = RequestUtils(
cookies=site.get("cookie"),
ua=site.get("ua"),
ua=site.get("ua") or settings.USER_AGENT,
proxies=settings.PROXY if site.get("proxy") else None
).get_res(url=page_url)
if res is not None and res.status_code in (200, 500):

View File

@@ -15,7 +15,7 @@ class MPServerStatus(_PluginBase):
# 插件图标
plugin_icon = "Duplicati_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.2"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -391,7 +391,7 @@ class MPServerStatus(_PluginBase):
'props': {
'class': 'text-h6'
},
'text': requests
'text': f"{requests:,}"
}
]
}
@@ -443,7 +443,7 @@ class MPServerStatus(_PluginBase):
'props': {
'class': 'text-h6'
},
'text': accepts
'text': f"{accepts:,}"
}
]
}

View File

@@ -15,11 +15,11 @@ class TrackerEditor(_PluginBase):
# 插件名称
plugin_name = "Tracker替换"
# 插件描述
plugin_desc = "批量替换种子tracker支持周期性巡检如为TR仅支持4.0以上版本)"
plugin_desc = "批量替换修改种子tracker"
# 插件图标
plugin_icon = "trackereditor_A.png"
# 插件版本
plugin_version = "1.5"
plugin_version = "1.8"
# 插件作者
plugin_author = "honue"
# 作者主页
@@ -36,8 +36,7 @@ class TrackerEditor(_PluginBase):
_password: str = None
_host: str = None
_port: int = None
_target_domain: str = None
_replace_domain: str = None
_tracker_config: str = None
_onlyonce: bool = False
_downloader: Union[Qbittorrent, Transmission] = None
@@ -54,8 +53,7 @@ class TrackerEditor(_PluginBase):
self._port = config.get("port")
self._username = config.get("username")
self._password = config.get("password")
self._target_domain = config.get("target_domain")
self._replace_domain = config.get("replace_domain")
self._tracker_config = config.get("tracker_config")
self._run_con_enable = config.get("run_con_enable")
self._run_con = config.get("run_con")
self._notify = config.get("notify")
@@ -68,7 +66,14 @@ class TrackerEditor(_PluginBase):
self.__update_config()
def task(self):
logger.info(f"{'*' * 30}TrackerEditor: 开始执行Tracker替换{'*' * 30}")
tracker_configs: List[str] = self._tracker_config.split("\n")
tracker_dict = {}
for tracker_config in tracker_configs:
if tracker_config.count('|') == 1:
tracker_dict[tracker_config.split('|')[0]] = tracker_config.split('|')[1]
else:
logger.error(f"配置行错误: {tracker_config}")
logger.info(f"【TrackerEditor】: 开始执行Tracker替换")
torrent_total_cnt: int = 0
torrent_update_cnt: int = 0
if self._downloader_type == "qbittorrent":
@@ -80,11 +85,12 @@ class TrackerEditor(_PluginBase):
return
for torrent in torrent_info_list:
for tracker in torrent.trackers:
if self._target_domain in tracker.url:
original_url = tracker.url
new_url = tracker.url.replace(self._target_domain, self._replace_domain)
logger.info(f"{original_url} 替换为\n {new_url}")
torrent.edit_tracker(orig_url=original_url, new_url=new_url)
for target_domain in tracker_dict.keys():
if target_domain in tracker.url:
original_url = tracker.url
new_url = tracker.url.replace(target_domain, tracker_dict[target_domain])
logger.info(f"{original_url[:30]}... 替换为 {new_url[:30]}...")
torrent.edit_tracker(orig_url=original_url, new_url=new_url)
torrent_update_cnt += 1
elif self._downloader_type == "transmission":
@@ -99,12 +105,16 @@ class TrackerEditor(_PluginBase):
for torrent in torrent_list:
new_tracker_list = []
for tracker in torrent.tracker_list:
if self._target_domain in tracker:
new_url = tracker.replace(self._target_domain, self._replace_domain)
new_tracker_list.append(new_url)
logger.info(f"{tracker} 替换为\n {new_url}")
torrent_update_cnt += 1
else:
tracker_replaced = False
for target_domain in tracker_dict.keys():
if target_domain in tracker:
new_url = tracker.replace(target_domain, tracker_dict[target_domain])
new_tracker_list.append(new_url)
logger.info(f"{tracker[:30]}... 替换为 {new_url[:30]}...")
torrent_update_cnt += 1
tracker_replaced = True
break
if not tracker_replaced:
new_tracker_list.append(tracker)
if int(tr_version[0]) >= 4:
# 版本大于等于4.x
@@ -119,7 +129,7 @@ class TrackerEditor(_PluginBase):
break
if torrent_update_cnt == 0:
logger.info(f"tracker修改条数为0")
logger.info(f"{'*' * 30}TrackerEditor: Tracker替换完成{'*' * 30}")
logger.info(f"TrackerEditor: Tracker替换完成")
if (self._run_con_enable and self._notify) or (self._onlyonce and self._notify):
title = '【Tracker替换】'
msg = f'''扫描下载器{self._downloader_type}\n总的种子数: {torrent_total_cnt}\n已修改种子数: {torrent_update_cnt}'''
@@ -133,8 +143,7 @@ class TrackerEditor(_PluginBase):
"password": self._password,
"host": self._host,
"port": self._port,
"target_domain": self._target_domain,
"replace_domain": self._replace_domain,
"tracker_config": self._tracker_config,
"run_cron_enable": self._run_con_enable,
"run_cron": self._run_con,
"notify": self._notify
@@ -166,7 +175,7 @@ class TrackerEditor(_PluginBase):
'component': 'VSwitch',
'props': {
'model': 'run_con_enable',
'label': '启用周期性巡检 (注: 请开启时务必填写cron表达式)',
'label': '定时执行',
}
}
]
@@ -324,45 +333,31 @@ class TrackerEditor(_PluginBase):
]
}
]
}, {
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'component': 'VTextarea',
'props': {
'model': 'target_domain',
'label': '待替换文本',
'placeholder': 'target.com'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'replace_domain',
'label': '替换的文本',
'placeholder': 'replace.net'
'model': 'tracker_config',
'label': 'tracker替换配置',
'rows': 6,
'placeholder': '每一行一个配置,中间以|分隔\n'
'待替换文本|替换的文本',
}
}
]
}
]
}, {
},
{
'component': 'VRow',
'content': [
{
@@ -398,8 +393,7 @@ class TrackerEditor(_PluginBase):
'props': {
'type': 'info',
'variant': 'tonal',
'text': '周期性巡检时指的是允许设置间隔一段进行巡检下载器中的种子Tracker' + '\n'
'当匹配到等待替换的tracker时进行替换其中cron表达式是5位例如:* * * * * 指的是每过一分钟轮训一次',
'text': '支持qbtr仅支持4.0以上版本' + '\n',
'style': 'white-space: pre-line;'
}
}
@@ -416,8 +410,7 @@ class TrackerEditor(_PluginBase):
"port": 8989,
"username": "username",
"password": "password",
"target_domain": "",
"replace_domain": "",
"tracker_config":"",
"run_con_enable": False,
"run_con": "",
"notify": True
@@ -434,11 +427,11 @@ class TrackerEditor(_PluginBase):
def get_service(self) -> List[Dict[str, Any]]:
if self._run_con_enable and self._run_con:
logger.info(f"{'*' * 30}TrackerEditor: 注册公共调度服务{'*' * 30}")
logger.info(f"TrackerEditor: 注册定时任务")
return [
{
"id": "TrackerChangeRun",
"name": "启用周期性Tracker替换",
"name": "定时Tracker替换",
"trigger": CronTrigger.from_crontab(self._run_con),
"func": self.task,
"kwargs": {}
@@ -451,4 +444,4 @@ class TrackerEditor(_PluginBase):
mtype=NotificationType.SiteMessage,
title=title,
text=message
)
)

View File

@@ -31,7 +31,7 @@ class ZvideoHelper(_PluginBase):
# 插件图标
plugin_icon = "zvideo.png"
# 插件版本
plugin_version = "1.4"
plugin_version = "1.6"
# 插件作者
plugin_author = "DzAvril"
# 作者主页
@@ -55,6 +55,7 @@ class ZvideoHelper(_PluginBase):
_cached_data: dict = {}
_db_path = ""
_cookie = ""
_douban_score_update_days = 0
# 定时器
_scheduler: Optional[BackgroundScheduler] = None
@@ -72,6 +73,7 @@ class ZvideoHelper(_PluginBase):
self._sync_douban_status = config.get("sync_douban_status")
self._clean_cache = config.get("clean_cache")
self._use_douban_score = config.get("use_douban_score")
self._douban_score_update_days = int(config.get("douban_score_update_days"))
self._douban_helper = DoubanHelper(user_cookie=self._cookie)
# 获取历史数据
@@ -134,6 +136,7 @@ class ZvideoHelper(_PluginBase):
"sync_douban_status": self._sync_douban_status,
"clean_cache": self._clean_cache,
"use_douban_score": self._use_douban_score,
"douban_score_update_days": self._douban_score_update_days,
}
)
@@ -292,8 +295,12 @@ class ZvideoHelper(_PluginBase):
)
for meta_info in meta_info_list:
douban_id = meta_info["relation"]["douban"]["douban_id"]
title = meta_info["title"]
try:
douban_id = meta_info["relation"]["douban"]["douban_id"]
title = meta_info["title"]
except Exception as e:
logger.error(f"meta_info: {meta_info},解析失败: {e}")
continue
if self._cached_data.get(title) != None:
logger.info(f"已处理过: {title},跳过...")
continue
@@ -374,8 +381,12 @@ class ZvideoHelper(_PluginBase):
)
for meta_info in meta_info_list:
douban_id = meta_info["relation"]["douban"]["douban_id"]
title = meta_info["title"]
try:
douban_id = meta_info["relation"]["douban"]["douban_id"]
title = meta_info["title"]
except Exception as e:
logger.error(f"meta_info: {meta_info},解析失败: {e}")
continue
if self._cached_data.get(title) == DoubanStatus.DONE.value:
logger.info(f"已处理过: {title},跳过...")
continue
@@ -437,11 +448,11 @@ class ZvideoHelper(_PluginBase):
conn.text_factory = str
cursor = conn.cursor()
cursor.execute("SELECT rowid, extend_type, meta_info FROM zvideo_collection")
cursor.execute("SELECT rowid, extend_type, meta_info, updated_at FROM zvideo_collection")
rows = cursor.fetchall()
message = ""
for row in rows:
rowid, extend_type, meta_info_json = row
rowid, extend_type, meta_info_json, updated_at = row
# 合集,不处理
if extend_type == 7:
continue
@@ -449,28 +460,106 @@ class ZvideoHelper(_PluginBase):
# 如果meta_info为空跳过
if meta_info_dict.get("douban_score") == None:
continue
if meta_info_dict["douban_score"] == 0:
title = meta_info_dict["title"]
title = meta_info_dict["title"]
current_time = datetime.now()
need_update = False
# 检查是否需要更新评分
try:
# 确保douban_score是数值类型
douban_score = float(meta_info_dict.get("douban_score", 0))
except (TypeError, ValueError):
douban_score = 0
if douban_score == 0:
need_update = True
logger.info(f"未找到豆瓣评分,需要更新:{title}")
elif updated_at and self._douban_score_update_days > 0:
try:
# 处理update_at的时间格式去掉时区信息
update_at_str = updated_at.split('+')[0]
# 根据格式选择不同的解析方式
if '.' in update_at_str:
# 处理微秒部分确保最多6位数字
parts = update_at_str.split('.')
if len(parts) > 1:
# 截取微秒部分最多6位
microseconds = parts[1][:6]
update_at_str = f"{parts[0]}.{microseconds}"
update_time = datetime.strptime(update_at_str, "%Y-%m-%d %H:%M:%S.%f")
else:
# 没有微秒部分的时间格式
update_time = datetime.strptime(update_at_str, "%Y-%m-%d %H:%M:%S")
time_diff = current_time - update_time
# 检查是否超过更新周期
if time_diff.days >= self._douban_score_update_days:
need_update = True
logger.info(f"豆瓣评分已过期,需要更新:{title},上次更新时间:{update_at_str}")
except Exception as e:
logger.error(f"解析update_at时间失败: {e}, 原始值: {updated_at}")
need_update = True
elif not updated_at and self._douban_score_update_days > 0:
need_update = True
logger.info(f"未找到更新时间,需要更新豆瓣评分:{title}")
if need_update:
# 记录原来的评分
old_score = meta_info_dict.get("douban_score", 0)
# 确保转换为浮点数进行比较
try:
old_score = float(old_score)
except (TypeError, ValueError):
old_score = 0
_, _, score = self.get_douban_info_by_name(title)
if score:
# 确保score也是浮点数
try:
score = float(score)
except (TypeError, ValueError):
score = 0
# 判断评分是否变化
score_changed = old_score > 0 and old_score != score
meta_info_dict["douban_score"] = score
logger.info(f"更新豆瓣评分:{title} {score}")
message += f"{title} 更新豆瓣评分:{score}\n"
# 更新meta_info
updated_meta_info_json = json.dumps(meta_info_dict, ensure_ascii=False)
# 生成带微秒和时区信息的时间字符串,确保与原格式一致
tz = pytz.timezone(settings.TZ)
current_time = datetime.now(tz)
# 格式化为"2024-01-31 23:25:28.609023+08:00"格式
current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f") + current_time.strftime("%z")[:3] + ":" + current_time.strftime("%z")[3:]
# 更新meta_info和updated_at
cursor.execute(
"UPDATE zvideo_collection SET meta_info = ?, updated_at = ? WHERE rowid = ?",
(updated_meta_info_json, current_time_str, rowid),
)
conn.commit()
# 生成包含评分变化的日志和通知信息
if score_changed:
change_direction = "上升" if score > old_score else "下降"
change_amount = abs(score - old_score)
change_msg = f"更新豆瓣评分:{title} {old_score}{score} ({change_direction}{change_amount:.1f})"
logger.info(change_msg)
message += f"{title} 评分{change_direction}{old_score}{score}\n"
elif old_score == 0 and score > 0:
# 首次获取评分
logger.info(f"首次获取豆瓣评分:{title} {score}")
message += f"{title} 获取豆瓣评分:{score}\n"
else:
# 评分未变化,只记录日志不发送通知
logger.info(f"豆瓣评分未变化:{title} {score}")
else:
logger.error(f"未找到豆瓣评分:{title}")
else:
logger.info(
f"已存在豆瓣评分:{meta_info_dict['title']} {meta_info_dict['douban_score']}"
f"无需更新豆瓣评分:{title} {meta_info_dict['douban_score']}"
)
continue
# 使用ensure_ascii=False来保持中文字符不变
updated_meta_info_json = json.dumps(meta_info_dict, ensure_ascii=False)
cursor.execute(
"UPDATE zvideo_collection SET meta_info = ? WHERE rowid = ?",
(updated_meta_info_json, rowid),
)
conn.commit()
if self._notify and len(message) > 0:
self.post_message(
mtype=NotificationType.SiteMessage,
@@ -491,7 +580,7 @@ class ZvideoHelper(_PluginBase):
cursor.execute(
"""
UPDATE zvideo_collection
SET meta_info = JSON_SET(meta_info, '$.score', CAST(JSON_EXTRACT(meta_info, '$.douban_score') AS JSON))
SET score = CAST(JSON_EXTRACT(meta_info, '$.douban_score') AS DECIMAL(3,1))
WHERE CAST(JSON_EXTRACT(meta_info, '$.douban_score') AS DECIMAL(3,1)) <> 0.0
"""
)
@@ -511,7 +600,8 @@ class ZvideoHelper(_PluginBase):
cursor.execute(
"""
UPDATE zvideo_collection
SET meta_info = JSON_SET(meta_info, '$.score', CAST(score AS JSON))
SET score = CAST(JSON_EXTRACT(meta_info, '$.score') AS DECIMAL(3,1))
WHERE JSON_EXTRACT(meta_info, '$.score') IS NOT NULL
"""
)
conn.commit()
@@ -623,6 +713,20 @@ class ZvideoHelper(_PluginBase):
}
],
},
{
"component": "VCol",
"props": {"cols": 12, "md": 4},
"content": [
{
"component": "VTextField",
"props": {
"model": "douban_score_update_days",
"label": "豆瓣评分更新周期(天)",
"placeholder": "0则不更新",
},
}
],
},
],
},
{
@@ -728,6 +832,27 @@ class ZvideoHelper(_PluginBase):
}
],
},
{
"component": "VRow",
"content": [
{
"component": "VCol",
"props": {
"cols": 12,
},
"content": [
{
"component": "VAlert",
"props": {
"type": "info",
"variant": "tonal",
"text": "豆瓣评分更新周期是指多少天后重新获取豆瓣评分防止评分变化。设为0则不更新已有评分",
},
}
],
}
],
},
],
}
], {
@@ -735,6 +860,7 @@ class ZvideoHelper(_PluginBase):
"notify": False,
"onlyonce": False,
"cron": "0 0 * * *",
"douban_score_update_days": 0,
}
def get_page(self) -> List[dict]: