Merge remote-tracking branch 'upstream/main'

This commit is contained in:
xiaohuozi
2024-12-16 23:41:52 +08:00
88 changed files with 26762 additions and 685 deletions

View File

@@ -506,3 +506,7 @@ def get_dashboard(self, key: str, **kwargs) -> Optional[Tuple[Dict[str, Any], Di
}
```
- 新增加的插件请配置在`package.json`中的末尾,这样可被识别为最新增加,可用于用户排序。
### 10. 如何开发V2版本的插件以及实现插件多版本兼容
- 请参阅 [V2版本插件开发指南](./docs/V2_Plugin_Development.md)

View File

@@ -0,0 +1,573 @@
# MoviePilot V2 插件开发指南(更新版)
本指南详细介绍了如何开发适用于 MoviePilot V2 版本的插件,并实现插件的多版本兼容性,同时包括了服务封装类的使用示例,帮助开发者快速升级插件至 V2 版本。
## 1. 多版本插件开发与兼容性
### 1.1 开发 V2 版本的插件
要开发适用于 MoviePilot V2 版本的插件,请按照以下步骤操作:
1. **目录结构调整**
- 将插件代码放置在 `plugins.v2` 文件夹中。
- 将插件的定义放置在 `package.v2.json` 中,以实现该插件仅在 MoviePilot V2 版本中可见。
2. **插件定义示例**
```json
{
"CustomSites": {
"name": "自定义站点",
"description": "增加自定义站点为签到和统计使用。",
"labels": "站点",
"version": "1.0",
"icon": "world.png",
"author": "lightolly",
"level": 2
}
}
```
### 1.2 实现插件多版本兼容
如果 V1 版本插件在 V2 版本中实际可用,或在插件中主动兼容了 V1 和 V2 版本,则可以在 `package.json` 中定义 `"v2": true` 属性,以便在 MoviePilot V2 版本插件市场中显示。
```json
{
"CustomSites": {
"name": "自定义站点",
"description": "增加自定义站点为签到和统计使用。",
"labels": "站点",
"version": "1.0",
"icon": "world.png",
"author": "lightolly",
"level": 2,
"v2": true
}
}
```
- **目录结构示例**
```
plugins/
├── customsites/
│ ├── __init__.py
│ └── ...
plugins.v2/
├── customsites/
│ ├── __init__.py
│ └── ...
package.json
package.v2.json
```
- **插件代码中实现版本兼容**
在插件代码中,可以根据 `version` 变量执行不同的逻辑,以适应不同的 MoviePilot 版本。
```python
from app.core.config import settings
class MyPlugin:
def init_plugin(self, config: dict = None):
if hasattr(settings, 'VERSION_FLAG'):
version = settings.VERSION_FLAG # V2
else:
version = "v1"
if version == "v2":
self.setup_v2()
else:
self.setup_v1()
def setup_v2(self):
# V2版本特有的初始化逻辑
pass
def setup_v1(self):
# V1版本特有的初始化逻辑
pass
```
## 2. 服务封装与使用示例
为了插件调用并共享实例,主程序针对几种服务进行了封装。以下是相关实现及如何在插件中使用这些封装的详细说明,帮助开发者快速将插件从 V1 升级到 V2。
### 2.1 服务封装类介绍
#### `ServiceInfo`
`ServiceInfo` 是一个数据类,用于封装服务的相关信息。
```python
from dataclasses import dataclass
from typing import Optional, Any
@dataclass
class ServiceInfo:
"""
封装服务相关信息的数据类
"""
# 名称
name: Optional[str] = None
# 实例
instance: Optional[Any] = None
# 模块
module: Optional[Any] = None
# 类型
type: Optional[str] = None
# 配置
config: Optional[Any] = None
```
#### `ServiceConfigHelper`
`ServiceConfigHelper` 是一个配置帮助类,用于获取不同类型的服务配置。
```python
from typing import List, Optional, Type
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas import DownloaderConf, MediaServerConf, NotificationConf, NotificationSwitchConf
from app.schemas.types import NotificationType, SystemConfigKey
class ServiceConfigHelper:
"""
配置帮助类,获取不同类型的服务配置
"""
@staticmethod
def get_configs(config_key: SystemConfigKey, conf_type: Type) -> List:
"""
通用获取配置的方法,根据 config_key 获取相应的配置并返回指定类型的配置列表
:param config_key: 系统配置的 key
:param conf_type: 用于实例化配置对象的类类型
:return: 配置对象列表
"""
config_data = SystemConfigOper().get(config_key)
if not config_data:
return []
# 直接使用 conf_type 来实例化配置对象
return [conf_type(**conf) for conf in config_data]
@staticmethod
def get_downloader_configs() -> List[DownloaderConf]:
"""
获取下载器的配置
"""
return ServiceConfigHelper.get_configs(SystemConfigKey.Downloaders, DownloaderConf)
@staticmethod
def get_mediaserver_configs() -> List[MediaServerConf]:
"""
获取媒体服务器的配置
"""
return ServiceConfigHelper.get_configs(SystemConfigKey.MediaServers, MediaServerConf)
@staticmethod
def get_notification_configs() -> List[NotificationConf]:
"""
获取消息通知渠道的配置
"""
return ServiceConfigHelper.get_configs(SystemConfigKey.Notifications, NotificationConf)
@staticmethod
def get_notification_switches() -> List[NotificationSwitchConf]:
"""
获取消息通知场景的开关
"""
return ServiceConfigHelper.get_configs(SystemConfigKey.NotificationSwitchs, NotificationSwitchConf)
@staticmethod
def get_notification_switch(mtype: NotificationType) -> Optional[str]:
"""
获取指定类型的消息通知场景的开关
"""
switchs = ServiceConfigHelper.get_notification_switches()
for switch in switchs:
if switch.type == mtype.value:
return switch.action
return None
```
#### `ServiceBaseHelper`
`ServiceBaseHelper` 是一个通用的服务帮助类,提供了获取配置和服务实例的通用逻辑。
```python
from typing import Dict, List, Optional, Type, TypeVar, Generic, Iterator
from app.core.module import ModuleManager
from app.schemas import ServiceInfo
from app.schemas.types import SystemConfigKey, ModuleType
TConf = TypeVar("TConf")
class ServiceBaseHelper(Generic[TConf]):
"""
通用服务帮助类,抽象获取配置和服务实例的通用逻辑
"""
def __init__(self, config_key: SystemConfigKey, conf_type: Type[TConf], module_type: ModuleType):
self.modulemanager = ModuleManager()
self.config_key = config_key
self.conf_type = conf_type
self.module_type = module_type
def get_configs(self, include_disabled: bool = False) -> Dict[str, TConf]:
"""
获取配置列表
        :param include_disabled: 是否包含禁用的配置,默认 False,仅返回启用的配置
:return: 配置字典
"""
configs: List[TConf] = ServiceConfigHelper.get_configs(self.config_key, self.conf_type)
return {
config.name: config
for config in configs
if (config.name and config.type and config.enabled) or include_disabled
} if configs else {}
def get_config(self, name: str) -> Optional[TConf]:
"""
获取指定名称配置
"""
if not name:
return None
configs = self.get_configs()
return configs.get(name)
def iterate_module_instances(self) -> Iterator[ServiceInfo]:
"""
迭代所有模块的实例及其对应的配置,返回 ServiceInfo 实例
"""
configs = self.get_configs()
modules = self.modulemanager.get_running_type_modules(self.module_type)
for module in modules:
if not module:
continue
module_instances = module.get_instances()
if not isinstance(module_instances, dict):
continue
for name, instance in module_instances.items():
if not instance:
continue
config = configs.get(name)
service_info = ServiceInfo(
name=name,
instance=instance,
module=module,
type=config.type if config else None,
config=config
)
yield service_info
def get_services(self, type_filter: Optional[str] = None, name_filters: Optional[List[str]] = None) \
-> Dict[str, ServiceInfo]:
"""
获取服务信息列表,并根据类型和名称列表进行过滤
:param type_filter: 需要过滤的服务类型
:param name_filters: 需要过滤的服务名称列表
:return: 过滤后的服务信息字典
"""
name_filters_set = set(name_filters) if name_filters else None
return {
service_info.name: service_info
for service_info in self.iterate_module_instances()
if service_info.config and (
type_filter is None or service_info.type == type_filter
) and (
name_filters_set is None or service_info.name in name_filters_set)
}
def get_service(self, name: str, type_filter: Optional[str] = None) -> Optional[ServiceInfo]:
"""
获取指定名称的服务信息,并根据类型过滤
:param name: 服务名称
:param type_filter: 需要过滤的服务类型
:return: 对应的服务信息,若不存在或类型不匹配则返回 None
"""
if not name:
return None
for service_info in self.iterate_module_instances():
if service_info.name == name:
if service_info.config and (type_filter is None or service_info.type == type_filter):
return service_info
return None
```
### 2.2 特定服务的帮助类
以下是针对不同服务类型的帮助类,这些类继承自 `ServiceBaseHelper`,并预设了特定的配置。同时,为了简化类型检查,新增了相应的方法来判断服务类型。
#### `DownloaderHelper`
用于管理下载器服务。
```python
from typing import Optional
from app.helper.service import ServiceBaseHelper
from app.schemas import DownloaderConf, ServiceInfo
from app.schemas.types import SystemConfigKey, ModuleType
class DownloaderHelper(ServiceBaseHelper[DownloaderConf]):
"""
下载器帮助类
"""
def __init__(self):
super().__init__(
config_key=SystemConfigKey.Downloaders,
conf_type=DownloaderConf,
module_type=ModuleType.Downloader
)
def is_downloader(
self,
service_type: Optional[str] = None,
service: Optional[ServiceInfo] = None,
name: Optional[str] = None,
) -> bool:
"""
通用的下载器类型判断方法
        :param service_type: 下载器的类型名称(如 'qbittorrent', 'transmission')
:param service: 要判断的服务信息
:param name: 服务的名称
:return: 如果服务类型或实例为指定类型,返回 True否则返回 False
"""
# 如果未提供 service 则通过 name 获取服务
service = service or self.get_service(name=name)
# 判断服务类型是否为指定类型
return bool(service and service.type == service_type)
```
#### `MediaServerHelper`
用于管理媒体服务器服务。
```python
from typing import Optional
from app.helper.service import ServiceBaseHelper
from app.schemas import MediaServerConf, ServiceInfo
from app.schemas.types import SystemConfigKey, ModuleType
class MediaServerHelper(ServiceBaseHelper[MediaServerConf]):
"""
媒体服务器帮助类
"""
def __init__(self):
super().__init__(
config_key=SystemConfigKey.MediaServers,
conf_type=MediaServerConf,
module_type=ModuleType.MediaServer
)
def is_media_server(
self,
service_type: Optional[str] = None,
service: Optional[ServiceInfo] = None,
name: Optional[str] = None,
) -> bool:
"""
通用的媒体服务器类型判断方法
        :param service_type: 媒体服务器的类型名称(如 'plex', 'emby', 'jellyfin')
:param service: 要判断的服务信息
:param name: 服务的名称
:return: 如果服务类型或实例为指定类型,返回 True否则返回 False
"""
# 如果未提供 service 则通过 name 获取服务
service = service or self.get_service(name=name)
# 判断服务类型是否为指定类型
return bool(service and service.type == service_type)
```
#### `NotificationHelper`
用于管理消息通知服务。
```python
from typing import Optional
from app.helper.service import ServiceBaseHelper
from app.schemas import NotificationConf, ServiceInfo
from app.schemas.types import SystemConfigKey, ModuleType
class NotificationHelper(ServiceBaseHelper[NotificationConf]):
"""
消息通知帮助类
"""
def __init__(self):
super().__init__(
config_key=SystemConfigKey.Notifications,
conf_type=NotificationConf,
module_type=ModuleType.Notification
)
def is_notification(
self,
service_type: Optional[str] = None,
service: Optional[ServiceInfo] = None,
name: Optional[str] = None,
) -> bool:
"""
通用的消息通知服务类型判断方法
:param service_type: 消息通知服务的类型名称(如 'wechat', 'voicechat', 'telegram', 等)
:param service: 要判断的服务信息
:param name: 服务的名称
:return: 如果服务类型或实例为指定类型,返回 True否则返回 False
"""
# 如果未提供 service 则通过 name 获取服务
service = service or self.get_service(name=name)
# 判断服务类型是否为指定类型
return bool(service and service.type == service_type)
```
### 2.3 在插件中使用服务帮助类
通过这些帮助类,插件可以方便地获取和管理各种服务。以下是 `DownloaderHelper` 的使用示例,包括类型检查服务和监听模块重载事件的两种方法。
#### 获取下载器选项
插件可以通过 `DownloaderHelper` 获取所有可用的下载器配置,并生成选项列表供用户选择。
```python
from app.helper.downloader import DownloaderHelper
class MyPlugin:
def init_plugin(self, config: dict = None):
self.downloaderhelper = DownloaderHelper()
self.downloader_options = [
{"title": config.name, "value": config.name}
for config in self.downloaderhelper.get_configs().values()
]
```
#### 获取特定下载器服务
根据用户选择的下载器名称,插件可以获取对应的服务实例,并执行相应的操作。以下展示了两种方法:
1. **使用事件监听进行模块重载,从而保持服务实例共享**
如果外部模块进行了重载,需要监听模块重载事件以重置下载器服务。
```python
from typing import Optional, Union
from app.helper.downloader import DownloaderHelper
from app.modules.qbittorrent import Qbittorrent
from app.modules.transmission import Transmission
from app.events import Event, EventType, eventmanager
class MyPlugin:
def init_plugin(self, config: dict = None):
self.downloaderhelper = DownloaderHelper()
self._downloader = None
self.__setup_downloader(config.get("downloader_name"))
def __setup_downloader(self, downloader_name: str):
self._downloader = self.downloaderhelper.get_service(name=downloader_name)
def __get_downloader(self) -> Optional[Union[Transmission, Qbittorrent]]:
"""
获取下载器实例
"""
if not self._downloader:
return None
return self._downloader.instance
@eventmanager.register(EventType.ModuleReload)
def module_reload(self, event: Event):
"""
模块重载事件
"""
if not event:
return
event_data = event.event_data or {}
module_id = event_data.get("module_id")
# 如果模块标识不存在,则说明所有模块均发生重载
if not module_id:
self.__setup_downloader()
def check_downloader_type(self) -> bool:
"""
检查下载器类型是否为 qbittorrent 或 transmission
"""
downloader = self.__get_downloader()
if self.downloaderhelper.is_downloader(service_type="qbittorrent", service=downloader):
# 处理 qbittorrent 类型
return True
elif self.downloaderhelper.is_downloader(service_type="transmission", service=downloader):
# 处理 transmission 类型
return True
return False
```
2. **使用 Property 实现服务实例共享**
通过 `Property` 方法,从而保持服务实例共享,而无需通过事件监听。
```python
from typing import Optional, Union
from app.helper.downloader import DownloaderHelper
from app.modules.qbittorrent import Qbittorrent
from app.modules.transmission import Transmission
class MyPlugin:
def init_plugin(self, config: dict = None):
self.downloaderhelper = DownloaderHelper()
self.downloader_name = config.get("downloader_name")
@property
def service_info(self) -> Optional[ServiceInfo]:
"""
服务信息
"""
service = self.downloaderhelper.get_service(name=self.downloader_name)
if not service:
return None
if service.instance.is_inactive():
return None
return service
@property
def downloader(self) -> Optional[Union[Qbittorrent, Transmission]]:
"""
下载器实例
"""
return self.service_info.instance if self.service_info else None
def check_downloader_type(self) -> bool:
"""
检查下载器类型是否为 qbittorrent 或 transmission
"""
if self.downloaderhelper.is_downloader(service_type="qbittorrent", service=self.service_info):
# 处理 qbittorrent 类型
return True
elif self.downloaderhelper.is_downloader(service_type="transmission", service=self.service_info):
# 处理 transmission 类型
return True
return False
```
### 2.4 服务封装的优势
- **统一管理**:通过 `ServiceBaseHelper`,不同类型的服务配置和实例管理变得统一和简洁。
- **灵活扩展**:新增服务类型时,只需创建相应的帮助类,无需修改现有逻辑。
- **便捷调用**:插件可以轻松获取所需的服务实例,简化了服务的调用过程。
### 2.5 从 V1 升级到 V2 的注意事项
- **使用帮助类**:确保插件中使用了新的服务帮助类,如 `DownloaderHelper`、`MediaServerHelper`、`NotificationHelper` 等,而不是直接操作服务实例。
- **更新依赖**:检查并更新 `requirements.txt` 中的依赖,确保与 V2 的服务封装兼容。
- **测试插件**:在 V2 环境中全面测试插件,确保所有服务调用正常工作。

BIN
icons/Dingding_A.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

BIN
icons/bangumi_b.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.0 KiB

View File

@@ -3,11 +3,13 @@
"name": "站点自动签到",
"description": "自动模拟登录、签到站点。",
"labels": "站点",
"version": "2.4",
"version": "2.4.2",
"icon": "signin.png",
"author": "thsrite",
"level": 2,
"history": {
"v2.4.2": "修复PT时间签到失败问题",
"v2.4.1": "修复海胆签到失败问题",
"v2.4": "适配m-team Api地址变化",
"v2.3.2": "修复YemaPT登录失败支持YemaPT自动签到",
"v2.3.1": "修复签到报错问题",
@@ -25,17 +27,22 @@
"version": "1.0",
"icon": "world.png",
"author": "lightolly",
"level": 2
"level": 2,
"v2": true
},
"SiteStatistic": {
"name": "站点数据统计",
"description": "自动统计和展示站点数据。",
"labels": "站点,仪表板",
"version": "3.9.1",
"version": "4.0.1",
"icon": "statistic.png",
"author": "lightolly",
"level": 2,
"history": {
"v4.0.1": "修复PTT的魔力值统计",
"v4.0": "修复插件数据页异常",
"v3.9.3": "修复PTT的用户等级统计",
"v3.9.2": "修复YemaPT的上传下载统计错误",
"v3.9.1": "修复mteam域名地址",
"v3.9": "修复YemaPT站点数据统计",
"v3.8": "适配m-team Api地址变化",
@@ -60,17 +67,21 @@
"version": "1.2",
"icon": "Chrome_A.png",
"author": "thsrite",
"level": 2
"level": 2,
"v2": true
},
"DoubanSync": {
"name": "豆瓣想看",
"description": "同步豆瓣想看数据,自动添加订阅。",
"labels": "订阅",
"version": "1.8",
"version": "1.9.1",
"icon": "douban.png",
"author": "jxxghp",
"level": 2,
"v2": true,
"history": {
"v1.9.1": "修复版本兼容问题",
"v1.9": "请求豆瓣RSS时增加请求头",
"v1.8": "不同步在看条目",
"v1.7": "增强API安全性",
"v1.6": "同步历史记录支持手动删除需要主程序升级至v1.8.4+版本",
@@ -111,6 +122,7 @@
"icon": "movie.jpg",
"author": "jxxghp",
"level": 2,
"v2": true,
"history": {
"v1.9.1": "优化媒体类型的判断处理",
"v1.9": "增强API安全性",
@@ -143,11 +155,12 @@
"name": "媒体文件同步删除",
"description": "同步删除历史记录、源文件和下载任务。",
"labels": "文件整理",
"version": "1.7",
"version": "1.7.1",
"icon": "mediasyncdel.png",
"author": "thsrite",
"level": 1,
"history": {
"v1.7.1": "修复删除剧集辅种失败报错问题",
"v1.7": "修复重新整理被一并删除问题",
"v1.6": "修复删除辅种",
"v1.5": "支持手动删除订阅历史记录(本次更新之后的历史)"
@@ -157,11 +170,13 @@
"name": "自定义Hosts",
"description": "修改系统hosts文件加速网络访问。",
"labels": "网络",
"version": "1.1",
"version": "1.2",
"icon": "hosts.png",
"author": "thsrite",
"level": 1,
"v2": true,
"history": {
"v1.2": "支持写入注释",
"v1.1": "关闭插件时自动恢复系统hosts"
}
},
@@ -169,10 +184,15 @@
"name": "播放限速",
"description": "外网播放媒体库视频时,自动对下载器进行限速。",
"labels": "网络",
"version": "1.1",
"version": "1.3",
"icon": "Librespeed_A.png",
"author": "Shurelol",
"level": 1
"level": 1,
"history": {
"v1.3": "修复bug增加预留带宽设置",
"v1.2.1": "修复多下载器时限速比例计算错误问题",
"v1.2": "增加不限速路径配置,以应对网盘直链播放的情况"
}
},
"CloudflareSpeedTest": {
"name": "Cloudflare IP优选",
@@ -182,6 +202,7 @@
"icon": "cloudflare.jpg",
"author": "thsrite",
"level": 1,
"v2": true,
"history": {
"v1.4": "修复立即运行一次",
"v1.3": "调整插件开启状态判断条件",
@@ -205,11 +226,12 @@
"name": "媒体库服务器通知",
"description": "发送Emby/Jellyfin/Plex服务器的播放、入库等通知消息。",
"labels": "消息通知,媒体库",
"version": "1.2",
"version": "1.3",
"icon": "mediaplay.png",
"author": "jxxghp",
"level": 1,
"history": {
"v1.3": "兼容处理Emby部分客户端暂停重复推送停止播放webhook的场景",
"v1.2": "播放通知增加超链接跳转需要v1.9.4+"
}
},
@@ -225,10 +247,15 @@
"WebHook": {
"name": "Webhook",
"description": "事件发生时向第三方地址发送请求。",
"version": "1.0",
"version": "1.1",
"icon": "webhook.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true,
"history": {
"v1.1": "兼容MoviePilot V2 版本",
"v1.0": "新增Webhook插件支持事件发生时向第三方地址发送请求"
}
},
"ChatGPT": {
"name": "ChatGPT",
@@ -265,7 +292,7 @@
"author": "thsrite",
"level": 1,
"history": {
"v1.3":"去除已废弃的环境变量引用",
"v1.3": "去除已废弃的环境变量引用",
"v1.2": "增强API安全性"
}
},
@@ -273,11 +300,14 @@
"name": "IYUU自动辅种",
"description": "基于IYUU官方Api实现自动辅种。",
"labels": "做种,IYUU",
"version": "1.9.3",
"version": "1.9.6",
"icon": "IYUU.png",
"author": "jxxghp",
"level": 2,
"history": {
"v1.9.6": "调整IYUU最新域名",
"v1.9.5": "Revert qBittorrent跳检之后自动开始",
"v1.9.4": "修复qBittorrent辅种后不会自动开始做种",
"v1.9.3": "修复Monika因缺少rsskey种子下载失败的问题",
"v1.9.2": "适配馒头使用API下载种子",
"v1.9.1": "支持自定义辅种的种子分类",
@@ -305,13 +335,16 @@
},
"VCBAnimeMonitor": {
"name": "整理VCB动漫压制组作品",
"description": "提高部分VCB-Studio作品的识别准确率,将VCB-Studio的作品统一转移到指定目录同时进行刮削整理",
"description": "一款辅助整理&提高识别VCB-Stuido动漫压制组作品的插件",
"labels": "文件整理,识别",
"version": "1.8",
"version": "1.8.2.1",
"icon": "vcbmonitor.png",
"author": "pixel@qingwa",
"level": 2,
"history": {
"v1.8.2.1": "修复日志输出&同步目录监控插件功能",
"v1.8.2": "提高识别率",
"v1.8.1": "重构插件,测试版",
"v1.8": "增加了元数据刮削开关,升级后需要手动打开,否则默认不刮削",
"v1.7.1": "修复偶尔安装失败问题"
}
@@ -320,11 +353,13 @@
"name": "自动转移做种",
"description": "定期转移下载器中的做种任务到另一个下载器。",
"labels": "做种",
"version": "1.4",
"version": "1.6",
"icon": "seed.png",
"author": "jxxghp",
"level": 2,
"history": {
"v1.6": "支持根据种子类别进行转移,并允许修改转移后的默认标签",
"v1.5": "修复在转移时只保留了第一个tracker导致红种问题。此修复确保保留所有的tracker以提高在不同网络条件下的可达性",
"v1.4": "支持自动删除源下载器在目的下载器中存在的种子"
}
},
@@ -346,20 +381,28 @@
"name": "下载器文件同步",
"description": "同步下载器的文件信息到数据库,删除文件时联动删除下载任务。",
"labels": "下载管理",
"version": "1.1",
"version": "1.1.1",
"icon": "Youtube-dl_A.png",
"author": "thsrite",
"level": 1
"level": 1,
"history": {
"v1.1.1": "修复时区问题导致的上次同步后8h内的种子不同步的问题"
}
},
"BrushFlow": {
"name": "站点刷流",
"description": "自动托管刷流,将会提高对应站点的访问频率。",
"labels": "刷流,仪表板",
"version": "3.3",
"version": "3.8",
"icon": "brush.jpg",
"author": "jxxghp,InfinityPacer",
"level": 2,
"history": {
"v3.8": "添加自动归档记录天数配置项,支持定时归档已删除数据",
"v3.7": "下载数量调整为仅获取刷流标签种子并修复了一些细节问题",
"v3.6": "优化检查服务中的时间管控",
"v3.5": "移除「删种排除MoviePilot任务」配置项请使用「删除排除标签」替代完善刷流任务触发插件事件相关逻辑联动H&R助手",
"v3.4": "移除「记录更多日志」配置项并调整为DEBUG日志支持「删除排除标签」配置项增加刷流任务时支持触发插件事件联动H&R助手",
"v3.3": "支持QB删除种子时强制汇报Tracker站点独立配置增加「站点全局H&R」配置项",
"v3.2": "支持推送QB种子时启用「先下载首尾文件块」选项",
"v3.1": "支持仪表板显示站点刷流数据需要主程序升级v1.8.7+版本",
@@ -378,7 +421,8 @@
"version": "1.1",
"icon": "downloadmsg.png",
"author": "thsrite",
"level": 2
"level": 2,
"v2": true
},
"AutoClean": {
"name": "定时清理媒体库",
@@ -397,6 +441,7 @@
"icon": "invites.png",
"author": "thsrite",
"level": 2,
"v2": true,
"history": {
"v1.4": "自定义保留消息天数"
}
@@ -443,16 +488,8 @@
"version": "1.1",
"icon": "Bark_A.png",
"author": "jxxghp",
"level": 1
},
"IyuuMsg": {
"name": "IYUU消息推送",
"description": "支持使用IYUU发送消息通知。",
"labels": "消息通知,IYUU",
"version": "1.2",
"icon": "Iyuu_A.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true
},
"PushDeerMsg": {
"name": "PushDeer消息推送",
@@ -461,7 +498,8 @@
"version": "1.1",
"icon": "pushdeer.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true
},
"ConfigCenter": {
"name": "配置中心",
@@ -483,16 +521,26 @@
"version": "1.0",
"icon": "Wecom_A.png",
"author": "叮叮当",
"level": 1
"level": 1,
"v2": true
},
"EpisodeGroupMeta": {
"name": "TMDB剧集组刮削",
"description": "从TMDB剧集组刮削季集的实际顺序。",
"labels": "刮削",
"version": "1.1",
"version": "2.6",
"icon": "Element_A.png",
"author": "叮叮当",
"level": 1
"level": 1,
"v2": true,
"history": {
"v2.6": "修复无法获取媒体库中季0的问题",
"v2.5": "修复当媒体服务器中剧集的季不完整时会中断的问题",
"v2.3": "修复v2版本无法读取媒体库的问题",
"v2.2": "修复v2版本无法读取数据的问题",
"v2.1": "增加发送通知提醒选择剧集组",
"v2.0": "增加手动选择剧集组的功能"
}
},
"CustomIndexer": {
"name": "自定义索引站点",
@@ -501,7 +549,8 @@
"version": "1.0",
"icon": "spider.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true
},
"FFmpegThumb": {
"name": "FFmpeg缩略图",
@@ -519,7 +568,8 @@
"version": "1.0",
"icon": "Pushplus_A.png",
"author": "cheng",
"level": 1
"level": 1,
"v2": true
},
"DownloadSiteTag": {
"name": "下载任务分类与标签",
@@ -541,6 +591,7 @@
"icon": "Ombi_A.png",
"author": "DzAvril",
"level": 1,
"v2": true,
"history": {
"v2.2": "修复直接删除文件夹导致的插件崩溃的bug",
"v2.1": "联动删除历史记录",
@@ -559,6 +610,7 @@
"icon": "Linkace_C.png",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.6": "增强API安全性"
}
@@ -570,7 +622,8 @@
"version": "1.2",
"icon": "Bookstack_A.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true
},
"RemoteIdentifiers": {
"name": "共享识别词",
@@ -579,7 +632,8 @@
"version": "2.2",
"icon": "words.png",
"author": "honue",
"level": 1
"level": 1,
"v2": true
},
"NeoDBSync": {
"name": "NeoDB 想看",
@@ -589,6 +643,7 @@
"icon": "NeoDB.jpeg",
"author": "hcplantern",
"level": 1,
"v2": true,
"history": {
"v1.1": "直接添加订阅,不提前进行搜索下载"
}
@@ -643,7 +698,8 @@
"version": "1.1",
"icon": "ipAddress.png",
"author": "DzAvril",
"level": 1
"level": 1,
"v2": true
},
"TrackerEditor": {
"name": "Tracker替换",
@@ -652,7 +708,8 @@
"version": "1.5",
"icon": "trackereditor_A.png",
"author": "honue",
"level": 1
"level": 1,
"v2": true
},
"ContractCheck": {
"name": "契约检查",
@@ -666,7 +723,8 @@
"v1.4": "支持仪表板组件显示",
"v1.3": "修复观众做种数据异常问题",
"v1.2": "修复契约检查无数据返回的问题"
}
},
"v2": true
},
"FeiShuMsg": {
"name": "飞书机器人消息通知",
@@ -675,7 +733,8 @@
"version": "1.0",
"icon": "FeiShu_A.png",
"author": "InfinityPacer",
"level": 2
"level": 2,
"v2": true
},
"IyuuAuth": {
"name": "IYUU站点绑定",
@@ -685,6 +744,7 @@
"icon": "Iyuu_A.png",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.1": "修复IYUU站点绑定失败问题"
}
@@ -696,17 +756,20 @@
"version": "1.0",
"icon": "Ntfy_A.png",
"author": "lethargicScribe",
"level": 1
"level": 1,
"v2": true
},
"TmdbWallpaper": {
"name": "登录壁纸本地化",
"description": "将MoviePilot的登录壁纸下载到本地。",
"labels": "工具",
"version": "1.1",
"version": "1.2",
"icon": "Macos_Sierra.png",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.2": "一次性下载多张壁纸",
"v1.1": "修复下载Bing每日壁纸时文件名错乱的问题"
}
},
@@ -714,10 +777,14 @@
"name": "MoviePilot服务器监控",
"description": "在仪表板中实时显示MoviePilot公共服务器状态。",
"labels": "仪表板",
"version": "1.0",
"version": "1.1",
"icon": "Duplicati_A.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true,
"history": {
"v1.1": "增加详情界面显示"
}
},
"CleanInvalidSeed": {
"name": "清理QB无效做种",
@@ -751,6 +818,7 @@
"icon": "TrendingShow.jpg",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.3": "调整组件大小",
"v1.2": "不同屏幕大小,支持分开设置"
@@ -763,17 +831,20 @@
"version": "1.1",
"icon": "Calibre_B.png",
"author": "jxxghp",
"level": 1
"level": 1,
"v2": true
},
"ZvideoHelper": {
"name": "极影视助手",
"description": "极影视功能扩展",
"labels": "媒体库",
"version": "1.3",
"version": "1.4",
"icon": "zvideo.png",
"author": "DzAvril",
"level": 1,
"v2": true,
"history": {
"v1.4": "修复请求失败后返回值数量不正确的问题",
"v1.3": "降低对豆瓣接口的请求频率",
"v1.2": "修复无法获取豆瓣评分的问题",
"v1.1": "支持将极影视评分修改为豆瓣评分",
@@ -788,5 +859,77 @@
"icon": "Mosquitto_A.png",
"author": "blacklips",
"level": 1
},
"DingdingMsg": {
"name": "钉钉机器人",
"description": "支持使用钉钉机器人发送消息通知。",
"labels": "消息通知,钉钉机器人",
"version": "1.12",
"icon": "Dingding_A.png",
"author": "nnlegenda",
"level": 1,
"v2": true
},
"DynamicWeChat": {
"name": "动态企微可信IP",
"description": "修改企微应用可信IP支持Server酱等第三方通知。验证码以结尾发送到企业微信应用",
"labels": "消息通知",
"version": "1.6.0",
"icon": "Wecom_A.png",
"author": "RamenRa",
"level": 2,
"v2": true,
"history": {
"v1.6.0": "忽略因网络波动导致获取ip错误。自定义的类合并为helper.py。后续核心功能没问题将不再更新",
"v1.5.2": "可以从指定url获取ip,修复不使用cc时cookie失效过快v1可配置第三方为备用通知server酱可以将文本发送到server3,二维码给服务号",
"v1.5.1": "修复v2微信通知可以指定微信通知ID",
"v1.5.0": "支持企微应用通知和第Serve酱等第三方推送。按要求修改插件名称",
"v1.4.1": "完善面板说明",
"v1.4.0": "修复强制更改IP时配置面板延时过长的问题。庆祝v2进入正式版显示了一个没用的参数"
}
},
"SyncCookieCloud": {
"name": "同步CookieCloud",
"description": "同步MoviePilot站点Cookie到本地CookieCloud。",
"labels": "站点",
"version": "1.4",
"icon": "Cookiecloud_A.png",
"author": "thsrite",
"level": 1,
"history": {
"v1.4": "调整逻辑,修复问题",
"v1.3": "感谢MidnightShake共享代码同步时保留MoviePilot不匹配站点的cookie",
"v1.2": "同步到本地CookieCloud",
"v1.1": "修复CookieCloud覆盖到浏览器",
"v1.0": "同步MoviePilot站点Cookie到CookieCloud"
}
},
"BangumiColl": {
"name": "Bangumi收藏订阅",
"description": "Bangumi用户收藏添加到订阅",
"labels": "订阅",
"version": "1.5.2",
"icon": "bangumi_b.png",
"author": "Attente",
"level": 1,
"v2": true,
"history": {
"v1.5.2": "修复定时任务未正确注册的问题",
"v1.5.1": "修复季度信息未传递的问题. 新增站点列表同步删除",
"v1.5": "修复总集数会同步TMDB变动的问题,增加开关选项"
}
},
"IyuuMsg": {
"name": "IYUU消息推送",
"description": "支持使用IYUU发送消息通知。",
"labels": "消息通知,IYUU",
"version": "1.3",
"icon": "Iyuu_A.png",
"author": "jxxghp",
"level": 1,
"v2": true,
"history": {
"v1.3": "消息限流发送以缓解IYUU服务器压力"
}
}
}

298
package.v2.json Normal file
View File

@@ -0,0 +1,298 @@
{
"SiteStatistic": {
"name": "站点数据统计",
"description": "站点统计数据图表。",
"labels": "站点,仪表板",
"version": "1.4.1",
"icon": "statistic.png",
"author": "lightolly,jxxghp",
"level": 2,
"history": {
"v1.4.1": "支持数据刷新时发送消息通知",
"v1.3": "远程刷新命令移植到主程序",
"v1.2": "继续修复增量数据统计问题",
"v1.1": "修复增量数据统计问题",
"v1.0": "MoviePilot V2 版本站点数据统计插件"
}
},
"BrushFlow": {
"name": "站点刷流",
"description": "自动托管刷流,将会提高对应站点的访问频率。",
"labels": "刷流,仪表板",
"version": "4.0.1",
"icon": "brush.jpg",
"author": "jxxghp,InfinityPacer",
"level": 2,
"history": {
"v4.0.1": "NexusPHP 站点支持自动跳过下载提示页调整为站点独立配置项",
"v4.0": "NexusPHP 站点支持自动跳过下载提示页",
"v3.9": "MoviePilot V2 版本站点刷流插件"
}
},
"AutoSignIn": {
"name": "站点自动签到",
"description": "自动模拟登录、签到站点。",
"labels": "站点",
"version": "2.5",
"icon": "signin.png",
"author": "thsrite",
"level": 2,
"history": {
"v2.5": "MoviePilot V2 版本站点自动签到插件"
}
},
"DownloadSiteTag": {
"name": "下载任务分类与标签",
"description": "自动给下载任务分类与打站点标签、剧集名称标签",
"labels": "下载管理",
"version": "2.2",
"icon": "Youtube-dl_B.png",
"author": "叮叮当",
"level": 1,
"history": {
"v2.2": "MoviePilot V2 版本下载任务分类与标签插件"
}
},
"MediaServerRefresh": {
"name": "媒体库服务器刷新",
"description": "入库后自动刷新Emby/Jellyfin/Plex服务器海报墙。",
"labels": "媒体库",
"version": "1.3.1",
"icon": "refresh2.png",
"author": "jxxghp",
"level": 1,
"history": {
"v1.3": "MoviePilot V2 版本媒体库服务器刷新插件",
"v1.3.1": "修复兼容性问题"
}
},
"MediaServerMsg": {
"name": "媒体库服务器通知",
"description": "发送Emby/Jellyfin/Plex服务器的播放、入库等通知消息。",
"labels": "消息通知,媒体库",
"version": "1.5",
"icon": "mediaplay.png",
"author": "jxxghp",
"level": 1,
"history": {
"v1.5": "支持独立控制媒体服务器通知",
"v1.4": "MoviePilot V2 版本媒体库服务器通知插件"
}
},
"ChatGPT": {
"name": "ChatGPT",
"description": "消息交互支持与ChatGPT对话。",
"labels": "消息通知,识别",
"version": "2.0.1",
"icon": "Chatgpt_A.png",
"author": "jxxghp",
"level": 1,
"history": {
"v2.0.1": "修复辅助识别",
"v2.0": "适配MoviePilot V2 版本,采用链式事件机制"
}
},
"TorrentTransfer": {
"name": "自动转移做种",
"description": "定期转移下载器中的做种任务到另一个下载器。",
"labels": "做种",
"version": "1.7.1",
"icon": "seed.png",
"author": "jxxghp",
"level": 2,
"history": {
"v1.7": "MoviePilot V2 版本自动转移做种插件",
"v1.7.1": "修复兼容性问题"
}
},
"RssSubscribe": {
"name": "自定义订阅",
"description": "定时刷新RSS报文识别内容后添加订阅或直接下载。",
"labels": "订阅",
"version": "2.0",
"icon": "rss.png",
"author": "jxxghp",
"level": 2,
"history": {
"v2.0": "兼容MoviePilot V2 版本"
}
},
"FFmpegThumb": {
"name": "FFmpeg缩略图",
"description": "TheMovieDb没有背景图片时使用FFmpeg截取视频文件缩略图",
"labels": "刮削",
"version": "2.0",
"icon": "ffmpeg.png",
"author": "jxxghp",
"level": 1,
"history": {
"v2.0": "兼容MoviePilot V2 版本"
}
},
"LibraryScraper": {
"name": "媒体库刮削",
"description": "定时对媒体库进行刮削,补齐缺失元数据和图片。",
"labels": "刮削",
"version": "2.0",
"icon": "scraper.png",
"author": "jxxghp",
"level": 1,
"history": {
"v2.0": "兼容MoviePilot V2 版本",
"v1.5": "修复未获取fanart图片的问题",
"v1.4.1": "修复nfo文件读取失败时任务中断问题"
}
},
"PersonMeta": {
"name": "演职人员刮削",
"description": "刮削演职人员图片以及中文名称。",
"labels": "媒体库,刮削",
"version": "2.0.1",
"icon": "actor.png",
"author": "jxxghp",
"level": 1,
"history": {
"v2.0": "兼容MoviePilot V2 版本",
"v1.4": "人物图片调整为优先从TMDB获取避免douban图片CDN加载过慢的问题",
"v1.3": "修复v1.8.5版本后刮削报错问题"
}
},
"SpeedLimiter": {
"name": "播放限速",
"description": "外网播放媒体库视频时,自动对下载器进行限速。",
"labels": "网络",
"version": "2.1",
"icon": "Librespeed_A.png",
"author": "Shurelol",
"level": 1,
"history": {
"v2.1": "修复表单参数",
"v2.0": "兼容MoviePilot V2 版本",
"v1.2": "增加不限速路径配置,以应对网盘直链播放的情况"
}
},
"AutoClean": {
"name": "定时清理媒体库",
"description": "定时清理用户下载的种子、源文件、媒体库文件。",
"labels": "媒体库",
"version": "2.0",
"icon": "clean.png",
"author": "thsrite",
"level": 2,
"history": {
"v2.0": "兼容MoviePilot V2 版本"
}
},
"TorrentRemover": {
"name": "自动删种",
"description": "自动删除下载器中的下载任务。",
"labels": "做种",
"version": "2.1.1",
"icon": "delete.jpg",
"author": "jxxghp",
"level": 2,
"history": {
"v2.1.1": "修复兼容MoviePilot V2 版本",
"v2.0": "兼容MoviePilot V2 版本"
}
},
"IYUUAutoSeed": {
"name": "IYUU自动辅种",
"description": "基于IYUU官方Api实现自动辅种。",
"labels": "做种,IYUU",
"version": "2.2",
"icon": "IYUU.png",
"author": "jxxghp",
"level": 2,
"history": {
"v2.2": "修复种子校验服务未生效",
"v2.1": "调整IYUU最新域名",
"v2.0": "兼容MoviePilot V2 版本"
}
},
"QbCommand": {
"name": "QB远程操作",
"description": "通过定时任务或交互命令远程操作QB暂停/开始/限速等。",
"labels": "下载管理,Qbittorrent",
"version": "2.0",
"icon": "Qbittorrent_A.png",
"author": "DzAvril",
"level": 1,
"history": {
"v2.0": "适配MoviePilot V2 版本"
}
},
"HistoryToV2": {
"name": "历史记录迁移",
"description": "将MoviePilot V1版本的整理历史记录迁移至V2版本。",
"labels": "整理,历史记录",
"version": "1.1",
"icon": "Moviepilot_A.png",
"author": "jxxghp",
"level": 1,
"history": {
"v1.1": "修复启动提示信息"
}
},
"SyncCookieCloud": {
"name": "同步CookieCloud",
"description": "同步MoviePilot站点Cookie到本地CookieCloud。",
"labels": "站点",
"version": "2.1",
"icon": "Cookiecloud_A.png",
"author": "thsrite",
"level": 1,
"history": {
"v2.1": "兼容MoviePilot V2"
}
},
"ChineseSubFinder": {
"name": "ChineseSubFinder",
"description": "整理入库时通知ChineseSubFinder下载字幕。",
"labels": "字幕",
"version": "2.0",
"icon": "chinesesubfinder.png",
"author": "jxxghp",
"level": 1,
"history": {
"v2.0": "兼容MoviePilot V2"
}
},
"CleanInvalidSeed": {
"name": "清理QB无效做种",
"description": "清理已经被站点删除的种子及对应源文件仅支持QB",
"labels": "Qbittorrent",
"version": "2.0",
"icon": "clean_a.png",
"author": "DzAvril",
"level": 1,
"history": {
"v2.0": "适配 MoviePilot V2"
}
},
"PlayletCategory": {
"name": "短剧自动分类",
"description": "网络短剧自动整理到独立的分类目录。",
"labels": "文件整理",
"version": "2.1",
"icon": "Amule_A.png",
"author": "jxxghp,longqiuyu",
"level": 1,
"history": {
"v2.1": "兼容MoviePilot V2",
"v2.0": "适配新的目录结构变化,短剧分类名称调整为配置目录路径,升级后需要重新调整设置后才能使用。"
}
},
"MoviePilotUpdateNotify": {
"name": "MoviePilot更新推送",
"description": "MoviePilot推送release更新通知、自动重启。",
"labels": "消息通知,自动更新",
"version": "2.0",
"icon": "Moviepilot_A.png",
"author": "thsrite",
"level": 1,
"history": {
"v2.0": "兼容MoviePilot V2"
}
}
}

View File

@@ -0,0 +1,605 @@
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, List, Dict, Tuple, Optional
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app import schemas
from app.chain.storage import StorageChain
from app.core.config import settings
from app.core.event import eventmanager
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import NotificationType, DownloadHistory
from app.schemas.types import EventType
class AutoClean(_PluginBase):
    """
    Scheduled media-library cleanup plugin: periodically removes downloaded
    torrents, source files and media-library files per configured retention.
    """
    # Plugin display name
    plugin_name = "定时清理媒体库"
    # Plugin description shown in the plugin market
    plugin_desc = "定时清理用户下载的种子、源文件、媒体库文件。"
    # Plugin icon file name
    plugin_icon = "clean.png"
    # Plugin version
    plugin_version = "2.0"
    # Plugin author
    plugin_author = "thsrite"
    # Author homepage
    author_url = "https://github.com/thsrite"
    # Prefix for this plugin's configuration item IDs
    plugin_config_prefix = "autoclean_"
    # Load order among plugins
    plugin_order = 23
    # Minimum user level allowed to use this plugin
    auth_level = 2

    # ---- private state (populated from config in init_plugin) ----
    # Whether the plugin is enabled
    _enabled = False
    # Cron expression controlling the periodic run interval
    _cron = None
    # NOTE(review): not read anywhere in the visible code — confirm usage
    _type = None
    # One-shot "run immediately" switch; reset after scheduling
    _onlyonce = False
    # Whether to send a notification after cleaning
    _notify = False
    # Kind of cleanup to perform (passed through to the cleanup routine)
    _cleantype = None
    # Global retention in days; entries older than this are cleaned
    _cleandate = None
    # Optional per-user rules (format like "username:days#cleantype")
    _cleanuser = None
    # Download-history DB helper, created in init_plugin when enabled
    _downloadhis = None
    # Transfer-history DB helper, created in init_plugin when enabled
    _transferhis = None
    # Background scheduler used for the one-shot run
    _scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
# 停止现有任务
self.stop_service()
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._onlyonce = config.get("onlyonce")
self._notify = config.get("notify")
self._cleantype = config.get("cleantype")
self._cleandate = config.get("cleandate")
self._cleanuser = config.get("cleanuser")
# 加载模块
if self._enabled:
self._downloadhis = DownloadHistoryOper()
self._transferhis = TransferHistoryOper()
if self._onlyonce:
# 定时服务
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
logger.info(f"定时清理媒体库服务启动,立即运行一次")
self._scheduler.add_job(func=self.__clean, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="定时清理媒体库")
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"cron": self._cron,
"cleantype": self._cleantype,
"cleandate": self._cleandate,
"enabled": self._enabled,
"cleanuser": self._cleanuser,
"notify": self._notify,
})
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def __get_clean_date(self, deltatime: str = None):
# 清理日期
current_time = datetime.now()
if deltatime:
days_ago = current_time - timedelta(days=int(deltatime))
else:
days_ago = current_time - timedelta(days=int(self._cleandate))
return days_ago.strftime("%Y-%m-%d")
def __clean(self):
"""
定时清理媒体库
"""
if not self._cleandate:
logger.error("未配置媒体库全局清理时间,停止运行")
return
# 查询用户清理日期之前的下载历史,不填默认清理全部用户的下载
if not self._cleanuser:
clean_date = self.__get_clean_date()
downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date)
logger.info(f'获取到日期 {clean_date} 之前的下载历史 {len(downloadhis_list)}')
self.__clean_history(date=clean_date, clean_type=self._cleantype, downloadhis_list=downloadhis_list)
# 根据填写的信息判断怎么清理
else:
# username:days#cleantype
clean_type = self._cleantype
clean_date = self._cleandate
# 1.3.7版本及之前处理多位用户
if str(self._cleanuser).count(','):
for username in str(self._cleanuser).split(","):
downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date,
username=username)
logger.info(
f'获取到用户 {username} 日期 {clean_date} 之前的下载历史 {len(downloadhis_list)}')
self.__clean_history(date=clean_date, clean_type=self._cleantype, downloadhis_list=downloadhis_list)
return
for userinfo in str(self._cleanuser).split("\n"):
if userinfo.count('#'):
clean_type = userinfo.split('#')[1]
username_and_days = userinfo.split('#')[0]
else:
username_and_days = userinfo
if username_and_days.count(':'):
clean_date = username_and_days.split(':')[1]
username = username_and_days.split(':')[0]
else:
username = userinfo
# 转strftime
clean_date = self.__get_clean_date(clean_date)
logger.info(f'{username} 使用 {clean_type} 清理方式,清理 {clean_date} 之前的下载历史')
downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date,
username=username)
logger.info(
f'获取到用户 {username} 日期 {clean_date} 之前的下载历史 {len(downloadhis_list)}')
self.__clean_history(date=clean_date, clean_type=clean_type,
downloadhis_list=downloadhis_list)
def __clean_history(self, date: str, clean_type: str, downloadhis_list: List[DownloadHistory]):
"""
清理下载历史、转移记录
"""
if not downloadhis_list:
logger.warn(f"未获取到日期 {date} 之前的下载记录,停止运行")
return
# 读取历史记录
pulgin_history = self.get_data('history') or []
# 创建一个字典来保存分组结果
downloadhis_grouped_dict: Dict[tuple, List[DownloadHistory]] = defaultdict(list)
# 遍历DownloadHistory对象列表
for downloadhis in downloadhis_list:
# 获取type和tmdbid的值
dtype = downloadhis.type
tmdbid = downloadhis.tmdbid
# 将DownloadHistory对象添加到对应分组的列表中
downloadhis_grouped_dict[(dtype, tmdbid)].append(downloadhis)
# 输出分组结果
for key, downloadhis_list in downloadhis_grouped_dict.items():
logger.info(f"开始清理 {key}")
del_transferhis_cnt = 0
del_media_name = downloadhis_list[0].title
del_media_user = downloadhis_list[0].username
del_media_type = downloadhis_list[0].type
del_media_year = downloadhis_list[0].year
del_media_season = downloadhis_list[0].seasons
del_media_episode = downloadhis_list[0].episodes
del_image = downloadhis_list[0].image
for downloadhis in downloadhis_list:
if not downloadhis.download_hash:
logger.debug(f'下载历史 {downloadhis.id} {downloadhis.title} 未获取到download_hash跳过处理')
continue
# 根据hash获取转移记录
transferhis_list = self._transferhis.list_by_hash(download_hash=downloadhis.download_hash)
if not transferhis_list:
logger.warn(f"下载历史 {downloadhis.download_hash} 未查询到转移记录,跳过处理")
continue
for history in transferhis_list:
# 册除媒体库文件
if clean_type in ["dest", "all"]:
dest_fileitem = schemas.FileItem(**history.dest_fileitem)
StorageChain().delete_file(dest_fileitem)
# 删除记录
self._transferhis.delete(history.id)
# 删除源文件
if clean_type in ["src", "all"]:
src_fileitem = schemas.FileItem(**history.src_fileitem)
StorageChain().delete_file(src_fileitem)
# 发送事件
eventmanager.send_event(
EventType.DownloadFileDeleted,
{
"src": history.src
}
)
# 累加删除数量
del_transferhis_cnt += len(transferhis_list)
if del_transferhis_cnt:
# 发送消息
if self._notify:
self.post_message(
mtype=NotificationType.MediaServer,
title="【定时清理媒体库任务完成】",
text=f"清理媒体名称 {del_media_name}\n"
f"下载媒体用户 {del_media_user}\n"
f"删除历史记录 {del_transferhis_cnt}")
pulgin_history.append({
"type": del_media_type,
"title": del_media_name,
"year": del_media_year,
"season": del_media_season,
"episode": del_media_episode,
"image": del_image,
"del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
})
# 保存历史
self.save_data("history", pulgin_history)
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_service(self) -> List[Dict[str, Any]]:
"""
注册插件公共服务
[{
"id": "服务ID",
"name": "服务名称",
"trigger": "触发器cron/interval/date/CronTrigger.from_crontab()",
"func": self.xxx,
"kwargs": {} # 定时器参数
}]
"""
if self._enabled and self._cron:
return [
{
"id": "AutoClean",
"name": "清理媒体库定时服务",
"trigger": CronTrigger.from_crontab(self._cron),
"func": self.__clean,
"kwargs": {}
}
]
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '开启通知',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '0 0 ? ? ?'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'cleantype',
'label': '全局清理方式',
'items': [
{'title': '媒体库文件', 'value': 'dest'},
{'title': '源文件', 'value': 'src'},
{'title': '所有文件', 'value': 'all'},
]
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cleandate',
'label': '全局清理日期',
'placeholder': '清理多少天之前的下载记录(天)'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'cleanuser',
'label': '清理配置',
'rows': 6,
'placeholder': '每一行一个配置,支持以下几种配置方式,清理方式支持 src、desc、all 分别对应源文件,媒体库文件,所有文件\n'
'用户名缺省默认清理所有用户(慎重留空),清理天数缺省默认使用全局清理天数,清理方式缺省默认使用全局清理方式\n'
'用户名/插件名豆瓣想看、豆瓣榜单、RSS订阅\n'
'用户名#清理方式\n'
'用户名:清理天数\n'
'用户名:清理天数#清理方式',
}
}
]
}
]
}
]
}
], {
"enabled": False,
"onlyonce": False,
"notify": False,
"cleantype": "dest",
"cron": "",
"cleanuser": "",
"cleandate": 30
}
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
# 查询同步详情
historys = self.get_data('history')
if not historys:
return [
{
'component': 'div',
'text': '暂无数据',
'props': {
'class': 'text-center',
}
}
]
# 数据按时间降序排序
historys = sorted(historys, key=lambda x: x.get('del_time'), reverse=True)
# 拼装页面
contents = []
for history in historys:
htype = history.get("type")
title = history.get("title")
year = history.get("year")
season = history.get("season")
episode = history.get("episode")
image = history.get("image")
del_time = history.get("del_time")
if season:
sub_contents = [
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'类型:{htype}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'标题:{title}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'年份:{year}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'季:{season}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'集:{episode}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'时间:{del_time}'
}
]
else:
sub_contents = [
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'类型:{htype}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'标题:{title}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'年份:{year}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'时间:{del_time}'
}
]
contents.append(
{
'component': 'VCard',
'content': [
{
'component': 'div',
'props': {
'class': 'd-flex justify-space-start flex-nowrap flex-row',
},
'content': [
{
'component': 'div',
'content': [
{
'component': 'VImg',
'props': {
'src': image,
'height': 120,
'width': 80,
'aspect-ratio': '2/3',
'class': 'object-cover shadow ring-gray-500',
'cover': True
}
}
]
},
{
'component': 'div',
'content': sub_contents
}
]
}
]
}
)
return [
{
'component': 'div',
'props': {
'class': 'grid gap-3 grid-info-card',
},
'content': contents
}
]
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._scheduler.shutdown()
self._scheduler = None
except Exception as e:
logger.error("退出插件失败:%s" % str(e))

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,147 @@
import random
import re
from typing import Tuple
from lxml import etree
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Pt52(_ISiteSigninHandler):
    """
    52pt sign-in handler.
    If an OpenAI key is configured the answer would come from ChatGPT;
    otherwise a random answer is submitted.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "52pt.site"

    # pattern meaning "already signed in today"
    _sign_regex = ['今天已经签过到了']
    # pattern meaning "sign-in succeeded" (list may need extending)
    _success_regex = ['\\d+点魔力值']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        render = site_info.get("render")
        proxy = site_info.get("proxy")

        # check whether we already signed in today
        html_text = self.get_page_source(url='https://52pt.site/bakatest.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"今日已签到")
            return True, '今日已签到'

        # not signed in yet: parse the quiz page
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # question id and the available answer options
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]

        # extract the question text
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            logger.debug(f"获取到签到问题 {question_str}")
        else:
            logger.error(f"未获取到签到问题")
            return False, f"{site}】签到失败,未获取到签到问题"

        # default to a random answer; a ChatGPT answer would replace it if available
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]

        # submit the sign-in
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site)

    def __signin(self, questionid: str,
                 choice: list,
                 site: str,
                 site_cookie: str,
                 ua: str,
                 proxy: bool) -> Tuple[bool, str]:
        """
        Send the sign-in POST request.
        Form fields look like:
            questionid: 450
            choice[]: 8
            choice[]: 4
            usercomment: mood text
            submit: submit
        Multiple-choice questions repeat the choice[] field.
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        logger.debug(f"签到请求参数 {data}")

        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url='https://52pt.site/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'

        # check the response for a success pattern
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # fall back to the "already signed" pattern
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                logger.info(f"{site} 今日已签到")
                return True, '今日已签到'

            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

View File

@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
import re
from abc import ABCMeta, abstractmethod
from typing import Tuple
import chardet
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.browser import PlaywrightHelper
from app.log import logger
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class _ISiteSigninHandler(metaclass=ABCMeta):
    """
    Base class for site sign-in handlers: every handler must inherit from
    it and implement match() and signin().  Implementations placed in the
    sitesignin directory are loaded automatically.
    """
    # site URL matched by the handler; every subclass must set its own
    site_url = ""

    @abstractmethod
    def match(self, url: str) -> bool:
        """
        Return True when *url* belongs to this handler's site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        if StringUtils.url_equal(url, self.site_url):
            return True
        return False

    @abstractmethod
    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: True|False, result message
        """
        pass

    @staticmethod
    def get_page_source(url: str, cookie: str, ua: str, proxy: bool, render: bool, token: str = None) -> str:
        """
        Fetch a page's source.
        :param url: page URL
        :param cookie: cookie string
        :param ua: user agent
        :param proxy: whether to use the configured proxy
        :param render: whether to render with a browser (Playwright)
        :param token: JWT token; when given it is sent as the Authorization header instead of the cookie
        :return: page source, or "" on failure
        """
        if render:
            return PlaywrightHelper().get_page_source(url=url,
                                                      cookies=cookie,
                                                      ua=ua,
                                                      proxies=settings.PROXY_SERVER if proxy else None)
        else:
            if token:
                headers = {
                    "Authorization": token,
                    "User-Agent": ua
                }
            else:
                headers = {
                    "User-Agent": ua,
                    "Cookie": cookie
                }
            res = RequestUtils(headers=headers,
                               proxies=settings.PROXY if proxy else None).get_res(url=url)
            if res is not None:
                # detect the character encoding with chardet
                raw_data = res.content
                if raw_data:
                    try:
                        result = chardet.detect(raw_data)
                        encoding = result['encoding']
                        # decode the raw bytes with the detected encoding
                        return raw_data.decode(encoding)
                    except Exception as e:
                        logger.error(f"chardet解码失败{str(e)}")
                        # fall back to requests' own decoding
                        return res.text
                else:
                    return res.text
            return ""

    @staticmethod
    def sign_in_result(html_res: str, regexs: list) -> bool:
        """
        Return True when any pattern in *regexs* matches the page text.
        """
        # strip pixel sizes and '#123'-style codes to avoid false digit matches
        html_text = re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_res))
        for regex in regexs:
            if re.search(str(regex), html_text):
                return True
        return False

View File

@@ -0,0 +1,75 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class BTSchool(_ISiteSigninHandler):
    """
    BTSchool sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "pt.btschool.club"

    # text of the sign-in link; absent when already signed in
    _sign_text = '每日签到'

    @classmethod
    def match(cls, url) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        render = site_info.get("render")
        proxy = site_info.get("proxy")

        logger.info(f"{site} 开始签到")
        # check whether we already signed in today (sign link absent => done)
        html_text = self.get_page_source(url='https://pt.btschool.club',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        # already signed in
        if self._sign_text not in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        # trigger the sign-in
        html_text = self.get_page_source(url='https://pt.btschool.club/index.php?action=addbonus',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'

        # success: the sign-in link is gone from the returned page
        if self._sign_text not in html_text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'

        # fix: the original fell off the end here and returned None,
        # violating the declared Tuple[bool, str] contract
        logger.error(f"{site} 签到失败,请到页面查看")
        return False, '签到失败,请到页面查看'

View File

@@ -0,0 +1,148 @@
import random
import re
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class CHDBits(_ISiteSigninHandler):
    """
    CHDBits sign-in handler.
    If an OpenAI key is configured the answer would come from ChatGPT;
    otherwise a random answer is submitted.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "ptchdbits.co"

    # pattern meaning "already signed in today"
    _sign_regex = ['今天已经签过到了']
    # pattern meaning "sign-in succeeded" (list may need extending)
    _success_regex = ['\\d+点魔力值']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")

        # check whether we already signed in today
        html_text = self.get_page_source(url='https://ptchdbits.co/bakatest.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        # not signed in yet: parse the quiz page
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # question id and the available answer options
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]

        # extract the question text
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            logger.debug(f"获取到签到问题 {question_str}")
        else:
            logger.error(f"未获取到签到问题")
            return False, f"{site}】签到失败,未获取到签到问题"

        # default to a random answer; a ChatGPT answer would replace it if available
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]

        # submit the sign-in
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site)

    def __signin(self, questionid: str,
                 choice: list,
                 site: str,
                 site_cookie: str,
                 ua: str,
                 proxy: bool) -> Tuple[bool, str]:
        """
        Send the sign-in POST request.
        Form fields look like:
            questionid: 450
            choice[]: 8
            choice[]: 4
            usercomment: mood text
            submit: submit
        Multiple-choice questions repeat the choice[] field.
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        logger.debug(f"签到请求参数 {data}")

        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url='https://ptchdbits.co/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'

        # check the response for a success pattern
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # fall back to the "already signed" pattern
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                logger.info(f"{site} 今日已签到")
                return True, '今日已签到'

            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

View File

@@ -0,0 +1,70 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HaiDan(_ISiteSigninHandler):
    """
    HaiDan sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "haidan.video"

    # pattern meaning "sign-in succeeded"
    _succeed_regex = ['(?<=value=")已经打卡(?=")']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")

        # trigger the sign-in; signin.php redirects to index.php, and the
        # 302 redirect does not carry the cookie, so the response is ignored
        self.get_page_source(url='https://www.haidan.video/signin.php',
                             cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             render=render)
        # fetch index.php again with the cookie to read the sign-in result
        html_text = self.get_page_source(url='https://www.haidan.video/index.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._succeed_regex)
        if sign_status:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'

        logger.error(f"{site} 签到失败,签到接口返回 {html_text}")
        return False, '签到失败'

View File

@@ -0,0 +1,83 @@
import json
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Hares(_ISiteSigninHandler):
    """
    Hares sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "club.hares.top"

    # text meaning "already signed in"
    _sign_text = '已签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")

        # fetch the home page to validate the cookie
        html_text = self.get_page_source(url='https://club.hares.top',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 模拟访问失败,请检查站点连通性")
            return False, '模拟访问失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 模拟访问失败Cookie已失效")
            return False, '模拟访问失败Cookie已失效'

        # if self._sign_text in html_res.text:
        #     logger.info(f"今日已签到")
        #     return True, '今日已签到'

        # call the JSON sign-in endpoint
        headers = {
            'Accept': 'application/json',
            "User-Agent": ua
        }
        sign_res = RequestUtils(cookies=site_cookie,
                                headers=headers,
                                proxies=settings.PROXY if proxy else None
                                ).get_res(url="https://club.hares.top/attendance.php?action=sign")
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'

        # {"code":1,"msg":"您今天已经签到过了"}
        # {"code":0,"msg":"签到成功"}
        sign_dict = json.loads(sign_res.text)
        if sign_dict['code'] == 0:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # any non-zero code is treated as "already signed in"
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

View File

@@ -0,0 +1,69 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDArea(_ISiteSigninHandler):
    """
    HDArea sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "hdarea.club"

    # text meaning "sign-in succeeded"
    _success_text = "此次签到您获得"
    # text meaning "already signed in"
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = settings.PROXY if site_info.get("proxy") else None

        # POST the sign-in action directly
        data = {
            'action': 'sign_in'
        }
        html_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).post_res(url="https://www.hdarea.club/sign_in.php", data=data)
        if not html_res or html_res.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_res.text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        # success response looks like: '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in html_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._repeat_text in html_res.text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        logger.error(f"{site} 签到失败,签到接口返回 {html_res.text}")
        return False, '签到失败'

View File

@@ -0,0 +1,117 @@
import json
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDChina(_ISiteSigninHandler):
    """
    HDChina sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "hdchina.org"

    # markup meaning "already signed in"
    _sign_regex = ['<a class="label label-default" href="#">已签到</a>']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = settings.PROXY if site_info.get("proxy") else None

        # the hdchina cookie expires after each daily sign-in;
        # keep only the "hdchina=" part to work around that
        cookie = ""
        # split on semicolons
        sub_strs = site_cookie.split(";")
        for sub_str in sub_strs:
            if "hdchina=" in sub_str:
                # keep only the hdchina= fragment
                cookie += sub_str + ";"

        if "hdchina=" not in cookie:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        site_cookie = cookie
        # fetch the home page
        html_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).get_res(url="https://hdchina.org/index.php")
        if not html_res or html_res.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_res.text or "阻断页面" in html_res.text:
            logger.error(f"{site} 签到失败Cookie失效")
            return False, '签到失败Cookie失效'

        # use the freshly returned cookies for the sign-in request
        site_cookie = ';'.join(['{}={}'.format(k, v) for k, v in html_res.cookies.get_dict().items()])
        # check whether we already signed in today
        html_res.encoding = "utf-8"
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        # not signed in yet: parse the page for the CSRF token
        html = etree.HTML(html_res.text)
        if not html:
            return False, '签到失败'

        # fix: guard the xpath result before indexing — an empty result
        # previously raised IndexError instead of hitting the error path
        csrf_values = html.xpath("//meta[@name='x-csrf']/@content")
        if not csrf_values:
            # fix: the original message lacked the f prefix and logged the literal "{site}"
            logger.error(f"{site} 签到失败获取x-csrf失败")
            return False, '签到失败'
        x_csrf = csrf_values[0]
        logger.debug(f"获取到x-csrf {x_csrf}")

        # submit the sign-in
        data = {
            'csrf': x_csrf
        }
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).post_res(url="https://hdchina.org/plugin_sign-in.php?cmd=signin", data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'

        sign_dict = json.loads(sign_res.text)
        logger.debug(f"签到返回结果 {sign_dict}")
        if sign_dict['state']:
            # {'state': 'success', 'signindays': 10, 'integral': 20}
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # {'state': False, 'msg': '不正确的CSRF / Incorrect CSRF token'}
            logger.error(f"{site} 签到失败不正确的CSRF / Incorrect CSRF token")
            return False, '签到失败'

View File

@@ -0,0 +1,66 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HDCity(_ISiteSigninHandler):
    """
    HDCity sign-in handler.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "hdcity.city"

    # text meaning "sign-in succeeded"
    _success_text = '本次签到获得魅力'
    # text meaning "already signed in"
    _repeat_text = '已签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")

        # hitting the /sign page performs the sign-in
        html_text = self.get_page_source(url='https://hdcity.city/sign',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        # success response looks like: '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in html_text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._repeat_text in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        logger.error(f"{site} 签到失败,签到接口返回 {html_text}")
        return False, '签到失败'

View File

@@ -0,0 +1,136 @@
import json
import time
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.ocr import OcrHelper
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDSky(_ISiteSigninHandler):
    """
    HDSky sign-in via OCR captcha recognition.
    """
    # site URL matched by this handler; every subclass must set its own
    site_url = "hdsky.me"

    # pattern meaning "already signed in"
    _sign_regex = ['已签到']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Return True when *url* belongs to this site; the default
        implementation is sufficient for most handlers.
        :param url: site URL
        :return: whether it matches; if so signin() will be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the sign-in.
        :param site_info: site information (URL, cookie, UA, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")

        # check whether we already signed in today
        html_text = self.get_page_source(url='https://hdsky.me',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'

        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'

        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        # request a new captcha; retry a few times to tolerate network hiccups
        res_times = 0
        img_hash = None
        while not img_hash and res_times <= 3:
            image_res = RequestUtils(cookies=site_cookie,
                                     ua=ua,
                                     content_type='application/x-www-form-urlencoded; charset=UTF-8',
                                     referer="https://hdsky.me/index.php",
                                     accept_type="*/*",
                                     proxies=settings.PROXY if proxy else None
                                     ).post_res(url='https://hdsky.me/image_code_ajax.php',
                                                data={'action': 'new'})
            if image_res and image_res.status_code == 200:
                image_json = json.loads(image_res.text)
                if image_json["success"]:
                    img_hash = image_json["code"]
                    break
            res_times += 1
            logger.info(f"获取 {site} 验证码失败,正在进行重试,目前重试次数:{res_times}")
            time.sleep(1)

        # got the captcha hash
        if img_hash:
            # full captcha image URL
            img_get_url = 'https://hdsky.me/image.php?action=regimage&imagehash=%s' % img_hash
            logger.info(f"获取到 {site} 验证码链接:{img_get_url}")
            # OCR the 6-digit code, retrying a few times
            times = 0
            ocr_result = None
            while times <= 3:
                # OCR the captcha image
                ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                          cookie=site_cookie,
                                                          ua=ua)
                logger.info(f"OCR识别 {site} 验证码:{ocr_result}")
                if ocr_result:
                    if len(ocr_result) == 6:
                        logger.info(f"OCR识别 {site} 验证码成功:{ocr_result}")
                        break
                times += 1
                logger.info(f"OCR识别 {site} 验证码失败,正在进行重试,目前重试次数:{times}")
                time.sleep(1)

            if ocr_result:
                # submit the sign-in with the recognised code
                data = {
                    'action': 'showup',
                    'imagehash': img_hash,
                    'imagestring': ocr_result
                }
                # call the sign-in endpoint
                res = RequestUtils(cookies=site_cookie,
                                   ua=ua,
                                   proxies=settings.PROXY if proxy else None
                                   ).post_res(url='https://hdsky.me/showup.php', data=data)
                if res and res.status_code == 200:
                    if json.loads(res.text)["success"]:
                        logger.info(f"{site} 签到成功")
                        return True, '签到成功'
                    elif str(json.loads(res.text)["message"]) == "date_unmatch":
                        # duplicate sign-in
                        logger.warn(f"{site} 重复成功")
                        return True, '今日已签到'
                    elif str(json.loads(res.text)["message"]) == "invalid_imagehash":
                        # wrong captcha
                        logger.warn(f"{site} 签到失败:验证码错误")
                        return False, '签到失败:验证码错误'

        logger.error(f'{site} 签到失败:未获取到验证码')
        return False, '签到失败:未获取到验证码'

View File

@@ -0,0 +1,82 @@
import re
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HDUpt(_ISiteSigninHandler):
    """
    Signin handler for the hdu site (pt.hdupt.com).
    """

    # Site url used to match this handler against a configured site.
    site_url = "pt.hdupt.com"
    # Marker present on the index page once today's signin is done.
    _sign_regex = ['<span id="yiqiandao">']
    # Text returned by the signin endpoint on success.
    _success_text = '本次签到获得魅力'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when the url matches, in which case signin() is used
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin flow for hdu.
        :param site_info: site settings (url, cookie, UA, proxy, render, ...)
        :return: (success flag, human readable message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        use_proxy = site_info.get("proxy")
        use_render = site_info.get("render")

        # Load the index page to verify the cookie and the current state.
        page = self.get_page_source(url='https://pt.hdupt.com',
                                    cookie=cookie,
                                    ua=ua,
                                    proxy=use_proxy,
                                    render=use_render)
        if not page:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in page:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        if self.sign_in_result(html_res=page, regexs=self._sign_regex):
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

        # Hit the signin endpoint.
        page = self.get_page_source(url='https://pt.hdupt.com/added.php?action=qiandao',
                                    cookie=cookie,
                                    ua=ua,
                                    proxy=use_proxy,
                                    render=use_render)
        if not page:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        logger.debug(f"{site} 签到接口返回 {page}")
        # A successful response contains at least one number (e.g. ".23").
        if re.findall(r"\d+", page):
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        logger.error(f"{site} 签到失败,签到接口返回 {page}")
        return False, '签到失败'

View File

@@ -0,0 +1,61 @@
from typing import Tuple
from urllib.parse import urljoin
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class MTorrent(_ISiteSigninHandler):
    """
    m-team signin handler. The site has no real signin; instead we refresh
    the member's last-browse timestamp through its JSON API.
    """

    # Url fragment used to match this handler.
    site_url = "m-team"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return cls.site_url in url.split(".")

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Simulate a visit by updating the last-browse time via the API.
        :param site_info: site settings (url, token, UA, proxy, ...)
        :return: (success flag, message)
        """
        url = site_info.get('url')
        api_headers = {
            "Content-Type": "application/json",
            "User-Agent": site_info.get("ua"),
            "Accept": "application/json, text/plain, */*",
            "Authorization": site_info.get("token")
        }
        domain = StringUtils.get_url_domain(url)
        # Refresh the last-browse timestamp.
        response = RequestUtils(headers=api_headers,
                                timeout=60,
                                proxies=settings.PROXY if site_info.get("proxy") else None,
                                referer=f"{url}index"
                                ).post_res(url=f"https://api.{domain}/api/member/updateLastBrowse")
        if response:
            return True, "模拟登录成功"
        if response is not None:
            return False, f"模拟登录失败,状态码:{response.status_code}"
        return False, "模拟登录失败,无法打开网站"

    def login(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Login is equivalent to signin for this site.
        :param site_info: site settings (url, token, UA, proxy, ...)
        :return: (success flag, message)
        """
        return self.signin(site_info)

View File

@@ -0,0 +1,70 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class NexusHD(_ISiteSigninHandler):
    """
    NexusHD signin handler: posts the signin form and interprets the reply.
    """

    # Site url used to match this handler.
    site_url = "v6.nexushd.org"
    # Fragment present in the response on a fresh, successful signin.
    _success_text = "本次签到获得"
    # Fragment present when the account already signed in today.
    _repeat_text = "你今天已经签到过了"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Post the signin form and interpret the response text.
        :param site_info: site settings (cookie, UA, proxy, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = settings.PROXY if site_info.get("proxy") else None
        payload = {
            'action': 'post',
            'content': ''
        }
        response = RequestUtils(cookies=cookie,
                                ua=ua,
                                proxies=proxies
                                ).post_res(url="https://v6.nexushd.org/signin.php", data=payload)
        if not response or response.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in response.text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        # e.g. '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in response.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._repeat_text in response.text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        logger.error(f"{site} 签到失败,签到接口返回 {response.text}")
        return False, '签到失败'

View File

@@ -0,0 +1,132 @@
import json
import time
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.ocr import OcrHelper
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Opencd(_ISiteSigninHandler):
    """
    OCR signin handler for open.cd (皇后): solves the captcha with the
    OCR helper and posts the signin form.
    """

    # Site url used to match this handler.
    site_url = "open.cd"
    # Link that is only present once today's signin has been done.
    _repeat_text = "/plugin_sign-in.php?cmd=show-log"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the OCR based signin flow.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Check whether today's signin is already done
        html_text = self.get_page_source(url='https://www.open.cd',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        if self._repeat_text in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Load the signin page for the captcha parameters
        html_text = self.get_page_source(url='https://www.open.cd/plugin_sign-in.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        # Parse the signin form
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # FIX: check the xpath result lists before indexing — indexing [0]
        # on an empty result raised IndexError before the guard could run.
        img_urls = html.xpath('//form[@id="frmSignin"]//img/@src')
        img_hashes = html.xpath('//form[@id="frmSignin"]//input[@name="imagehash"]/@value')
        if not img_urls or not img_hashes:
            logger.error(f"{site} 签到失败,获取签到参数失败")
            return False, '签到失败,获取签到参数失败'
        img_url = img_urls[0]
        img_hash = img_hashes[0]
        # Full captcha url
        img_get_url = 'https://www.open.cd/%s' % img_url
        logger.debug(f"{site} 获取到{site}验证码链接 {img_get_url}")
        # OCR: retry a few times until a 6-character code is recognised
        times = 0
        ocr_result = None
        while times <= 3:
            ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                      cookie=site_cookie,
                                                      ua=ua)
            logger.debug(f"ocr识别{site}验证码 {ocr_result}")
            if ocr_result:
                if len(ocr_result) == 6:
                    logger.info(f"ocr识别{site}验证码成功 {ocr_result}")
                    break
            times += 1
            logger.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}")
            time.sleep(1)
        if ocr_result:
            # Post the signin form with the recognised code
            data = {
                'imagehash': img_hash,
                'imagestring': ocr_result
            }
            sign_res = RequestUtils(cookies=site_cookie,
                                    ua=ua,
                                    proxies=settings.PROXY if proxy else None
                                    ).post_res(url='https://www.open.cd/plugin_sign-in.php?cmd=signin', data=data)
            if sign_res and sign_res.status_code == 200:
                logger.debug(f"sign_res返回 {sign_res.text}")
                # sign_res.text = '{"state":"success","signindays":"0","integral":"10"}'
                sign_dict = json.loads(sign_res.text)
                if sign_dict['state']:
                    logger.info(f"{site} 签到成功")
                    return True, '签到成功'
                else:
                    logger.error(f"{site} 签到失败,签到接口返回 {sign_dict}")
                    return False, '签到失败'
        logger.error(f'{site} 签到失败:未获取到验证码')
        return False, '签到失败:未获取到验证码'

View File

@@ -0,0 +1,65 @@
import json
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class PTerClub(_ISiteSigninHandler):
    """
    Signin handler for pterclub.com (猫站): calls the attendance ajax
    endpoint and parses its JSON reply.
    """

    # Site url used to match this handler.
    site_url = "pterclub.com"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Call the attendance endpoint and interpret its JSON status.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        use_proxy = site_info.get("proxy")
        use_render = site_info.get("render")
        reply = self.get_page_source(url='https://pterclub.com/attendance-ajax.php',
                                     cookie=cookie,
                                     ua=ua,
                                     proxy=use_proxy,
                                     render=use_render)
        if not reply:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in reply:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        try:
            sign_dict = json.loads(reply)
        except Exception as e:
            logger.error(f"{site} 签到失败,签到接口返回数据异常,错误信息:{str(e)}")
            return False, '签到失败,签到接口返回数据异常'
        if sign_dict['status'] == '1':
            # {"status":"1","data":" (签到已成功300)","message":"<p>这是您的第<b>237</b>次签到,
            # 已连续签到<b>237</b>天。</p><p>本次签到获得<b>300</b>克猫粮。</p>"}
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        # {"status":"0","data":"抱歉","message":"您今天已经签到过了,请勿重复刷新。"}
        logger.info(f"{site} 今日已签到")
        return True, '今日已签到'

View File

@@ -0,0 +1,64 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class PTTime(_ISiteSigninHandler):
    """
    Signin handler for pttime.org (PT时间).
    """

    # Site url used to match this handler.
    site_url = "pttime.org"
    # Patterns indicating a successful signin.
    _succeed_regex = ['签到成功']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Hit the attendance endpoint and check the returned page.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        use_proxy = site_info.get("proxy")
        use_render = site_info.get("render")
        # The endpoint replies with e.g. <html><head></head><body>签到成功</body></html>
        page = self.get_page_source(url='https://www.pttime.org/attendance.php',
                                    cookie=cookie,
                                    ua=ua,
                                    proxy=use_proxy,
                                    render=use_render)
        if not page:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in page:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        if self.sign_in_result(html_res=page, regexs=self._succeed_regex):
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        logger.error(f"{site} 签到失败,签到接口返回 {page}")
        return False, '签到失败'

View File

@@ -0,0 +1,274 @@
import json
import os
import time
from io import BytesIO
from typing import Tuple
from PIL import Image
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Tjupt(_ISiteSigninHandler):
    """
    Signin handler for tjupt.org (北洋). The attendance page shows a poster
    captcha plus several candidate titles; the correct title is found either
    in a local answer cache or by comparing the captcha image hash against
    Douban poster images.
    """

    # Site url used to match this handler.
    site_url = "tjupt.org"
    # Attendance page.
    _sign_in_url = 'https://www.tjupt.org/attendance.php'
    # Marker for "already signed in today".
    _sign_regex = ['<a href="attendance.php">今日已签到</a>']
    # Messages indicating a successful signin.
    _succeed_regex = ['这是您的首次签到,本次签到获得\\d+个魔力值。',
                      '签到成功,这是您的第\\d+次签到,已连续签到\\d+天,本次签到获得\\d+个魔力值。',
                      '重新签到成功,本次签到获得\\d+个魔力值']
    # Local cache mapping captcha image hash -> known correct answer.
    _answer_path = settings.TEMP_PATH / "signin/"
    _answer_file = _answer_path / "tjupt.json"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the captcha based signin flow.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Ensure the directory for the local answer cache exists
        if not os.path.exists(os.path.dirname(self._answer_file)):
            os.makedirs(os.path.dirname(self._answer_file))
        # Fetch the attendance page
        html_text = self.get_page_source(url=self._sign_in_url,
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Not yet signed in: parse the page
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # FIX: check the xpath result before indexing to avoid IndexError
        img_srcs = html.xpath('//table[@class="captcha"]//img/@src')
        if not img_srcs:
            logger.error(f"{site} 签到失败,未获取到签到图片")
            return False, '签到失败,未获取到签到图片'
        # Captcha image url
        img_url = "https://www.tjupt.org" + img_srcs[0]
        logger.info(f"获取到签到图片 {img_url}")
        # Download the captcha and compute its perceptual hash
        captcha_img_res = RequestUtils(cookies=site_cookie,
                                       ua=ua,
                                       proxies=settings.PROXY if proxy else None
                                       ).get_res(url=img_url)
        if not captcha_img_res or captcha_img_res.status_code != 200:
            logger.error(f"{site} 签到图片 {img_url} 请求失败")
            return False, '签到失败,未获取到签到图片'
        captcha_img = Image.open(BytesIO(captcha_img_res.content))
        captcha_img_hash = self._tohash(captcha_img)
        logger.debug(f"签到图片hash {captcha_img_hash}")
        # Candidate answers: form value + visible label
        values = html.xpath("//input[@name='answer']/@value")
        options = html.xpath("//input[@name='answer']/following-sibling::text()")
        if not values or not options:
            logger.error(f"{site} 签到失败,未获取到答案选项")
            return False, '签到失败,未获取到答案选项'
        answers = list(zip(values, options))
        logger.debug(f"获取到所有签到选项 {answers}")
        # Try the local answer cache first
        exits_answers = {}
        try:
            with open(self._answer_file, 'r') as f:
                json_str = f.read()
            exits_answers = json.loads(json_str)
            # FIX: use .get() so an unknown hash falls through to the
            # Douban lookup instead of raising an uncaught KeyError.
            captcha_answer = exits_answers.get(captcha_img_hash)
            # A known answer exists for this captcha hash: look it up
            if captcha_answer:
                for value, answer in answers:
                    if str(captcha_answer) == str(answer):
                        # Verified answer, submit it directly
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site)
        except (FileNotFoundError, IOError, OSError, ValueError) as e:
            # ValueError also covers json.JSONDecodeError for a corrupt file
            logger.debug(f"查询本地已知答案失败:{str(e)},继续请求豆瓣查询")
        # No cached answer: query Douban for each option and compare hashes
        for value, answer in answers:
            if answer:
                # Douban suggestion search
                db_res = RequestUtils().get_res(url=f'https://movie.douban.com/j/subject_suggest?q={answer}')
                if not db_res or db_res.status_code != 200:
                    logger.debug(f"签到选项 {answer} 未查询到豆瓣数据")
                    continue
                # Douban result list
                db_answers = json.loads(db_res.text)
                if not isinstance(db_answers, list):
                    db_answers = [db_answers]
                if len(db_answers) == 0:
                    logger.debug(f"签到选项 {answer} 查询到豆瓣数据为空")
                for db_answer in db_answers:
                    answer_img_url = db_answer['img']
                    # Hash the candidate poster
                    answer_img_res = RequestUtils(referer="https://movie.douban.com").get_res(url=answer_img_url)
                    if not answer_img_res or answer_img_res.status_code != 200:
                        logger.debug(f"签到答案 {answer} {answer_img_url} 请求失败")
                        continue
                    answer_img = Image.open(BytesIO(answer_img_res.content))
                    answer_img_hash = self._tohash(answer_img)
                    logger.debug(f"签到答案图片hash {answer} {answer_img_hash}")
                    # Similarity above 0.9 is treated as the correct answer
                    score = self._comparehash(captcha_img_hash, answer_img_hash)
                    logger.info(f"签到图片与选项 {answer} 豆瓣图片相似度 {score}")
                    if score > 0.9:
                        # Submit and remember the verified answer
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site,
                                             exits_answers=exits_answers,
                                             captcha_img_hash=captcha_img_hash)
                # Wait 5s between lookups so Douban does not rate-limit our ip
                time.sleep(5)
        logger.error("豆瓣图片匹配,未获取到匹配答案")
        # No option matched: report failure
        return False, '签到失败,未获取到匹配答案'

    def __signin(self, answer, site_cookie, ua, proxy, site, exits_answers=None, captcha_img_hash=None):
        """
        Post the chosen answer and interpret the result page.
        """
        data = {
            'answer': answer,
            'submit': '提交'
        }
        logger.debug(f"提交data {data}")
        sign_in_res = RequestUtils(cookies=site_cookie,
                                   ua=ua,
                                   proxies=settings.PROXY if proxy else None
                                   ).post_res(url=self._sign_in_url, data=data)
        if not sign_in_res or sign_in_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Inspect the returned page for the success messages
        sign_status = self.sign_in_result(html_res=sign_in_res.text,
                                          regexs=self._succeed_regex)
        if sign_status:
            logger.info("签到成功")
            if exits_answers and captcha_img_hash:
                # Remember the verified answer for later runs
                self.__write_local_answer(exits_answers=exits_answers or {},
                                          captcha_img_hash=captcha_img_hash,
                                          answer=answer)
            return True, '签到成功'
        else:
            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

    def __write_local_answer(self, exits_answers, captcha_img_hash, answer):
        """
        Persist a verified captcha answer into the local cache file.
        """
        try:
            exits_answers[captcha_img_hash] = answer
            # Serialise the cache
            formatted_data = json.dumps(exits_answers, indent=4)
            with open(self._answer_file, 'w') as f:
                f.write(formatted_data)
        except (FileNotFoundError, IOError, OSError) as e:
            logger.debug(f"签到成功写入本地文件失败:{str(e)}")

    @staticmethod
    def _tohash(img, shape=(10, 10)):
        """
        Compute a 100-bit average hash: resize to `shape`, convert to
        greyscale, mark each pixel above the mean brightness as '1'.
        """
        img = img.resize(shape)
        gray = img.convert('L')
        s = 0
        hash_str = ''
        for i in range(shape[1]):
            for j in range(shape[0]):
                s = s + gray.getpixel((j, i))
        avg = s / (shape[0] * shape[1])
        for i in range(shape[1]):
            for j in range(shape[0]):
                if gray.getpixel((j, i)) > avg:
                    hash_str = hash_str + '1'
                else:
                    hash_str = hash_str + '0'
        return hash_str

    @staticmethod
    def _comparehash(hash1, hash2, shape=(10, 10)):
        """
        Compare two hashes; return the fraction of matching bits,
        or -1 when the lengths differ.
        """
        n = 0
        if len(hash1) != len(hash2):
            return -1
        for i in range(len(hash1)):
            if hash1[i] == hash2[i]:
                n = n + 1
        return n / (shape[0] * shape[1])

View File

@@ -0,0 +1,97 @@
import re
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class TTG(_ISiteSigninHandler):
    """
    Signin handler for totheglory.im (TTG): scrapes the signin token from
    the home page and posts it to the signin endpoint.
    """

    # Site url used to match this handler.
    site_url = "totheglory.im"
    # Markers for "already signed in today".
    _sign_regex = ['<b style="color:green;">已签到</b>']
    _sign_text = '亲,您今天已签到过,不要太贪哦'
    # Marker for a successful signin.
    _success_text = '您已连续签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin flow.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Fetch the home page
        html_text = self.get_page_source(url="https://totheglory.im",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        # Already signed in today?
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # FIX: re.search() returns None when the page layout changed;
        # guard before calling .group() to avoid AttributeError.
        timestamp_match = re.search('(?<=signed_timestamp: ")\\d{10}', html_text)
        token_match = re.search('(?<=signed_token: ").*(?=")', html_text)
        if not timestamp_match or not token_match:
            logger.error(f"{site} 签到失败,未获取到签到参数")
            return False, '签到失败,未获取到签到参数'
        signed_timestamp = timestamp_match.group()
        signed_token = token_match.group()
        logger.debug(f"signed_timestamp={signed_timestamp} signed_token={signed_token}")
        data = {
            'signed_timestamp': signed_timestamp,
            'signed_token': signed_token
        }
        # Post the signin form
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url="https://totheglory.im/signed.php",
                                           data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        sign_res.encoding = "utf-8"
        if self._success_text in sign_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._sign_text in sign_res.text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        logger.error(f"{site} 签到失败,未知原因")
        return False, '签到失败,未知原因'

View File

@@ -0,0 +1,123 @@
import datetime
import random
import re
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class U2(_ISiteSigninHandler):
    """
    Signin handler for u2.dmhy.org. The signin form offers several answers;
    one is picked at random and submitted.
    """

    # Site url used to match this handler.
    site_url = "u2.dmhy.org"
    # Markers for "already signed in" (several locales).
    _sign_regex = ['<a href="showup.php">已签到</a>',
                   '<a href="showup.php">Show Up</a>',
                   '<a href="showup.php">Показать</a>',
                   '<a href="showup.php">已簽到</a>',
                   '<a href="showup.php">已簽到</a>']
    # Marker for a successful signin (redirect script in the response).
    _success_text = "window.location.href = 'showup.php';</script>"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Fetch the showup form, pick a random answer and submit it.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        now = datetime.datetime.now()
        # Site convention: do not sign in before 09:00
        if now.hour < 9:
            logger.error(f"{site} 签到失败9点前不签到")
            return False, '签到失败9点前不签到'
        # Fetch the showup page
        html_text = self.get_page_source(url="https://u2.dmhy.org/showup.php",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        # Already signed in today?
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Not yet: parse the form
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # FIX: the original guard tested `not re` (the regex module — always
        # truthy) instead of the extracted value, and indexed the xpath
        # results before checking them. Check the lists first, then index.
        req_list = html.xpath("//form//td/input[@name='req']/@value")
        hash_list = html.xpath("//form//td/input[@name='hash']/@value")
        form_list = html.xpath("//form//td/input[@name='form']/@value")
        submit_name = html.xpath("//form//td/input[@type='submit']/@name")
        submit_value = html.xpath("//form//td/input[@type='submit']/@value")
        if not req_list or not hash_list or not form_list or not submit_name or not submit_value:
            logger.error(f"{site} 签到失败,未获取到相关签到参数")
            return False, '签到失败'
        req = req_list[0]
        hash_str = hash_list[0]
        form = form_list[0]
        # Pick one of the offered answers at random (FIX: bound by the
        # actual option count instead of a hard-coded 0..3)
        answer_num = random.randint(0, min(len(submit_name), len(submit_value)) - 1)
        data = {
            'req': req,
            'hash': hash_str,
            'form': form,
            'message': '一切随缘~',
            submit_name[answer_num]: submit_value[answer_num]
        }
        # Post the signin form
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url="https://u2.dmhy.org/showup.php?action=show",
                                           data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # A success response is a redirect script:
        # "<script type="text/javascript">window.location.href = 'showup.php';</script>"
        if self._success_text in sign_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            logger.error(f"{site} 签到失败,未知原因")
            return False, '签到失败,未知原因'

View File

@@ -0,0 +1,78 @@
from typing import Tuple
from urllib.parse import urljoin
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
class YemaPT(_ISiteSigninHandler):
    """
    Signin handler for yemapt.org, using the site's JSON API.
    """

    # Url fragment used to match this handler.
    site_url = "yemapt.org"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return cls.site_url in url

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Call the check-in API endpoint (also refreshes last access time).
        :param site_info: site settings (url, cookie, UA, proxy, ...)
        :return: (success flag, message)
        """
        base_url = site_info.get('url')
        client = RequestUtils(headers={
            "Content-Type": "application/json",
            "User-Agent": site_info.get("ua"),
            "Accept": "application/json, text/plain, */*",
        },
            timeout=15,
            cookies=site_info.get("cookie"),
            proxies=settings.PROXY if site_info.get("proxy") else None,
            referer=base_url)
        res = client.get_res(urljoin(base_url, "api/consumer/checkIn"))
        if res and res.json().get("success"):
            return True, "签到成功"
        if res is not None:
            return False, f"签到失败,签到结果:{res.json().get('errorMessage')}"
        return False, "签到失败,无法打开网站"

    def login(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Probe the profile API to simulate a login / refresh access time.
        :param site_info: site settings (url, cookie, UA, proxy, ...)
        :return: (success flag, message)
        """
        base_url = site_info.get('url')
        client = RequestUtils(headers={
            "Content-Type": "application/json",
            "User-Agent": site_info.get("ua"),
            "Accept": "application/json, text/plain, */*",
        },
            timeout=15,
            cookies=site_info.get("cookie"),
            proxies=settings.PROXY if site_info.get("proxy") else None,
            referer=base_url)
        res = client.get_res(urljoin(base_url, "api/user/profile"))
        if res and res.json().get("success"):
            return True, "模拟登录成功"
        if res is not None:
            return False, f"模拟登录失败,状态码:{res.status_code}"
        return False, "模拟登录失败,无法打开网站"

View File

@@ -0,0 +1,88 @@
import json
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class ZhuQue(_ISiteSigninHandler):
    """
    Handler for zhuque.in: verifies the session by loading the home page,
    then fires the character skill through the gaming API to collect the
    bonus.
    """

    # Site url used to match this handler.
    site_url = "zhuque.in"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        :param url: site url
        :return: True when this handler should handle the url
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Load the home page (login check), then call the skill API.
        :param site_info: site settings (cookie, UA, proxy, render, ...)
        :return: (success flag, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Fetch the home page
        html_text = self.get_page_source(url="https://zhuque.in",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 模拟登录失败,请检查站点连通性")
            return False, '模拟登录失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 模拟登录失败,Cookie已失效")
            return False, '模拟登录失败,Cookie已失效'
        html = etree.HTML(html_text)
        if not html:
            return False, '模拟登录失败'
        # The skill API is CSRF protected; the token lives in a meta tag.
        # FIX: check the xpath result before indexing to avoid IndexError.
        tokens = html.xpath("//meta[@name='x-csrf-token']/@content")
        if not tokens or not tokens[0]:
            logger.error(f"{site} 模拟登录失败,未获取到x-csrf-token")
            return False, '模拟登录失败,未获取到x-csrf-token'
        x_csrf_token = tokens[0]
        data = {
            "all": 1,
            "resetModal": "true"
        }
        headers = {
            "x-csrf-token": str(x_csrf_token),
            "Content-Type": "application/json; charset=utf-8",
            "User-Agent": ua
        }
        skill_res = RequestUtils(cookies=site_cookie,
                                 headers=headers,
                                 proxies=settings.PROXY if proxy else None
                                 ).post_res(url="https://zhuque.in/api/gaming/fireGenshinCharacterMagic", json=data)
        # FIX: return on failure instead of falling through — parsing a
        # missing/failed response used to raise AttributeError.
        if not skill_res or skill_res.status_code != 200:
            logger.error(f"{site} 模拟登录失败,释放技能失败")
            return False, '模拟登录失败,释放技能失败'
        # '{"status":200,"data":{"code":"FIRE_GENSHIN_CHARACTER_MAGIC_SUCCESS","bonus":0}}'
        msg = '失败'
        skill_dict = json.loads(skill_res.text)
        if skill_dict['status'] == 200:
            bonus = int(skill_dict['data']['bonus'])
            msg = f'成功,获得{bonus}魔力'
        logger.info(f'{site} 模拟登录成功,技能释放{msg}')
        return True, f'模拟登录成功,技能释放{msg}'

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,263 @@
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.plugins.chatgpt.openai import OpenAi
from app.schemas.types import EventType, ChainEventType
class ChatGPT(_PluginBase):
    # Plugin name
    plugin_name = "ChatGPT"
    # Plugin description
    plugin_desc = "消息交互支持与ChatGPT对话。"
    # Plugin icon
    plugin_icon = "Chatgpt_A.png"
    # Plugin version
    plugin_version = "2.0.1"
    # Plugin author
    plugin_author = "jxxghp"
    # Author homepage
    author_url = "https://github.com/jxxghp"
    # Prefix for configuration item ids
    plugin_config_prefix = "chatgpt_"
    # Load order
    plugin_order = 15
    # Required user auth level
    auth_level = 1

    # Private state
    # OpenAi client; stays None until both url and key are configured
    openai = None
    _enabled = False
    _proxy = False
    _recognize = False
    _openai_url = None
    _openai_key = None
    _model = None

    def init_plugin(self, config: dict = None):
        """
        Initialise the plugin from its saved configuration and build the
        OpenAI client when both api url and key are present.
        """
        if config:
            self._enabled = config.get("enabled")
            self._proxy = config.get("proxy")
            self._recognize = config.get("recognize")
            self._openai_url = config.get("openai_url")
            self._openai_key = config.get("openai_key")
            self._model = config.get("model")
            if self._openai_url and self._openai_key:
                self.openai = OpenAi(api_key=self._openai_key, api_url=self._openai_url,
                                     proxy=settings.PROXY if self._proxy else None,
                                     model=self._model)

    def get_state(self) -> bool:
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the plugin configuration page.
        :return: (page definition, default configuration values)
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'proxy',
                                            'label': '使用代理服务器',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'recognize',
                                            'label': '辅助识别',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'openai_url',
                                            'label': 'OpenAI API Url',
                                            'placeholder': 'https://api.openai.com',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'openai_key',
                                            'label': 'sk-xxx'
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'model',
                                            'label': '自定义模型',
                                            'placeholder': 'gpt-3.5-turbo',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                },
                                'content': [
                                    {
                                        'component': 'VAlert',
                                        'props': {
                                            'type': 'info',
                                            'variant': 'tonal',
                                            'text': '开启插件后,消息交互时使用请[问帮你]开头或者以号结尾或者超过10个汉字/单词则会触发ChatGPT回复。'
                                                    '开启辅助识别后,内置识别功能无法正常识别种子/文件名称时将使用ChatGTP进行AI辅助识别可以提升动漫等非规范命名的识别成功率。'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            "enabled": False,
            "proxy": False,
            "recognize": False,
            "openai_url": "https://api.openai.com",
            "openai_key": "",
            "model": "gpt-3.5-turbo"
        }

    def get_page(self) -> List[dict]:
        pass

    @eventmanager.register(EventType.UserMessage)
    def talk(self, event: Event):
        """
        Listen for user messages and reply through ChatGPT.
        """
        if not self._enabled:
            return
        if not self.openai:
            return
        text = event.event_data.get("text")
        userid = event.event_data.get("userid")
        channel = event.event_data.get("channel")
        if not text:
            return
        response = self.openai.get_response(text=text, userid=userid)
        if response:
            self.post_message(channel=channel, title=response, userid=userid)

    @eventmanager.register(ChainEventType.NameRecognize)
    def recognize(self, event: Event):
        """
        Listen for name-recognize events and use ChatGPT to assist
        recognition of torrent/file names.
        """
        if not self._recognize:
            return
        # FIX: the client may be unconfigured (missing url/key) even when
        # the assist switch is on; bail out instead of raising
        # AttributeError on self.openai.get_media_name below.
        if not self.openai:
            return
        if not event.event_data:
            return
        title = event.event_data.get("title")
        if not title:
            return
        # Ask ChatGPT to extract the media elements from the name
        response = self.openai.get_media_name(filename=title)
        logger.info(f"ChatGPT返回结果{response}")
        if response:
            event.event_data = {
                'title': title,
                'name': response.get("title"),
                'year': response.get("year"),
                'season': response.get("season"),
                'episode': response.get("episode")
            }
        else:
            event.event_data = {}

    def stop_service(self):
        """
        Stop the plugin (nothing to release).
        """
        pass

View File

@@ -0,0 +1,206 @@
import json
import time
from typing import List, Union
import openai
from cacheout import Cache
# Per-user chat session cache: up to 100 sessions, each expiring after one hour.
OpenAISessionCache = Cache(maxsize=100, ttl=3600, timer=time.time, default=None)
class OpenAi:
_api_key: str = None
_api_url: str = None
_model: str = "gpt-3.5-turbo"
def __init__(self, api_key: str = None, api_url: str = None, proxy: dict = None, model: str = None):
self._api_key = api_key
self._api_url = api_url
openai.api_base = self._api_url + "/v1"
openai.api_key = self._api_key
if proxy and proxy.get("https"):
openai.proxy = proxy.get("https")
if model:
self._model = model
def get_state(self) -> bool:
return True if self._api_key else False
@staticmethod
def __save_session(session_id: str, message: str):
"""
保存会话
:param session_id: 会话ID
:param message: 消息
:return:
"""
seasion = OpenAISessionCache.get(session_id)
if seasion:
seasion.append({
"role": "assistant",
"content": message
})
OpenAISessionCache.set(session_id, seasion)
@staticmethod
def __get_session(session_id: str, message: str) -> List[dict]:
"""
获取会话
:param session_id: 会话ID
:return: 会话上下文
"""
seasion = OpenAISessionCache.get(session_id)
if seasion:
seasion.append({
"role": "user",
"content": message
})
else:
seasion = [
{
"role": "system",
"content": "请在接下来的对话中请使用中文回复,并且内容尽可能详细。"
},
{
"role": "user",
"content": message
}]
OpenAISessionCache.set(session_id, seasion)
return seasion
    def __get_model(self, message: Union[str, List[dict]],
                    prompt: str = None,
                    user: str = "MoviePilot",
                    **kwargs):
        """
        Send a chat-completion request to the configured model.

        :param message: a plain user string, or a ready-made list of chat
            messages ({"role": ..., "content": ...})
        :param prompt: optional system prompt, used only when *message* is a string
        :param user: end-user identifier forwarded to the API
        :param kwargs: extra parameters passed through to ChatCompletion.create
        :return: the raw ChatCompletion response object
        """
        if not isinstance(message, list):
            if prompt:
                # Wrap the string as [system prompt, user message]
                message = [
                    {
                        "role": "system",
                        "content": prompt
                    },
                    {
                        "role": "user",
                        "content": message
                    }
                ]
            else:
                # Single user message without a system prompt
                message = [
                    {
                        "role": "user",
                        "content": message
                    }
                ]
        return openai.ChatCompletion.create(
            model=self._model,
            user=user,
            messages=message,
            **kwargs
        )
@staticmethod
def __clear_session(session_id: str):
"""
清除会话
:param session_id: 会话ID
:return:
"""
if OpenAISessionCache.get(session_id):
OpenAISessionCache.delete(session_id)
    def get_media_name(self, filename: str):
        """
        Extract media elements (title, year, season, episode, ...) from a
        file name via ChatGPT.
        :param filename: the file name to parse
        :return: parsed dict, {} on error, or None when no API key is set
        """
        if not self.get_state():
            return None
        result = ""
        try:
            # Prompt instructs the model to answer with a strict JSON object
            _filename_prompt = "I will give you a movie/tvshow file name.You need to return a Json." \
                               "\nPay attention to the correct identification of the film name." \
                               "\n{\"title\":string,\"version\":string,\"part\":string,\"year\":string,\"resolution\":string,\"season\":number|null,\"episode\":number|null}"
            completion = self.__get_model(prompt=_filename_prompt, message=filename)
            result = completion.choices[0].message.content
            return json.loads(result)
        except Exception as e:
            print(f"{str(e)}{result}")
            return {}
def get_response(self, text: str, userid: str):
"""
聊天对话,获取答案
:param text: 输入文本
:param userid: 用户ID
:return:
"""
if not self.get_state():
return ""
try:
if not userid:
return "用户信息错误"
else:
userid = str(userid)
if text == "#清除":
self.__clear_session(userid)
return "会话已清除"
# 获取历史上下文
messages = self.__get_session(userid, text)
completion = self.__get_model(message=messages, user=userid)
result = completion.choices[0].message.content
if result:
self.__save_session(userid, text)
return result
except openai.error.RateLimitError as e:
return f"请求被ChatGPT拒绝了{str(e)}"
except openai.error.APIConnectionError as e:
return f"ChatGPT网络连接失败{str(e)}"
except openai.error.Timeout as e:
return f"没有接收到ChatGPT的返回消息{str(e)}"
except Exception as e:
return f"请求ChatGPT出现错误{str(e)}"
    def translate_to_zh(self, text: str):
        """
        Translate text to Simplified Chinese via ChatGPT.
        :param text: input text
        :return: (success flag, translated text or error message); (False,
            None) when no API key is configured
        """
        if not self.get_state():
            return False, None
        system_prompt = "You are a translation engine that can only translate text and cannot interpret it."
        user_prompt = f"translate to zh-CN:\n\n{text}"
        result = ""
        try:
            # Deterministic sampling settings for stable translations
            completion = self.__get_model(prompt=system_prompt,
                                          message=user_prompt,
                                          temperature=0,
                                          top_p=1,
                                          frequency_penalty=0,
                                          presence_penalty=0)
            result = completion.choices[0].message.content.strip()
            return True, result
        except Exception as e:
            print(f"{str(e)}{result}")
            return False, str(e)
    def get_question_answer(self, question: str):
        """
        Ask ChatGPT to pick the correct answer for a multiple-choice question.
        :param question: question text together with its options
        :return: the option number as returned by the model, {} on error, or
            None when no API key is set
        """
        if not self.get_state():
            return None
        result = ""
        try:
            _question_prompt = "下面我们来玩一个游戏,你是老师,我是学生,你需要回答我的问题,我会给你一个题目和几个选项,你的回复必须是给定选项中正确答案对应的序号,请直接回复数字"
            completion = self.__get_model(prompt=_question_prompt, message=question)
            result = completion.choices[0].message.content
            return result
        except Exception as e:
            print(f"{str(e)}{result}")
            return {}

View File

@@ -0,0 +1,255 @@
from functools import lru_cache
from pathlib import Path
from typing import List, Tuple, Dict, Any
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import TransferInfo, FileItem
from app.schemas.types import EventType, MediaType
from app.utils.http import RequestUtils
from app.utils.system import SystemUtils
class ChineseSubFinder(_PluginBase):
# 插件名称
plugin_name = "ChineseSubFinder"
# 插件描述
plugin_desc = "整理入库时通知ChineseSubFinder下载字幕。"
# 插件图标
plugin_icon = "chinesesubfinder.png"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "chinesesubfinder_"
# 加载顺序
plugin_order = 5
# 可使用的用户级别
auth_level = 1
# 私有属性
_save_tmp_path = None
_enabled = False
_host = None
_api_key = None
_remote_path = None
_local_path = None
    def init_plugin(self, config: dict = None):
        """
        Initialise the plugin from its saved configuration.
        :param config: plugin configuration dict (may be None on first run)
        """
        self._save_tmp_path = settings.TEMP_PATH
        if config:
            self._enabled = config.get("enabled")
            self._api_key = config.get("api_key")
            self._host = config.get('host')
            if self._host:
                # Normalise the host into a full base URL ending with '/'
                if not self._host.startswith('http'):
                    self._host = "http://" + self._host
                if not self._host.endswith('/'):
                    self._host = self._host + "/"
            self._local_path = config.get("local_path")
            self._remote_path = config.get("remote_path")
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'host',
'label': '服务器'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'api_key',
'label': 'API密钥'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'local_path',
'label': '本地路径'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'remote_path',
'label': '远端路径'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"host": "",
"api_key": "",
"local_path": "",
"remote_path": ""
}
def get_state(self) -> bool:
return self._enabled
def get_page(self) -> List[dict]:
pass
def stop_service(self):
pass
@eventmanager.register(EventType.TransferComplete)
def download(self, event: Event):
"""
调用ChineseSubFinder下载字幕
"""
if not self._enabled or not self._host or not self._api_key:
return
item = event.event_data
if not item:
return
# 请求地址
req_url = "%sapi/v1/add-job" % self._host
# 媒体信息
item_media: MediaInfo = item.get("mediainfo")
# 转移信息
item_transfer: TransferInfo = item.get("transferinfo")
# 类型
item_type = item_media.type
# 目的路径
item_dest: FileItem = item_transfer.target_diritem
# 是否蓝光原盘
item_bluray = SystemUtils.is_bluray_dir(Path(item_dest.path))
# 文件清单
item_file_list = item_transfer.file_list_new
if item_bluray:
# 蓝光原盘虚拟个文件
item_file_list = ["%s.mp4" % Path(item_dest.path) / item_dest.name]
for file_path in item_file_list:
# 路径替换
if self._local_path and self._remote_path and file_path.startswith(self._local_path):
file_path = file_path.replace(self._local_path, self._remote_path).replace('\\', '/')
# 调用CSF下载字幕
self.__request_csf(req_url=req_url,
file_path=file_path,
item_type=0 if item_type == MediaType.MOVIE else 1,
item_bluray=item_bluray)
@lru_cache(maxsize=128)
def __request_csf(self, req_url, file_path, item_type, item_bluray):
# 一个名称只建一个任务
logger.info("通知ChineseSubFinder下载字幕: %s" % file_path)
params = {
"video_type": item_type,
"physical_video_file_full_path": file_path,
"task_priority_level": 3,
"media_server_inside_video_id": "",
"is_bluray": item_bluray
}
try:
res = RequestUtils(headers={
"Authorization": "Bearer %s" % self._api_key
}).post(req_url, json=params)
if not res or res.status_code != 200:
logger.error("调用ChineseSubFinder API失败")
else:
# 如果文件目录没有识别的nfo元数据 此接口会返回控制符推测是ChineseSubFinder的原因
# emby refresh元数据时异步的
if res.text:
job_id = res.json().get("job_id")
message = res.json().get("message")
if not job_id:
logger.warn("ChineseSubFinder下载字幕出错%s" % message)
else:
logger.info("ChineseSubFinder任务添加成功%s" % job_id)
elif res.status_code != 200:
logger.warn(f"ChineseSubFinder调用出错{res.status_code} - {res.reason}")
except Exception as e:
logger.error("连接ChineseSubFinder出错" + str(e))

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,859 @@
import datetime
import threading
from typing import List, Tuple, Dict, Any, Optional
import pytz
from app.helper.sites import SitesHelper
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.core.context import Context
from app.core.event import eventmanager, Event
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.models.downloadhistory import DownloadHistory
from app.helper.downloader import DownloaderHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import ServiceInfo
from app.schemas.types import EventType, MediaType
from app.utils.string import StringUtils
class DownloadSiteTag(_PluginBase):
# 插件名称
plugin_name = "下载任务分类与标签"
# 插件描述
plugin_desc = "自动给下载任务分类与打站点标签、剧集名称标签"
# 插件图标
plugin_icon = "Youtube-dl_B.png"
# 插件版本
plugin_version = "2.2"
# 插件作者
plugin_author = "叮叮当"
# 作者主页
author_url = "https://github.com/cikezhu"
# 插件配置项ID前缀
plugin_config_prefix = "DownloadSiteTag_"
# 加载顺序
plugin_order = 2
# 可使用的用户级别
auth_level = 1
# 日志前缀
LOG_TAG = "[DownloadSiteTag] "
# 退出事件
_event = threading.Event()
# 私有属性
downloadhistory_oper = None
sites_helper = None
downloader_helper = None
_scheduler = None
_enabled = False
_onlyonce = False
_interval = "计划任务"
_interval_cron = "5 4 * * *"
_interval_time = 6
_interval_unit = "小时"
_enabled_media_tag = False
_enabled_tag = True
_enabled_category = False
_category_movie = None
_category_tv = None
_category_anime = None
_downloaders = None
def init_plugin(self, config: dict = None):
self.downloadhistory_oper = DownloadHistoryOper()
self.downloader_helper = DownloaderHelper()
self.sites_helper = SitesHelper()
# 读取配置
if config:
self._enabled = config.get("enabled")
self._onlyonce = config.get("onlyonce")
self._interval = config.get("interval") or "计划任务"
self._interval_cron = config.get("interval_cron") or "5 4 * * *"
self._interval_time = self.str_to_number(config.get("interval_time"), 6)
self._interval_unit = config.get("interval_unit") or "小时"
self._enabled_media_tag = config.get("enabled_media_tag")
self._enabled_tag = config.get("enabled_tag")
self._enabled_category = config.get("enabled_category")
self._category_movie = config.get("category_movie") or "电影"
self._category_tv = config.get("category_tv") or "电视"
self._category_anime = config.get("category_anime") or "动漫"
self._downloaders = config.get("downloaders")
# 停止现有任务
self.stop_service()
if self._onlyonce:
# 创建定时任务控制器
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
# 执行一次, 关闭onlyonce
self._onlyonce = False
config.update({"onlyonce": self._onlyonce})
self.update_config(config)
# 添加 补全下载历史的标签与分类 任务
self._scheduler.add_job(func=self._complemented_history, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
if self._scheduler and self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
@property
def service_infos(self) -> Optional[Dict[str, ServiceInfo]]:
"""
服务信息
"""
if not self._downloaders:
logger.warning("尚未配置下载器,请检查配置")
return None
services = self.downloader_helper.get_services(name_filters=self._downloaders)
if not services:
logger.warning("获取下载器实例失败,请检查配置")
return None
active_services = {}
for service_name, service_info in services.items():
if service_info.instance.is_inactive():
logger.warning(f"下载器 {service_name} 未连接,请检查配置")
else:
active_services[service_name] = service_info
if not active_services:
logger.warning("没有已连接的下载器,请检查配置")
return None
return active_services
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_service(self) -> List[Dict[str, Any]]:
"""
注册插件公共服务
[{
"id": "服务ID",
"name": "服务名称",
"trigger": "触发器cron/interval/date/CronTrigger.from_crontab()",
"func": self.xxx,
"kwargs": {} # 定时器参数
}]
"""
if self._enabled:
if self._interval == "计划任务" or self._interval == "固定间隔":
if self._interval == "固定间隔":
if self._interval_unit == "小时":
return [{
"id": "DownloadSiteTag",
"name": "补全下载历史的标签与分类",
"trigger": "interval",
"func": self._complemented_history,
"kwargs": {
"hours": self._interval_time
}
}]
else:
if self._interval_time < 5:
self._interval_time = 5
logger.info(f"{self.LOG_TAG}启动定时服务: 最小不少于5分钟, 防止执行间隔太短任务冲突")
return [{
"id": "DownloadSiteTag",
"name": "补全下载历史的标签与分类",
"trigger": "interval",
"func": self._complemented_history,
"kwargs": {
"minutes": self._interval_time
}
}]
else:
return [{
"id": "DownloadSiteTag",
"name": "补全下载历史的标签与分类",
"trigger": CronTrigger.from_crontab(self._interval_cron),
"func": self._complemented_history,
"kwargs": {}
}]
return []
@staticmethod
def str_to_number(s: str, i: int) -> int:
try:
return int(s)
except ValueError:
return i
def _complemented_history(self):
"""
补全下载历史的标签与分类
"""
if not self.service_infos:
return
logger.info(f"{self.LOG_TAG}开始执行 ...")
# 记录处理的种子, 供辅种(无下载历史)使用
dispose_history = {}
# 所有站点索引
indexers = [indexer.get("name") for indexer in self.sites_helper.get_indexers()]
# JackettIndexers索引器支持多个站点, 如果不存在历史记录, 则通过tracker会再次附加其他站点名称
indexers.append("JackettIndexers")
indexers = set(indexers)
tracker_mappings = {
"chdbits.xyz": "ptchdbits.co",
"agsvpt.trackers.work": "agsvpt.com",
"tracker.cinefiles.info": "audiences.me",
}
for service in self.service_infos.values():
downloader = service.name
downloader_obj = service.instance
logger.info(f"{self.LOG_TAG}开始扫描下载器 {downloader} ...")
if not downloader_obj:
logger.error(f"{self.LOG_TAG} 获取下载器失败 {downloader}")
continue
# 获取下载器中的种子
torrents, error = downloader_obj.get_torrents()
# 如果下载器获取种子发生错误 或 没有种子 则跳过
if error or not torrents:
continue
logger.info(f"{self.LOG_TAG}按时间重新排序 {downloader} 种子数:{len(torrents)}")
# 按添加时间进行排序, 时间靠前的按大小和名称加入处理历史, 判定为原始种子, 其他为辅种
torrents = self._torrents_sort(torrents=torrents, dl_type=service.type)
logger.info(f"{self.LOG_TAG}下载器 {downloader} 分析种子信息中 ...")
for torrent in torrents:
try:
if self._event.is_set():
logger.info(
f"{self.LOG_TAG}停止服务")
return
# 获取已处理种子的key (size, name)
_key = self._torrent_key(torrent=torrent, dl_type=service.type)
# 获取种子hash
_hash = self._get_hash(torrent=torrent, dl_type=service.type)
if not _hash:
continue
# 获取种子当前标签
torrent_tags = self._get_label(torrent=torrent, dl_type=service.type)
torrent_cat = self._get_category(torrent=torrent, dl_type=service.type)
# 提取种子hash对应的下载历史
history: DownloadHistory = self.downloadhistory_oper.get_by_hash(_hash)
if not history:
# 如果找到已处理种子的历史, 表明当前种子是辅种, 否则创建一个空DownloadHistory
if _key and _key in dispose_history:
history = dispose_history[_key]
# 因为辅种站点必定不同, 所以需要更新站点名字 history.torrent_site
history.torrent_site = None
else:
history = DownloadHistory()
else:
# 加入历史记录
if _key:
dispose_history[_key] = history
# 如果标签已经存在任意站点, 则不再添加站点标签
if indexers.intersection(set(torrent_tags)):
history.torrent_site = None
# 如果站点名称为空, 尝试通过trackers识别
elif not history.torrent_site:
trackers = self._get_trackers(torrent=torrent, dl_type=service.type)
for tracker in trackers:
# 检查tracker是否包含特定的关键字并进行相应的映射
for key, mapped_domain in tracker_mappings.items():
if key in tracker:
domain = mapped_domain
break
else:
domain = StringUtils.get_url_domain(tracker)
site_info = self.sites_helper.get_indexer(domain)
if site_info:
history.torrent_site = site_info.get("name")
break
# 如果通过tracker还是无法获取站点名称, 且tmdbid, type, title都是空的, 那么跳过当前种子
if not history.torrent_site and not history.tmdbid and not history.type and not history.title:
continue
# 按设置生成需要写入的标签与分类
_tags = []
_cat = None
# 站点标签, 如果勾选开关的话 因允许torrent_site为空时运行到此, 因此需要判断torrent_site不为空
if self._enabled_tag and history.torrent_site:
_tags.append(history.torrent_site)
# 媒体标题标签, 如果勾选开关的话 因允许title为空时运行到此, 因此需要判断title不为空
if self._enabled_media_tag and history.title:
_tags.append(history.title)
# 分类, 如果勾选开关的话 <tr暂不支持> 因允许mtype为空时运行到此, 因此需要判断mtype不为空。为防止不必要的识别, 种子已经存在分类torrent_cat时 也不执行
if service.type == "qbittorrent" and self._enabled_category and not torrent_cat and history.type:
# 如果是电视剧 需要区分是否动漫
genre_ids = None
# 因允许tmdbid为空时运行到此, 因此需要判断tmdbid不为空
history_type = MediaType(history.type) if history.type else None
if history.tmdbid and history_type == MediaType.TV:
# tmdb_id获取tmdb信息
tmdb_info = self.chain.tmdb_info(mtype=history_type, tmdbid=history.tmdbid)
if tmdb_info:
genre_ids = tmdb_info.get("genre_ids")
_cat = self._genre_ids_get_cat(history.type, genre_ids)
# 去除种子已经存在的标签
if _tags and torrent_tags:
_tags = list(set(_tags) - set(torrent_tags))
# 如果分类一样, 那么不需要修改
if _cat == torrent_cat:
_cat = None
# 判断当前种子是否不需要修改
if not _cat and not _tags:
continue
# 执行通用方法, 设置种子标签与分类
self._set_torrent_info(service=service, _hash=_hash, _torrent=torrent, _tags=_tags, _cat=_cat,
_original_tags=torrent_tags)
except Exception as e:
logger.error(
f"{self.LOG_TAG}分析种子信息时发生了错误: {str(e)}")
logger.info(f"{self.LOG_TAG}执行完成")
    def _genre_ids_get_cat(self, mtype, genre_ids=None):
        """
        Map a media type (and TMDB genre ids) to the configured category name.
        :param mtype: MediaType enum or its string value
        :param genre_ids: TMDB genre id list, used to tell anime from TV
        :return: category name, or None when mtype is falsy
        """
        _cat = None
        if mtype == MediaType.MOVIE or mtype == MediaType.MOVIE.value:
            # Movie
            _cat = self._category_movie
        elif mtype:
            ANIME_GENREIDS = settings.ANIME_GENREIDS
            if genre_ids \
                    and set(genre_ids).intersection(set(ANIME_GENREIDS)):
                # Anime
                _cat = self._category_anime
            else:
                # TV series
                _cat = self._category_tv
        return _cat
@staticmethod
def _torrent_key(torrent: Any, dl_type: str) -> Optional[Tuple[int, str]]:
"""
按种子大小和时间返回key
"""
if dl_type == "qbittorrent":
size = torrent.get('size')
name = torrent.get('name')
else:
size = torrent.total_size
name = torrent.name
if not size or not name:
return None
else:
return size, name
@staticmethod
def _torrents_sort(torrents: Any, dl_type: str):
"""
按种子添加时间排序
"""
if dl_type == "qbittorrent":
torrents = sorted(torrents, key=lambda x: x.get("added_on"), reverse=False)
else:
torrents = sorted(torrents, key=lambda x: x.added_date, reverse=False)
return torrents
@staticmethod
def _get_hash(torrent: Any, dl_type: str):
"""
获取种子hash
"""
try:
return torrent.get("hash") if dl_type == "qbittorrent" else torrent.hashString
except Exception as e:
print(str(e))
return ""
@staticmethod
def _get_trackers(torrent: Any, dl_type: str):
"""
获取种子trackers
"""
try:
if dl_type == "qbittorrent":
"""
url 字符串 跟踪器网址
status 整数 跟踪器状态。有关可能的值,请参阅下表
tier 整数 跟踪器优先级。较低级别的跟踪器在较高级别的跟踪器之前试用。当特殊条目(如 DHT不存在时层号用作占位符时层号有效。>= 0< 0tier
num_peers 整数 跟踪器报告的当前 torrent 的对等体数量
num_seeds 整数 当前种子的种子数,由跟踪器报告
num_leeches 整数 当前种子的水蛭数量,如跟踪器报告的那样
num_downloaded 整数 跟踪器报告的当前 torrent 的已完成下载次数
msg 字符串 跟踪器消息(无法知道此消息是什么 - 由跟踪器管理员决定)
"""
return [tracker.get("url") for tracker in (torrent.trackers or []) if
tracker.get("tier", -1) >= 0 and tracker.get("url")]
else:
"""
class Tracker(Container):
@property
def id(self) -> int:
return self.fields["id"]
@property
def announce(self) -> str:
return self.fields["announce"]
@property
def scrape(self) -> str:
return self.fields["scrape"]
@property
def tier(self) -> int:
return self.fields["tier"]
"""
return [tracker.announce for tracker in (torrent.trackers or []) if
tracker.tier >= 0 and tracker.announce]
except Exception as e:
print(str(e))
return []
@staticmethod
def _get_label(torrent: Any, dl_type: str):
"""
获取种子标签
"""
try:
return [str(tag).strip() for tag in torrent.get("tags", "").split(',')] \
if dl_type == "qbittorrent" else torrent.labels or []
except Exception as e:
print(str(e))
return []
@staticmethod
def _get_category(torrent: Any, dl_type: str):
"""
获取种子分类
"""
try:
return torrent.get("category") if dl_type == "qbittorrent" else None
except Exception as e:
print(str(e))
return None
def _set_torrent_info(self, service: ServiceInfo, _hash: str, _torrent: Any = None, _tags=None, _cat: str = None,
_original_tags: list = None):
"""
设置种子标签与分类
"""
if not service or not service.instance:
return
if _tags is None:
_tags = []
downloader_obj = service.instance
if not _torrent:
_torrent, error = downloader_obj.get_torrents(ids=_hash)
if not _torrent or error:
logger.error(
f"{self.LOG_TAG}设置种子标签与分类时发生了错误: 通过 {_hash} 查询不到任何种子!")
return
logger.info(
f"{self.LOG_TAG}设置种子标签与分类: {_hash} 查询到 {len(_torrent)} 个种子")
_torrent = _torrent[0]
# 判断是否可执行
if _hash and _torrent:
# 下载器api不通用, 因此需分开处理
if service.type == "qbittorrent":
# 设置标签
if _tags:
downloader_obj.set_torrents_tag(ids=_hash, tags=_tags)
# 设置分类 <tr暂不支持>
if _cat:
# 尝试设置种子分类, 如果失败, 则创建再设置一遍
try:
_torrent.setCategory(category=_cat)
except Exception as e:
logger.warn(f"下载器 {service.name} 种子id: {_hash} 设置分类 {_cat} 失败:{str(e)}, "
f"尝试创建分类再设置 ...")
downloader_obj.qbc.torrents_createCategory(name=_cat)
_torrent.setCategory(category=_cat)
else:
# 设置标签
if _tags:
# _original_tags = None表示未指定, 因此需要获取原始标签
if _original_tags is None:
_original_tags = self._get_label(torrent=_torrent, dl_type=service.type)
# 如果原始标签不是空的, 那么合并原始标签
if _original_tags:
_tags = list(set(_original_tags).union(set(_tags)))
downloader_obj.set_torrent_tag(ids=_hash, tags=_tags)
logger.warn(
f"{self.LOG_TAG}下载器: {service.name} 种子id: {_hash} {(' 标签: ' + ','.join(_tags)) if _tags else ''} {(' 分类: ' + _cat) if _cat else ''}")
@eventmanager.register(EventType.DownloadAdded)
def download_added(self, event: Event):
"""
添加下载事件
"""
if not self.get_state():
return
if not event.event_data:
return
try:
downloader = event.event_data.get("downloader")
if not downloader:
logger.info("触发添加下载事件,但没有获取到下载器信息,跳过后续处理")
return
service = self.service_infos.get(downloader)
if not service:
logger.info(f"触发添加下载事件,但没有监听下载器 {downloader},跳过后续处理")
return
context: Context = event.event_data.get("context")
_hash = event.event_data.get("hash")
_torrent = context.torrent_info
_media = context.media_info
_tags = []
_cat = None
# 站点标签, 如果勾选开关的话
if self._enabled_tag and _torrent.site_name:
_tags.append(_torrent.site_name)
# 媒体标题标签, 如果勾选开关的话
if self._enabled_media_tag and _media.title:
_tags.append(_media.title)
# 分类, 如果勾选开关的话 <tr暂不支持>
if self._enabled_category and _media.type:
_cat = self._genre_ids_get_cat(_media.type, _media.genre_ids)
if _hash and (_tags or _cat):
# 执行通用方法, 设置种子标签与分类
self._set_torrent_info(service=service, _hash=_hash, _tags=_tags, _cat=_cat)
except Exception as e:
logger.error(
f"{self.LOG_TAG}分析下载事件时发生了错误: {str(e)}")
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VCheckboxBtn',
'props': {
'model': 'enabled_tag',
'label': '自动站点标签',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VCheckboxBtn',
'props': {
'model': 'enabled_media_tag',
'label': '自动剧名标签',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VCheckboxBtn',
'props': {
'model': 'enabled_category',
'label': '自动设置分类',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VCheckboxBtn',
'props': {
'model': 'onlyonce',
'label': '补全下载历史的标签与分类(一次性任务)'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'multiple': True,
'chips': True,
'clearable': True,
'model': 'downloaders',
'label': '下载器',
'items': [{"title": config.name, "value": config.name}
for config in self.downloader_helper.get_configs().values()]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'interval',
'label': '定时任务',
'items': [
{'title': '禁用', 'value': '禁用'},
{'title': '计划任务', 'value': '计划任务'},
{'title': '固定间隔', 'value': '固定间隔'}
]
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'interval_cron',
'label': '计划任务设置',
'placeholder': '5 4 * * *'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'interval_time',
'label': '固定间隔设置, 间隔每',
'placeholder': '6'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3,
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'interval_unit',
'label': '单位',
'items': [
{'title': '小时', 'value': '小时'},
{'title': '分钟', 'value': '分钟'}
]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'category_movie',
'label': '电影分类名称(默认: 电影)',
'placeholder': '电影'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'category_tv',
'label': '电视分类名称(默认: 电视)',
'placeholder': '电视'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'category_anime',
'label': '动漫分类名称(默认: 动漫)',
'placeholder': '动漫'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '定时任务:支持两种定时方式,主要针对辅种刷流等种子补全站点信息。如没有对应的需求建议切换为禁用。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"onlyonce": False,
"enabled_tag": True,
"enabled_media_tag": False,
"enabled_category": False,
"category_movie": "电影",
"category_tv": "电视",
"category_anime": "动漫",
"interval": "计划任务",
"interval_cron": "5 4 * * *",
"interval_time": "6",
"interval_unit": "小时"
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
"""
停止服务
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))

View File

@@ -0,0 +1,363 @@
import threading
from datetime import datetime, timedelta
from pathlib import Path
from threading import Event as ThreadEvent
from typing import List, Tuple, Dict, Any
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.plugins.ffmpegthumb.ffmpeg_helper import FfmpegHelper
from app.schemas import TransferInfo
from app.schemas.types import EventType
from app.utils.system import SystemUtils
ffmpeg_lock = threading.Lock()
class FFmpegThumb(_PluginBase):
# 插件名称
plugin_name = "FFmpeg缩略图"
# 插件描述
plugin_desc = "TheMovieDb没有背景图片时使用FFmpeg截取视频文件缩略图。"
# 插件图标
plugin_icon = "ffmpeg.png"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "ffmpegthumb_"
# 加载顺序
plugin_order = 31
# 可使用的用户级别
user_level = 1
# 私有属性
_scheduler = None
_enabled = False
_onlyonce = False
_cron = None
_timeline = "00:03:01"
_scan_paths = ""
_exclude_paths = ""
# 退出事件
_event = ThreadEvent()
def init_plugin(self, config: dict = None):
# 读取配置
if config:
self._enabled = config.get("enabled")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._timeline = config.get("timeline")
self._scan_paths = config.get("scan_paths") or ""
self._exclude_paths = config.get("exclude_paths") or ""
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self._enabled or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
logger.info(f"FFmpeg缩略图服务启动周期{self._cron}")
try:
self._scheduler.add_job(func=self.__libraryscan,
trigger=CronTrigger.from_crontab(self._cron),
name="FFmpeg缩略图")
except Exception as e:
logger.error(f"FFmpeg缩略图服务启动失败原因{str(e)}")
self.systemmessage.put(f"FFmpeg缩略图服务启动失败原因{str(e)}", title="FFmpeg缩略图")
if self._onlyonce:
logger.info(f"FFmpeg缩略图服务立即运行一次")
self._scheduler.add_job(func=self.__libraryscan, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="FFmpeg缩略图")
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"enabled": self._enabled,
"cron": self._cron,
"timeline": self._timeline,
"scan_paths": self._scan_paths,
"exclude_paths": self._exclude_paths
})
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'timeline',
'label': '截取时间',
'placeholder': '00:03:01'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '定时扫描周期',
'placeholder': '5位cron表达式留空关闭'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'scan_paths',
'label': '定时扫描路径',
'rows': 5,
'placeholder': '每一行一个目录'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'exclude_paths',
'label': '定时扫描排除路径',
'rows': 2,
'placeholder': '每一行一个目录'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '开启插件后默认会实时处理增量整理的媒体文件需要处理存量媒体文件时才需开启定时需要提前安装FFmpeghttps://www.ffmpeg.org'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"cron": "",
"timeline": "00:03:01",
"scan_paths": "",
"err_hosts": ""
}
def get_page(self) -> List[dict]:
pass
    @eventmanager.register(EventType.TransferComplete)
    def scan_rt(self, event: Event):
        """
        Generate thumbnails in real time when a transfer completes.
        Only local-storage targets and configured media extensions are
        processed.
        """
        if not self._enabled:
            return
        # Event payload: the transfer result
        transferinfo: TransferInfo = event.event_data.get("transferinfo")
        if not transferinfo:
            return
        if transferinfo.target_diritem and transferinfo.target_diritem.storage != "local":
            logger.warn(f"FFmpeg缩略图不支持非本地存储{transferinfo.target_diritem.storage}")
            return
        file_list = transferinfo.file_list_new
        for file in file_list:
            logger.info(f"FFmpeg缩略图处理文件{file}")
            file_path = Path(file)
            if not file_path.exists():
                logger.warn(f"{file_path} 不存在")
                continue
            # Skip non-video files
            if file_path.suffix not in settings.RMT_MEDIAEXT:
                logger.warn(f"{file_path} 不是支持的视频文件")
                continue
            self.gen_file_thumb(file_path)
def __libraryscan(self):
    """
    Scan every configured library path and generate a thumbnail per media file.

    Honours the exclude list and aborts early when the plugin's stop event is set.
    """
    if not self._scan_paths:
        return
    # Directories to skip, one per line.  Blank lines are dropped: an empty
    # entry becomes Path("") == Path('.'), which is_relative_to() matches for
    # any relative file path and would silently exclude everything.
    exclude_paths = [p for p in self._exclude_paths.split("\n") if p]
    # Directories selected for scanning, one per line
    paths = self._scan_paths.split("\n")
    for path in paths:
        if not path:
            continue
        scan_path = Path(path)
        if not scan_path.exists():
            logger.warning(f"FFmpeg缩略图扫描路径不存在:{path}")
            continue
        logger.info(f"开始FFmpeg缩略图扫描:{path} ...")
        # Walk every media file under the directory
        for file_path in SystemUtils.list_files(scan_path, extensions=settings.RMT_MEDIAEXT):
            if self._event.is_set():
                logger.info(f"FFmpeg缩略图扫描服务停止")
                return
            # Skip files inside any excluded directory
            exclude_flag = False
            for exclude_path in exclude_paths:
                try:
                    if file_path.is_relative_to(Path(exclude_path)):
                        exclude_flag = True
                        break
                except Exception as err:
                    # A malformed exclude entry must not abort the scan;
                    # use the module logger instead of bare print()
                    logger.debug(str(err))
            if exclude_flag:
                logger.debug(f"{file_path} 在排除目录中,跳过 ...")
                continue
            # Generate the thumbnail for this file
            self.gen_file_thumb(file_path)
        logger.info(f"目录 {path} 扫描完成")
def gen_file_thumb(self, file_path: Path):
    """
    Create a "<name>-thumb.jpg" beside the given video file, unless it already exists.
    """
    # Serialise ffmpeg invocations across threads
    with ffmpeg_lock:
        try:
            thumb_path = file_path.with_name(file_path.stem + "-thumb.jpg")
            if thumb_path.exists():
                logger.info(f"缩略图已存在:{thumb_path}")
                return
            generated = FfmpegHelper.get_thumb(video_path=str(file_path),
                                               image_path=str(thumb_path),
                                               frames=self._timeline)
            if generated:
                logger.info(f"{file_path} 缩略图已生成:{thumb_path}")
        except Exception as err:
            logger.error(f"FFmpeg处理文件 {file_path} 时发生错误:{str(err)}")
def stop_service(self):
    """
    Stop the plugin: signal a running scan to exit and tear down the scheduler.
    """
    try:
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                # Ask the scan loop to stop before shutting the scheduler down
                self._event.set()
                self._scheduler.shutdown()
                self._event.clear()
            self._scheduler = None
    except Exception as e:
        # Use the module logger (not print) so failures reach the plugin log,
        # consistent with the rest of this plugin
        logger.error(str(e))

View File

@@ -0,0 +1,82 @@
import json
import subprocess
from app.utils.system import SystemUtils
class FfmpegHelper:
    """Static helpers that shell out to ffmpeg/ffprobe for media processing."""

    @staticmethod
    def get_thumb(video_path: str, image_path: str, frames: str = None):
        """
        Capture one frame from the video at timestamp ``frames``
        (HH:MM:SS, default "00:03:01") and write it to ``image_path``.

        :return: True on success, False on bad arguments or ffmpeg failure
        """
        if not frames:
            frames = "00:03:01"
        if not video_path or not image_path:
            return False
        # Pass arguments as a list (no shell): paths containing spaces or
        # quotes can no longer break the command line or inject shell syntax.
        # This also matches how every other method in this class runs ffmpeg.
        command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y',
                   '-i', video_path, '-ss', frames, '-vframes', '1',
                   '-f', 'image2', image_path]
        ret = subprocess.run(command).returncode
        if ret == 0:
            return True
        return False

    @staticmethod
    def extract_wav(video_path: str, audio_path: str, audio_index: str = None):
        """
        Extract a 16 kHz, 16-bit mono WAV track from the video.

        :param audio_index: optional audio stream index; when omitted ffmpeg
                            picks the default stream
        :return: True on success
        """
        if not video_path or not audio_path:
            return False
        # Select a specific audio stream when an index is given
        if audio_index:
            command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
                       '-map', f'0:a:{audio_index}',
                       '-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]
        else:
            command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
                       '-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]
        ret = subprocess.run(command).returncode
        if ret == 0:
            return True
        return False

    @staticmethod
    def get_metadata(video_path: str):
        """
        Probe the video with ffprobe and return its metadata.

        :return: parsed metadata dict, or None on bad input / probe failure
        """
        if not video_path:
            # Return None (not False) so the result is consistently
            # "dict or None" for every failure mode
            return None
        try:
            command = ['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', video_path]
            result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if result.returncode == 0:
                return json.loads(result.stdout.decode("utf-8"))
        except Exception as e:
            print(e)
        return None

    @staticmethod
    def extract_subtitle(video_path: str, subtitle_path: str, subtitle_index: str = None):
        """
        Extract a subtitle track from the video.

        :param subtitle_index: optional subtitle stream index
        :return: True on success
        """
        if not video_path or not subtitle_path:
            return False
        if subtitle_index:
            command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
                       '-map', f'0:s:{subtitle_index}',
                       subtitle_path]
        else:
            command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path, subtitle_path]
        ret = subprocess.run(command).returncode
        if ret == 0:
            return True
        return False

View File

@@ -0,0 +1,336 @@
import json
from pathlib import Path
from typing import Any, List, Dict, Tuple, Optional
from app.db import SessionFactory
from app.db.models import TransferHistory
from app.log import logger
from app.plugins import _PluginBase
from app.utils.http import RequestUtils
class HistoryToV2(_PluginBase):
# 插件名称
plugin_name = "历史记录迁移"
# 插件描述
plugin_desc = "将MoviePilot V1版本的整理历史记录迁移至V2版本。"
# 插件图标
plugin_icon = "Moviepilot_A.png"
# 插件版本
plugin_version = "1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "historytov2_"
# 加载顺序
plugin_order = 99
# 可使用的用户级别
auth_level = 1
# 私有属性
historyoper = None
_enabled = False
_host = None
_username = None
_password = None
def init_plugin(self, config: dict = None):
if config:
self._enabled = config.get("enabled")
self._host = config.get("host")
self._username = config.get("username")
self._password = config.get("password")
if self._enabled:
if self._host and self._username and self._password:
# 关闭开关
self.__close_config()
# 登录MP获取token
token = self.__login_mp()
if token:
# 当前页码
page = 1
# 总记录数
total = 0
# 获取历史记录
history = self.__get_history(token)
while history:
# 处理历史记录
logger.info(f"开始处理第 {page} 页历史记录 ...")
self.__insert_history(history)
# 处理成功一批
total += len(history)
logger.info(f"{page} 页处理完成,共处理 {total} 条记录")
# 获取下一页历史记录
page += 1
history = self.__get_history(token, page=page)
# 处理完成
logger.info(f"历史记录迁移完成,共迁移 {total} 条记录!")
self.systemmessage.put(f"历史记录迁移完成,共迁移 {total} 条记录!", title="MoviePilot历史记录迁移")
else:
self.systemmessage.put(f"配置不完整,服务启动失败!", title="MoviePilot历史记录迁移")
# 关闭开关
self.__close_config()
def __close_config(self):
    """Persist the current settings with the enable switch turned off."""
    self._enabled = False
    config = {
        "enabled": False,
        "host": self._host,
        "username": self._username,
        "password": self._password,
    }
    self.update_config(config)
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'host',
'label': 'MoviePilot V1地址',
'placeholder': 'http://localhost:3000',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'username',
'label': '登录用户名',
'placeholder': 'admin'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'password',
'label': '登录密码',
'type': 'password',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': 'MoviePilot V1 需要是启动状态且能正常访问V1版本和V2版本目录映射需要保持一致迁移时间可能较长完成后会收到系统通知。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"host": None,
"username": None,
"password": None
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
"""
退出插件
"""
pass
def __login_mp(self) -> Optional[str]:
    """
    Log in to the V1 MoviePilot instance and return an access token.

    :return: the access token, or None when configuration is incomplete
             or the login request fails
    """
    if not self._host or not self._username or not self._password:
        return None
    url = f"{self._host}/api/v1/login/access-token"
    headers = {
        "Content-Type": "application/x-www-form-urlencoded"
    }
    data = {
        "username": self._username,
        "password": self._password
    }
    logger.info(f"登录MoviePilot: {url}")
    # Send the POST request
    response = RequestUtils(headers=headers).post_res(url, data=data)
    # post_res returns None when the request itself failed (connection error,
    # timeout, ...); guard before touching status_code to avoid AttributeError
    if response is None:
        logger.warn("登录MoviePilot失败: 无返回")
        self.systemmessage.put(f"登录MoviePilot失败无法同步历史记录", title="MoviePilot历史记录迁移")
        return None
    if response.status_code == 200:
        token_data = response.json()
        # Do not log the raw token: it grants full API access to the server
        logger.info("登录MoviePilot成功")
        return token_data["access_token"]
    # Failed response from the server
    logger.warn(f"登录MoviePilot失败: {response.json()}")
    self.systemmessage.put(f"登录MoviePilot失败无法同步历史记录", title="MoviePilot历史记录迁移")
    return None
def __get_history(self, token: str, page: int = 1, count: int = 30) -> Optional[List[dict]]:
    """
    Fetch one page of transfer history from the V1 instance.

    :param token: bearer token obtained from __login_mp
    :param page: 1-based page number
    :param count: page size
    :return: list of history records ([] when there are none or the request fails)
    """
    if not token:
        return []
    url = f"{self._host}/api/v1/history/transfer"
    headers = {
        "Authorization": f"Bearer {token}"
    }
    params = {
        "page": page,
        "count": count
    }
    logger.info(f"查询转移历史记录: {url}params: {params}")
    # Send the GET request
    response = RequestUtils(headers=headers).get_res(url, params=params)
    # get_res returns None when the request itself failed; guard before
    # touching status_code to avoid an AttributeError
    if response is None or response.status_code != 200:
        # Use an f-string so the response body actually appears in the log;
        # the old logger.warn("msg:", payload) call silently dropped it
        logger.warn(f"查询转移历史记录失败:{response.json() if response is not None else '无返回'}")
        self.systemmessage.put(f"查询转移历史记录失败,无法同步历史记录!", title="MoviePilot历史记录迁移")
        return []
    # Successful response: pull the record list defensively, since a missing
    # "data" or "list" key would otherwise crash len()/iteration
    response_data = response.json()
    data = response_data.get("data") or {}
    records = data.get("list") or []
    logger.info(f"查询转移历史记录成功,共 {len(records)} 条记录")
    return records
@staticmethod
def __insert_history(history: List[dict]):
"""
插入历史记录
"""
if not history:
return
with SessionFactory() as db:
for item in history:
if item.get("src"):
transferhistory = TransferHistory.get_by_src(db, item.get("src"))
if transferhistory:
transferhistory.delete(db, transferhistory.id)
try:
TransferHistory(
src=item.get("src"),
src_storage="local",
src_fileitem={
"storage": "local",
"type": "file",
"path": item.get("src"),
"name": Path(item.get("src")).name,
"basename": Path(item.get("src")).stem,
"extension": Path(item.get("src")).suffix[1:],
},
dest=item.get("dest"),
dest_storage="local",
dest_fileitem={
"storage": "local",
"type": "file",
"path": item.get("dest"),
"name": Path(item.get("dest")).name,
"basename": Path(item.get("dest")).stem,
"extension": Path(item.get("dest")).suffix[1:],
},
mode=item.get("mode"),
type=item.get("type"),
category=item.get("category"),
title=item.get("title"),
year=item.get("year"),
tmdbid=item.get("tmdbid"),
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
doubanid=item.get("doubanid"),
seasons=item.get("seasons"),
episodes=item.get("episodes"),
image=item.get("image"),
download_hash=item.get("download_hash"),
status=item.get("status"),
files=json.loads(item.get("files")) if item.get("files") else [],
date=item.get("date"),
errmsg=item.get("errmsg")
).create(db)
except Exception as e:
logger.error(f"插入历史记录失败:{e}")
continue

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,115 @@
import hashlib
import json
import time
from typing import Tuple, Optional
from app.utils.http import RequestUtils
class IyuuHelper(object):
    """
    Client for the new-style (dev) IYUU reseed API.
    """
    _version = "8.2.0"
    _api_base = "https://2025.iyuu.cn"
    # Cache of reseed-capable sites, keyed by site id
    _sites = {}
    _token = None
    # Server-side digest of the reported site list
    _sid_sha1 = None

    def __init__(self, token: str):
        self._token = token
        if self._token:
            self.init_config()

    def init_config(self):
        pass

    def __request_iyuu(self, url: str, method: str = "get", params: dict = None) -> Tuple[Optional[dict], str]:
        """
        Send a request to the IYUU API.

        :return: (data, "") on success, (None, error message) otherwise
        """
        if method == "post":
            ret = RequestUtils(
                accept_type="application/json",
                headers={'token': self._token}
            ).post_res(f'{self._api_base + url}', json=params)
        else:
            ret = RequestUtils(
                accept_type="application/json",
                headers={'token': self._token}
            ).get_res(f'{self._api_base + url}', params=params)
        if ret:
            result = ret.json()
            if result.get('code') == 0:
                return result.get('data'), ""
            else:
                return None, f'请求IYUU失败状态码{result.get("code")},返回信息:{result.get("msg")}'
        elif ret is not None:
            return None, f"请求IYUU失败状态码{ret.status_code},错误原因:{ret.reason}"
        else:
            return None, f"请求IYUU失败未获取到返回信息"

    def get_torrent_url(self, sid: str) -> Tuple[Optional[str], Optional[str]]:
        """
        Return (base_url, download_page) for a site id, or (None, None) when unknown.
        """
        if not sid:
            return None, None
        if not self._sites:
            self._sites = self.__get_sites()
        if not self._sites.get(sid):
            return None, None
        site = self._sites.get(sid)
        return site.get('base_url'), site.get('download_page')

    def __get_sites(self) -> dict:
        """
        Fetch every site that supports reseeding.

        :return: dict keyed by site id (empty on failure)
        """
        result, msg = self.__request_iyuu(url='/reseed/sites/index')
        if result:
            ret_sites = {}
            sites = result.get('sites')
            for site in sites:
                ret_sites[site.get('id')] = site
            return ret_sites
        else:
            print(msg)
            return {}

    def __report_existing(self) -> Optional[str]:
        """
        Report the sites we can reseed to and return the server-side sid_sha1.
        """
        if not self._sites:
            self._sites = self.__get_sites()
        sid_list = list(self._sites.keys())
        result, msg = self.__request_iyuu(url='/reseed/sites/reportExisting',
                                          method='post',
                                          params={'sid_list': sid_list})
        if result:
            return result.get('sid_sha1')
        return None

    def get_seed_info(self, info_hashs: list) -> Tuple[Optional[dict], str]:
        """
        Query the site id / torrent id for each info hash.

        :param info_hashs: list of torrent info hashes (left unmodified)
        :return: (data, message) from the API
        """
        if not self._sid_sha1:
            self._sid_sha1 = self.__report_existing()
        # Sort a copy: the previous in-place info_hashs.sort() mutated the
        # caller's list, a surprising side effect for callers reusing it
        sorted_hashes = sorted(info_hashs)
        json_data = json.dumps(sorted_hashes, separators=(',', ':'), ensure_ascii=False)
        sha1 = self.get_sha1(json_data)
        result, msg = self.__request_iyuu(url='/reseed/index/index', method='post', params={
            'hash': json_data,
            'sha1': sha1,
            'sid_sha1': self._sid_sha1,
            'timestamp': int(time.time()),
            'version': self._version
        })
        return result, msg

    @staticmethod
    def get_sha1(json_str: str) -> str:
        """Return the hex SHA-1 digest of the UTF-8 encoding of *json_str*."""
        return hashlib.sha1(json_str.encode('utf-8')).hexdigest()

View File

@@ -0,0 +1,460 @@
from datetime import datetime, timedelta
from pathlib import Path
from threading import Event
from typing import List, Tuple, Dict, Any
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app import schemas
from app.chain.media import MediaChain
from app.core.config import settings
from app.core.metainfo import MetaInfoPath
from app.db.transferhistory_oper import TransferHistoryOper
from app.helper.nfo import NfoReader
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import MediaType
from app.utils.system import SystemUtils
class LibraryScraper(_PluginBase):
# 插件名称
plugin_name = "媒体库刮削"
# 插件描述
plugin_desc = "定时对媒体库进行刮削,补齐缺失元数据和图片。"
# 插件图标
plugin_icon = "scraper.png"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "libraryscraper_"
# 加载顺序
plugin_order = 7
# 可使用的用户级别
user_level = 1
# 私有属性
transferhis = None
mediachain = None
_scheduler = None
_scraper = None
# 限速开关
_enabled = False
_onlyonce = False
_cron = None
_mode = ""
_scraper_paths = ""
_exclude_paths = ""
# 退出事件
_event = Event()
def init_plugin(self, config: dict = None):
self.mediachain = MediaChain()
# 读取配置
if config:
self._enabled = config.get("enabled")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._mode = config.get("mode") or ""
self._scraper_paths = config.get("scraper_paths") or ""
self._exclude_paths = config.get("exclude_paths") or ""
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self._enabled or self._onlyonce:
self.transferhis = TransferHistoryOper()
if self._onlyonce:
logger.info(f"媒体库刮削服务,立即运行一次")
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
self._scheduler.add_job(func=self.__libraryscraper, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="媒体库刮削")
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"enabled": self._enabled,
"cron": self._cron,
"mode": self._mode,
"scraper_paths": self._scraper_paths,
"exclude_paths": self._exclude_paths
})
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_service(self) -> List[Dict[str, Any]]:
"""
注册插件公共服务
[{
"id": "服务ID",
"name": "服务名称",
"trigger": "触发器cron/interval/date/CronTrigger.from_crontab()",
"func": self.xxx,
"kwargs": {} # 定时器参数
}]
"""
if self._enabled and self._cron:
return [{
"id": "LibraryScraper",
"name": "媒体库刮削",
"trigger": CronTrigger.from_crontab(self._cron),
"func": self.__libraryscraper,
"kwargs": {}
}]
elif self._enabled:
return [{
"id": "LibraryScraper",
"name": "媒体库刮削",
"trigger": CronTrigger.from_crontab("0 0 */7 * *"),
"func": self.__libraryscraper,
"kwargs": {}
}]
return []
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'mode',
'label': '覆盖模式',
'items': [
{'title': '不覆盖已有元数据', 'value': ''},
{'title': '覆盖所有元数据和图片', 'value': 'force_all'},
]
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '5位cron表达式留空自动'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'scraper_paths',
'label': '削刮路径',
'rows': 5,
'placeholder': '每一行一个目录'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'exclude_paths',
'label': '排除路径',
'rows': 2,
'placeholder': '每一行一个目录'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '刮削路径后拼接#电视剧/电影,强制指定该媒体路径媒体类型。'
'不加默认根据文件名自动识别媒体类型。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"cron": "0 0 */7 * *",
"mode": "",
"scraper_paths": "",
"err_hosts": ""
}
def get_page(self) -> List[dict]:
pass
def __libraryscraper(self):
"""
开始刮削媒体库
"""
if not self._scraper_paths:
return
# 排除目录
exclude_paths = self._exclude_paths.split("\n")
# 已选择的目录
paths = self._scraper_paths.split("\n")
# 需要适削的媒体文件夹
scraper_paths = []
for path in paths:
if not path:
continue
# 强制指定该路径媒体类型
mtype = None
if str(path).count("#") == 1:
mtype = next(
(mediaType for mediaType in MediaType.__members__.values() if
mediaType.value == str(str(path).split("#")[1])),
None)
path = str(path).split("#")[0]
# 判断路径是否存在
scraper_path = Path(path)
if not scraper_path.exists():
logger.warning(f"媒体库刮削路径不存在:{path}")
continue
logger.info(f"开始检索目录:{path} {mtype} ...")
# 遍历所有文件
files = SystemUtils.list_files(scraper_path, settings.RMT_MEDIAEXT)
for file_path in files:
if self._event.is_set():
logger.info(f"媒体库刮削服务停止")
return
# 排除目录
exclude_flag = False
for exclude_path in exclude_paths:
try:
if file_path.is_relative_to(Path(exclude_path)):
exclude_flag = True
break
except Exception as err:
print(str(err))
if exclude_flag:
logger.debug(f"{file_path} 在排除目录中,跳过 ...")
continue
# 识别是电影还是电视剧
if not mtype:
file_meta = MetaInfoPath(file_path)
mtype = file_meta.type
if mtype == MediaType.TV:
dir_item = (file_path.parent.parent, mtype)
if dir_item not in scraper_paths:
logger.info(f"发现电视剧目录:{dir_item}")
scraper_paths.append(dir_item)
else:
dir_item = (file_path.parent, mtype)
if dir_item not in scraper_paths:
logger.info(f"发现电影目录:{dir_item}")
scraper_paths.append(dir_item)
# 开始刮削
if scraper_paths:
for item in scraper_paths:
logger.info(f"开始刮削目录:{item[0]} ...")
self.__scrape_dir(path=item[0], mtype=item[1])
else:
logger.info(f"未发现需要刮削的目录")
def __scrape_dir(self, path: Path, mtype: MediaType):
"""
削刮一个目录,该目录必须是媒体文件目录
"""
# 优先读取本地nfo文件
tmdbid = None
if mtype == MediaType.MOVIE:
# 电影
movie_nfo = path / "movie.nfo"
if movie_nfo.exists():
tmdbid = self.__get_tmdbid_from_nfo(movie_nfo)
file_nfo = path / (path.stem + ".nfo")
if not tmdbid and file_nfo.exists():
tmdbid = self.__get_tmdbid_from_nfo(file_nfo)
else:
# 电视剧
tv_nfo = path / "tvshow.nfo"
if tv_nfo.exists():
tmdbid = self.__get_tmdbid_from_nfo(tv_nfo)
if tmdbid:
# 按TMDBID识别
logger.info(f"读取到本地nfo文件的tmdbid{tmdbid}")
mediainfo = self.chain.recognize_media(tmdbid=tmdbid, mtype=mtype)
else:
# 按名称识别
meta = MetaInfoPath(path)
meta.type = mtype
mediainfo = self.chain.recognize_media(meta=meta)
if not mediainfo:
logger.warn(f"未识别到媒体信息:{path}")
return
# 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title
if not settings.SCRAP_FOLLOW_TMDB:
transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id,
mtype=mediainfo.type.value)
if transfer_history:
mediainfo.title = transfer_history.title
# 获取图片
self.chain.obtain_images(mediainfo)
# 刮削
self.mediachain.scrape_metadata(
fileitem=schemas.FileItem(
storage="local",
type="dir",
path=str(path).replace("\\", "/") + "/",
name=path.name,
basename=path.stem,
modify_time=path.stat().st_mtime,
),
mediainfo=mediainfo,
overwrite=True if self._mode else False
)
logger.info(f"{path} 刮削完成")
@staticmethod
def __get_tmdbid_from_nfo(file_path: Path):
    """
    Read the TMDB id out of an .nfo file.

    :param file_path: path to the nfo file
    :return: the tmdbid value, or None when absent or unreadable
    """
    if not file_path:
        return None
    # The id may be stored under several element spellings
    candidate_xpaths = (
        "uniqueid[@type='Tmdb']",
        "uniqueid[@type='tmdb']",
        "uniqueid[@type='TMDB']",
        "tmdbid",
    )
    try:
        reader = NfoReader(file_path)
        for xpath in candidate_xpaths:
            value = reader.get_element_value(xpath)
            if value:
                return value
    except Exception as err:
        logger.warn(f"从nfo文件中获取tmdbid失败{str(err)}")
    return None
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))

View File

@@ -0,0 +1,379 @@
import time
from typing import Any, List, Dict, Tuple, Optional
from app.core.event import eventmanager, Event
from app.helper.mediaserver import MediaServerHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import WebhookEventInfo, ServiceInfo
from app.schemas.types import EventType, MediaType, MediaImageType, NotificationType
from app.utils.web import WebUtils
class MediaServerMsg(_PluginBase):
# 插件名称
plugin_name = "媒体库服务器通知"
# 插件描述
plugin_desc = "发送Emby/Jellyfin/Plex服务器的播放、入库等通知消息。"
# 插件图标
plugin_icon = "mediaplay.png"
# 插件版本
plugin_version = "1.5"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "mediaservermsg_"
# 加载顺序
plugin_order = 14
# 可使用的用户级别
auth_level = 1
# 私有属性
mediaserver_helper = None
_enabled = False
_add_play_link = False
_mediaservers = None
_types = []
_webhook_msg_keys = {}
# 拼装消息内容
_webhook_actions = {
"library.new": "新入库",
"system.webhooktest": "测试",
"playback.start": "开始播放",
"playback.stop": "停止播放",
"user.authenticated": "登录成功",
"user.authenticationfailed": "登录失败",
"media.play": "开始播放",
"media.stop": "停止播放",
"PlaybackStart": "开始播放",
"PlaybackStop": "停止播放",
"item.rate": "标记了"
}
_webhook_images = {
"emby": "https://emby.media/notificationicon.png",
"plex": "https://www.plex.tv/wp-content/uploads/2022/04/new-logo-process-lines-gray.png",
"jellyfin": "https://play-lh.googleusercontent.com/SCsUK3hCCRqkJbmLDctNYCfehLxsS4ggD1ZPHIFrrAN1Tn9yhjmGMPep2D9lMaaa9eQi"
}
def init_plugin(self, config: dict = None):
self.mediaserver_helper = MediaServerHelper()
if config:
self._enabled = config.get("enabled")
self._types = config.get("types") or []
self._mediaservers = config.get("mediaservers") or []
self._add_play_link = config.get("add_play_link", False)
def service_infos(self, type_filter: Optional[str] = None) -> Optional[Dict[str, ServiceInfo]]:
"""
服务信息
"""
if not self._mediaservers:
logger.warning("尚未配置媒体服务器,请检查配置")
return None
services = self.mediaserver_helper.get_services(type_filter=type_filter, name_filters=self._mediaservers)
if not services:
logger.warning("获取媒体服务器实例失败,请检查配置")
return None
active_services = {}
for service_name, service_info in services.items():
if service_info.instance.is_inactive():
logger.warning(f"媒体服务器 {service_name} 未连接,请检查配置")
else:
active_services[service_name] = service_info
if not active_services:
logger.warning("没有已连接的媒体服务器,请检查配置")
return None
return active_services
def service_info(self, name: str) -> Optional[ServiceInfo]:
"""
服务信息
"""
service_infos = self.service_infos() or {}
return service_infos.get(name)
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
types_options = [
{"title": "新入库", "value": "library.new"},
{"title": "开始播放", "value": "playback.start|media.play|PlaybackStart"},
{"title": "停止播放", "value": "playback.stop|media.stop|PlaybackStop"},
{"title": "用户标记", "value": "item.rate"},
{"title": "测试", "value": "system.webhooktest"},
{"title": "登录成功", "value": "user.authenticated"},
{"title": "登录失败", "value": "user.authenticationfailed"},
]
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'add_play_link',
'label': '添加播放链接',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'multiple': True,
'chips': True,
'clearable': True,
'model': 'mediaservers',
'label': '媒体服务器',
'items': [{"title": config.name, "value": config.name}
for config in self.mediaserver_helper.get_configs().values()]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VSelect',
'props': {
'chips': True,
'multiple': True,
'model': 'types',
'label': '消息类型',
'items': types_options
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '需要设置媒体服务器Webhook回调相对路径为 /api/v1/webhook?token=API_TOKEN&source=媒体服务器名3001端口其中 API_TOKEN 为设置的 API_TOKEN。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"types": []
}
def get_page(self) -> List[dict]:
pass
@eventmanager.register(EventType.WebhookMessage)
def send(self, event: Event):
"""
发送通知消息
"""
if not self._enabled:
return
event_info: WebhookEventInfo = event.event_data
if not event_info:
return
# 不在支持范围不处理
if not self._webhook_actions.get(event_info.event):
return
# 不在选中范围不处理
msgflag = False
for _type in self._types:
if event_info.event in _type.split("|"):
msgflag = True
break
if not msgflag:
logger.info(f"未开启 {event_info.event} 类型的消息通知")
return
if not self.service_infos():
logger.info(f"未开启任一媒体服务器的消息通知")
return
if event_info.server_name and not self.service_info(name=event_info.server_name):
logger.info(f"未开启媒体服务器 {event_info.server_name} 的消息通知")
return
if event_info.channel and not self.service_infos(type_filter=event_info.channel):
logger.info(f"未开启媒体服务器类型 {event_info.channel} 的消息通知")
return
expiring_key = f"{event_info.item_id}-{event_info.client}-{event_info.user_name}"
# 过滤停止播放重复消息
if str(event_info.event) == "playback.stop" and expiring_key in self._webhook_msg_keys.keys():
# 刷新过期时间
self.__add_element(expiring_key)
return
# 消息标题
if event_info.item_type in ["TV", "SHOW"]:
message_title = f"{self._webhook_actions.get(event_info.event)}剧集 {event_info.item_name}"
elif event_info.item_type == "MOV":
message_title = f"{self._webhook_actions.get(event_info.event)}电影 {event_info.item_name}"
elif event_info.item_type == "AUD":
message_title = f"{self._webhook_actions.get(event_info.event)}有声书 {event_info.item_name}"
else:
message_title = f"{self._webhook_actions.get(event_info.event)}"
# 消息内容
message_texts = []
if event_info.user_name:
message_texts.append(f"用户:{event_info.user_name}")
if event_info.device_name:
message_texts.append(f"设备:{event_info.client} {event_info.device_name}")
if event_info.ip:
message_texts.append(f"IP地址{event_info.ip} {WebUtils.get_location(event_info.ip)}")
if event_info.percentage:
percentage = round(float(event_info.percentage), 2)
message_texts.append(f"进度:{percentage}%")
if event_info.overview:
message_texts.append(f"剧情:{event_info.overview}")
message_texts.append(f"时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}")
# 消息内容
message_content = "\n".join(message_texts)
# 消息图片
image_url = event_info.image_url
# 查询剧集图片
if (event_info.tmdb_id
and event_info.season_id
and event_info.episode_id):
specific_image = self.chain.obtain_specific_image(
mediaid=event_info.tmdb_id,
mtype=MediaType.TV,
image_type=MediaImageType.Backdrop,
season=event_info.season_id,
episode=event_info.episode_id
)
if specific_image:
image_url = specific_image
# 使用默认图片
if not image_url:
image_url = self._webhook_images.get(event_info.channel)
play_link = None
if self._add_play_link:
if event_info.server_name:
service = self.service_infos().get(event_info.server_name)
if service:
play_link = service.instance.get_play_url(event_info.item_id)
elif event_info.channel:
services = self.mediaserver_helper.get_services(type_filter=event_info.channel)
for service in services.values():
play_link = service.instance.get_play_url(event_info.item_id)
if play_link:
break
if str(event_info.event) == "playback.stop":
# 停止播放消息,添加到过期字典
self.__add_element(expiring_key)
if str(event_info.event) == "playback.start":
# 开始播放消息,删除过期字典
self.__remove_element(expiring_key)
# 发送消息
self.post_message(mtype=NotificationType.MediaServer,
title=message_title, text=message_content, image=image_url, link=play_link)
def __add_element(self, key, duration=600):
expiration_time = time.time() + duration
# 如果元素已经存在,更新其过期时间
self._webhook_msg_keys[key] = expiration_time
def __remove_element(self, key):
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if k != key}
def __get_elements(self):
current_time = time.time()
# 过滤掉过期的元素
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if v > current_time}
return list(self._webhook_msg_keys.keys())
def stop_service(self):
"""
退出插件
"""
pass

View File

@@ -0,0 +1,223 @@
import time
from pathlib import Path
from typing import Any, List, Dict, Tuple, Optional
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.helper.mediaserver import MediaServerHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import TransferInfo, RefreshMediaItem, ServiceInfo
from app.schemas.types import EventType
class MediaServerRefresh(_PluginBase):
# 插件名称
plugin_name = "媒体库服务器刷新"
# 插件描述
plugin_desc = "入库后自动刷新Emby/Jellyfin/Plex服务器海报墙。"
# 插件图标
plugin_icon = "refresh2.png"
# 插件版本
plugin_version = "1.3.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "mediaserverrefresh_"
# 加载顺序
plugin_order = 14
# 可使用的用户级别
auth_level = 1
# 私有属性
mediaserver_helper = None
_enabled = False
_delay = 0
_mediaservers = None
def init_plugin(self, config: dict = None):
self.mediaserver_helper = MediaServerHelper()
if config:
self._enabled = config.get("enabled")
self._delay = config.get("delay") or 0
self._mediaservers = config.get("mediaservers") or []
@property
def service_infos(self) -> Optional[Dict[str, ServiceInfo]]:
"""
服务信息
"""
if not self._mediaservers:
logger.warning("尚未配置媒体服务器,请检查配置")
return None
services = self.mediaserver_helper.get_services(name_filters=self._mediaservers)
if not services:
logger.warning("获取媒体服务器实例失败,请检查配置")
return None
active_services = {}
for service_name, service_info in services.items():
if service_info.instance.is_inactive():
logger.warning(f"媒体服务器 {service_name} 未连接,请检查配置")
else:
active_services[service_name] = service_info
if not active_services:
logger.warning("没有已连接的媒体服务器,请检查配置")
return None
return active_services
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'multiple': True,
'chips': True,
'clearable': True,
'model': 'mediaservers',
'label': '媒体服务器',
'items': [{"title": config.name, "value": config.name}
for config in self.mediaserver_helper.get_configs().values()]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'delay',
'label': '延迟时间(秒)',
'placeholder': '0'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"delay": 0
}
def get_page(self) -> List[dict]:
    """This plugin has no detail page."""
    pass
@eventmanager.register(EventType.TransferComplete)
def refresh(self, event: Event):
    """
    Refresh media-server libraries after an organize/transfer completes.

    Fixes:
    - guard against a missing ``mediainfo`` in the event payload, which
      previously raised AttributeError when building the refresh item;
    - evaluate the ``service_infos`` property once instead of twice
      (each access re-queries the helper and re-logs warnings).
    """
    if not self._enabled:
        return
    event_info: dict = event.event_data
    if not event_info:
        return
    # Resolve the connected media servers once
    services = self.service_infos
    if not services:
        return
    if self._delay:
        logger.info(f"延迟 {self._delay} 秒后刷新媒体库... ")
        time.sleep(float(self._delay))
    # transfer result of the just-organized item
    transferinfo: TransferInfo = event_info.get("transferinfo")
    if not transferinfo or not transferinfo.target_diritem or not transferinfo.target_diritem.path:
        return
    mediainfo: MediaInfo = event_info.get("mediainfo")
    if not mediainfo:
        # cannot build a refresh item without media metadata
        return
    items = [
        RefreshMediaItem(
            title=mediainfo.title,
            year=mediainfo.year,
            type=mediainfo.type,
            category=mediainfo.category,
            target_path=Path(transferinfo.target_diritem.path)
        )
    ]
    for name, service in services.items():
        # Emby: per-item refresh supported
        if self.mediaserver_helper.is_media_server("emby", service=service):
            service.instance.refresh_library_by_items(items)
        # FIXME: Jellyfin has no per-item refresh API; fall back to full refresh
        if self.mediaserver_helper.is_media_server("jellyfin", service=service):
            service.instance.refresh_root_library()
        # Plex: per-item refresh supported
        if self.mediaserver_helper.is_media_server("plex", service=service):
            service.instance.refresh_library_by_items(items)
def stop_service(self):
    """
    Plugin teardown hook — no background resources to release.
    """
    pass

View File

@@ -0,0 +1,375 @@
import datetime
import re
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.chain.system import SystemChain
from app.core.config import settings
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from app.schemas import NotificationType
from app.utils.http import RequestUtils
from app.utils.system import SystemUtils
class MoviePilotUpdateNotify(_PluginBase):
# 插件名称
plugin_name = "MoviePilot更新推送"
# 插件描述
plugin_desc = "MoviePilot推送release更新通知、自动重启。"
# 插件图标
plugin_icon = "Moviepilot_A.png"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "moviepilotupdatenotify_"
# 加载顺序
plugin_order = 25
# 可使用的用户级别
auth_level = 1
# 私有属性
_enabled = False
# 任务执行间隔
_cron = None
_restart = False
_notify = False
_update_types = []
# 定时器
_scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
    """Cancel any running task, then load the plugin configuration."""
    # stop the previous scheduler/jobs before (re-)configuring
    self.stop_service()
    if not config:
        return
    self._enabled = config.get("enabled")
    self._cron = config.get("cron")
    self._restart = config.get("restart")
    self._notify = config.get("notify")
    self._update_types = config.get("update_types") or []
def __check_update(self):
    """
    Check GitHub for new MoviePilot releases and optionally auto-restart.
    """
    wanted = self._update_types or []
    # check backend releases when selected
    server_update = self.__check_server_update() if "后端" in wanted else False
    # check frontend releases when selected
    front_update = self.__check_front_update() if "前端" in wanted else False
    # restart once when any update was found and auto-restart is on
    if self._restart and (server_update or front_update):
        logger.info("开始执行自动重启…")
        SystemUtils.restart()
def __check_server_update(self):
    """
    Check whether a newer backend release exists; notify when it does.

    :return: True when an update was found

    Fix: versions are compared by their numeric components. The previous
    plain string comparison ordered e.g. "v2.1.9" AFTER "v2.1.10" and
    silently skipped valid updates.
    """
    def _ver(v: str) -> list:
        # "v2.1.10" -> [2, 1, 10]
        return [int(x) for x in re.findall(r"\d+", v)]

    release_version, description, update_time = self.__get_backend_latest()
    if not release_version:
        logger.error("后端最新版本获取失败")
        return False
    # locally installed backend version
    local_version = SystemChain().get_server_local_version()
    if local_version and _ver(release_version) <= _ver(local_version):
        logger.info(f"当前后端版本:{local_version} 远程版本:{release_version} 停止运行")
        return False
    logger.info(f"发现MoviePilot后端更新:{release_version} {description} {update_time}")
    # push the update notification
    self.__notify_update(update_time=update_time,
                         release_version=release_version,
                         description=description,
                         mtype="后端")
    return True
def __check_front_update(self):
    """
    Check whether a newer frontend release exists; notify when it does.

    :return: True when an update was found

    Fix: versions are compared by their numeric components instead of by
    plain string comparison (which ordered "v2.1.9" after "v2.1.10").
    """
    def _ver(v: str) -> list:
        # "v2.1.10" -> [2, 1, 10]
        return [int(x) for x in re.findall(r"\d+", v)]

    release_version, description, update_time = self.__get_front_latest()
    if not release_version:
        logger.error("前端最新版本获取失败")
        return False
    # locally installed frontend version
    local_version = SystemChain().get_frontend_version()
    if local_version and _ver(release_version) <= _ver(local_version):
        logger.info(f"当前前端版本:{local_version} 远程版本:{release_version} 停止运行")
        return False
    logger.info(f"发现MoviePilot前端更新:{release_version} {description} {update_time}")
    # push the update notification
    self.__notify_update(update_time=update_time,
                         release_version=release_version,
                         description=description,
                         mtype="前端")
    return True
def __notify_update(self, update_time, release_version, description, mtype):
    """
    Push the release notification (no-op unless notifications are enabled).

    :param update_time: GitHub ``published_at`` timestamp — UTC, ISO-8601 "Z"

    Fix: the timestamp is UTC; it is now converted with ``astimezone``
    to the configured timezone. The old ``dt.replace(tzinfo=local)`` kept
    the UTC wall-clock time and, with pytz zones, attached an incorrect
    LMT offset, so the displayed time was wrong.
    """
    if not self._notify:
        return
    # parse the UTC timestamp
    dt = datetime.datetime.strptime(update_time, "%Y-%m-%dT%H:%M:%SZ")
    # interpret as UTC, then convert to the configured timezone
    dt = dt.replace(tzinfo=pytz.utc).astimezone(pytz.timezone(settings.TZ))
    update_time = dt.strftime("%Y-%m-%d %H:%M:%S")
    # prefix the version when the release body does not already start with it
    if not description.startswith(release_version):
        description = f"{release_version}\n\n{description}"
    self.post_message(
        mtype=NotificationType.SiteMessage,
        title=f"【MoviePilot{mtype}更新通知】",
        text=f"{description}\n\n{update_time}")
@staticmethod
def __get_latest_version(repo_url: str) -> Optional[dict]:
    """
    Fetch the newest v2.x release object from a GitHub releases endpoint.

    :return: the release dict, or None when the request or parsing fails
    """
    response = RequestUtils(
        proxies=settings.PROXY,
        headers=settings.GITHUB_HEADERS
    ).get_res(repo_url)
    if not response:
        logger.error("无法获取版本信息,请检查网络连接或GitHub API请求。")
        return None
    v2_releases = [item for item in response.json() if re.match(r"^v2\.", item['tag_name'])]
    if not v2_releases:
        logger.warn("未获取到最新版本号!")
        return None
    # pick the release whose tag has the greatest numeric components
    latest_v2 = max(v2_releases, key=lambda s: list(map(int, re.findall(r'\d+', s['tag_name']))))
    logger.info(f"获取到最新版本:{latest_v2}")
    return latest_v2
def __get_backend_latest(self) -> Tuple[str, str, str]:
    """
    Return (tag, release notes, published_at) of the newest backend
    release, or (None, None, None) on failure.
    """
    result = self.__get_latest_version("https://api.github.com/repos/jxxghp/MoviePilot/releases")
    if not result:
        return None, None, None
    return result['tag_name'], result['body'], result['published_at']
def __get_front_latest(self):
    """
    Return (tag, release notes, published_at) of the newest frontend
    release, or (None, None, None) on failure.
    """
    result = self.__get_latest_version("https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases")
    if not result:
        return None, None, None
    return result['tag_name'], result['body'], result['published_at']
def get_state(self) -> bool:
    """Return whether the plugin is enabled."""
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """This plugin registers no remote commands."""
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """This plugin exposes no HTTP API endpoints."""
    pass
def get_service(self) -> List[Dict[str, Any]]:
    """
    Register the public scheduled service.

    Registered only when the plugin is enabled AND a cron expression is
    configured; otherwise no service is exposed.

    Service entry shape:
    [{
        "id": "服务ID",
        "name": "服务名称",
        "trigger": "触发器:cron/interval/date/CronTrigger.from_crontab()",
        "func": self.xxx,
        "kwargs": {}  # 定时器参数
    }]
    """
    if not (self._enabled and self._cron):
        return []
    return [
        {
            "id": "MoviePilotUpdateNotify",
            "name": "MoviePilot更新检查服务",
            "trigger": CronTrigger.from_crontab(self._cron),
            "func": self.__check_update,
            "kwargs": {}
        }
    ]
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin configuration page.

    :return: (form component tree, default values)

    Fix: both "更新类型" VSelect items used the misspelled key ``"vale"``
    instead of ``"value"``, so selecting an option produced no value.
    """
    return [
        {
            'component': 'VForm',
            'content': [
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'restart',
                                        'label': '自动重启',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'notify',
                                        'label': '发送通知',
                                    }
                                }
                            ]
                        }
                    ]
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '检查周期',
                                        'placeholder': '5位cron表达式'
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'multiple': True,
                                        'chips': True,
                                        'model': 'update_types',
                                        'label': '更新类型',
                                        'items': [
                                            {
                                                "title": "后端",
                                                # fixed: was misspelled "vale"
                                                "value": "后端"
                                            },
                                            {
                                                "title": "前端",
                                                # fixed: was misspelled "vale"
                                                "value": "前端"
                                            }
                                        ]
                                    }
                                }
                            ]
                        },
                    ]
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                        'text': '如要开启自动重启,请确认MOVIEPILOT_AUTO_UPDATE设置为true,重启即更新。'
                                    }
                                }
                            ]
                        },
                    ]
                }
            ]
        }
    ], {
        "enabled": False,
        "restart": False,
        "notify": False,
        "cron": "0 9 * * *",
        "update_types": ["后端", "前端"]
    }
def get_page(self) -> List[dict]:
    """This plugin has no detail page."""
    pass
def stop_service(self):
    """
    Shut down and discard the background scheduler, if one is running.
    """
    if not self._scheduler:
        return
    try:
        self._scheduler.remove_all_jobs()
        if self._scheduler.running:
            self._scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,357 @@
import random
import time
import shutil
import subprocess
import threading
from pathlib import Path
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import TransferInfo
from app.schemas.file import FileItem
from app.schemas.types import EventType, MediaType, NotificationType
from app.utils.system import SystemUtils
lock = threading.Lock()
class PlayletCategory(_PluginBase):
# 插件名称
plugin_name = "短剧自动分类"
# 插件描述
plugin_desc = "网络短剧自动分类到独立目录。"
# 插件图标
plugin_icon = "Amule_A.png"
# 插件版本
plugin_version = "2.1"
# 插件作者
plugin_author = "jxxghp,longqiuyu"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "playletcategory_"
# 加载顺序
plugin_order = 29
# 可使用的用户级别
auth_level = 1
_enabled = False
_notify = True
_delay: int = 0
_category_dir = ""
_episode_duration = 8
def init_plugin(self, config: dict = None):
    """Load the plugin configuration."""
    if not config:
        return
    self._enabled = config.get("enabled")
    self._delay = config.get("delay") or 0
    self._notify = config.get("notify")
    self._category_dir = config.get("category_dir")
    self._episode_duration = config.get("episode_duration")
def get_state(self) -> bool:
    """Enabled only when both the category dir and episode duration are set."""
    return bool(self._enabled and self._category_dir and self._episode_duration)
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """This plugin registers no remote commands."""
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """This plugin exposes no HTTP API endpoints."""
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送消息',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'category_dir',
'label': '分类目录路径',
'placeholder': '/media/短剧'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'episode_duration',
'label': '单集时长(分钟)',
'placeholder': '8'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'delay',
'label': '入库延迟时间(秒)',
'placeholder': '使用刮削尽量设置大一些'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '小于单集时长的剧集视频文件将会移动到分类目录入库延迟适用于网盘等需要延后处理的场景需要安装FFmpeg。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": True,
"delay": '',
"category_dir": '短剧',
"episode_duration": '8'
}
def get_page(self) -> List[dict]:
    """This plugin has no detail page."""
    pass
@eventmanager.register(EventType.TransferComplete)
def category_handler(self, event: Event):
    """
    On transfer completion, probe episode durations and move short-episode
    TV shows ("playlets") into the dedicated category directory.

    Fix: the duration probe now uses ``random.sample`` (distinct files);
    ``random.choices`` samples WITH replacement and could probe the same
    file two or three times, wasting ffprobe runs and weakening the check.
    """
    logger.debug(f"触发短剧分类!")
    if not event:
        logger.debug(f"短剧分类异常:{event}")
        return
    if not self.get_state():
        logger.debug(f"短剧分类插件配置不完整!")
        return
    try:
        event_data = event.event_data
        media_info: MediaInfo = event_data.get("mediainfo")
        transfer_info: TransferInfo = event_data.get("transferinfo")
        if not media_info or not transfer_info:
            return
        if not transfer_info.success:
            logger.debug(f"整理失败不做处理!")
            return
        if not transfer_info.target_diritem.path:
            logger.debug(f"文件路径不存在:{transfer_info.target_diritem.path}")
            return
        target_path = Path(transfer_info.target_diritem.path)
        if not target_path.exists():
            logger.debug(f"文件路径不存在:{target_path}")
            return
        # only TV shows can be playlets
        if media_info.type != MediaType.TV:
            logger.info(f"{target_path} 不是电视剧,跳过分类处理")
            return
        if int(self._delay) > 0:
            # wait, e.g. for cloud-drive scraping to finish
            time.sleep(int(self._delay))
        # serialize concurrent transfer events
        with lock:
            file_list = transfer_info.file_list_new or []
            # drop files that no longer exist
            file_list = [file for file in file_list if Path(file).exists()]
            if not file_list:
                logger.warn(f"{target_path} 无文件,跳过分类处理")
                return
            logger.info(f"开始处理 {target_path} 短剧分类,共有 {len(file_list)} 个文件")
            # probe up to 3 DISTINCT files (was random.choices, with replacement)
            if len(file_list) > 3:
                check_files = random.sample(file_list, k=3)
            else:
                check_files = file_list
            # any probed file longer than the threshold disqualifies the show
            need_category = True
            for file in check_files:
                duration = self.__get_duration(file)
                if duration > float(self._episode_duration):
                    logger.info(f"{file} 时长 {duration} 分钟,大于单集时长 {self._episode_duration} 分钟,不需要分类处理")
                    need_category = False
                    break
                else:
                    logger.info(f"{file} 时长:{duration} 分钟")
            if need_category:
                logger.info(f"{target_path} 需要分类处理,开始移动文件...")
                result = self.__move_files(target_path=target_path)
                if result:
                    logger.info(f"{target_path} 短剧分类处理完成")
                else:
                    logger.info(f"{target_path} 短剧分类移动失败!")
            else:
                logger.info(f"{target_path} 不是短剧,无需分类处理")
    except Exception as e:
        logger.info(f"短剧分类异常:{str(e)}")
@staticmethod
def __get_duration(video_path: str) -> float:
    """
    Return the duration of a video file in minutes (0 on failure).

    Fixes:
    - the exit code is now authoritative; the old check treated ANY stderr
      output as fatal, but ffprobe can emit warnings to stderr on success;
    - an empty/unparsable duration no longer raises ValueError.
    """
    cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of',
           'default=noprint_wrappers=1:nokey=1', str(video_path)]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = process.communicate()
    if process.returncode != 0:
        logger.error(f"FFmpeg处理出错: {error.decode('utf-8')}")
        return 0
    try:
        # duration in seconds -> minutes, one decimal
        return round(float(output) / 60, 1)
    except (TypeError, ValueError):
        logger.error(f"FFmpeg处理出错: {error.decode('utf-8')}")
        return 0
def __move_files(self, target_path: Path) -> bool:
    """
    Move an organised series (season directory for TV) into the category dir.

    :param target_path: season directory for TV shows
    :return: True on success

    Fix: a debug log referenced ``to_path`` BEFORE it was assigned,
    raising NameError (caught and reported as "移动文件失败") for every
    per-file move into an already-existing destination directory.
    """
    logger.debug(f"target_path: {target_path}")
    if not target_path.exists():
        logger.warning(f"目标路径 {target_path} 不存在,跳过处理。")
        return False
    if target_path.is_file():
        target_path = target_path.parent
    # root directory of the series (kept for the notification below)
    tv_path = target_path
    # destination inside the category directory
    new_path = Path(self._category_dir) / target_path.name
    logger.debug(f"{new_path}")
    if not new_path.exists():
        # destination absent: move the whole directory in one go
        try:
            shutil.move(target_path, new_path)
        except Exception as e:
            logger.error(f"移动文件失败:{e}")
            return False
    else:
        # destination exists: merge entry by entry
        for file in target_path.iterdir():
            logger.debug(f"{file}")
            if file.is_file():
                try:
                    relative_path = file.relative_to(target_path)
                    # fixed: previously logged to_path before it was assigned
                    logger.debug(f"relative_path:{relative_path}")
                    to_path = new_path / relative_path
                    logger.debug(f"to_path:{to_path}")
                    shutil.move(file, to_path)
                except Exception as e:
                    logger.error(f"移动文件失败:{e}")
                    return False
            else:
                # whole-season subdirectory: move it into the destination
                try:
                    shutil.move(file, new_path)
                except Exception as e:
                    logger.error(f"移动文件失败:{e}")
                    return False
    # remove the now-empty source directory (no media/temp files left)
    if not SystemUtils.list_files(target_path, extensions=settings.RMT_MEDIAEXT + settings.DOWNLOAD_TMPEXT):
        try:
            shutil.rmtree(target_path, ignore_errors=True)
        except Exception as e:
            logger.error(f"删除空目录失败:{e}")
    # notify
    if self._notify:
        self.post_message(
            mtype=NotificationType.Organize,
            title="【短剧自动分类】",
            text=f"已将 {tv_path.name} 分类到 {self._category_dir} 目录",
        )
    return True
def stop_service(self):
    """
    Plugin teardown hook — no background resources to release.
    """
    pass

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,775 @@
import datetime
import re
import traceback
from pathlib import Path
from threading import Lock
from typing import Optional, Any, List, Dict, Tuple
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app import schemas
from app.chain.download import DownloadChain
from app.chain.search import SearchChain
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.context import MediaInfo, TorrentInfo, Context
from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import ExistMediaInfo
from app.schemas.types import SystemConfigKey, MediaType
lock = Lock()
class RssSubscribe(_PluginBase):
# 插件名称
plugin_name = "自定义订阅"
# 插件描述
plugin_desc = "定时刷新RSS报文识别内容后添加订阅或直接下载。"
# 插件图标
plugin_icon = "rss.png"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "rsssubscribe_"
# 加载顺序
plugin_order = 19
# 可使用的用户级别
auth_level = 2
# 私有变量
_scheduler: Optional[BackgroundScheduler] = None
_cache_path: Optional[Path] = None
rsshelper = None
downloadchain = None
searchchain = None
subscribechain = None
# 配置属性
_enabled: bool = False
_cron: str = ""
_notify: bool = False
_onlyonce: bool = False
_address: str = ""
_include: str = ""
_exclude: str = ""
_proxy: bool = False
_filter: bool = False
_clear: bool = False
_clearflag: bool = False
_action: str = "subscribe"
_save_path: str = ""
_size_range: str = ""
def init_plugin(self, config: dict = None):
    """
    Initialise chains/helpers, load configuration and, when requested,
    schedule an immediate one-off run.
    """
    self.rsshelper = RssHelper()
    self.downloadchain = DownloadChain()
    self.searchchain = SearchChain()
    self.subscribechain = SubscribeChain()
    # cancel any previously scheduled one-off job
    self.stop_service()
    if not config:
        return
    # sanitise invalid values in place before reading them
    self.__validate_and_fix_config(config=config)
    self._enabled = config.get("enabled")
    self._cron = config.get("cron")
    self._notify = config.get("notify")
    self._onlyonce = config.get("onlyonce")
    self._address = config.get("address")
    self._include = config.get("include")
    self._exclude = config.get("exclude")
    self._proxy = config.get("proxy")
    self._filter = config.get("filter")
    self._clear = config.get("clear")
    self._action = config.get("action")
    self._save_path = config.get("save_path")
    self._size_range = config.get("size_range")
    if self._onlyonce:
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        logger.info(f"自定义订阅服务启动,立即运行一次")
        run_at = datetime.datetime.now(tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
        self._scheduler.add_job(func=self.check, trigger='date', run_date=run_at)
        if self._scheduler.get_jobs():
            self._scheduler.print_jobs()
            self._scheduler.start()
    if self._onlyonce or self._clear:
        # reset the one-shot switch
        self._onlyonce = False
        # remember the clear request for the next run, then reset the switch
        self._clearflag = self._clear
        self._clear = False
        # persist the reset switches
        self.__update_config()
def get_state(self) -> bool:
    """Return whether the plugin is enabled."""
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """
    定义远程控制命令
    :return: 命令关键字、事件、描述、附带数据

    This plugin registers no remote commands.
    """
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """
    Register the plugin HTTP API.

    Entry shape:
    [{
        "path": "/xx",
        "endpoint": self.xxx,
        "methods": ["GET", "POST"],
        "summary": "API说明"
    }]
    """
    return [{
        "path": "/delete_history",
        "endpoint": self.delete_history,
        "methods": ["GET"],
        "summary": "删除自定义订阅历史记录"
    }]
def get_service(self) -> List[Dict[str, Any]]:
    """
    Register the public RSS polling service.

    Uses the configured cron expression when present, otherwise falls
    back to a 30-minute interval; a disabled plugin registers nothing.
    """
    if not self._enabled:
        return []
    if self._cron:
        trigger = CronTrigger.from_crontab(self._cron)
        kwargs = {}
    else:
        trigger = "interval"
        kwargs = {"minutes": 30}
    return [{
        "id": "RssSubscribe",
        "name": "自定义订阅服务",
        "trigger": trigger,
        "func": self.check,
        "kwargs": kwargs
    }]
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送通知',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '5位cron表达式留空自动'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'action',
'label': '动作',
'items': [
{'title': '订阅', 'value': 'subscribe'},
{'title': '下载', 'value': 'download'}
]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'address',
'label': 'RSS地址',
'rows': 3,
'placeholder': '每行一个RSS地址'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'include',
'label': '包含',
'placeholder': '支持正则表达式'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'exclude',
'label': '排除',
'placeholder': '支持正则表达式'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'size_range',
'label': '种子大小(GB)',
'placeholder': '3 或 3-5'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'save_path',
'label': '保存目录',
'placeholder': '下载时有效,留空自动'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'proxy',
'label': '使用代理服务器',
}
}
]
}, {
'component': 'VCol',
'props': {
'cols': 12,
'md': 4,
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'filter',
'label': '使用订阅优先级规则',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'clear',
'label': '清理历史记录',
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": True,
"onlyonce": False,
"cron": "*/30 * * * *",
"address": "",
"include": "",
"exclude": "",
"proxy": False,
"clear": False,
"filter": False,
"action": "subscribe",
"save_path": "",
"size_range": ""
}
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
# 查询同步详情
historys = self.get_data('history')
if not historys:
return [
{
'component': 'div',
'text': '暂无数据',
'props': {
'class': 'text-center',
}
}
]
# 数据按时间降序排序
historys = sorted(historys, key=lambda x: x.get('time'), reverse=True)
# 拼装页面
contents = []
for history in historys:
title = history.get("title")
poster = history.get("poster")
mtype = history.get("type")
time_str = history.get("time")
contents.append(
{
'component': 'VCard',
'content': [
{
"component": "VDialogCloseBtn",
"props": {
'innerClass': 'absolute top-0 right-0',
},
'events': {
'click': {
'api': 'plugin/RssSubscribe/delete_history',
'method': 'get',
'params': {
'key': title,
'apikey': settings.API_TOKEN
}
}
},
},
{
'component': 'div',
'props': {
'class': 'd-flex justify-space-start flex-nowrap flex-row',
},
'content': [
{
'component': 'div',
'content': [
{
'component': 'VImg',
'props': {
'src': poster,
'height': 120,
'width': 80,
'aspect-ratio': '2/3',
'class': 'object-cover shadow ring-gray-500',
'cover': True
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'VCardTitle',
'props': {
'class': 'pa-1 pe-5 break-words whitespace-break-spaces'
},
'text': title
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'类型:{mtype}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'时间:{time_str}'
}
]
}
]
}
]
}
)
return [
{
'component': 'div',
'props': {
'class': 'grid gap-3 grid-info-card',
},
'content': contents
}
]
def stop_service(self):
    """
    Shut down and discard the background scheduler, if one is running.
    """
    if not self._scheduler:
        return
    try:
        self._scheduler.remove_all_jobs()
        if self._scheduler.running:
            self._scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))
def delete_history(self, key: str, apikey: str):
    """
    API endpoint: delete one history entry, identified by its title.

    :param key: title of the entry to remove
    :param apikey: must match the configured API token
    """
    if apikey != settings.API_TOKEN:
        return schemas.Response(success=False, message="API密钥错误")
    historys = self.get_data('history')
    if not historys:
        return schemas.Response(success=False, message="未找到历史记录")
    # keep everything except the targeted title
    remaining = [item for item in historys if item.get("title") != key]
    self.save_data('history', remaining)
    return schemas.Response(success=True, message="删除成功")
def __update_config(self):
    """Persist the current in-memory settings."""
    config = {
        "enabled": self._enabled,
        "notify": self._notify,
        "onlyonce": self._onlyonce,
        "cron": self._cron,
        "address": self._address,
        "include": self._include,
        "exclude": self._exclude,
        "proxy": self._proxy,
        "clear": self._clear,
        "filter": self._filter,
        "action": self._action,
        "save_path": self._save_path,
        "size_range": self._size_range,
    }
    self.update_config(config)
def check(self):
    """
    Poll every configured RSS feed, recognise each entry and either
    subscribe or download it, recording processed items in history.

    Fix: a feed returning no data now moves on to the NEXT address.
    Previously the method ``return``-ed, which aborted all remaining
    feeds and skipped saving the history collected so far.
    """
    if not self._address:
        return
    # history (cleared once when the "clear" switch was set)
    if self._clearflag:
        history = []
    else:
        history: List[dict] = self.get_data('history') or []
    for url in self._address.split("\n"):
        # one RSS address per line
        if not url:
            continue
        logger.info(f"开始刷新RSS:{url} ...")
        results = self.rsshelper.parse(url, proxy=self._proxy)
        if not results:
            logger.error(f"未获取到RSS数据:{url}")
            # fixed: was `return`, aborting remaining feeds and history save
            continue
        # subscription priority rule groups
        filter_groups = self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
        # process entries
        for result in results:
            try:
                title = result.get("title")
                description = result.get("description")
                enclosure = result.get("enclosure")
                link = result.get("link")
                size = result.get("size")
                pubdate: datetime.datetime = result.get("pubdate")
                # skip entries already handled
                if not title or title in [h.get("key") for h in history]:
                    continue
                # include/exclude regex filters
                if self._include and not re.search(r"%s" % self._include,
                                                   f"{title} {description}", re.IGNORECASE):
                    logger.info(f"{title} - {description} 不符合包含规则")
                    continue
                if self._exclude and re.search(r"%s" % self._exclude,
                                               f"{title} {description}", re.IGNORECASE):
                    logger.info(f"{title} - {description} 不符合排除规则")
                    continue
                # size filter: single number = minimum, range = bounds (GB)
                if self._size_range:
                    sizes = [float(_size) * 1024 ** 3 for _size in self._size_range.split("-")]
                    if len(sizes) == 1 and float(size) < sizes[0]:
                        logger.info(f"{title} - 种子大小不符合条件")
                        continue
                    elif len(sizes) > 1 and not sizes[0] <= float(size) <= sizes[1]:
                        logger.info(f"{title} - 种子大小不在指定范围")
                        continue
                # recognise media information
                meta = MetaInfo(title=title, subtitle=description)
                if not meta.name:
                    logger.warn(f"{title} 未识别到有效数据")
                    continue
                mediainfo: MediaInfo = self.chain.recognize_media(meta=meta)
                if not mediainfo:
                    logger.warn(f'未识别到媒体信息,标题:{title}')
                    continue
                # build the torrent descriptor
                torrentinfo = TorrentInfo(
                    title=title,
                    description=description,
                    enclosure=enclosure,
                    page_url=link,
                    size=size,
                    pubdate=pubdate.strftime("%Y-%m-%d %H:%M:%S") if pubdate else None,
                    site_proxy=self._proxy,
                )
                # apply priority rule groups when requested
                if self._filter:
                    result = self.chain.filter_torrents(
                        rule_groups=filter_groups,
                        torrent_list=[torrentinfo],
                        mediainfo=mediainfo
                    )
                    if not result:
                        logger.info(f"{title} {description} 不匹配过滤规则")
                        continue
                # skip what the media library already has
                exist_info: Optional[ExistMediaInfo] = self.chain.media_exists(mediainfo=mediainfo)
                if mediainfo.type == MediaType.TV:
                    if exist_info:
                        exist_season = exist_info.seasons
                        if exist_season:
                            exist_episodes = exist_season.get(meta.begin_season)
                            if exist_episodes and set(meta.episode_list).issubset(set(exist_episodes)):
                                logger.info(f'{mediainfo.title_year} {meta.season_episode} 己存在')
                                continue
                elif exist_info:
                    # movie already present
                    logger.info(f'{mediainfo.title_year} 己存在')
                    continue
                # download directly or add a subscription
                if self._action == "download":
                    result = self.downloadchain.download_single(
                        context=Context(
                            meta_info=meta,
                            media_info=mediainfo,
                            torrent_info=torrentinfo,
                        ),
                        save_path=self._save_path,
                        username="RSS订阅"
                    )
                    if not result:
                        logger.error(f'{title} 下载失败')
                        continue
                else:
                    # already subscribed?
                    subflag = self.subscribechain.exists(mediainfo=mediainfo, meta=meta)
                    if subflag:
                        logger.info(f'{mediainfo.title_year} {meta.season} 正在订阅中')
                        continue
                    self.subscribechain.add(title=mediainfo.title,
                                            year=mediainfo.year,
                                            mtype=mediainfo.type,
                                            tmdbid=mediainfo.tmdb_id,
                                            season=meta.begin_season,
                                            exist_ok=True,
                                            username="RSS订阅")
                # record the processed entry
                history.append({
                    "title": f"{mediainfo.title} {meta.season}",
                    "key": f"{title}",
                    "type": mediainfo.type.value,
                    "year": mediainfo.year,
                    "poster": mediainfo.get_poster_image(),
                    "overview": mediainfo.overview,
                    "tmdbid": mediainfo.tmdb_id,
                    "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                })
            except Exception as err:
                logger.error(f'刷新RSS数据出错:{str(err)} - {traceback.format_exc()}')
        logger.info(f"RSS {url} 刷新完成")
    # persist history
    self.save_data('history', history)
    # the cache is only cleared once
    self._clearflag = False
def __log_and_notify_error(self, message):
    """
    Log an error and surface it to the user as a system message.
    """
    logger.error(message)
    self.systemmessage.put(message, title="自定义订阅")
def __validate_and_fix_config(self, config: dict = None) -> bool:
    """
    Validate configuration values, resetting invalid entries in place.

    :return: False when a value had to be reset, True otherwise
    """
    size_range = config.get("size_range")
    if not size_range or self.__is_number_or_range(str(size_range)):
        return True
    # invalid size expression: report it and clear the setting
    self.__log_and_notify_error(f"自定义订阅出错,种子大小设置错误:{size_range}")
    config["size_range"] = None
    return False
@staticmethod
def __is_number_or_range(value):
    """
    Return True when *value* is a single number or a number range,
    e.g. '5', '5.5', '5-10' or '5.5-10.2'.
    """
    pattern = r"^\d+(\.\d+)?(-\d+(\.\d+)?)?$"
    return re.match(pattern, value) is not None

View File

@@ -0,0 +1,985 @@
import warnings
from datetime import datetime, timedelta
from threading import Lock
from typing import Optional, Any, List, Dict, Tuple
from app import schemas
from app.chain.site import SiteChain
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.db.models.siteuserdata import SiteUserData
from app.db.site_oper import SiteOper
from app.helper.sites import SitesHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.string import StringUtils
warnings.filterwarnings("ignore", category=FutureWarning)
lock = Lock()
class SiteStatistic(_PluginBase):
# 插件名称
plugin_name = "站点数据统计"
# 插件描述
plugin_desc = "站点统计数据图表。"
# 插件图标
plugin_icon = "statistic.png"
# 插件版本
plugin_version = "1.4.1"
# 插件作者
plugin_author = "lightolly,jxxghp"
# 作者主页
author_url = "https://github.com/lightolly"
# 插件配置项ID前缀
plugin_config_prefix = "sitestatistic_"
# 加载顺序
plugin_order = 1
# 可使用的用户级别
auth_level = 2
# 配置属性
siteoper = None
siteshelper = None
sitechain = None
_enabled: bool = False
_onlyonce: bool = False
_dashboard_type: str = "today"
_notify_type = ""
def init_plugin(self, config: dict = None):
    """Initialise helpers, load configuration and optionally refresh once."""
    self.siteoper = SiteOper()
    self.siteshelper = SitesHelper()
    self.sitechain = SiteChain()
    # cancel any previous task
    self.stop_service()
    if not config:
        return
    self._enabled = config.get("enabled")
    self._onlyonce = config.get("onlyonce")
    self._dashboard_type = config.get("dashboard_type") or "today"
    self._notify_type = config.get("notify_type") or ""
    if self._onlyonce:
        # reset the one-shot switch before persisting
        config["onlyonce"] = False
        self.sitechain.refresh_userdatas()
        self.update_config(config=config)
def get_state(self) -> bool:
    """Return whether the plugin is enabled."""
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """This plugin registers no remote commands."""
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """
    Register the plugin HTTP API.

    Entry shape:
    [{
        "path": "/xx",
        "endpoint": self.xxx,
        "methods": ["GET", "POST"],
        "summary": "API说明"
    }]
    """
    return [{
        "path": "/refresh_by_domain",
        "endpoint": self.refresh_by_domain,
        "methods": ["GET"],
        "summary": "刷新站点数据",
        "description": "刷新对应域名的站点数据",
    }]
def get_service(self) -> List[Dict[str, Any]]:
    """This plugin registers no scheduled services."""
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin configuration page.

    :return: (form component tree, default values)

    Fix: the defaults previously omitted ``notify_type`` even though the
    form binds a ``notify_type`` model, leaving the selector undefined.
    """
    return [
        {
            'component': 'VForm',
            'content': [
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'onlyonce',
                                        'label': '立即运行一次',
                                    }
                                }
                            ]
                        }
                    ]
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'dashboard_type',
                                        'label': '仪表板组件',
                                        'items': [
                                            {'title': '今日数据', 'value': 'today'},
                                            {'title': '汇总数据', 'value': 'total'},
                                            {'title': '所有数据', 'value': 'all'}
                                        ]
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 6
                            },
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'notify_type',
                                        'label': '数据刷新时发送通知',
                                        'items': [
                                            {'title': '不发送', 'value': ''},
                                            {'title': '今日增量数据', 'value': 'inc'},
                                            {'title': '累计全量数据', 'value': 'all'}
                                        ]
                                    }
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ], {
        "enabled": False,
        "onlyonce": False,
        "dashboard_type": 'today',
        # previously missing: default for the notification selector
        "notify_type": ''
    }
@eventmanager.register(EventType.SiteRefreshed)
def send_msg(self, event: Event):
"""
站点数据刷新事件时发送消息
"""
if not self._notify_type:
return
if event.event_data.get('site_id') != "*":
return
# 获取站点数据
today, today_data, yesterday_data = self.__get_data()
# 转换为字典
today_data_dict = {data.name: data for data in today_data}
yesterday_data_dict = {data.name: data for data in yesterday_data}
# 消息内容
messages = {}
# 总上传
incUploads = 0
# 总下载
incDownloads = 0
# 今天的日期
today_date = datetime.now().strftime("%Y-%m-%d")
for rand, site in enumerate(today_data_dict.keys()):
upload = int(today_data_dict[site].upload or 0)
download = int(today_data_dict[site].download or 0)
updated_date = today_data_dict[site].updated_day
if self._notify_type == "inc" and yesterday_data_dict.get(site):
upload -= int(yesterday_data[site].get("upload") or 0)
download -= int(yesterday_data[site].get("download") or 0)
if updated_date and updated_date != today_date:
updated_date = f"{updated_date}"
else:
updated_date = ""
if upload > 0 or download > 0:
incUploads += upload
incDownloads += download
messages[upload + (rand / 1000)] = (
f"{site}{updated_date}\n"
+ f"上传量:{StringUtils.str_filesize(upload)}\n"
+ f"下载量:{StringUtils.str_filesize(download)}\n"
+ "————————————"
)
if incDownloads or incUploads:
sorted_messages = [messages[key] for key in sorted(messages.keys(), reverse=True)]
sorted_messages.insert(0, f"【汇总】\n"
f"总上传:{StringUtils.str_filesize(incUploads)}\n"
f"总下载:{StringUtils.str_filesize(incDownloads)}\n"
f"————————————")
self.post_message(mtype=NotificationType.SiteMessage,
title="站点数据统计", text="\n".join(sorted_messages))
def __get_data(self) -> Tuple[str, List[SiteUserData], List[SiteUserData]]:
"""
获取今天的日期、今天的站点数据、昨天的站点数据
"""
# 获取最近所有数据
data_list: List[SiteUserData] = self.siteoper.get_userdata()
if not data_list:
return "", [], []
# 每个日期、每个站点只保留最后一条数据
data_list = list({f"{data.updated_day}_{data.name}": data for data in data_list}.values())
# 按日期倒序排序
data_list.sort(key=lambda x: x.updated_day, reverse=True)
# 获取今天的日期
today = data_list[0].updated_day
# 获取昨天的日期
yestoday = (datetime.strptime(today, "%Y-%m-%d") - timedelta(days=1)).strftime("%Y-%m-%d")
# 今天的数据
stattistic_data = [data for data in data_list if data.updated_day == today]
# 今日数据按数据量降序排序
stattistic_data.sort(key=lambda x: x.upload, reverse=True)
# 昨天的数据
yesterday_sites_data = [data for data in data_list if data.updated_day == yestoday]
return today, stattistic_data, yesterday_sites_data
@staticmethod
def __get_total_elements(today: str, stattistic_data: List[SiteUserData], yesterday_sites_data: List[SiteUserData],
dashboard: str = "today") -> List[dict]:
"""
获取统计元素
"""
def __gb(value: int) -> float:
"""
转换为GB保留1位小数
"""
if not value:
return 0
return round(float(value) / 1024 / 1024 / 1024, 1)
def __is_digit(value: any) -> bool:
"""
判断是否为数字
"""
if value is None:
return False
if isinstance(value, float) or isinstance(value, int):
return True
if isinstance(value, str):
return value.isdigit()
return False
def __to_numeric(value: any) -> int:
"""
将值转换为整数
"""
if isinstance(value, str):
return int(float(value))
elif isinstance(value, float) or isinstance(value, int):
return int(value)
else:
logger.error(f'数据类型转换错误 ({value})')
return 0
def __sub_data(d1: dict, d2: dict) -> dict:
"""
计算两个字典相同Key值的差值如果值为数字返回新字典
"""
if not d1:
return {}
if not d2:
return d1
d = {k: __to_numeric(d1.get(k)) - __to_numeric(d2.get(k)) for k in d1
if k in d2 and __is_digit(d1.get(k)) and __is_digit(d2.get(k))}
# 把小于0的数据变成0
for k, v in d.items():
if str(v).isdigit() and int(v) < 0:
d[k] = 0
return d
if dashboard in ['total', 'all']:
# 总上传量
total_upload = sum([data.upload for data in stattistic_data if data.upload])
# 总下载量
total_download = sum([data.download for data in stattistic_data if data.download])
# 总做种数
total_seed = sum([data.seeding for data in stattistic_data if data.seeding])
# 总做种体积
total_seed_size = sum([data.seeding_size for data in stattistic_data if data.seeding_size])
total_elements = [
# 总上传量
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3
},
'content': [
{
'component': 'VCard',
'props': {
'variant': 'tonal',
},
'content': [
{
'component': 'VCardText',
'props': {
'class': 'd-flex align-center',
},
'content': [
{
'component': 'VAvatar',
'props': {
'rounded': True,
'variant': 'text',
'class': 'me-3'
},
'content': [
{
'component': 'VImg',
'props': {
'src': '/plugin_icon/upload.png'
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'span',
'props': {
'class': 'text-caption'
},
'text': '总上传量'
},
{
'component': 'div',
'props': {
'class': 'd-flex align-center flex-wrap'
},
'content': [
{
'component': 'span',
'props': {
'class': 'text-h6'
},
'text': StringUtils.str_filesize(total_upload)
}
]
}
]
}
]
}
]
},
]
},
# 总下载量
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3,
},
'content': [
{
'component': 'VCard',
'props': {
'variant': 'tonal',
},
'content': [
{
'component': 'VCardText',
'props': {
'class': 'd-flex align-center',
},
'content': [
{
'component': 'VAvatar',
'props': {
'rounded': True,
'variant': 'text',
'class': 'me-3'
},
'content': [
{
'component': 'VImg',
'props': {
'src': '/plugin_icon/download.png'
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'span',
'props': {
'class': 'text-caption'
},
'text': '总下载量'
},
{
'component': 'div',
'props': {
'class': 'd-flex align-center flex-wrap'
},
'content': [
{
'component': 'span',
'props': {
'class': 'text-h6'
},
'text': StringUtils.str_filesize(total_download)
}
]
}
]
}
]
}
]
},
]
},
# 总做种数
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3
},
'content': [
{
'component': 'VCard',
'props': {
'variant': 'tonal',
},
'content': [
{
'component': 'VCardText',
'props': {
'class': 'd-flex align-center',
},
'content': [
{
'component': 'VAvatar',
'props': {
'rounded': True,
'variant': 'text',
'class': 'me-3'
},
'content': [
{
'component': 'VImg',
'props': {
'src': '/plugin_icon/seed.png'
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'span',
'props': {
'class': 'text-caption'
},
'text': '总做种数'
},
{
'component': 'div',
'props': {
'class': 'd-flex align-center flex-wrap'
},
'content': [
{
'component': 'span',
'props': {
'class': 'text-h6'
},
'text': f'{"{:,}".format(total_seed)}'
}
]
}
]
}
]
}
]
},
]
},
# 总做种体积
{
'component': 'VCol',
'props': {
'cols': 6,
'md': 3
},
'content': [
{
'component': 'VCard',
'props': {
'variant': 'tonal',
},
'content': [
{
'component': 'VCardText',
'props': {
'class': 'd-flex align-center',
},
'content': [
{
'component': 'VAvatar',
'props': {
'rounded': True,
'variant': 'text',
'class': 'me-3'
},
'content': [
{
'component': 'VImg',
'props': {
'src': '/plugin_icon/database.png'
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'span',
'props': {
'class': 'text-caption'
},
'text': '总做种体积'
},
{
'component': 'div',
'props': {
'class': 'd-flex align-center flex-wrap'
},
'content': [
{
'component': 'span',
'props': {
'class': 'text-h6'
},
'text': StringUtils.str_filesize(total_seed_size)
}
]
}
]
}
]
}
]
}
]
}
]
else:
total_elements = []
if dashboard in ["today", "all"]:
# 计算增量数据集
inc_data = {}
for data in stattistic_data:
yesterday_datas = [yd for yd in yesterday_sites_data if yd.domain == data.domain]
if yesterday_datas:
yesterday_data = yesterday_datas[0]
else:
yesterday_data = None
inc = __sub_data(data.to_dict(), yesterday_data.to_dict() if yesterday_data else None)
if inc:
inc_data[data.name] = inc
# 今日上传
uploads = {k: v for k, v in inc_data.items() if v.get("upload") if v.get("upload") > 0}
# 今日上传站点
upload_sites = [site for site in uploads.keys()]
# 今日上传数据
upload_datas = [__gb(data.get("upload")) for data in uploads.values()]
# 今日上传总量
today_upload = round(sum(upload_datas), 2)
# 今日下载
downloads = {k: v for k, v in inc_data.items() if v.get("download") if v.get("download") > 0}
# 今日下载站点
download_sites = [site for site in downloads.keys()]
# 今日下载数据
download_datas = [__gb(data.get("download")) for data in downloads.values()]
# 今日下载总量
today_download = round(sum(download_datas), 2)
# 今日上传下载元素
today_elements = [
# 上传量图表
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VApexChart',
'props': {
'height': 300,
'options': {
'chart': {
'type': 'pie',
},
'labels': upload_sites,
'title': {
'text': f'今日上传({today})共 {today_upload} GB'
},
'legend': {
'show': True
},
'plotOptions': {
'pie': {
'expandOnClick': False
}
},
'noData': {
'text': '暂无数据'
}
},
'series': upload_datas
}
}
]
},
# 下载量图表
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VApexChart',
'props': {
'height': 300,
'options': {
'chart': {
'type': 'pie',
},
'labels': download_sites,
'title': {
'text': f'今日下载({today})共 {today_download} GB'
},
'legend': {
'show': True
},
'plotOptions': {
'pie': {
'expandOnClick': False
}
},
'noData': {
'text': '暂无数据'
}
},
'series': download_datas
}
}
]
}
]
else:
today_elements = []
# 合并返回
return total_elements + today_elements
    def get_dashboard(self, key: str, **kwargs) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]:
        """
        Build the plugin dashboard. Returns three items:
        1. column configuration dict, e.g.
        {
            "cols": 12, "md": 6
        }
        2. page element configuration (Vuetify components, see
           https://vuetifyjs.com/), data included
        3. global configuration, e.g.
        {
            "refresh": 10  # auto-refresh interval in seconds
        }
        """
        # Column layout: full width
        cols = {
            "cols": 12
        }
        # Global config: no auto refresh
        attrs = {}
        # Fetch latest/previous day data
        today, stattistic_data, yesterday_sites_data = self.__get_data()
        # Render the statistic elements for the configured dashboard type
        elements = [
            {
                'component': 'VRow',
                'content': self.__get_total_elements(
                    today=today,
                    stattistic_data=stattistic_data,
                    yesterday_sites_data=yesterday_sites_data,
                    dashboard=self._dashboard_type
                )
            }
        ]
        return cols, attrs, elements
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
def format_bonus(bonus):
try:
return f'{float(bonus):,.1f}'
except ValueError:
return '0.0'
# 获取数据
today, stattistic_data, yesterday_sites_data = self.__get_data()
if not stattistic_data:
return [
{
'component': 'div',
'text': '暂无数据',
'props': {
'class': 'text-center',
}
}
]
# 站点统计
site_totals = self.__get_total_elements(
today=today,
stattistic_data=stattistic_data,
yesterday_sites_data=yesterday_sites_data,
dashboard='all'
)
# 站点数据明细
site_trs = [
{
'component': 'tr',
'props': {
'class': 'text-sm'
},
'content': [
{
'component': 'td',
'props': {
'class': 'whitespace-nowrap break-keep text-high-emphasis'
},
'text': data.name
},
{
'component': 'td',
'text': data.username
},
{
'component': 'td',
'text': data.user_level
},
{
'component': 'td',
'props': {
'class': 'text-success'
},
'text': StringUtils.str_filesize(data.upload)
},
{
'component': 'td',
'props': {
'class': 'text-error'
},
'text': StringUtils.str_filesize(data.download)
},
{
'component': 'td',
'text': data.ratio
},
{
'component': 'td',
'text': format_bonus(data.bonus or 0)
},
{
'component': 'td',
'text': data.seeding
},
{
'component': 'td',
'text': StringUtils.str_filesize(data.seeding_size)
}
]
} for data in stattistic_data
]
# 拼装页面
return [
{
'component': 'VRow',
'content': site_totals + [
# 各站点数据明细
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTable',
'props': {
'hover': True
},
'content': [
{
'component': 'thead',
'content': [
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '站点'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '用户名'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '用户等级'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '上传量'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '下载量'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '分享率'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '魔力值'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '做种数'
},
{
'component': 'th',
'props': {
'class': 'text-start ps-4'
},
'text': '做种体积'
}
]
},
{
'component': 'tbody',
'content': site_trs
}
]
}
]
}
]
}
]
    def stop_service(self):
        """No background resources to release for this plugin."""
        pass
    def refresh_by_domain(self, domain: str, apikey: str) -> schemas.Response:
        """
        Refresh a single site's user data; exposed through the plugin API.

        :param domain: site domain to refresh
        :param apikey: must match the global API token
        :return: Response carrying the refreshed data on success
        """
        if apikey != settings.API_TOKEN:
            return schemas.Response(success=False, message="API密钥错误")
        site_info = self.siteshelper.get_indexer(domain)
        if site_info:
            # NOTE(review): a fresh SiteChain() is constructed here rather than
            # reusing self.sitechain set in init_plugin — presumably equivalent,
            # but confirm whether the shared instance should be used.
            site_data = SiteChain().refresh_userdata(site=site_info)
            if site_data:
                return schemas.Response(
                    success=True,
                    message=f"站点 {domain} 刷新成功",
                    data=site_data.dict()
                )
            return schemas.Response(
                success=False,
                message=f"站点 {domain} 刷新数据失败,未获取到数据"
            )
        return schemas.Response(
            success=False,
            message=f"站点 {domain} 不存在"
        )

View File

@@ -0,0 +1,680 @@
import ipaddress
from typing import List, Tuple, Dict, Any, Optional
from app.core.event import eventmanager, Event
from app.helper.downloader import DownloaderHelper
from app.helper.mediaserver import MediaServerHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import NotificationType, WebhookEventInfo, ServiceInfo
from app.schemas.types import EventType
from app.utils.ip import IpUtils
class SpeedLimiter(_PluginBase):
# 插件名称
plugin_name = "播放限速"
# 插件描述
plugin_desc = "外网播放媒体库视频时,自动对下载器进行限速。"
# 插件图标
plugin_icon = "Librespeed_A.png"
# 插件版本
plugin_version = "2.1"
# 插件作者
plugin_author = "Shurelol"
# 作者主页
author_url = "https://github.com/Shurelol"
# 插件配置项ID前缀
plugin_config_prefix = "speedlimit_"
# 加载顺序
plugin_order = 11
# 可使用的用户级别
auth_level = 1
# 私有属性
downloader_helper = None
mediaserver_helper = None
_scheduler = None
_enabled: bool = False
_notify: bool = False
_interval: int = 60
_downloader: list = []
_play_up_speed: float = 0
_play_down_speed: float = 0
_noplay_up_speed: float = 0
_noplay_down_speed: float = 0
_bandwidth: float = 0
_allocation_ratio: str = ""
_auto_limit: bool = False
_limit_enabled: bool = False
# 不限速地址
_unlimited_ips = {}
# 当前限速状态
_current_state = ""
_exclude_path = ""
def init_plugin(self, config: dict = None):
self.downloader_helper = DownloaderHelper()
self.mediaserver_helper = MediaServerHelper()
# 读取配置
if config:
self._enabled = config.get("enabled")
self._notify = config.get("notify")
self._play_up_speed = float(config.get("play_up_speed")) if config.get("play_up_speed") else 0
self._play_down_speed = float(config.get("play_down_speed")) if config.get("play_down_speed") else 0
self._noplay_up_speed = float(config.get("noplay_up_speed")) if config.get("noplay_up_speed") else 0
self._noplay_down_speed = float(config.get("noplay_down_speed")) if config.get("noplay_down_speed") else 0
self._current_state = f"U:{self._noplay_up_speed},D:{self._noplay_down_speed}"
self._exclude_path = config.get("exclude_path")
try:
# 总带宽
self._bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000
# 自动限速开关
if self._bandwidth > 0:
self._auto_limit = True
else:
self._auto_limit = False
except Exception as e:
logger.error(f"智能限速上行带宽设置错误:{str(e)}")
self._bandwidth = 0
# 限速服务开关
self._limit_enabled = True if (self._play_up_speed
or self._play_down_speed
or self._auto_limit) else False
self._allocation_ratio = config.get("allocation_ratio") or ""
# 不限速地址
self._unlimited_ips["ipv4"] = config.get("ipv4") or ""
self._unlimited_ips["ipv6"] = config.get("ipv6") or ""
self._downloader = config.get("downloader") or []
    def get_state(self) -> bool:
        """Return whether the plugin is currently enabled."""
        return self._enabled
    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """This plugin registers no remote commands."""
        pass
    def get_api(self) -> List[Dict[str, Any]]:
        """This plugin exposes no API endpoints."""
        pass
    def get_service(self) -> List[Dict[str, Any]]:
        """
        Register the plugin's public scheduled services.
        [{
            "id": "service id",
            "name": "service name",
            "trigger": "trigger: cron/interval/date/CronTrigger.from_crontab()",
            "func": self.xxx,
            "kwargs": {}  # trigger arguments
        }]
        """
        # Only schedule the periodic check when the plugin is enabled and at
        # least one limit (manual or smart) is configured
        if self._enabled and self._limit_enabled and self._interval:
            return [
                {
                    "id": "SpeedLimiter",
                    "name": "播放限速检查服务",
                    "trigger": "interval",
                    "func": self.check_playing_sessions,
                    "kwargs": {"seconds": self._interval}
                }
            ]
        return []
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送通知',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'multiple': True,
'chips': True,
'clearable': True,
'model': 'downloader',
'label': '下载器',
'items': [{"title": config.name, "value": config.name}
for config in self.downloader_helper.get_configs().values()]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'play_up_speed',
'label': '播放限速(上传)',
'placeholder': 'KB/s'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'play_down_speed',
'label': '播放限速(下载)',
'placeholder': 'KB/s'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'noplay_up_speed',
'label': '未播放限速(上传)',
'placeholder': 'KB/s'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'noplay_down_speed',
'label': '未播放限速(下载)',
'placeholder': 'KB/s'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'bandwidth',
'label': '智能限速上行带宽',
'placeholder': 'Mbps'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'allocation_ratio',
'label': '智能限速分配比例',
'items': [
{'title': '平均', 'value': ''},
{'title': '19', 'value': '1:9'},
{'title': '28', 'value': '2:8'},
{'title': '37', 'value': '3:7'},
{'title': '46', 'value': '4:6'},
{'title': '64', 'value': '6:4'},
{'title': '73', 'value': '7:3'},
{'title': '82', 'value': '8:2'},
{'title': '91', 'value': '9:1'},
]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'ipv4',
'label': '不限速地址范围ipv4',
'placeholder': '留空默认不限速内网ipv4'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'ipv6',
'label': '不限速地址范围ipv6',
'placeholder': '留空默认不限速内网ipv6'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'exclude_path',
'label': '不限速路径',
'placeholder': '包含该路径的媒体不限速,多个请换行'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": True,
"downloader": [],
"play_up_speed": None,
"play_down_speed": None,
"noplay_up_speed": None,
"noplay_down_speed": None,
"bandwidth": None,
"allocation_ratio": "",
"ipv4": "",
"ipv6": "",
"exclude_path": ""
}
    def get_page(self) -> List[dict]:
        """This plugin has no detail page."""
        pass
    @property
    def service_infos(self) -> Optional[Dict[str, ServiceInfo]]:
        """
        Resolve the configured downloader names into connected service
        instances.

        :return: mapping of downloader name -> ServiceInfo for every
                 configured and reachable downloader, or None if none is
                 configured/available (a warning is logged in each case).
        """
        if not self._downloader:
            logger.warning("尚未配置下载器,请检查配置")
            return None
        services = self.downloader_helper.get_services(name_filters=self._downloader)
        if not services:
            logger.warning("获取下载器实例失败,请检查配置")
            return None
        active_services = {}
        for service_name, service_info in services.items():
            # Drop downloaders that are configured but not currently connected
            if service_info.instance.is_inactive():
                logger.warning(f"下载器 {service_name} 未连接,请检查配置")
            else:
                active_services[service_name] = service_info
        if not active_services:
            logger.warning("没有已连接的下载器,请检查配置")
            return None
        return active_services
@eventmanager.register(EventType.WebhookMessage)
def check_playing_sessions(self, event: Event = None):
"""
检查播放会话
"""
if not self.service_infos:
return
if not self._enabled:
return
if event:
event_data: WebhookEventInfo = event.event_data
if event_data.event not in [
"playback.start",
"PlaybackStart",
"media.play",
"media.stop",
"PlaybackStop",
"playback.stop"
]:
return
# 当前播放的总比特率
total_bit_rate = 0
media_servers = self.mediaserver_helper.get_services()
if not media_servers:
return
# 查询所有媒体服务器状态
for server, service in media_servers.items():
# 查询播放中会话
playing_sessions = []
if service.type == "emby":
req_url = "[HOST]emby/Sessions?api_key=[APIKEY]"
try:
res = service.instance.get_data(req_url)
if res and res.status_code == 200:
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Emby播放会话失败{str(e)}")
continue
# 计算有效比特率
for session in playing_sessions:
# 设置了不限速范围则判断session ip是否在不限速范围内
if self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]:
if not self.__allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
# 未设置不限速范围则默认不限速内网ip
elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
elif service.type == "jellyfin":
req_url = "[HOST]Sessions?api_key=[APIKEY]"
try:
res = service.instance.get_data(req_url)
if res and res.status_code == 200:
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Jellyfin播放会话失败{str(e)}")
continue
# 计算有效比特率
for session in playing_sessions:
# 设置了不限速范围则判断session ip是否在不限速范围内
if self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]:
if not self.__allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
media_streams = session.get("NowPlayingItem", {}).get("MediaStreams") or []
for media_stream in media_streams:
total_bit_rate += int(media_stream.get("BitRate") or 0)
# 未设置不限速范围则默认不限速内网ip
elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
media_streams = session.get("NowPlayingItem", {}).get("MediaStreams") or []
for media_stream in media_streams:
total_bit_rate += int(media_stream.get("BitRate") or 0)
elif service.type == "plex":
_plex = service.instance.get_plex()
if _plex:
sessions = _plex.sessions()
for session in sessions:
bitrate = sum([m.bitrate or 0 for m in session.media])
playing_sessions.append({
"type": session.TAG,
"bitrate": bitrate,
"address": session.player.address
})
# 计算有效比特率
for session in playing_sessions:
# 设置了不限速范围则判断session ip是否在不限速范围内
if self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]:
if not self.__allow_access(self._unlimited_ips, session.get("address")) \
and session.get("type") == "Video":
total_bit_rate += int(session.get("bitrate") or 0)
# 未设置不限速范围则默认不限速内网ip
elif not IpUtils.is_private_ip(session.get("address")) \
and session.get("type") == "Video":
total_bit_rate += int(session.get("bitrate") or 0)
if total_bit_rate:
# 开启智能限速计算上传限速
if self._auto_limit:
play_up_speed = self.__calc_limit(total_bit_rate)
else:
play_up_speed = self._play_up_speed
# 当前正在播放,开始限速
self.__set_limiter(limit_type="播放", upload_limit=play_up_speed,
download_limit=self._play_down_speed)
else:
# 当前没有播放,取消限速
self.__set_limiter(limit_type="未播放", upload_limit=self._noplay_up_speed,
download_limit=self._noplay_down_speed)
def __path_execluded(self, path: str) -> bool:
"""
判断是否在不限速路径内
"""
if self._exclude_path:
exclude_paths = self._exclude_path.split("\n")
for exclude_path in exclude_paths:
if exclude_path in path:
logger.info(f"{path} 在不限速路径:{exclude_path} 内,跳过限速")
return True
return False
def __calc_limit(self, total_bit_rate: float) -> float:
"""
计算智能上传限速
"""
if not self._bandwidth:
return 10
return round((self._bandwidth - total_bit_rate) / 8 / 1024, 2)
def __set_limiter(self, limit_type: str, upload_limit: float, download_limit: float):
"""
设置限速
"""
if not self.service_infos:
return
state = f"U:{upload_limit},D:{download_limit}"
if self._current_state == state:
# 限速状态没有改变
return
else:
self._current_state = state
try:
cnt = 0
for download in self._downloader:
service = self.service_infos.get(download)
if self._auto_limit and limit_type == "播放":
# 开启了播放智能限速
if len(self._downloader) == 1:
# 只有一个下载器
upload_limit = int(upload_limit)
else:
# 多个下载器
if not self._allocation_ratio:
# 平均
upload_limit = int(upload_limit / len(self._downloader))
else:
# 按比例
allocation_count = sum([int(i) for i in self._allocation_ratio.split(":")])
upload_limit = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count)
cnt += 1
if upload_limit:
text = f"上传:{upload_limit} KB/s"
else:
text = f"上传:未限速"
if download_limit:
text = f"{text}\n下载:{download_limit} KB/s"
else:
text = f"{text}\n下载:未限速"
if service.type == 'qbittorrent':
service.instance.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Qbittorrent 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Qbittorrent 已取消限速"
)
else:
service.instance.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Transmission 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Transmission 已取消限速"
)
except Exception as e:
logger.error(f"设置限速失败:{str(e)}")
@staticmethod
def __allow_access(allow_ips: dict, ip: str) -> bool:
"""
判断IP是否合法
:param allow_ips: 充许的IP范围 {"ipv4":, "ipv6":}
:param ip: 需要检查的ip
"""
if not allow_ips:
return True
try:
ipaddr = ipaddress.ip_address(ip)
if ipaddr.version == 4:
if not allow_ips.get('ipv4'):
return True
allow_ipv4s = allow_ips.get('ipv4').split(",")
for allow_ipv4 in allow_ipv4s:
if ipaddr in ipaddress.ip_network(allow_ipv4, strict=False):
return True
elif ipaddr.ipv4_mapped:
if not allow_ips.get('ipv4'):
return True
allow_ipv4s = allow_ips.get('ipv4').split(",")
for allow_ipv4 in allow_ipv4s:
if ipaddr.ipv4_mapped in ipaddress.ip_network(allow_ipv4, strict=False):
return True
else:
if not allow_ips.get('ipv6'):
return True
allow_ipv6s = allow_ips.get('ipv6').split(",")
for allow_ipv6 in allow_ipv6s:
if ipaddr in ipaddress.ip_network(allow_ipv6, strict=False):
return True
except Exception as err:
print(str(err))
return False
return False
    def stop_service(self):
        """No background resources to release: the check service is managed by the host scheduler."""
        pass

View File

@@ -0,0 +1,300 @@
import json
from datetime import datetime, timedelta
from hashlib import md5
from urllib.parse import urlparse
import pytz
from app.core.config import settings
from app.db.site_oper import SiteOper
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.utils.crypto import CryptoJsUtils
class SyncCookieCloud(_PluginBase):
# 插件名称
plugin_name = "同步CookieCloud"
# 插件描述
plugin_desc = "同步MoviePilot站点Cookie到本地CookieCloud。"
# 插件图标
plugin_icon = "Cookiecloud_A.png"
# 插件版本
plugin_version = "2.1"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "synccookiecloud_"
# 加载顺序
plugin_order = 28
# 可使用的用户级别
auth_level = 1
# 私有属性
_enabled: bool = False
_onlyonce: bool = False
_cron: str = ""
siteoper = None
_scheduler: Optional[BackgroundScheduler] = None
    def init_plugin(self, config: dict = None):
        """
        Initialize the plugin: read the saved configuration and (re)create
        the background scheduler for one-off and periodic sync runs.

        :param config: saved configuration dict, or None on first run
        """
        self.siteoper = SiteOper()
        # Stop any previously running scheduler
        self.stop_service()
        if config:
            self._enabled = config.get("enabled")
            self._onlyonce = config.get("onlyonce")
            self._cron = config.get("cron")
            if self._enabled or self._onlyonce:
                # Scheduler for this plugin's jobs
                self._scheduler = BackgroundScheduler(timezone=settings.TZ)
                # Run once immediately when requested
                if self._onlyonce:
                    logger.info(f"同步CookieCloud服务启动,立即运行一次")
                    self._scheduler.add_job(self.__sync_to_cookiecloud, 'date',
                                            run_date=datetime.now(
                                                tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                            name="同步CookieCloud")
                    # Reset the one-shot switch
                    self._onlyonce = False
                    # Persist the updated configuration
                    self.__update_config()
                # Periodic run via cron expression
                if self._cron:
                    try:
                        self._scheduler.add_job(func=self.__sync_to_cookiecloud,
                                                trigger=CronTrigger.from_crontab(self._cron),
                                                name="同步CookieCloud")
                    except Exception as err:
                        logger.error(f"定时任务配置错误:{err}")
                        # Surface the configuration error to the UI
                        self.systemmessage.put(f"执行周期配置错误:{err}")
                # Start the scheduler only if at least one job was registered
                if self._scheduler.get_jobs():
                    self._scheduler.print_jobs()
                    self._scheduler.start()
def __sync_to_cookiecloud(self):
"""
同步站点cookie到cookiecloud
"""
# 获取所有站点
sites = self.siteoper.list_order_by_pri()
if not sites:
return
if not settings.COOKIECLOUD_ENABLE_LOCAL:
logger.error('本地CookieCloud服务器未启用')
return
cookies = {}
for site in sites:
domain = urlparse(site.url).netloc
cookie = site.cookie
if not cookie:
logger.error(f"站点 {domain} 无cookie跳过处理...")
continue
# 解析cookie
site_cookies = []
for ck in cookie.split(";"):
kv = ck.split("=")
if len(kv) < 2:
continue
site_cookies.append({
"domain": domain,
"name": ck.split("=")[0],
"value": ck.split("=")[1]
})
# 存储cookies
cookies[domain] = site_cookies
if cookies:
crypt_key = self._get_crypt_key()
try:
cookies = {'cookie_data': cookies}
encrypted_data = CryptoJsUtils.encrypt(json.dumps(cookies).encode('utf-8'), crypt_key).decode('utf-8')
except Exception as e:
logger.error(f"CookieCloud加密失败{e}")
return
ck = {'encrypted': encrypted_data}
cookie_path = settings.COOKIE_PATH / f"{settings.COOKIECLOUD_KEY}.json"
cookie_path.write_bytes(json.dumps(ck).encode('utf-8'))
logger.info(f"同步站点cookie到本地CookieCloud成功")
else:
logger.error(f"同步站点cookie到本地CookieCloud失败未获取到站点cookie")
def __decrypted(self, encrypt_data: dict):
"""
获取并解密本地CookieCloud数据
"""
encrypted = encrypt_data.get("encrypted")
if not encrypted:
return {}, "未获取到cookie密文"
else:
crypt_key = self._get_crypt_key()
try:
decrypted_data = CryptoJsUtils.decrypt(encrypted, crypt_key).decode('utf-8')
result = json.loads(decrypted_data)
except Exception as e:
return {}, "cookie解密失败" + str(e)
if not result:
return {}, "cookie解密为空"
if result.get("cookie_data"):
contents = result.get("cookie_data")
else:
contents = result
return contents
    @staticmethod
    def _get_crypt_key() -> bytes:
        """
        Derive the CookieCloud encryption/decryption key from the UUID and
        password: the first 16 hex chars of md5("<key>-<password>").
        """
        md5_generator = md5()
        md5_generator.update(
            (str(settings.COOKIECLOUD_KEY).strip() + '-' + str(settings.COOKIECLOUD_PASSWORD).strip()).encode('utf-8'))
        return (md5_generator.hexdigest()[:16]).encode('utf-8')
    def __update_config(self):
        """Persist the current plugin settings back to the config store."""
        self.update_config({
            "enabled": self._enabled,
            "onlyonce": self._onlyonce,
            "cron": self._cron
        })
    def get_state(self) -> bool:
        """Return whether the plugin is currently enabled."""
        return self._enabled
    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """This plugin registers no remote commands."""
        pass
    def get_api(self) -> List[Dict[str, Any]]:
        """This plugin exposes no API endpoints."""
        pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '5位cron表达式留空自动'
}
}
]
},
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '需要MoviePilot设定-站点启用本地CookieCloud服务器。'
}
}
]
}
]
},
]
}
], {
"enabled": False,
"onlyonce": False,
"cron": "5 1 * * *",
}
def get_page(self) -> List[dict]:
    """
    Detail page definition: this plugin has no standalone page.
    """
    pass
def stop_service(self):
    """
    Tear the plugin down: drop every scheduled job and shut the
    private scheduler down if it is still running.
    """
    try:
        scheduler = self._scheduler
        if not scheduler:
            return
        scheduler.remove_all_jobs()
        if scheduler.running:
            scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

View File

@@ -0,0 +1,841 @@
import re
import threading
import time
from datetime import datetime, timedelta
from typing import List, Tuple, Dict, Any, Optional
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.helper.downloader import DownloaderHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import NotificationType, ServiceInfo
from app.utils.string import StringUtils
lock = threading.Lock()
class TorrentRemover(_PluginBase):
# 插件名称
plugin_name = "自动删种"
# 插件描述
plugin_desc = "自动删除下载器中的下载任务。"
# 插件图标
plugin_icon = "delete.jpg"
# 插件版本
plugin_version = "2.1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "torrentremover_"
# 加载顺序
plugin_order = 8
# 可使用的用户级别
auth_level = 2
# 私有属性
downloader_helper = None
_event = threading.Event()
_scheduler = None
_enabled = False
_onlyonce = False
_notify = False
# pause/delete
_downloaders = []
_action = "pause"
_cron = None
_samedata = False
_mponly = False
_size = None
_ratio = None
_time = None
_upspeed = None
_labels = None
_pathkeywords = None
_trackerkeywords = None
_errorkeywords = None
_torrentstates = None
_torrentcategorys = None
def init_plugin(self, config: dict = None):
    """
    Initialise the plugin from its saved configuration.

    Reads all removal-filter settings; the regular cron run is registered
    with the host scheduler via get_service(), so a private scheduler is
    only created for the "run once now" case.
    """
    self.downloader_helper = DownloaderHelper()
    if config:
        self._enabled = config.get("enabled")
        self._onlyonce = config.get("onlyonce")
        self._notify = config.get("notify")
        self._downloaders = config.get("downloaders") or []
        self._action = config.get("action")
        self._cron = config.get("cron")
        self._samedata = config.get("samedata")
        self._mponly = config.get("mponly")
        self._size = config.get("size") or ""
        self._ratio = config.get("ratio")
        self._time = config.get("time")
        self._upspeed = config.get("upspeed")
        self._labels = config.get("labels") or ""
        self._pathkeywords = config.get("pathkeywords") or ""
        self._trackerkeywords = config.get("trackerkeywords") or ""
        self._errorkeywords = config.get("errorkeywords") or ""
        self._torrentstates = config.get("torrentstates") or ""
        self._torrentcategorys = config.get("torrentcategorys") or ""
    # Stop any scheduler left over from a previous configuration
    self.stop_service()
    if self.get_state() or self._onlyonce:
        if self._onlyonce:
            self._scheduler = BackgroundScheduler(timezone=settings.TZ)
            logger.info(f"自动删种服务启动,立即运行一次")
            # Fire a single run a few seconds from now
            self._scheduler.add_job(func=self.delete_torrents, trigger='date',
                                    run_date=datetime.now(
                                        tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3)
                                    )
            # Reset the one-shot switch so it does not fire again on reload
            self._onlyonce = False
            # Persist settings (with onlyonce cleared)
            self.update_config({
                "enabled": self._enabled,
                "notify": self._notify,
                "onlyonce": self._onlyonce,
                "action": self._action,
                "cron": self._cron,
                "downloaders": self._downloaders,
                "samedata": self._samedata,
                "mponly": self._mponly,
                "size": self._size,
                "ratio": self._ratio,
                "time": self._time,
                "upspeed": self._upspeed,
                "labels": self._labels,
                "pathkeywords": self._pathkeywords,
                "trackerkeywords": self._trackerkeywords,
                "errorkeywords": self._errorkeywords,
                "torrentstates": self._torrentstates,
                "torrentcategorys": self._torrentcategorys
            })
            if self._scheduler.get_jobs():
                # Start the one-shot scheduler
                self._scheduler.print_jobs()
                self._scheduler.start()
def get_state(self) -> bool:
    """
    The plugin is active only when it is enabled, a cron schedule is set
    and at least one downloader has been selected.
    """
    return bool(self._enabled and self._cron and self._downloaders)
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """
    Register remote commands: this plugin exposes none.
    """
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """
    Register plugin API endpoints: this plugin exposes none.
    """
    pass
def get_service(self) -> List[Dict[str, Any]]:
    """
    Register the plugin's public scheduled service with the host scheduler.

    Each entry has the shape:
    [{
        "id": service id,
        "name": display name,
        "trigger": cron/interval/date trigger or CronTrigger.from_crontab(),
        "func": callable to run,
        "kwargs": scheduler arguments
    }]

    Returns an empty list when the plugin is not fully configured, so the
    host schedules nothing.
    """
    if self.get_state():
        return [{
            "id": "TorrentRemover",
            "name": "自动删种服务",
            "trigger": CronTrigger.from_crontab(self._cron),
            "func": self.delete_torrents,
            "kwargs": {}
        }]
    return []
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送通知',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '0 */12 * * *'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'action',
'label': '动作',
'items': [
{'title': '暂停', 'value': 'pause'},
{'title': '删除种子', 'value': 'delete'},
{'title': '删除种子和文件', 'value': 'deletefile'}
]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'multiple': True,
'chips': True,
'clearable': True,
'model': 'downloaders',
'label': '下载器',
'items': [{"title": config.name, "value": config.name}
for config in self.downloader_helper.get_configs().values()]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'size',
'label': '种子大小GB',
'placeholder': '例如1-10'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'ratio',
'label': '分享率',
'placeholder': ''
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'time',
'label': '做种时间(小时)',
'placeholder': ''
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'upspeed',
'label': '平均上传速度',
'placeholder': ''
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'labels',
'label': '标签',
'placeholder': '用,分隔多个标签'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'pathkeywords',
'label': '保存路径关键词',
'placeholder': '支持正式表达式'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'trackerkeywords',
'label': 'Tracker关键词',
'placeholder': '支持正式表达式'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'errorkeywords',
'label': '错误信息关键词TR',
'placeholder': '支持正式表达式仅适用于TR'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'torrentstates',
'label': '任务状态QB',
'placeholder': '用,分隔多个状态仅适用于QB'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'torrentcategorys',
'label': '任务分类',
'placeholder': '用,分隔多个分类'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'samedata',
'label': '处理辅种',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'mponly',
'label': '仅MoviePilot任务',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '自动删种存在风险,如设置不当可能导致数据丢失!建议动作先选择暂停,确定条件正确后再改成删除。'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '任务状态QB字典'
'downloading正在下载-传输数据,'
'stalledDL正在下载_未建立连接'
'uploading正在上传-传输数据,'
'stalledUP正在上传-未建立连接,'
'error暂停-发生错误,'
'pausedDL暂停-下载未完成,'
'pausedUP暂停-下载完成,'
'missingFiles暂停-文件丢失,'
'checkingDL检查中-下载未完成,'
'checkingUP检查中-下载完成,'
'checkingResumeData检查中-启动时恢复数据,'
'forcedDL强制下载-忽略队列,'
'queuedDL等待下载-排队,'
'forcedUP强制上传-忽略队列,'
'queuedUP等待上传-排队,'
'allocating分配磁盘空间'
'metaDL获取元数据'
'moving移动文件'
'unknown未知状态'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": False,
"onlyonce": False,
"action": 'pause',
'downloaders': [],
"cron": '0 */12 * * *',
"samedata": False,
"mponly": False,
"size": "",
"ratio": "",
"time": "",
"upspeed": "",
"labels": "",
"pathkeywords": "",
"trackerkeywords": "",
"errorkeywords": "",
"torrentstates": "",
"torrentcategorys": ""
}
def get_page(self) -> List[dict]:
    """
    Detail page definition: this plugin has no standalone page.
    """
    pass
def stop_service(self):
    """
    Stop the plugin: signal a running deletion loop to abort (via the
    shared event) and shut the private scheduler down.
    """
    try:
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                # Wake the delete loop so it exits before shutdown blocks on it
                self._event.set()
                self._scheduler.shutdown()
                self._event.clear()
            self._scheduler = None
    except Exception as e:
        # Was a bare print(); use the module logger like the rest of the file
        logger.error(f"退出插件失败:{str(e)}")
@property
def service_infos(self) -> Optional[Dict[str, ServiceInfo]]:
    """
    Resolve the configured downloader names into live ServiceInfo objects.

    Returns a mapping of name -> ServiceInfo containing only downloaders
    that are currently connected, or None when no configured downloader
    is usable (each failure path logs a warning).
    """
    if not self._downloaders:
        logger.warning("尚未配置下载器,请检查配置")
        return None
    services = self.downloader_helper.get_services(name_filters=self._downloaders)
    if not services:
        logger.warning("获取下载器实例失败,请检查配置")
        return None
    active_services = {}
    for service_name, service_info in services.items():
        # Keep only services whose instance reports an active connection
        if service_info.instance.is_inactive():
            logger.warning(f"下载器 {service_name} 未连接,请检查配置")
        else:
            active_services[service_name] = service_info
    if not active_services:
        logger.warning("没有已连接的下载器,请检查配置")
        return None
    return active_services
def __get_downloader(self, name: str):
    """
    Return the downloader instance for *name*.

    Guards against service_infos being None (it returns None when nothing
    is configured/connected) and unknown names, returning None instead of
    raising AttributeError on the lookup.
    """
    services = self.service_infos
    service = services.get(name) if services else None
    return service.instance if service else None
def __get_downloader_config(self, name: str):
    """
    Return the downloader configuration for *name*.

    Guards against service_infos being None (it returns None when nothing
    is configured/connected) and unknown names, returning None instead of
    raising AttributeError on the lookup.
    """
    services = self.service_infos
    service = services.get(name) if services else None
    return service.config if service else None
def delete_torrents(self):
    """
    Scheduled entry point: apply the configured action (pause / delete /
    delete with files) to every torrent matching the removal filters in
    each configured downloader, then optionally send one summary
    notification per downloader.
    """
    for downloader in self._downloaders:
        try:
            with lock:
                # Torrents matching every removal filter
                torrents = self.get_remove_torrents(downloader)
                logger.info(f"自动删种任务 获取符合处理条件种子数 {len(torrents)}")
                # Downloader instance (fixed typo: was `downlader_obj`)
                downloader_obj = self.__get_downloader(downloader)
                # Resolve the configured action once, replacing three
                # near-identical copy-pasted loops.
                if self._action == "pause":
                    message_text = f"{downloader.title()} 共暂停{len(torrents)}个种子"
                    log_label = "暂停种子"

                    def _apply(torrent_id):
                        downloader_obj.stop_torrents(ids=[torrent_id])
                elif self._action == "delete":
                    message_text = f"{downloader.title()} 共删除{len(torrents)}个种子"
                    log_label = "删除种子"

                    def _apply(torrent_id):
                        downloader_obj.delete_torrents(delete_file=False, ids=[torrent_id])
                elif self._action == "deletefile":
                    message_text = f"{downloader.title()} 共删除{len(torrents)}个种子及文件"
                    log_label = "删除种子及文件"

                    def _apply(torrent_id):
                        downloader_obj.delete_torrents(delete_file=True, ids=[torrent_id])
                else:
                    # Unknown action: nothing to do for this downloader
                    continue
                for torrent in torrents:
                    if self._event.is_set():
                        # stop_service() requested shutdown mid-run
                        logger.info(f"自动删种服务停止")
                        return
                    text_item = f"{torrent.get('name')} " \
                                f"来自站点:{torrent.get('site')} " \
                                f"大小:{StringUtils.str_filesize(torrent.get('size'))}"
                    _apply(torrent.get("id"))
                    logger.info(f"自动删种任务 {log_label}:{text_item}")
                    message_text = f"{message_text}\n{text_item}"
                if torrents and message_text and self._notify:
                    self.post_message(
                        mtype=NotificationType.SiteMessage,
                        title=f"【自动删种任务完成】",
                        text=message_text
                    )
        except Exception as e:
            logger.error(f"自动删种任务异常:{str(e)}")
def __get_qb_torrent(self, torrent: Any) -> Optional[dict]:
    """
    Check whether a qbittorrent task matches all configured removal filters.

    Every configured filter must pass; the first failing one returns None.
    The error-keyword filter is transmission-only and is not applied here.

    :param torrent: qbittorrent torrent object
    :return: {"id", "name", "site", "size"} when the torrent should be
             processed, otherwise None
    """
    # Completion time; fall back to the added time when never completed
    date_done = torrent.completion_on if torrent.completion_on > 0 else torrent.added_on
    # Current time (epoch seconds)
    date_now = int(time.mktime(datetime.now().timetuple()))
    # Seeding time in seconds
    torrent_seeding_time = date_now - date_done if date_done else 0
    # Average upload speed in bytes/second over the whole seeding period
    torrent_upload_avs = torrent.uploaded / torrent_seeding_time if torrent_seeding_time else 0
    # Size bounds in bytes (configured in GB). NOTE(review): a single value
    # such as "5" makes min == max, and the strict range check below then
    # filters out every torrent — a "min-max" range is expected here.
    sizes = self._size.split('-') if self._size else []
    minsize = float(sizes[0]) * 1024 * 1024 * 1024 if sizes else 0
    maxsize = float(sizes[-1]) * 1024 * 1024 * 1024 if sizes else 0
    # Share ratio must be exceeded
    if self._ratio and torrent.ratio <= float(self._ratio):
        return None
    # Seeding time threshold (configured in hours) must be exceeded
    if self._time and torrent_seeding_time <= float(self._time) * 3600:
        return None
    # Size must fall strictly inside the configured range
    if self._size and (torrent.size >= int(maxsize) or torrent.size <= int(minsize)):
        return None
    # Average upload speed must stay below the threshold (value is
    # multiplied by 1024, i.e. configured in KiB/s)
    if self._upspeed and torrent_upload_avs >= float(self._upspeed) * 1024:
        return None
    # Save-path keyword (regex, case-insensitive) must match
    if self._pathkeywords and not re.findall(self._pathkeywords, torrent.save_path, re.I):
        return None
    # Tracker keyword (regex, case-insensitive) must match
    if self._trackerkeywords and not re.findall(self._trackerkeywords, torrent.tracker, re.I):
        return None
    # NOTE(review): this is a substring test against the comma-joined
    # config string, not list membership — a state that is a substring of
    # another configured state would false-match; consider split(',').
    if self._torrentstates and torrent.state not in self._torrentstates:
        return None
    # Same substring caveat applies to the category check below
    if self._torrentcategorys and (not torrent.category or torrent.category not in self._torrentcategorys):
        return None
    return {
        "id": torrent.hash,
        "name": torrent.name,
        "site": StringUtils.get_url_sld(torrent.tracker),
        "size": torrent.size
    }
def __get_tr_torrent(self, torrent: Any) -> Optional[dict]:
    """
    Check whether a transmission task matches all configured removal filters.

    Counterpart of __get_qb_torrent: the state/category filters are
    qbittorrent-only and skipped here, while the error-keyword filter is
    applied only in this path.

    :param torrent: transmission torrent object
    :return: {"id", "name", "site", "size"} when the torrent should be
             processed, otherwise None
    """
    # Completion time; fall back to the added time when never completed
    date_done = torrent.date_done or torrent.date_added
    # Current time (epoch seconds)
    date_now = int(time.mktime(datetime.now().timetuple()))
    # Seeding time in seconds
    torrent_seeding_time = date_now - int(time.mktime(date_done.timetuple())) if date_done else 0
    # Uploaded bytes, derived from ratio x total size
    torrent_uploaded = torrent.ratio * torrent.total_size
    # Average upload speed in bytes/second over the whole seeding period
    # (fixed comment typo: 速茺 -> 速度)
    torrent_upload_avs = torrent_uploaded / torrent_seeding_time if torrent_seeding_time else 0
    # Size bounds in bytes (configured in GB); see __get_qb_torrent for the
    # single-value caveat
    sizes = self._size.split('-') if self._size else []
    minsize = float(sizes[0]) * 1024 * 1024 * 1024 if sizes else 0
    maxsize = float(sizes[-1]) * 1024 * 1024 * 1024 if sizes else 0
    # Share ratio must be exceeded
    if self._ratio and torrent.ratio <= float(self._ratio):
        return None
    # Seeding time threshold (configured in hours) must be exceeded
    if self._time and torrent_seeding_time <= float(self._time) * 3600:
        return None
    # Size must fall strictly inside the configured range
    if self._size and (torrent.total_size >= int(maxsize) or torrent.total_size <= int(minsize)):
        return None
    # Average upload speed must stay below the threshold (KiB/s)
    if self._upspeed and torrent_upload_avs >= float(self._upspeed) * 1024:
        return None
    # Download-dir keyword (regex, case-insensitive) must match
    if self._pathkeywords and not re.findall(self._pathkeywords, torrent.download_dir, re.I):
        return None
    # Tracker keyword: at least one announce URL must match
    if self._trackerkeywords:
        if not torrent.trackers:
            return None
        else:
            tacker_key_flag = False
            for tracker in torrent.trackers:
                if re.findall(self._trackerkeywords, tracker.get("announce", ""), re.I):
                    tacker_key_flag = True
                    break
            if not tacker_key_flag:
                return None
    # Error keyword: when set, only torrents whose error text matches pass
    if self._errorkeywords and not re.findall(self._errorkeywords, torrent.error_string, re.I):
        return None
    return {
        "id": torrent.hashString,
        "name": torrent.name,
        "site": torrent.trackers[0].get("sitename") if torrent.trackers else "",
        "size": torrent.total_size
    }
def get_remove_torrents(self, downloader: str):
    """
    Collect the torrents in *downloader* that match every removal filter.

    When cross-seed handling (_samedata) is enabled, any other torrent
    with the same name and size as a matched one is also selected.

    :param downloader: downloader name as configured in the plugin
    :return: list of {"id", "name", "site", "size"} dicts
    """
    remove_torrents = []
    # Downloader instance and its configuration (type drives qb/tr parsing)
    downloader_obj = self.__get_downloader(downloader)
    downloader_config = self.__get_downloader_config(downloader)
    # Tag filter (the original comment said "title"; these are tags)
    if self._labels:
        tags = self._labels.split(',')
    else:
        tags = []
    if self._mponly:
        # Restrict to torrents added by MoviePilot (its own task tag)
        tags.append(settings.TORRENT_TAG)
    # Query torrents
    torrents, error_flag = downloader_obj.get_torrents(tags=tags or None)
    if error_flag:
        return []
    # Apply the per-torrent filters according to the downloader type
    for torrent in torrents:
        if downloader_config.type == "qbittorrent":
            item = self.__get_qb_torrent(torrent)
        else:
            item = self.__get_tr_torrent(torrent)
        if not item:
            continue
        remove_torrents.append(item)
    # Cross-seed handling: also select same-name/same-size duplicates
    if self._samedata and remove_torrents:
        remove_ids = [t.get("id") for t in remove_torrents]
        remove_torrents_plus = []
        for remove_torrent in remove_torrents:
            name = remove_torrent.get("name")
            size = remove_torrent.get("size")
            for torrent in torrents:
                if downloader_config.type == "qbittorrent":
                    plus_id = torrent.hash
                    plus_name = torrent.name
                    plus_size = torrent.size
                    plus_site = StringUtils.get_url_sld(torrent.tracker)
                else:
                    plus_id = torrent.hashString
                    plus_name = torrent.name
                    plus_size = torrent.total_size
                    plus_site = torrent.trackers[0].get("sitename") if torrent.trackers else ""
                # Match on name and size, skipping torrents already selected
                if plus_name == name \
                        and plus_size == size \
                        and plus_id not in remove_ids:
                    remove_torrents_plus.append(
                        {
                            "id": plus_id,
                            "name": plus_name,
                            "site": plus_site,
                            "size": plus_size
                        }
                    )
        if remove_torrents_plus:
            remove_torrents.extend(remove_torrents_plus)
    return remove_torrents

View File

@@ -0,0 +1,997 @@
import os
from datetime import datetime, timedelta
from pathlib import Path
from threading import Event
from typing import Any, List, Dict, Tuple, Optional, Union
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from bencode import bdecode, bencode
from app.core.config import settings
from app.helper.downloader import DownloaderHelper
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.modules.qbittorrent import Qbittorrent
from app.modules.transmission import Transmission
from app.plugins import _PluginBase
from app.schemas import NotificationType, ServiceInfo
from app.utils.string import StringUtils
class TorrentTransfer(_PluginBase):
# 插件名称
plugin_name = "自动转移做种"
# 插件描述
plugin_desc = "定期转移下载器中的做种任务到另一个下载器。"
# 插件图标
plugin_icon = "seed.png"
# 插件版本
plugin_version = "1.7.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "torrenttransfer_"
# 加载顺序
plugin_order = 18
# 可使用的用户级别
auth_level = 2
# 私有属性
_scheduler = None
torrent_helper = None
downloader_helper = None
# 开关
_enabled = False
_cron = None
_onlyonce = False
_fromdownloader = None
_todownloader = None
_frompath = None
_topath = None
_notify = False
_nolabels = None
_includelabels = None
_includecategory = None
_nopaths = None
_deletesource = False
_deleteduplicate = False
_fromtorrentpath = None
_autostart = False
_transferemptylabel = False
_add_torrent_tags = None
# 退出事件
_event = Event()
# 待检查种子清单
_recheck_torrents = {}
_is_recheck_running = False
# 任务标签
_torrent_tags = []
def init_plugin(self, config: dict = None):
self.torrent_helper = TorrentHelper()
self.downloader_helper = DownloaderHelper()
# 读取配置
if config:
self._enabled = config.get("enabled")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._notify = config.get("notify")
self._nolabels = config.get("nolabels")
self._includelabels = config.get("includelabels")
self._includecategory = config.get("includecategory")
self._frompath = config.get("frompath")
self._topath = config.get("topath")
self._fromdownloader = config.get("fromdownloader")
self._todownloader = config.get("todownloader")
self._deletesource = config.get("deletesource")
self._deleteduplicate = config.get("deleteduplicate")
self._fromtorrentpath = config.get("fromtorrentpath")
self._nopaths = config.get("nopaths")
self._autostart = config.get("autostart")
self._transferemptylabel = config.get("transferemptylabel")
self._add_torrent_tags = config.get("add_torrent_tags") or ""
self._torrent_tags = self._add_torrent_tags.strip().split(",") if self._add_torrent_tags else []
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self.get_state() or self._onlyonce:
if not self.__validate_config():
self._enabled = False
self._onlyonce = False
config["enabled"] = self._enabled
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
return
# 定时服务
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._autostart:
# 追加种子校验服务
self._scheduler.add_job(self.check_recheck, 'interval', minutes=0.5)
if self._onlyonce:
logger.info(f"转移做种服务启动,立即运行一次")
self._scheduler.add_job(self.transfer, 'date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(
seconds=3))
self._onlyonce = False
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
# 启动服务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def service_info(self, name: str) -> Optional[ServiceInfo]:
    """
    Resolve a downloader service by name.

    Validates that a name was given, that the service exists with a live
    instance, and that the instance is currently connected; each failure
    logs a warning and yields None.
    """
    if not name:
        logger.warning("尚未配置下载器,请检查配置")
        return None
    service = self.downloader_helper.get_service(name)
    instance = service.instance if service else None
    if not instance:
        logger.warning(f"获取下载器 {name} 实例失败,请检查配置")
        return None
    if instance.is_inactive():
        logger.warning(f"下载器 {name} 未连接,请检查配置")
        return None
    return service
def get_state(self):
    """
    The plugin is active only when enabled and every required setting
    (cron, source/target downloaders, source torrent-file path) is set.
    """
    required = (self._enabled,
                self._cron,
                self._fromdownloader,
                self._todownloader,
                self._fromtorrentpath)
    return all(required)
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_service(self) -> List[Dict[str, Any]]:
"""
注册插件公共服务
[{
"id": "服务ID",
"name": "服务名称",
"trigger": "触发器cron/interval/date/CronTrigger.from_crontab()",
"func": self.xxx,
"kwargs": {} # 定时器参数
}]
"""
if self.get_state():
return [
{
"id": "TorrentTransfer",
"name": "转移做种服务",
"trigger": CronTrigger.from_crontab(self._cron),
"func": self.transfer,
"kwargs": {}
}
]
return []
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
downloader_options = [{"title": config.name, "value": config.name}
for config in self.downloader_helper.get_configs().values()]
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送通知',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'transferemptylabel',
'label': '转移无标签种子',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '0 0 0 ? *'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'add_torrent_tags',
'label': '添加种子标签',
'placeholder': '已整理,转移做种'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'includecategory',
'label': '转移种子分类',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'nolabels',
'label': '不转移种子标签',
}
}
]
}, {
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'includelabels',
'label': '转移种子标签',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'fromdownloader',
'label': '源下载器',
'items': downloader_options
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'fromtorrentpath',
'label': '源下载器种子文件路径',
'placeholder': 'BT_backup、torrents'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'frompath',
'label': '源数据文件根路径',
'placeholder': '根路径,留空不进行路径转换'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'todownloader',
'label': '目的下载器',
'items': downloader_options
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'topath',
'label': '目的数据文件根路径',
'placeholder': '根路径,留空不进行路径转换'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'nopaths',
'label': '不转移数据文件目录',
'rows': 3,
'placeholder': '每一行一个目录'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'autostart',
'label': '校验完成后自动开始',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'deletesource',
'label': '删除源种子',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'deleteduplicate',
'label': '删除重复种子',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 3
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": False,
"onlyonce": False,
"cron": "",
"nolabels": "",
"includelabels": "",
"includecategory": "",
"frompath": "",
"topath": "",
"fromdownloader": "",
"todownloader": "",
"deletesource": False,
"deleteduplicate": False,
"fromtorrentpath": "",
"nopaths": "",
"autostart": True,
"transferemptylabel": False,
"add_torrent_tags": "已整理,转移做种"
}
def get_page(self) -> List[dict]:
    """
    Detail page definition: this plugin has no standalone page.
    """
    pass
def __validate_config(self) -> bool:
    """
    Validate the transfer configuration before running.

    Checks that the source torrent-file directory exists and that the
    source and target downloaders differ; failures are reported both to
    the log and to the system message channel.

    :return: True when the configuration is usable
    """
    if self._fromtorrentpath and not Path(self._fromtorrentpath).exists():
        logger.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}")
        self.systemmessage.put(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}", title="自动转移做种")
        return False
    if self._fromdownloader == self._todownloader:
        logger.error(f"源下载器和目的下载器不能相同")
        self.systemmessage.put(f"源下载器和目的下载器不能相同", title="自动转移做种")
        return False
    return True
def __download(self, service: ServiceInfo, content: bytes,
               save_path: str) -> Optional[str]:
    """
    Add a paused download task to the target downloader.

    :param service: target downloader service
    :param content: raw .torrent file bytes
    :param save_path: download directory on the target downloader
    :return: the torrent hash on success, otherwise None
    """
    if not service or not service.instance:
        return
    downloader = service.instance
    if self.downloader_helper.is_downloader("qbittorrent", service=service):
        # qbittorrent's add call does not return the hash: tag the torrent
        # with a random marker and look the hash up by that tag afterwards
        tag = StringUtils.generate_random_str(10)
        state = downloader.add_torrent(content=content,
                                       download_dir=save_path,
                                       is_paused=True,
                                       tag=self._torrent_tags + [tag])
        if not state:
            return None
        else:
            # Resolve the torrent hash through the marker tag
            torrent_hash = downloader.get_torrent_id_by_tag(tags=tag)
            if not torrent_hash:
                logger.error(f"{downloader} 下载任务添加成功,但获取任务信息失败!")
                return None
            return torrent_hash
    elif self.downloader_helper.is_downloader("transmission", service=service):
        # transmission returns the torrent object directly
        torrent = downloader.add_torrent(content=content,
                                         download_dir=save_path,
                                         is_paused=True,
                                         labels=self._torrent_tags)
        if not torrent:
            return None
        else:
            return torrent.hashString
    # Neither qbittorrent nor transmission: unsupported
    logger.error(f"不支持的下载器类型")
    return None
def transfer(self):
"""
开始转移做种
"""
logger.info("开始转移做种任务 ...")
if not self.__validate_config():
return
from_service = self.service_info(self._fromdownloader)
from_downloader: Optional[Union[Qbittorrent, Transmission]] = from_service.instance if from_service else None
to_service = self.service_info(self._todownloader)
to_downloader: Optional[Union[Qbittorrent, Transmission]] = to_service.instance if to_service else None
if not from_downloader or not to_downloader:
return
torrents = from_downloader.get_completed_torrents()
if torrents:
logger.info(f"下载器 {from_service.name} 已完成种子数:{len(torrents)}")
else:
logger.info(f"下载器 {from_service.name} 没有已完成种子")
return
# 过滤种子,记录保存目录
trans_torrents = []
for torrent in torrents:
if self._event.is_set():
logger.info(f"转移服务停止")
return
# 获取种子hash
hash_str = self.__get_hash(torrent, from_service.type)
# 获取保存路径
save_path = self.__get_save_path(torrent, from_service.type)
if self._nopaths and save_path:
# 过滤不需要转移的路径
nopath_skip = False
for nopath in self._nopaths.split('\n'):
if os.path.normpath(save_path).startswith(os.path.normpath(nopath)):
logger.info(f"种子 {hash_str} 保存路径 {save_path} 不需要转移,跳过 ...")
nopath_skip = True
break
if nopath_skip:
continue
# 获取种子标签
torrent_labels = self.__get_label(torrent, from_service.type)
# 获取种子分类
torrent_category = self.__get_category(torrent, from_service.type)
# 种子为无标签,则进行规范化
is_torrent_labels_empty = torrent_labels == [''] or torrent_labels == [] or torrent_labels is None
if is_torrent_labels_empty:
torrent_labels = []
# 如果分类项存在数值,则进行判断
if self._includecategory:
# 排除未标记的分类
if torrent_category not in self._includecategory.split(','):
logger.info(f"种子 {hash_str} 不含有转移分类 {self._includecategory},跳过 ...")
continue
# 根据设置决定是否转移无标签的种子
if is_torrent_labels_empty:
if not self._transferemptylabel:
continue
else:
# 排除含有不转移的标签
if self._nolabels:
is_skip = False
for label in self._nolabels.split(','):
if label in torrent_labels:
logger.info(f"种子 {hash_str} 含有不转移标签 {label},跳过 ...")
is_skip = True
break
if is_skip:
continue
# 排除不含有转移标签的种子
if self._includelabels:
is_skip = False
for label in self._includelabels.split(','):
if label not in torrent_labels:
logger.info(f"种子 {hash_str} 不含有转移标签 {label},跳过 ...")
is_skip = True
break
if is_skip:
continue
# 添加转移数据
trans_torrents.append({
"hash": hash_str,
"save_path": save_path,
"torrent": torrent
})
# 开始转移任务
if trans_torrents:
logger.info(f"需要转移的种子数:{len(trans_torrents)}")
# 记数
total = len(trans_torrents)
# 总成功数
success = 0
# 总失败数
fail = 0
# 跳过数
skip = 0
# 删除重复数
del_dup = 0
for torrent_item in trans_torrents:
# 检查种子文件是否存在
torrent_file = Path(self._fromtorrentpath) / f"{torrent_item.get('hash')}.torrent"
if not torrent_file.exists():
logger.error(f"种子文件不存在:{torrent_file}")
# 失败计数
fail += 1
continue
# 查询hash值是否已经在目的下载器中
torrent_info, _ = to_downloader.get_torrents(ids=[torrent_item.get('hash')])
if torrent_info:
# 删除重复的源种子,不能删除文件!
if self._deleteduplicate:
logger.info(f"删除重复的源下载器任务(不含文件):{torrent_item.get('hash')} ...")
to_downloader.delete_torrents(delete_file=False, ids=[torrent_item.get('hash')])
del_dup += 1
else:
logger.info(f"{torrent_item.get('hash')} 已在目的下载器中,跳过 ...")
# 跳过计数
skip += 1
continue
# 转换保存路径
download_dir = self.__convert_save_path(torrent_item.get('save_path'),
self._frompath,
self._topath)
if not download_dir:
logger.error(f"转换保存路径失败:{torrent_item.get('save_path')}")
# 失败计数
fail += 1
continue
# 如果源下载器是QB检查是否有Tracker没有的话额外获取
if self.downloader_helper.is_downloader("qbittorrent", service=from_service):
# 读取种子内容、解析种子文件
content = torrent_file.read_bytes()
if not content:
logger.warn(f"读取种子文件失败:{torrent_file}")
fail += 1
continue
# 读取trackers
try:
torrent_main = bdecode(content)
main_announce = torrent_main.get('announce')
except Exception as err:
logger.warn(f"解析种子文件 {torrent_file} 失败:{str(err)}")
fail += 1
continue
if not main_announce:
logger.info(f"{torrent_item.get('hash')} 未发现tracker信息尝试补充tracker信息...")
# 读取fastresume文件
fastresume_file = Path(self._fromtorrentpath) / f"{torrent_item.get('hash')}.fastresume"
if not fastresume_file.exists():
logger.warn(f"fastresume文件不存在{fastresume_file}")
fail += 1
continue
# 尝试补充trackers
try:
# 解析fastresume文件
fastresume = fastresume_file.read_bytes()
torrent_fastresume = bdecode(fastresume)
# 读取trackers
fastresume_trackers = torrent_fastresume.get('trackers')
if isinstance(fastresume_trackers, list) \
and len(fastresume_trackers) > 0 \
and fastresume_trackers[0]:
# 重新赋值
torrent_main['announce'] = fastresume_trackers[0][0]
# 保留其他tracker避免单一tracker无法连接
if len(fastresume_trackers) > 1 or len(fastresume_trackers[0]) > 1:
torrent_main['announce-list'] = fastresume_trackers
# 替换种子文件路径
torrent_file = settings.TEMP_PATH / f"{torrent_item.get('hash')}.torrent"
# 编码并保存到临时文件
torrent_file.write_bytes(bencode(torrent_main))
except Exception as err:
logger.error(f"解析fastresume文件 {fastresume_file} 出错:{str(err)}")
fail += 1
continue
# 发送到另一个下载器中下载:默认暂停、传输下载路径、关闭自动管理模式
logger.info(f"添加转移做种任务到下载器 {to_service.name}{torrent_file}")
download_id = self.__download(service=to_service,
content=torrent_file.read_bytes(),
save_path=download_dir)
if not download_id:
# 下载失败
fail += 1
logger.error(f"添加下载任务失败:{torrent_file}")
continue
else:
# 下载成功
logger.info(f"成功添加转移做种任务,种子文件:{torrent_file}")
# TR会自动校验QB需要手动校验
if self.downloader_helper.is_downloader("qbittorrent", service=to_service):
logger.info(f"qbittorrent 开始校验 {download_id} ...")
to_downloader.recheck_torrents(ids=[download_id])
# 追加校验任务
logger.info(f"添加校验检查任务:{download_id} ...")
if not self._recheck_torrents.get(to_service.name):
self._recheck_torrents[to_service.name] = []
self._recheck_torrents[to_service.name].append(download_id)
# 删除源种子,不能删除文件!
if self._deletesource:
logger.info(f"删除源下载器任务(不含文件):{torrent_item.get('hash')} ...")
from_downloader.delete_torrents(delete_file=False, ids=[torrent_item.get('hash')])
# 成功计数
success += 1
# 插入转种记录
history_key = f"{from_service.name}-{torrent_item.get('hash')}"
self.save_data(key=history_key,
value={
"to_download": to_service.name,
"to_download_id": download_id,
"delete_source": self._deletesource,
"delete_duplicate": self._deleteduplicate,
})
# 触发校验任务
if success > 0 and self._autostart:
self.check_recheck()
# 发送通知
if self._notify:
self.post_message(
mtype=NotificationType.SiteMessage,
title="【转移做种任务执行完成】",
text=f"总数:{total},成功:{success},失败:{fail},跳过:{skip},删除重复:{del_dup}"
)
else:
logger.info(f"没有需要转移的种子")
logger.info("转移做种任务执行完成")
def check_recheck(self):
    """Periodically check verification (recheck) progress in the target downloader.

    Torrents that finished verification and are complete get started for
    seeding and are removed from the pending list.
    """
    # Nothing has been queued for verification yet.
    if not self._recheck_torrents:
        return
    # No target downloader configured.
    if not self._todownloader:
        return
    # Prevent overlapping runs of this check.
    if self._is_recheck_running:
        return
    # Resolve the target downloader service.
    to_service = self.service_info(self._todownloader)
    to_downloader: Optional[Union[Qbittorrent, Transmission]] = to_service.instance if to_service else None
    if not to_downloader:
        return
    # Hashes awaiting verification on this service.
    recheck_torrents = self._recheck_torrents.get(to_service.name, [])
    if not recheck_torrents:
        return
    logger.info(f"开始检查下载器 {to_service.name} 的校验任务 ...")
    # Mark the check as running.
    self._is_recheck_running = True
    torrents, _ = to_downloader.get_torrents(ids=recheck_torrents)
    if torrents:
        # Torrents that are verified complete and ready to seed.
        can_seeding_torrents = []
        for torrent in torrents:
            # Torrent hash (downloader-type specific).
            hash_str = self.__get_hash(torrent, to_service.type)
            # Seedable = verified complete and currently paused/stopped.
            if self.__can_seeding(torrent, to_service.type):
                can_seeding_torrents.append(hash_str)
        if can_seeding_torrents:
            logger.info(f"{len(can_seeding_torrents)} 个任务校验完成,开始做种")
            # Start seeding the verified torrents.
            to_downloader.start_torrents(ids=can_seeding_torrents)
            # Drop the handled hashes from the pending list.
            self._recheck_torrents[to_service.name] = list(
                set(recheck_torrents).difference(set(can_seeding_torrents)))
        else:
            logger.info(f"没有新的任务校验完成,将在下次个周期继续检查 ...")
    elif torrents is None:
        # Query failed — keep the pending list and retry next cycle.
        logger.info(f"下载器 {to_service.name} 查询校验任务失败,将在下次继续查询 ...")
    else:
        # Empty result: nothing left to verify, clear the pending list.
        logger.info(f"下载器 {to_service.name} 中没有需要检查的校验任务,清空待处理列表")
        self._recheck_torrents[to_service.name] = []
    self._is_recheck_running = False
@staticmethod
def __get_hash(torrent: Any, dl_type: str):
    """Return the torrent hash for either downloader type.

    qBittorrent items are dict-like (``"hash"`` key); Transmission items
    expose a ``hashString`` attribute. Returns "" on any error.
    """
    try:
        if dl_type == "qbittorrent":
            return torrent.get("hash")
        return torrent.hashString
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __get_label(torrent: Any, dl_type: str):
    """Return the torrent's tags/labels as a list of stripped strings.

    qBittorrent stores tags as one comma-separated string; Transmission
    exposes a ``labels`` list. Returns [] on any error.
    """
    try:
        if dl_type == "qbittorrent":
            raw_tags = torrent.get("tags")
            return [str(tag).strip() for tag in raw_tags.split(',')]
        return torrent.labels or []
    except Exception as e:
        print(str(e))
        return []
@staticmethod
def __get_category(torrent: Any, dl_type: str):
    """Return the torrent's category.

    Only qBittorrent has categories; for Transmission this is always "".
    Returns "" on any error.
    """
    try:
        if dl_type != "qbittorrent":
            return ""
        return torrent.get("category").strip()
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __get_save_path(torrent: Any, dl_type: str):
    """Return the torrent's save directory for either downloader type.

    qBittorrent: ``"save_path"`` key; Transmission: ``download_dir``
    attribute. Returns "" on any error.
    """
    try:
        if dl_type == "qbittorrent":
            return torrent.get("save_path")
        return torrent.download_dir
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __can_seeding(torrent: Any, dl_type: str):
    """Return True when the torrent is verified complete and paused.

    qBittorrent: state ``"pausedUP"``; Transmission: stopped with
    100% progress. Returns False on any error.
    """
    try:
        if dl_type == "qbittorrent":
            return torrent.get("state") == "pausedUP"
        return torrent.status.stopped and torrent.percent_done == 1
    except Exception as e:
        print(str(e))
        return False
@staticmethod
def __convert_save_path(save_path: str, from_root: str, to_root: str):
    """Map a save path from the source downloader's root to the target's.

    :param save_path: path reported by the source downloader (may be empty)
    :param from_root: source downloader's root directory
    :param to_root: target downloader's root directory
    :return: the rewritten path, ``to_root``/``save_path`` for the
             degenerate configs below, or None (see NOTE at the end)
    """
    try:
        # No source save dir: fall back to the target root.
        if not save_path:
            return to_root
        # Without both roots configured, keep the original path.
        if not to_root or not from_root:
            return save_path
        # Normalize separators so the prefix comparison works on Windows paths too.
        save_path = os.path.normpath(save_path).replace("\\", "/")
        from_root = os.path.normpath(from_root).replace("\\", "/")
        to_root = os.path.normpath(to_root).replace("\\", "/")
        # Swap only the leading root component.
        if save_path.startswith(from_root):
            return save_path.replace(from_root, to_root, 1)
    except Exception as e:
        print(str(e))
    # NOTE(review): falls through to None when save_path does not start with
    # from_root (and on error) — confirm callers expect None here rather
    # than the unmodified save_path.
    return None
def stop_service(self):
    """Shut down the plugin: cancel all scheduled jobs and stop the scheduler."""
    try:
        scheduler = self._scheduler
        if not scheduler:
            return
        scheduler.remove_all_jobs()
        if scheduler.running:
            # Signal any in-flight job to stop before shutting down.
            self._event.set()
            scheduler.shutdown()
            self._event.clear()
        self._scheduler = None
    except Exception as e:
        print(str(e))

View File

@@ -38,7 +38,7 @@ class AutoSignIn(_PluginBase):
# 插件图标
plugin_icon = "signin.png"
# 插件版本
plugin_version = "2.4"
plugin_version = "2.4.2"
# 插件作者
plugin_author = "thsrite"
# 作者主页

View File

@@ -39,7 +39,15 @@ class HaiDan(_ISiteSigninHandler):
render = site_info.get("render")
# 签到
html_text = self.get_page_source(url='https://www.haidan.video/signin.php',
# 签到页会重定向到index.php由于302重定向特性导致index.php没有携带cookie
self.get_page_source(url='https://www.haidan.video/signin.php',
cookie=site_cookie,
ua=ua,
proxy=proxy,
render=render)
# 重新携带cookie获取index.php查看签到结果
html_text = self.get_page_source(url='https://www.haidan.video/index.php',
cookie=site_cookie,
ua=ua,
proxy=proxy,

View File

@@ -0,0 +1,64 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class PTTime(_ISiteSigninHandler):
    """Sign-in handler for the PT时间 (pttime.org) site."""

    # Site URL this handler matches; every handler class sets its own.
    site_url = "pttime.org"
    # Patterns that indicate a successful sign-in response.
    _succeed_regex = ['签到成功']

    @classmethod
    def match(cls, url: str) -> bool:
        """Return True when *url* belongs to this site.

        When it matches, the framework dispatches sign-ins for that site
        to this handler's :meth:`signin`.
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """Perform the sign-in for the configured site.

        :param site_info: site settings (URL, cookie, UA, proxy, render flag)
        :return: (success flag, human-readable result message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        use_proxy = site_info.get("proxy")
        use_render = site_info.get("render")
        # The endpoint replies with a minimal page, e.g.:
        # <html><head></head><body>签到成功</body></html>
        html_text = self.get_page_source(url='https://www.pttime.org/attendance.php',
                                         cookie=cookie,
                                         ua=user_agent,
                                         proxy=use_proxy,
                                         render=use_render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        # Being bounced to the login page means the cookie expired.
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败,Cookie已失效")
            return False, '签到失败,Cookie已失效'
        if self.sign_in_result(html_res=html_text,
                               regexs=self._succeed_regex):
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        logger.error(f"{site} 签到失败,签到接口返回 {html_text}")
        return False, '签到失败'

View File

@@ -0,0 +1,434 @@
# 基础库
import datetime
import json
from typing import Any, Dict, List, Optional, Type
# 第三方库
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import pytz
from sqlalchemy import JSON
from sqlalchemy.orm import Session
# 项目库
from app.chain.subscribe import SubscribeChain, Subscribe
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.db.models.subscribehistory import SubscribeHistory
from app.db.site_oper import SiteOper
from app.db.subscribe_oper import SubscribeOper
from app.db import db_query
from app.helper.subscribe import SubscribeHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
class BangumiColl(_PluginBase):
    """Sync a Bangumi user's public collection into MoviePilot subscriptions."""

    # Plugin display name
    plugin_name = "Bangumi收藏订阅"
    # Plugin description
    plugin_desc = "将Bangumi用户收藏添加到订阅"
    # Plugin icon
    plugin_icon = "bangumi_b.png"
    # Plugin version
    plugin_version = "1.5.2"
    # Plugin author
    plugin_author = "Attente"
    # Author homepage
    author_url = "https://github.com/wikrin"
    # Prefix for this plugin's config keys
    plugin_config_prefix = "bangumicoll_"
    # Load order
    plugin_order = 23
    # Minimum user level allowed to use the plugin
    auth_level = 1

    # Private state
    _scheduler = None
    siteoper: SiteOper = None
    subscribehelper: SubscribeHelper = None
    subscribeoper: SubscribeOper = None

    # Config attributes (populated from the saved plugin config)
    _enabled: bool = False
    _total_change: bool = False
    _cron: str = ""
    _notify: bool = False
    _onlyonce: bool = False
    _include: str = ""
    _exclude: str = ""
    _uid: str = ""
    _collection_type = []
    _save_path: str = ""
    _sites: list = []

    def init_plugin(self, config: Optional[dict] = None):
        """Create chain/operator helpers, load config, optionally run once."""
        self.subscribechain = SubscribeChain()
        self.siteoper = SiteOper()
        self.subscribehelper = SubscribeHelper()
        self.subscribeoper = SubscribeOper()
        # Stop any task left over from a previous initialization.
        self.stop_service()
        self.load_config(config)
        if self._onlyonce:
            self.schedule_once()

    def load_config(self, config: dict):
        """Copy the saved config onto the matching ``_``-prefixed attributes."""
        if config:
            # NOTE(review): "include"/"exclude" are written by __update_config
            # but missing from this key tuple, so they are never loaded back —
            # confirm whether that is intentional.
            for key in (
                "enabled",
                "total_change",
                "cron",
                "notify",
                "onlyonce",
                "uid",
                "collection_type",
                "save_path",
                "sites",
            ):
                setattr(self, f"_{key}", config.get(key, getattr(self, f"_{key}")))
            # Ids of all currently configured sites.
            site_ids = {site.id for site in self.siteoper.list_order_by_pri()}
            # Drop sites that have since been deleted.
            self._sites = [site_id for site_id in self._sites if site_id in site_ids]
            # Persist the (possibly pruned) configuration.
            self.__update_config()

    def schedule_once(self):
        """Schedule a single immediate run (3 s from now) and reset the flag."""
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        logger.info("Bangumi收藏订阅,立即运行一次")
        self._scheduler.add_job(
            func=self.bangumi_coll,
            trigger='date',
            run_date=datetime.datetime.now(tz=pytz.timezone(settings.TZ))
            + datetime.timedelta(seconds=3),
        )
        self._scheduler.start()
        # Turn the one-shot switch back off so it does not fire again.
        self._onlyonce = False
        self.__update_config()

    def __update_config(self):
        """Write the current attribute values back to the plugin config store."""
        self.update_config(
            {
                "enabled": self._enabled,
                "notify": self._notify,
                "total_change": self._total_change,
                "onlyonce": self._onlyonce,
                "cron": self._cron,
                "uid": self._uid,
                "collection_type": self._collection_type,
                "include": self._include,
                "exclude": self._exclude,
                "save_path": self._save_path,
                "sites": self._sites,
            }
        )

    def get_form(self):
        """Return the settings form definition and its default values."""
        from .page_components import form

        # All configured sites as select-list options.
        sites_options = [
            {"title": site.name, "value": site.id}
            for site in self.siteoper.list_order_by_pri()
        ]
        return form(sites_options)

    def get_service(self) -> List[Dict[str, Any]]:
        """Register the background sync service.

        Uses the configured cron expression, or falls back to a 6-hourly
        interval when no cron is set.
        """
        if self._enabled or self._cron:
            trigger = CronTrigger.from_crontab(self._cron) if self._cron else "interval"
            kwargs = {"hours": 6} if not self._cron else {}
            return [
                {
                    "id": "BangumiColl",
                    "name": "Bangumi收藏订阅",
                    "trigger": trigger,
                    "func": self.bangumi_coll,
                    "kwargs": kwargs,
                }
            ]
        return []

    def stop_service(self):
        """Stop the one-shot scheduler, if any."""
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            logger.error(f"退出插件失败:{str(e)}")

    @eventmanager.register(EventType.SiteDeleted)
    def site_deleted(self, event: Event):
        """Remove a deleted site from the configured site list."""
        site_id = event.event_data.get("site_id")
        if site_id in self._sites:
            self._sites.remove(site_id)
            self.__update_config()

    def get_api(self):
        # No HTTP API exposed by this plugin.
        pass

    def get_command(self):
        # No chat/remote commands exposed by this plugin.
        pass

    def get_page(self):
        # No detail page exposed by this plugin.
        pass

    def get_state(self):
        # Plugin enabled state.
        return self._enabled

    def bangumi_coll(self):
        """Fetch the Bangumi user's collection and sync subscriptions."""
        if not self._uid:
            logger.error("请设置UID")
            return
        try:
            res = self.get_bgm_res(addr="UserCollections", id=self._uid)
            items = self.parse_collection_items(res)
            # Add new entries and remove vanished ones.
            self.manage_subscriptions(items)
        except Exception as e:
            logger.error(f"执行失败: {str(e)}")

    def parse_collection_items(self, response) -> Dict[int, Dict[str, Any]]:
        """Parse the collection API response.

        :param response: HTTP response from the Bangumi collections endpoint
        :return: mapping of subject_id -> {name, name_cn, date, eps},
                 filtered to the configured collection types
        """
        data = response.json().get("data", [])
        if not data:
            logger.error(f"Bangumi用户:{self._uid} ,没有任何收藏")
            return {}
        logger.info("解析Bangumi条目信息...")
        return {
            item.get("subject_id"): {
                "name": item['subject'].get('name'),
                "name_cn": item['subject'].get('name_cn'),
                "date": item['subject'].get('date'),
                "eps": item['subject'].get('eps'),
            }
            for item in data
            if item.get("type") in self._collection_type
        }

    def manage_subscriptions(self, items: Dict[int, Dict[str, Any]]):
        """Diff the collection against existing subscriptions and history.

        Adds subjects not yet subscribed (and not in history); removes
        subscriptions whose subject left the collection.
        """
        # Existing subscriptions that carry a bangumi id: bangumiid -> sub id.
        db_sub = {
            i.bangumiid: i.id
            for i in self.subscribechain.subscribeoper.list()
            if i.bangumiid
        }
        # Bangumi ids of already-completed subscriptions.
        db_hist = self.get_subscribe_history()
        new_sub = items.keys() - db_sub.keys() - db_hist
        del_sub = db_sub.keys() - items.keys()
        logger.debug(f"待新增条目:{new_sub}")
        logger.debug(f"待移除条目:{del_sub}")
        # NOTE(review): removal only happens when notifications are enabled —
        # the _notify switch doubles as the "auto-unsubscribe" switch.
        if del_sub and self._notify:
            del_items = {db_sub[i]: i for i in del_sub}
            logger.info("开始移除订阅...")
            self.delete_subscribe(del_items)
            logger.info("移除完成")
        if new_sub:
            logger.info("开始添加订阅...")
            msg = self.add_subscribe({i: items[i] for i in new_sub})
            logger.info("添加完成")
            if msg:
                logger.info("\n".ljust(49, ' ').join(list(msg.values())))

    # Add subscriptions
    def add_subscribe(self, items: Dict[int, Dict[str, Any]]) -> Dict:
        """Subscribe each new subject; return a map of failures.

        :param items: subject_id -> parsed collection entry
        :return: subject_id -> failure message for entries that failed
        """
        fail_items = {}
        # NOTE: the loop variable is deliberately the instance attribute
        # self._subid — get_eps() reads it for the episode lookup.
        for self._subid, item in items.items():
            meta = MetaInfo(item.get("name_cn"))
            if not meta.name:
                fail_items[self._subid] = f"{item.get('name_cn')} 未识别到有效数据"
                logger.warn(f"{item.get('name_cn')} 未识别到有效数据")
                continue
            # Year from the air date (YYYY-MM-DD), if present.
            meta.year = item.get("date")[:4] if item.get("date") else None
            mediainfo = self.chain.recognize_media(meta=meta)
            meta.total_episode = item.get("eps", 0)
            if not mediainfo:
                fail_items[self._subid] = f"{item.get('name_cn')} 媒体信息识别失败"
                continue
            self.update_media_info(item, mediainfo)
            # Check for an existing subscription of the same tmdbid/season.
            sid = self.subscribeoper.list_by_tmdbid(
                mediainfo.tmdb_id, mediainfo.number_of_seasons
            )
            if sid:
                logger.info(f"{mediainfo.title_year} 正在订阅中")
                if len(sid) == 1:
                    # Backfill the bangumi id onto the existing subscription.
                    self.subscribeoper.update(
                        sid=sid[0].id, payload={"bangumiid": self._subid}
                    )
                    logger.info(f"{mediainfo.title_year} Bangumi条目id更新成功")
                continue
            sid, msg = self.subscribechain.add(
                title=mediainfo.title,
                year=mediainfo.year,
                season=mediainfo.number_of_seasons,
                bangumiid=self._subid,
                exist_ok=True,
                username="Bangumi订阅",
                **self.prepare_kwargs(meta, mediainfo),
            )
            if not sid:
                fail_items[self._subid] = f"{item.get('name_cn')} {msg}"
        return fail_items

    def prepare_kwargs(self, meta: MetaBase, mediainfo: MediaInfo) -> Dict:
        """Build extra kwargs for SubscribeChain.add.

        When the recognized season/episode count disagrees with Bangumi,
        the episode range is corrected from the Bangumi episodes API.
        """
        kwargs = {
            "save_path": self._save_path,
            # Serialize to JSON unless the DB column is already a JSON type.
            "sites": (
                self._sites
                if self.are_types_equal(attribute_name='sites')
                else json.dumps(self._sites)
            ),
        }
        total_episode = len(mediainfo.seasons.get(mediainfo.number_of_seasons) or [])
        if (
            meta.begin_season
            and mediainfo.number_of_seasons != meta.begin_season
            or total_episode != meta.total_episode
        ):
            meta = self.get_eps(meta)
            total_ep: int = meta.end_episode if meta.end_episode else total_episode
            lock_eps: int = total_ep - meta.begin_episode + 1
            # Episodes before the subscription's starting episode.
            prev_eps: list = [i for i in range(1, meta.begin_episode)]
            kwargs.update(
                {
                    "total_episode": total_ep,
                    "start_episode": meta.begin_episode,
                    "lack_episode": lock_eps,
                    # Flag that the total episode count was manually overridden.
                    "manual_total_episode": (
                        1 if meta.total_episode and self._total_change else 0
                    ),
                    "note": (
                        prev_eps
                        if self.are_types_equal("note")
                        else json.dumps(prev_eps)
                    ),
                }
            )
            logger.info(
                f"{mediainfo.title_year} 更新总集数为: {total_ep},开始集数为: {meta.begin_episode}"
            )
        return kwargs

    def update_media_info(self, item: dict, mediainfo: MediaInfo):
        """Pick the TMDB season whose air date is closest to the Bangumi date."""
        for info in mediainfo.season_info:
            if self.are_dates(item.get("date"), info.get("air_date")):
                mediainfo.number_of_seasons = info.get("season_number")
                mediainfo.number_of_episodes = info.get("episode_count")
                break

    def get_eps(self, meta: MetaBase) -> MetaBase:
        """Fill episode range info for the current subject from the Bangumi API.

        Reads ``self._subid`` (set by add_subscribe's loop).
        """
        try:
            res = self.get_bgm_res(addr="getEpisodes", id=self._subid)
            data = res.json().get("data", [{}])[0]
            # Offset between the absolute sort number and the in-season number.
            prev = data.get("sort", 1) - data.get("ep", 1)
            total = res.json().get("total", None)
            meta.begin_episode = prev + 1
            meta.end_episode = prev + total if total else None
        except Exception as e:
            logger.error(f"获取集数信息失败: {str(e)}")
        finally:
            # NOTE: returning inside finally suppresses any in-flight
            # exception; meta is returned even after a logged failure.
            return meta

    # Remove subscriptions
    def delete_subscribe(self, del_items: Dict[int, int]):
        """Delete each subscription and send a cancellation notification.

        :param del_items: subscription id -> bangumi subject id
        """
        for subscribe_id in del_items.keys():
            try:
                subscribe = self.subscribeoper.get(subscribe_id)
                if subscribe:
                    self.subscribeoper.delete(subscribe_id)
                    # Report the subscription as "done" to the statistics helper.
                    self.subscribehelper.sub_done_async(
                        {"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
                    )
                    self.post_message(
                        mtype=NotificationType.Subscribe,
                        title=f"{subscribe.name}({subscribe.year}) 第{subscribe.season}季 已取消订阅",
                        text=f"原因: 未在Bangumi收藏中找到该条目\n订阅用户: {subscribe.username}\n创建时间: {subscribe.date}",
                        image=subscribe.backdrop,
                    )
            except Exception as e:
                logger.error(f"删除订阅失败 {subscribe_id}: {str(e)}")

    @staticmethod
    def get_bgm_res(addr: str, id: int | str):
        """Call the Bangumi v0 API.

        :param addr: "UserCollections" or "getEpisodes"
        :param id: Bangumi username/UID, or subject id for episode lookups
        :return: the HTTP response object
        """
        url = {
            "UserCollections": f"https://api.bgm.tv/v0/users/{str(id)}/collections?subject_type=2",
            "getEpisodes": f"https://api.bgm.tv/v0/episodes?subject_id={str(id)}&type=0&limit=1",
        }
        headers = {
            "User-Agent": "wikrin/MoviePilot-Plugins (https://github.com/wikrin/MoviePilot-Plugins)"
        }
        return RequestUtils(headers=headers).get_res(url=url[addr])

    @staticmethod
    def are_dates(date_str1, date_str2, threshold_days: int = 7) -> bool:
        """Return True when two YYYY-MM-DD dates are within threshold_days."""
        date1 = datetime.datetime.strptime(date_str1, '%Y-%m-%d')
        date2 = datetime.datetime.strptime(date_str2, '%Y-%m-%d')
        return abs((date1 - date2).days) <= threshold_days

    @db_query
    def get_subscribe_history(self, db: Session = None) -> set:
        """Return the bangumi ids of completed (historical) subscriptions.

        ``db`` is injected by the @db_query decorator.
        """
        try:
            result = (
                db.query(SubscribeHistory)
                .filter(SubscribeHistory.bangumiid.isnot(None))
                .all()
            )
            return {i.bangumiid for i in result}
        except Exception as e:
            logger.error(f"获取订阅历史失败: {str(e)}")
            return set()

    @staticmethod
    def are_types_equal(
        attribute_name: str, expected_type: Type[Any] = JSON(), class_=Subscribe
    ) -> bool:
        """Check whether a model column's SQL type matches expected_type.

        Used to decide between storing a native list (JSON column) and a
        JSON-serialized string.

        :raises AttributeError: when the model has no such column
        """
        column = class_.__table__.columns.get(attribute_name)
        if column is None:
            raise AttributeError(
                f"Class: {class_.__name__} 没有属性: '{attribute_name}'"
            )
        return isinstance(column.type, type(expected_type))

View File

@@ -0,0 +1,318 @@
from bs4 import BeautifulSoup
def form(sites_options) -> tuple:
    """Build the plugin's settings form definition.

    :param sites_options: site select options ({"title", "value"} dicts)
    :return: (Vuetify component tree, default config values) — note the
             original ``-> list`` annotation was wrong; this returns a 2-tuple.
    """
    return [
        {
            'component': 'VForm',
            'content': [
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'notify',
                                        'label': '自动取消订阅并通知',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'total_change',
                                        'label': '不跟随TMDB变动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 3},
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'onlyonce',
                                        'label': '立即运行一次',
                                    },
                                }
                            ],
                        },
                    ],
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '执行周期',
                                        'placeholder': '5位cron表达式,留空自动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'uid',
                                        'label': 'UID/用户名',
                                        'placeholder': '设置了用户名填写用户名,否则填写UID',
                                    },
                                },
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 8, 'md': 4},
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'collection_type',
                                        'label': '收藏类型',
                                        'chips': True,
                                        'multiple': True,
                                        'items': [
                                            {'title': '在看', 'value': 3},
                                            {'title': '想看', 'value': 1},
                                        ],
                                    },
                                }
                            ],
                        },
                    ],
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'include',
                                        'label': '包含',
                                        'placeholder': '暂未实现',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'exclude',
                                        'label': '排除',
                                        'placeholder': '暂未实现',
                                    },
                                }
                            ],
                        },
                    ],
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'save_path',
                                        'label': '保存目录',
                                        'placeholder': '留空自动',
                                    },
                                }
                            ],
                        },
                        {
                            'component': 'VCol',
                            'props': {'cols': 12, 'md': 6},
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'sites',
                                        'label': '选择站点',
                                        'chips': True,
                                        'multiple': True,
                                        'items': sites_options,
                                    },
                                }
                            ],
                        },
                    ],
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                    },
                                    'content': parse_html(
                                        '<p>注意: 该插件仅会将<strong>公开</strong>的收藏添加到<strong>订阅</strong>。</p>'
                                    ),
                                }
                            ],
                        }
                    ],
                },
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'type': 'info',
                                        'variant': 'tonal',
                                    },
                                    'content': parse_html(
                                        '<p>注意: 开启<strong>自动取消订阅并通知</strong>后,已添加的订阅在下一次执行时若不在已选择的<strong>收藏类型</strong>中,将会被取消订阅。</p>'
                                    ),
                                }
                            ],
                        }
                    ],
                },
            ],
        },
        {
            'component': 'VRow',
            'content': [
                {
                    'component': 'VCol',
                    'props': {
                        'cols': 12,
                    },
                    'content': [
                        {
                            'component': 'VAlert',
                            'props': {
                                'type': 'info',
                                'variant': 'tonal',
                            },
                            'content': parse_html(
                                '<p>注意: 开启<strong>不跟随TMDB变动</strong>后,从<a href="https://bangumi.github.io/api/#/%E7%AB%A0%E8%8A%82/getEpisodes" target="_blank"><u>Bangumi API</u></a>获取的总集数将不再跟随TMDB的集数变动。</p>'
                            ),
                        },
                    ],
                },
            ],
        },
    ], {
        # Default configuration values.
        "enabled": False,
        "total_change": False,
        "notify": False,
        "onlyonce": False,
        "cron": "",
        "uid": "",
        "collection_type": [3],
        "include": "",
        "exclude": "",
        "save_path": "",
        "sites": [],
    }
def parse_html(html_string: str) -> list:
    """Convert an HTML snippet into the component-dict tree used by the form.

    Each tag becomes {'component': tag, 'props': attrs, 'content': [...]};
    bare text becomes a 'span' component. Inline emphasis tags collapse
    their children into a single 'text' value.
    """
    soup = BeautifulSoup(html_string, 'html.parser')
    result: list = []
    # Tags whose children are flattened into plain text instead of nesting.
    inline_text_tags = {'strong', 'u', 'em', 'b', 'i'}

    def process_element(element: BeautifulSoup):
        # Plain text node (no tag name): return the stripped text.
        if element.name is None:
            text = element.strip()
            return text if text else ""
        # HTML tag: map tag name and attributes onto a component dict.
        component = element.name
        props = {attr: element[attr] for attr in element.attrs}
        content = []
        # Recurse into children.
        for child in element.children:
            child_content = process_element(child)
            if isinstance(child_content, str):
                content.append({'component': 'span', 'text': child_content})
            elif child_content:  # only append non-empty child dicts
                content.append(child_content)
        # Build the component node; inline tags carry no nested content.
        tag_data = {
            'component': component,
            'props': props,
            'content': content if component not in inline_text_tags else [],
        }
        # Inline tags get their children's text joined into a single string.
        if content and component in inline_text_tags:
            tag_data['text'] = ' '.join(
                item['text'] for item in content if 'text' in item
            )
        return tag_data

    # Walk every top-level node in the parsed snippet.
    for element in soup.children:
        element_content = process_element(element)
        if element_content:  # keep only non-empty results
            result.append(element_content)
    return result

View File

@@ -25,6 +25,7 @@ from app.modules.qbittorrent import Qbittorrent
from app.modules.transmission import Transmission
from app.plugins import _PluginBase
from app.schemas import NotificationType, TorrentInfo, MediaType
from app.schemas.types import EventType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
@@ -63,15 +64,15 @@ class BrushConfig:
self.delete_size_range = config.get("delete_size_range")
self.up_speed = self.__parse_number(config.get("up_speed"))
self.dl_speed = self.__parse_number(config.get("dl_speed"))
self.auto_archive_days = self.__parse_number(config.get("auto_archive_days"))
self.save_path = config.get("save_path")
self.clear_task = config.get("clear_task", False)
self.archive_task = config.get("archive_task", False)
self.except_tags = config.get("except_tags", True)
self.delete_except_tags = config.get("delete_except_tags")
self.except_subscribe = config.get("except_subscribe", True)
self.brush_sequential = config.get("brush_sequential", False)
self.proxy_download = config.get("proxy_download", False)
self.proxy_delete = config.get("proxy_delete", False)
self.log_more = config.get("log_more", False)
self.active_time_range = config.get("active_time_range")
self.downloader_monitor = config.get("downloader_monitor")
self.qb_category = config.get("qb_category")
@@ -257,7 +258,7 @@ class BrushFlow(_PluginBase):
# 插件图标
plugin_icon = "brush.jpg"
# 插件版本
plugin_version = "3.3"
plugin_version = "3.8"
# 插件作者
plugin_author = "jxxghp,InfinityPacer"
# 作者主页
@@ -295,7 +296,6 @@ class BrushFlow(_PluginBase):
# endregion
def init_plugin(self, config: dict = None):
logger.info(f"站点刷流服务初始化")
self.siteshelper = SitesHelper()
self.siteoper = SiteOper()
self.torrents = TorrentsChain()
@@ -340,11 +340,10 @@ class BrushFlow(_PluginBase):
brush_config.archive_task = False
self.__update_config()
if brush_config.log_more:
if brush_config.enable_site_config:
logger.info(f"已开启站点独立配置,配置信息:{brush_config}")
else:
logger.info(f"没有开启站点独立配置,配置信息:{brush_config}")
if brush_config.enable_site_config:
logger.debug(f"已开启站点独立配置,配置信息:{brush_config}")
else:
logger.debug(f"没有开启站点独立配置,配置信息:{brush_config}")
# 停止现有任务
self.stop_service()
@@ -366,8 +365,6 @@ class BrushFlow(_PluginBase):
# 如果开启&存在站点时,才需要启用后台任务
self._task_brush_enable = brush_config.enabled and brush_config.brushsites
# brush_config.onlyonce = True
# 检查是否启用了一次性任务
if brush_config.onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
@@ -974,11 +971,6 @@ class BrushFlow(_PluginBase):
'component': 'VWindow',
'props': {
'model': '_tabs'
# VWindow设置paddnig会导致切换Tab时页面高度变动调整为修改VRow的方案
# 'style': {
# 'padding-top': '24px',
# 'padding-bottom': '24px',
# },
},
'content': [
{
@@ -1140,6 +1132,25 @@ class BrushFlow(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'auto_archive_days',
'label': '自动归档记录天数',
'placeholder': '超过此天数后自动归档',
'type': 'number',
"min": "0"
}
}
]
}
]
}
@@ -1426,11 +1437,28 @@ class BrushFlow(_PluginBase):
'component': 'VTextField',
'props': {
'model': 'seed_inactivetime',
'label': '未活动时间(分钟) ',
'label': '未活动时间(分钟)',
'placeholder': '超过时删除任务'
}
}
]
},
{
'component': 'VCol',
'props': {
"cols": 12,
"md": 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'delete_except_tags',
'label': '删除排除标签',
'placeholder': 'MOVIEPILOT,H&R'
}
}
]
}
]
}
@@ -1476,8 +1504,8 @@ class BrushFlow(_PluginBase):
{
'component': 'VSwitch',
'props': {
'model': 'except_tags',
'label': '删种排除MoviePilot任务',
'model': 'except_subscribe',
'label': '排除订阅(实验性功能)',
}
}
]
@@ -1492,8 +1520,8 @@ class BrushFlow(_PluginBase):
{
'component': 'VSwitch',
'props': {
'model': 'except_subscribe',
'label': '排除订阅(实验性功能)',
'model': 'qb_first_last_piece',
'label': '优先下载首尾文件块',
}
}
]
@@ -1640,43 +1668,6 @@ class BrushFlow(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'qb_first_last_piece',
'label': '优先下载首尾文件块',
}
}
]
}
]
},
{
'component': 'VRow',
"content": [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'log_more',
'label': '记录更多日志',
}
}
]
}
]
}
@@ -1742,7 +1733,7 @@ class BrushFlow(_PluginBase):
'props': {
'type': 'error',
'variant': 'tonal',
'text': '注意排除H&R并不保证能完全适配所有站点部分站点在列表页不显示H&R标志但实际上是有H&R的请注意核对使用'
'text': '注意排除H&R并不保证能完全适配所有站点部分站点在列表页不显示H&R标志但实际上是有H&R的请注意核对使用'
}
}
]
@@ -1849,7 +1840,7 @@ class BrushFlow(_PluginBase):
"onlyonce": False,
"clear_task": False,
"archive_task": False,
"except_tags": True,
"delete_except_tags": f"{settings.TORRENT_TAG},H&R" if settings.TORRENT_TAG else "H&R",
"except_subscribe": True,
"brush_sequential": False,
"proxy_download": False,
@@ -1857,7 +1848,6 @@ class BrushFlow(_PluginBase):
"freeleech": "free",
"hr": "yes",
"enable_site_config": False,
"log_more": False,
"downloader_monitor": False,
"auto_qb_category": False,
"qb_first_last_piece": False,
@@ -2055,9 +2045,6 @@ class BrushFlow(_PluginBase):
if brush_config.site_hr_active:
logger.info(f"站点 {siteinfo.name} 已开启全站H&R选项所有种子设置为H&R种子")
# 由于缓存原因这里不能直接改torrents在后续加入任务中调整
# for torrent in torrents:
# torrent.hit_and_run = True
# 排除包含订阅的种子
if brush_config.except_subscribe:
@@ -2068,7 +2055,7 @@ class BrushFlow(_PluginBase):
torrents_size = self.__calculate_seeding_torrents_size(torrent_tasks=torrent_tasks)
logger.info(f"正在准备种子刷流,数量{len(torrents)}")
logger.info(f"正在准备种子刷流,数量 {len(torrents)}")
# 过滤种子
for torrent in torrents:
@@ -2078,6 +2065,8 @@ class BrushFlow(_PluginBase):
if not pre_condition_passed:
return False
logger.debug(f"种子详情:{torrent}")
# 判断能否通过保种体积刷流条件
size_condition_passed, reason = self.__evaluate_size_condition_for_brush(torrents_size=torrents_size,
add_torrent_size=torrent.size)
@@ -2098,8 +2087,8 @@ class BrushFlow(_PluginBase):
logger.warn(f"{torrent.title} 添加刷流任务失败!")
continue
# 保存任务信息
torrent_tasks[hash_string] = {
# 触发刷流下载时间并保存任务信息
torrent_task = {
"site": siteinfo.id,
"site_name": siteinfo.name,
"title": torrent.title,
@@ -2134,6 +2123,13 @@ class BrushFlow(_PluginBase):
"time": time.time()
}
self.eventmanager.send_event(etype=EventType.PluginAction, data={
"action": "brushflow_download_added",
"hash": hash_string,
"data": torrent_task
})
torrent_tasks[hash_string] = torrent_task
# 统计数据
torrents_size += torrent.size
statistic_info["count"] += 1
@@ -2306,7 +2302,8 @@ class BrushFlow(_PluginBase):
return True, None
def __log_brush_conditions(self, passed: bool, reason: str, torrent: Any = None):
@staticmethod
def __log_brush_conditions(passed: bool, reason: str, torrent: Any = None):
"""
记录刷流日志
"""
@@ -2314,9 +2311,7 @@ class BrushFlow(_PluginBase):
if not torrent:
logger.warn(f"没有通过前置刷流条件校验,原因:{reason}")
else:
brush_config = self.__get_brush_config()
if brush_config.log_more:
logger.warn(f"种子没有通过刷流条件校验,原因:{reason} 种子:{torrent.title}|{torrent.description}")
logger.debug(f"种子没有通过刷流条件校验,原因:{reason} 种子:{torrent.title}|{torrent.description}")
# endregion
@@ -2331,10 +2326,6 @@ class BrushFlow(_PluginBase):
if not brush_config.downloader:
return
if not self.__is_current_time_in_range():
logger.info(f"当前不在指定的刷流时间区间内,检查操作将暂时暂停")
return
with lock:
logger.info("开始检查刷流下载任务 ...")
torrent_tasks: Dict[str, dict] = self.get_data("torrents") or {}
@@ -2372,34 +2363,58 @@ class BrushFlow(_PluginBase):
# 更新刷流任务列表中在下载器中删除的种子为删除状态
self.__update_undeleted_torrents_missing_in_downloader(torrent_tasks, torrent_check_hashes, check_torrents)
# 排除MoviePilot种子
if check_torrents and brush_config.except_tags:
check_torrents = self.__filter_torrents_by_tag(torrents=check_torrents,
exclude_tag=settings.TORRENT_TAG)
# 根据配置的标签进行种子排除
if check_torrents:
logger.info(f"当前刷流任务共 {len(check_torrents)} 个有效种子,正在准备按设定的种子标签进行排除")
# 初始化一个空的列表来存储需要排除的标签
tags_to_exclude = set()
# 如果 delete_except_tags 非空且不是纯空白,则添加到排除列表中
if brush_config.delete_except_tags and brush_config.delete_except_tags.strip():
tags_to_exclude.update(tag.strip() for tag in brush_config.delete_except_tags.split(','))
# 将所有需要排除的标签组合成一个字符串,每个标签之间用逗号分隔
combined_tags = ",".join(tags_to_exclude)
if combined_tags: # 确保有标签需要排除
pre_filter_count = len(check_torrents) # 获取过滤前的任务数量
check_torrents = self.__filter_torrents_by_tag(torrents=check_torrents, exclude_tag=combined_tags)
post_filter_count = len(check_torrents) # 获取过滤后的任务数量
excluded_count = pre_filter_count - post_filter_count # 计算被排除的任务数量
logger.info(
f"有效种子数 {pre_filter_count},排除标签 '{combined_tags}' 后,"
f"剩余种子数 {post_filter_count},排除种子数 {excluded_count}")
else:
logger.info("没有配置有效的排除标签,所有种子均参与后续处理")
need_delete_hashes = []
# 如果配置了动态删除以及删种阈值,则根据动态删种进行分组处理
if brush_config.proxy_delete and brush_config.delete_size_range:
logger.info("已开启动态删种,按系统默认动态删种条件开始检查任务")
proxy_delete_hashes = self.__delete_torrent_for_proxy(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(proxy_delete_hashes)
# 否则均认为是没有开启动态删种
# 种子删除检查
if not check_torrents:
logger.info("没有需要检查的任务,跳过")
else:
logger.info("没有开启动态删种,按用户设置删种条件开始检查任务")
not_proxy_delete_hashes = self.__delete_torrent_for_evaluate_conditions(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(not_proxy_delete_hashes)
need_delete_hashes = []
if need_delete_hashes:
# 如果是QB则重新汇报Tracker
if brush_config.downloader == "qbittorrent":
self.__qb_torrents_reannounce(torrent_hashes=need_delete_hashes)
# 删除种子
if downloader.delete_torrents(ids=need_delete_hashes, delete_file=True):
for torrent_hash in need_delete_hashes:
torrent_tasks[torrent_hash]["deleted"] = True
# 如果配置了动态删除以及删种阈值,则根据动态删种进行分组处理
if brush_config.proxy_delete and brush_config.delete_size_range:
logger.info("已开启动态删种,按系统默认动态删种条件开始检查任务")
proxy_delete_hashes = self.__delete_torrent_for_proxy(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(proxy_delete_hashes)
# 否则均认为是没有开启动态删种
else:
logger.info("没有开启动态删种,按用户设置删种条件开始检查任务")
not_proxy_delete_hashes = self.__delete_torrent_for_evaluate_conditions(torrents=check_torrents,
torrent_tasks=torrent_tasks) or []
need_delete_hashes.extend(not_proxy_delete_hashes)
if need_delete_hashes:
# 如果是QB则重新汇报Tracker
if brush_config.downloader == "qbittorrent":
self.__qb_torrents_reannounce(torrent_hashes=need_delete_hashes)
# 删除种子
if downloader.delete_torrents(ids=need_delete_hashes, delete_file=True):
for torrent_hash in need_delete_hashes:
torrent_tasks[torrent_hash]["deleted"] = True
torrent_tasks[torrent_hash]["deleted_time"] = time.time()
# 归档数据
self.__auto_archive_tasks(torrent_tasks=torrent_tasks)
self.__update_and_save_statistic_info(torrent_tasks)
@@ -2618,8 +2633,7 @@ class BrushFlow(_PluginBase):
reason=reason)
logger.info(f"站点:{site_name}{reason},删除种子:{torrent_title}|{torrent_desc}")
else:
if brush_config.log_more:
logger.info(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
logger.debug(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
return delete_hashes
@@ -2657,8 +2671,7 @@ class BrushFlow(_PluginBase):
reason=reason)
logger.info(f"站点:{site_name}{reason},删除种子:{torrent_title}|{torrent_desc}")
else:
if brush_config.log_more:
logger.info(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
logger.debug(f"站点:{site_name}{reason},不删除种子:{torrent_title}|{torrent_desc}")
return delete_hashes
@@ -2829,6 +2842,7 @@ class BrushFlow(_PluginBase):
torrent_task = torrent_tasks[hash_value]
# 标记为已删除
torrent_task["deleted"] = True
torrent_task["deleted_time"] = time.time()
# 处理日志相关内容
delete_tasks.append(torrent_task)
site_name = torrent_task.get("site_name", "")
@@ -2914,7 +2928,7 @@ class BrushFlow(_PluginBase):
"active_downloaded": active_downloaded
})
logger.info(f"刷流任务统计数据总任务数:{total_count},活跃任务数:{active_count},已删除:{total_deleted}"
logger.info(f"刷流任务统计数据总任务数:{total_count},活跃任务数:{active_count},已删除:{total_deleted}"
f"待归档:{total_unarchived}"
f"活跃上传量:{StringUtils.str_filesize(active_uploaded)}"
f"活跃下载量:{StringUtils.str_filesize(active_downloaded)}"
@@ -2954,7 +2968,8 @@ class BrushFlow(_PluginBase):
"seed_avgspeed": "平均上传速度",
"seed_inactivetime": "未活动时间",
"up_speed": "单任务上传限速",
"dl_speed": "单任务下载限速"
"dl_speed": "单任务下载限速",
"auto_archive_days": "自动清理记录天数"
}
config_range_number_attr_to_desc = {
@@ -3026,15 +3041,15 @@ class BrushFlow(_PluginBase):
"delete_size_range": brush_config.delete_size_range,
"up_speed": brush_config.up_speed,
"dl_speed": brush_config.dl_speed,
"auto_archive_days": brush_config.auto_archive_days,
"save_path": brush_config.save_path,
"clear_task": brush_config.clear_task,
"archive_task": brush_config.archive_task,
"except_tags": brush_config.except_tags,
"delete_except_tags": brush_config.delete_except_tags,
"except_subscribe": brush_config.except_subscribe,
"brush_sequential": brush_config.brush_sequential,
"proxy_download": brush_config.proxy_download,
"proxy_delete": brush_config.proxy_delete,
"log_more": brush_config.log_more,
"active_time_range": brush_config.active_time_range,
"downloader_monitor": brush_config.downloader_monitor,
"qb_category": brush_config.qb_category,
@@ -3131,7 +3146,7 @@ class BrushFlow(_PluginBase):
data = data.get(key)
if not data:
return None
logger.info(f"获取到下载地址:{data}")
logger.debug(f"获取到下载地址:{data}")
return data
return None
@@ -3201,8 +3216,7 @@ class BrushFlow(_PluginBase):
# 获取种子Hash
torrent_hash = self.qb.get_torrent_id_by_tag(tags=tag)
if not torrent_hash:
logger.error(f"{brush_config.downloader} 获取种子Hash失败"
f"{',请尝试启用「代理下载种子」配置项' if not brush_config.proxy_download else ''}")
logger.error(f"{brush_config.downloader} 获取种子Hash失败,详细信息请查看 README")
return None
return torrent_hash
return None
@@ -3654,12 +3668,21 @@ class BrushFlow(_PluginBase):
"""
获取正在下载的任务数量
"""
brush_config = self.__get_brush_config()
downloader = self.__get_downloader(brush_config.downloader)
if not downloader:
try:
brush_config = self.__get_brush_config()
downloader = self.__get_downloader(brush_config.downloader)
if not downloader:
return 0
torrents = downloader.get_downloading_torrents(tags=brush_config.brush_tag)
if torrents is None:
logger.warn("获取下载数量失败,可能是下载器连接发生异常")
return 0
return len(torrents)
except Exception as e:
logger.error(f"获取下载数量发生异常: {e}")
return 0
torrents = downloader.get_downloading_torrents()
return len(torrents) or 0
@staticmethod
def __get_pubminutes(pubdate: str) -> float:
@@ -3705,14 +3728,21 @@ class BrushFlow(_PluginBase):
def __filter_torrents_by_tag(self, torrents: List[Any], exclude_tag: str) -> List[Any]:
"""
根据标签过滤torrents
根据标签过滤torrents,排除标签格式为逗号分隔的字符串,例如 "MOVIEPILOT, H&R"
"""
# 如果排除标签字符串为空,则返回原始列表
if not exclude_tag:
return torrents
# 将 exclude_tag 字符串分割成一个集合,并去除每个标签两端的空白,忽略空白标签并自动去重
exclude_tags = set(tag.strip() for tag in exclude_tag.split(',') if tag.strip())
filter_torrents = []
for torrent in torrents:
# 使用 __get_label 方法获取每个 torrent 的标签列表
labels = self.__get_label(torrent)
# 如果排除标签不在这个列表中,则添加到过滤后的列表
if exclude_tag not in labels:
# 检查是否有任何一个排除标签存在于标签列表中
if not any(exclude in labels for exclude in exclude_tags):
filter_torrents.append(torrent)
return filter_torrents
@@ -3752,7 +3782,8 @@ class BrushFlow(_PluginBase):
doubanid=subscribe.doubanid,
cache=True)
if mediainfo:
logger.info(f"subscribe {subscribe.name} {mediainfo.to_dict()}")
logger.info(f"订阅 {subscribe.name} 已识别到媒体信息")
logger.debug(f"subscribe {subscribe.name} {mediainfo.to_dict()}")
subscribe_titles.extend(mediainfo.names)
subscribe_titles = [title.strip() for title in subscribe_titles if title and title.strip()]
self._subscribe_infos[subscribe_key] = subscribe_titles
@@ -3766,7 +3797,8 @@ class BrushFlow(_PluginBase):
for key in set(self._subscribe_infos) - current_keys:
del self._subscribe_infos[key]
logger.info(f"订阅标题匹配完成,当前订阅的标题集合为:{self._subscribe_infos}")
logger.info("订阅标题匹配完成")
logger.debug(f"当前订阅的标题集合为:{self._subscribe_infos}")
unique_titles = {title for titles in self._subscribe_infos.values() for title in titles}
return unique_titles
@@ -3833,6 +3865,45 @@ class BrushFlow(_PluginBase):
"""
return sum(task.get("size", 0) for task in torrent_tasks.values() if not task.get("deleted", False))
def __auto_archive_tasks(self, torrent_tasks: Dict[str, dict]) -> None:
"""
自动归档已经删除的种子数据
"""
if not self._brush_config.auto_archive_days or self._brush_config.auto_archive_days <= 0:
logger.info("自动归档记录天数小于等于0取消自动归档")
return
# 用于存储已删除的数据
archived_tasks: Dict[str, dict] = self.get_data("archived") or {}
current_time = time.time()
archive_threshold_seconds = self._brush_config.auto_archive_days * 86400 # 将天数转换为秒数
# 准备一个列表,记录所有需要从原始数据中删除的键
keys_to_delete = set()
# 遍历所有 torrent 条目
for key, value in torrent_tasks.items():
deleted_time = value.get("deleted_time")
# 场景 1: 检查任务是否已被标记为删除且超出保留天数
if (value.get("deleted") and isinstance(deleted_time, (int, float)) and
current_time - deleted_time > archive_threshold_seconds):
keys_to_delete.add(key)
archived_tasks[key] = value
continue
# 场景 2: 检查没有明确删除时间的历史数据
if value.get("deleted") and deleted_time is None:
keys_to_delete.add(key)
archived_tasks[key] = value
continue
# 从原始字典中移除已删除的条目
for key in keys_to_delete:
del torrent_tasks[key]
self.save_data("archived", archived_tasks)
def __archive_tasks(self):
"""
归档已经删除的种子数据
@@ -3843,7 +3914,7 @@ class BrushFlow(_PluginBase):
archived_tasks: Dict[str, dict] = self.get_data("archived") or {}
# 准备一个列表,记录所有需要从原始数据中删除的键
keys_to_delete = []
keys_to_delete = set()
# 遍历所有 torrent 条目
for key, value in torrent_tasks.items():
@@ -3852,7 +3923,7 @@ class BrushFlow(_PluginBase):
# 如果是,加入到归档字典中
archived_tasks[key] = value
# 记录键,稍后删除
keys_to_delete.append(key)
keys_to_delete.add(key)
# 从原始字典中移除已删除的条目
for key in keys_to_delete:

View File

@@ -18,7 +18,7 @@ class CustomHosts(_PluginBase):
# 插件图标
plugin_icon = "hosts.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.2"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -235,6 +235,12 @@ class CustomHosts(_PluginBase):
for host in hosts:
if not host:
continue
host = host.strip()
if host.startswith('#'): # 检查是否为注释行
host_entry = HostsEntry(entry_type='comment', comment=host)
new_entrys.append(host_entry)
continue
host_arr = str(host).split()
try:
host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6',

View File

@@ -0,0 +1,269 @@
import re
import time
import hmac
import hashlib
import base64
import urllib.parse
from app.plugins import _PluginBase
from app.core.event import eventmanager, Event
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class DingdingMsg(_PluginBase):
    """DingTalk robot notification plugin.

    Listens for MoviePilot NoticeMessage events, renders them as DingTalk
    markdown and posts them to a signed robot webhook URL.
    """

    # 插件名称
    plugin_name = "钉钉机器人"
    # 插件描述
    plugin_desc = "支持使用钉钉机器人发送消息通知。"
    # 插件图标
    plugin_icon = "Dingding_A.png"
    # 插件版本
    plugin_version = "1.13"
    # 插件作者
    plugin_author = "nnlegenda"
    # 作者主页
    author_url = "https://github.com/nnlegenda"
    # 插件配置项ID前缀
    plugin_config_prefix = "dingdingmsg_"
    # 加载顺序
    plugin_order = 25
    # 可使用的用户级别
    auth_level = 1

    # 私有属性
    _enabled = False   # plugin switch
    _token = None      # robot webhook access_token
    _secret = None     # robot signing secret ("加签")
    _msgtypes = []     # enabled NotificationType names; empty means all types

    def init_plugin(self, config: dict = None):
        """Load plugin configuration from the saved config dict."""
        if config:
            self._enabled = config.get("enabled")
            self._token = config.get("token")
            self._secret = config.get("secret")
            self._msgtypes = config.get("msgtypes") or []

    def get_state(self) -> bool:
        """Plugin is active only when enabled and both token and secret are configured."""
        return self._enabled and (True if self._token else False) and (True if self._secret else False)

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
        """
        # 编历 NotificationType 枚举,生成消息类型选项
        MsgTypeOptions = []
        for item in NotificationType:
            MsgTypeOptions.append({
                "title": item.value,
                "value": item.name
            })
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'token',
                                            'label': '钉钉机器人token',
                                            'placeholder': 'xxxxxx',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'secret',
                                            'label': '加签',
                                            'placeholder': 'SECxxx',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VSelect',
                                        'props': {
                                            'multiple': True,
                                            'chips': True,
                                            'model': 'msgtypes',
                                            'label': '消息类型',
                                            'items': MsgTypeOptions
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                ]
            }
        ], {
            # include every form model key so saved configs always round-trip
            "enabled": False,
            'token': '',
            'secret': '',
            'msgtypes': []
        }

    def get_page(self) -> List[dict]:
        pass

    @eventmanager.register(EventType.NoticeMessage)
    def send(self, event: Event):
        """
        消息发送事件
        Render the notice as DingTalk markdown and post it to the signed webhook.
        """
        if not self.get_state():
            return
        if not event.event_data:
            return
        msg_body = event.event_data
        # 渠道: only broadcast messages (no explicit channel) are handled here
        channel = msg_body.get("channel")
        if channel:
            return
        # 类型
        msg_type: NotificationType = msg_body.get("type")
        # 标题
        title = msg_body.get("title")
        # 文本
        text = msg_body.get("text")
        # 封面
        cover = msg_body.get("image")
        if not title and not text:
            logger.warn("标题和内容不能同时为空")
            return
        if (msg_type and self._msgtypes
                and msg_type.name not in self._msgtypes):
            logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
            return
        sc_url = self.url_sign(self._token, self._secret)
        # 事件可能不带消息类型; fall back to a generic label instead of
        # crashing on msg_type.value when msg_type is None
        type_text = msg_type.value if msg_type else "通知"
        try:
            if text:
                # 对text进行Markdown特殊字符转义
                text = re.sub(r"([_`])", r"\\\1", text)
            else:
                text = ""
            if cover:
                data = {
                    "msgtype": "markdown",
                    "markdown": {
                        "title": title,
                        "text": "### %s\n\n"
                                "![Cover](%s)\n\n"
                                "> %s\n\n > MoviePilot %s\n" % (title, cover, text, type_text)
                    }
                }
            else:
                data = {
                    "msgtype": "markdown",
                    "markdown": {
                        "title": title,
                        "text": "### %s\n\n"
                                "> %s\n\n > MoviePilot %s\n" % (title, text, type_text)
                    }
                }
            res = RequestUtils(content_type="application/json").post_res(sc_url, json=data)
            if res and res.status_code == 200:
                ret_json = res.json()
                errno = ret_json.get('errcode')
                error = ret_json.get('errmsg')
                if errno == 0:
                    logger.info("钉钉机器人消息发送成功")
                else:
                    logger.warn(f"钉钉机器人消息发送失败,错误码:{errno},错误原因:{error}")
            elif res is not None:
                logger.warn(f"钉钉机器人消息发送失败,错误码:{res.status_code},错误原因:{res.reason}")
            else:
                logger.warn("钉钉机器人消息发送失败,未获取到返回信息")
        except Exception as msg_e:
            logger.error(f"钉钉机器人消息发送失败,{str(msg_e)}")

    def stop_service(self):
        """
        退出插件
        """
        pass

    def url_sign(self, access_token: str, secret: str) -> str:
        """
        加签
        Build the signed robot webhook URL per the DingTalk "加签" scheme:
        sign = base64(hmac_sha256(secret, "<timestamp>\\n<secret>")), URL-encoded.
        """
        # 生成时间戳和签名
        timestamp = str(round(time.time() * 1000))
        secret_enc = secret.encode('utf-8')
        string_to_sign = '{}\n{}'.format(timestamp, secret)
        string_to_sign_enc = string_to_sign.encode('utf-8')
        hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
        sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
        # 组合请求的完整 URL
        full_url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}&timestamp={timestamp}&sign={sign}'
        return full_url

View File

@@ -330,7 +330,7 @@ class DirMonitor(_PluginBase):
return
# 不是媒体文件不处理
if file_path.suffix not in settings.RMT_MEDIAEXT:
if file_path.suffix.casefold() not in map(str.casefold, settings.RMT_MEDIAEXT):
logger.debug(f"{event_path} 不是媒体文件")
return

View File

@@ -34,7 +34,7 @@ class DoubanSync(_PluginBase):
# 插件图标
plugin_icon = "douban.png"
# 插件版本
plugin_version = "1.8"
plugin_version = "1.9.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -498,6 +498,11 @@ class DoubanSync(_PluginBase):
"""
if not self._users:
return
# 版本
if hasattr(settings, 'VERSION_FLAG'):
version = settings.VERSION_FLAG # V2
else:
version = "v1"
# 读取历史记录
if self._clearflag:
history = []
@@ -509,7 +514,12 @@ class DoubanSync(_PluginBase):
continue
logger.info(f"开始同步用户 {user_id} 的豆瓣想看数据 ...")
url = self._interests_url % user_id
results = self.rsshelper.parse(url)
if version == "v2":
results = self.rsshelper.parse(url, headers={
"User-Agent": settings.USER_AGENT
})
else:
results = self.rsshelper.parse(url)
if not results:
logger.warn(f"未获取到用户 {user_id} 豆瓣RSS数据{url}")
continue

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,296 @@
import re
import requests
from app.modules.wechat import WeChat
from app.schemas.types import NotificationType,MessageChannel
import os
import json
import requests
import base64
import hashlib
from typing import Dict, Any
from Crypto import Random
from Crypto.Cipher import AES
def bytes_to_key(data: bytes, salt: bytes, output=48) -> bytes:
    """Derive *output* key bytes from a passphrase and an 8-byte salt.

    Implements the OpenSSL ``EVP_BytesToKey`` scheme with MD5: hash the
    passphrase+salt, then keep hashing ``previous_digest + passphrase + salt``
    until enough material exists, and truncate to *output* bytes.
    Kept at module level so both v1 and v2 code paths can import
    ``bytes_to_key`` and ``encrypt`` directly (V2 compatibility).
    """
    assert len(salt) == 8, len(salt)
    seed = data + salt
    digest = hashlib.md5(seed).digest()
    chunks = [digest]
    while sum(len(chunk) for chunk in chunks) < output:
        digest = hashlib.md5(digest + seed).digest()
        chunks.append(digest)
    return b"".join(chunks)[:output]
def encrypt(message: bytes, passphrase: bytes) -> bytes:
    """Encrypt *message* in the CryptoJS ``AES.encrypt`` compatible format.

    Returns base64 of ``b"Salted__" + salt + ciphertext`` where the ciphertext
    is AES-256-CBC over a PKCS#7-padded message, with key and IV derived via
    :func:`bytes_to_key` (OpenSSL-style MD5 derivation).
    This is a modified copy of https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
    """
    salt = Random.new().read(8)
    derived = bytes_to_key(passphrase, salt, 48)
    cipher = AES.new(derived[:32], AES.MODE_CBC, derived[32:])
    pad_len = 16 - len(message) % 16
    padded = message + bytes([pad_len]) * pad_len
    return base64.b64encode(b"Salted__" + salt + cipher.encrypt(padded))
class PyCookieCloud:
    """Minimal CookieCloud client used to push cookie data to a CookieCloud server.

    Payloads are JSON-serialized and encrypted with the CryptoJS-compatible
    module-level ``encrypt`` using a key derived from ``uuid`` and ``password``;
    the password itself is never sent to the server.
    """

    def __init__(self, url: str, uuid: str, password: str):
        # Base URL of the CookieCloud server, e.g. "http://host:8088"
        self.url: str = url
        # User key (uuid) registered on the server
        self.uuid: str = uuid
        # End-to-end encryption password
        self.password: str = password

    def check_connection(self) -> bool:
        """
        Test the connection to the CookieCloud server.
        :return: True if the connection is successful, False otherwise.
        """
        try:
            resp = requests.get(self.url, timeout=3)  # 设置超时为3秒
            return resp.status_code == 200
        except Exception:
            # any network failure (DNS, refused, timeout ...) means "not reachable"
            return False

    def update_cookie(self, formatted_cookies: Dict[str, Any]) -> bool:
        """
        Update cookie data to CookieCloud.
        :param formatted_cookies: cookie value to update (domain -> cookie list);
                                  the caller's dict/lists are NOT modified.
        :return: if update success, return True, else return False.
        """
        # work on shallow copies so the caller's structures are not mutated
        cookies = dict(formatted_cookies)
        domain_cookies = list(cookies.get('.work.weixin.qq.com') or [])
        # marker cookie read by the consumer side to identify the upload type
        domain_cookies.append({
            'name': '_upload_type',
            'value': 'A',
            'domain': '.work.weixin.qq.com',
            'path': '/',
            'expires': -1,
            'httpOnly': False,
            'secure': False,
            'sameSite': 'Lax'
        })
        cookies['.work.weixin.qq.com'] = domain_cookies
        raw_data = json.dumps({'cookie_data': cookies})
        encrypted_data = encrypt(raw_data.encode('utf-8'), self.get_the_key().encode('utf-8')).decode('utf-8')
        cookie_cloud_request = requests.post(self.url + '/update',
                                             json={'uuid': self.uuid, 'encrypted': encrypted_data})
        if cookie_cloud_request.status_code == 200:
            if cookie_cloud_request.json().get('action') == 'done':
                return True
        return False

    def get_the_key(self) -> str:
        """
        Get the key used to encrypt and decrypt data.
        :return: first 16 hex characters of md5("<uuid>-<password>").
        """
        md5 = hashlib.md5()
        md5.update((self.uuid + '-' + self.password).encode('utf-8'))
        return md5.hexdigest()[:16]

    @staticmethod
    def load_cookie_lifetime(settings_file: str = None):
        """Return the persisted cookie lifetime in seconds (0 when missing or no file given)."""
        # guard against settings_file=None (the declared default) as well as a missing file
        if settings_file and os.path.exists(settings_file):
            with open(settings_file, 'r') as file:
                settings = json.load(file)
            return settings.get('_cookie_lifetime', 0)
        return 0

    @staticmethod
    def save_cookie_lifetime(settings_file, cookie_lifetime):
        """Persist the cookie lifetime (seconds, as a timestamp delta) to *settings_file*."""
        with open(settings_file, 'w') as file:
            json.dump({'_cookie_lifetime': cookie_lifetime}, file)

    @staticmethod
    def increase_cookie_lifetime(settings_file, seconds: int):
        """Add *seconds* to the persisted cookie lifetime, creating the file if needed."""
        if os.path.exists(settings_file):
            with open(settings_file, 'r') as file:
                settings = json.load(file)
            current_lifetime = settings.get('_cookie_lifetime', 0)
        else:
            current_lifetime = 0
        # 保存新的 _cookie_lifetime
        PyCookieCloud.save_cookie_lifetime(settings_file, current_lifetime + seconds)
class MySender:
    """Multi-channel notification sender with round-robin failover.

    *token* may hold several channel tokens separated by ``||``; each token's
    channel (WeChat / ServerChan / AnPush / PushPlus) is inferred from its
    shape by ``_detect_channel``. ``send`` tries channels in order until one
    succeeds. Plain-text messages are throttled to one per instance until
    ``reset_limit`` is called.
    """

    def __init__(self, token=None, func=None):
        # split "a||b" multi-tokens; a single token becomes a one-element list
        self.tokens = token.split('||') if token and '||' in token else [token] if token else []
        self.channels = [MySender._detect_channel(t) for t in self.tokens]
        self.current_index = 0  # index of the token/channel currently in use
        self.first_text_sent = False  # whether a plain-text message was already sent
        self.init_success = bool(self.tokens)  # whether initialization succeeded (any token given)
        self.post_message_func = func  # post_message callable used for V2 WeChat mode

    @staticmethod
    def _detect_channel(token):
        """Infer the notification channel from the token's format."""
        if "WeChat" in token:
            return "WeChat"
        letters_only = ''.join(re.findall(r'[A-Za-z]', token))
        if token.lower().startswith("sct"):
            return "ServerChan"
        elif letters_only.isupper():
            return "AnPush"
        else:
            return "PushPlus"

    def send(self, title, content=None, image=None, force_send=False, diy_channel=None):
        """Send a message, failing over across all configured channels.

        Plain-text messages (no image) are limited to one per instance unless
        *force_send* is True. Returns None on success, an error string when
        every channel failed, or the concrete sender's result for *diy_channel*.
        """
        if not self.init_success:
            return
        # throttle plain-text messages: only the first one goes out
        if not image and not force_send:
            if self.first_text_sent:
                return
            self.first_text_sent = True

        # if a custom channel is specified, try it directly
        if diy_channel:
            return self._try_send(title, content, image, diy_channel)

        # otherwise try channels in order until one succeeds or all were tried
        for i in range(len(self.tokens)):
            token = self.tokens[self.current_index]
            channel = self.channels[self.current_index]
            try:
                result = self._try_send(title, content, image, channel, token)
                if result is None:  # success returns None
                    return
            except Exception as e:
                pass  # ignore single-channel errors and continue with the next one
            self.current_index = (self.current_index + 1) % len(self.tokens)

        return f"所有的通知方式都发送失败"

    def _try_send(self, title, content, image, channel, token=None):
        """Dispatch to the concrete sender for *channel*; raise for unknown channels."""
        if channel == "WeChat" and self.post_message_func:
            return self._send_v2_wechat(title, content, image, token)
        elif channel == "WeChat":
            return self._send_wechat(title, content, image, token)
        elif channel == "ServerChan":
            return self._send_serverchan(title, content, image)
        elif channel == "AnPush":
            return self._send_anpush(title, content, image)
        elif channel == "PushPlus":
            return self._send_pushplus(title, content, image)
        else:
            raise ValueError(f"Unknown channel: {channel}")

    @staticmethod
    def _send_wechat(title, content, image, token):
        """Send via the legacy (V1) WeChat helper; returns None on success, error text otherwise."""
        wechat = WeChat()
        # token may be "channel,userid"; only the userid part is used here
        if token and ',' in token:
            channel, actual_userid = token.split(',', 1)
        else:
            actual_userid = None
        if image:
            send_status = wechat.send_msg(title='企业微信登录二维码', image=image, link=image, userid=actual_userid)
        else:
            send_status = wechat.send_msg(title=title, text=content, userid=actual_userid)
        if send_status is None:
            return "微信通知发送错误"
        return None

    def _send_serverchan(self, title, content, image):
        """Send via ServerChan; supports Server3 ("sctp...") keys and classic "sct..." keys."""
        tmp_tokens = self.tokens[self.current_index]
        if ',' in tmp_tokens:
            before_comma, after_comma = tmp_tokens.split(',', 1)
            if before_comma.startswith('sctp') and image:
                token = after_comma  # 图片发到公众号 (images go to the official-account key)
            else:
                token = before_comma  # 发到 server3 (text goes to the Server3 key)
        else:
            token = tmp_tokens
        if token.startswith('sctp'):
            match = re.match(r'sctp(\d+)t', token)
            if match:
                num = match.group(1)
                url = f'https://{num}.push.ft07.com/send/{token}.send'
            else:
                return '错误的Server3 Sendkey'
        else:
            url = f'https://sctapi.ftqq.com/{token}.send'
        params = {'title': title, 'desp': f'![img]({image})' if image else content}
        headers = {'Content-Type': 'application/json;charset=utf-8'}
        response = requests.post(url, json=params, headers=headers)
        result = response.json()
        if result.get('code') != 0:
            return f"Server酱通知错误: {result.get('message')}"
        return None

    def _send_anpush(self, title, content, image):
        """Send via AnPush; the token format is "channel_id,token"."""
        token = self.tokens[self.current_index]  # token of the channel currently in use
        if ',' in token:
            channel, token = token.split(',', 1)
        else:
            return "可能AnPush 没有配置消息通道ID"
        url = f"https://api.anpush.com/push/{token}"
        payload = {
            "title": title,
            "content": f"<img src=\"{image}\" width=\"100%\">" if image else content,
            "channel": channel
        }
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        response = requests.post(url, headers=headers, data=payload)
        result = response.json()
        # check both the response code and that message ids were issued
        if result.get('code') != 200:
            return f"AnPush: {result.get('msg')}"
        elif not result.get('data') or not result['data'].get('msgIds'):
            return "AnPush 消息通道未找到"
        return None

    def _send_pushplus(self, title, content, image):
        """Send via PushPlus using the HTML template."""
        token = self.tokens[self.current_index]  # token of the channel currently in use
        pushplus_url = f"http://www.pushplus.plus/send/{token}"
        # PushPlus sending logic
        data = {
            "title": title,
            "content": f"企业微信登录二维码<br/><img src='{image}' />" if image else content,
            "template": "html"
        }
        response = requests.post(pushplus_url, json=data)
        result = response.json()
        if result.get('code') != 200:
            return f"PushPlus send failed: {result.get('msg')}"
        return None

    def _send_v2_wechat(self, title, content, image, token):
        """V2 微信通知发送 — send through the injected post_message function."""
        if token and ',' in token:
            _, actual_userid = token.split(',', 1)
        else:
            actual_userid = None
        self.post_message_func(
            channel=MessageChannel.Wechat,
            mtype=NotificationType.Plugin,
            title=title,
            text=content,
            image=image,
            link=image,
            userid=actual_userid
        )
        return None  # post_message only ever returns None, so success cannot be detected; V2 assumes success

    def reset_limit(self):
        """解除限制,允许再次发送纯文本消息 — lift the plain-text throttle."""
        self.first_text_sent = False

View File

@@ -0,0 +1,87 @@
import hashlib
from typing import Dict, Any
import json
import requests
from urllib.parse import urljoin
from Cryptodome import Random
from Cryptodome.Cipher import AES
import base64
# AES block size in bytes
BLOCK_SIZE = 16


def pad(data):
    """Apply PKCS#7 padding so the result length is a multiple of BLOCK_SIZE."""
    pad_len = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + bytes([pad_len]) * pad_len
def bytes_to_key(data, salt, output=48):
    """OpenSSL ``EVP_BytesToKey``-style KDF using MD5.

    Derives *output* bytes of key material from the passphrase *data* and the
    8-byte *salt* by iterated hashing of ``previous_digest + data + salt``.
    extended from https://gist.github.com/gsakkis/4546068
    """
    assert len(salt) == 8, len(salt)
    material = data + salt
    block = hashlib.md5(material).digest()
    derived = block
    while len(derived) < output:
        block = hashlib.md5(block + material).digest()
        derived = derived + block
    return derived[:output]
def encrypt(message, passphrase):
    """Encrypt *message* into the OpenSSL/CryptoJS "Salted__" format.

    Uses AES-256-CBC with key/IV derived by :func:`bytes_to_key` and PKCS#7
    padding from :func:`pad`; returns base64 of
    ``b"Salted__" + salt + ciphertext``.
    """
    salt = Random.new().read(8)
    derived = bytes_to_key(passphrase, salt, 48)
    cipher = AES.new(derived[:32], AES.MODE_CBC, derived[32:])
    return base64.b64encode(b"Salted__" + salt + cipher.encrypt(pad(message)))
class PyCookieCloud:
    """Client for uploading encrypted cookie payloads to a CookieCloud server."""

    def __init__(self, url: str, uuid: str, password: str):
        self.url: str = url            # server base URL
        self.uuid: str = uuid          # user key registered on the server
        self.password: str = password  # end-to-end encryption password

    def check_connection(self) -> bool:
        """
        Test the connection to the CookieCloud server.
        :return: True if the connection is successful, False otherwise.
        """
        try:
            return requests.get(self.url).status_code == 200
        except Exception as e:
            print(str(e))
            return False

    def update_cookie(self, cookie: Dict[str, Any]) -> bool:
        """
        Update cookie data to CookieCloud.

        If *cookie* has no 'cookie_data' key it is wrapped as
        ``{'cookie_data': cookie}`` first.
        :param cookie: cookie value to update.
        :return: True when the server acknowledges with action == 'done'.
        """
        payload = cookie if 'cookie_data' in cookie else {'cookie_data': cookie}
        plain = json.dumps(payload)
        token = encrypt(plain.encode('utf-8'), self.get_the_key().encode('utf-8')).decode('utf-8')
        resp = requests.post(urljoin(self.url, '/update'),
                             data={'uuid': self.uuid, 'encrypted': token})
        return resp.status_code == 200 and resp.json()['action'] == 'done'

    def get_the_key(self) -> str:
        """
        Get the key used to encrypt and decrypt data.
        :return: first 16 hex characters of md5("<uuid>-<password>").
        """
        digest = hashlib.md5((self.uuid + '-' + self.password).encode('utf-8')).hexdigest()
        return digest[:16]

File diff suppressed because it is too large Load Diff

View File

@@ -34,7 +34,7 @@ class IYUUAutoSeed(_PluginBase):
# 插件图标
plugin_icon = "IYUU.png"
# 插件版本
plugin_version = "1.9.3"
plugin_version = "1.9.6"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -957,6 +957,10 @@ class IYUUAutoSeed(_PluginBase):
if self._skipverify:
# 跳过校验
logger.info(f"{download_id} 跳过校验,请自行检查...")
# 请注意这里是故意不自动开始的
# 跳过校验存在直接失败、种子目录相同文件不同等异常情况
# 必须要用户自行二次确认之后才能开始做种
# 否则会出现反复下载刷掉分享率、做假种的情况
else:
# 追加校验任务
logger.info(f"添加校验检查任务:{download_id} ...")

View File

@@ -11,7 +11,7 @@ class IyuuHelper(object):
适配新版本IYUU开发版
"""
_version = "8.2.0"
_api_base = "https://dev.iyuu.cn"
_api_base = "https://2025.iyuu.cn"
_sites = {}
_token = None
_sid_sha1 = None

View File

@@ -1,11 +1,14 @@
import threading
from queue import Queue
from time import time, sleep
from typing import Any, List, Dict, Tuple
from urllib.parse import urlencode
from app.plugins import _PluginBase
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class IyuuMsg(_PluginBase):
@@ -16,7 +19,7 @@ class IyuuMsg(_PluginBase):
# 插件图标
plugin_icon = "Iyuu_A.png"
# 插件版本
plugin_version = "1.2"
plugin_version = "1.3"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -33,12 +36,30 @@ class IyuuMsg(_PluginBase):
_token = None
_msgtypes = []
# 消息处理线程
processing_thread = None
# 上次发送时间
last_send_time = 0
# 消息队列
message_queue = Queue()
# 消息发送间隔(秒)
send_interval = 5
# 退出事件
__event = threading.Event()
def init_plugin(self, config: dict = None):
self.__event.clear()
if config:
self._enabled = config.get("enabled")
self._token = config.get("token")
self._msgtypes = config.get("msgtypes") or []
if self._enabled and self._token:
# 启动处理队列的后台线程
self.processing_thread = threading.Thread(target=self.process_queue)
self.processing_thread.daemon = True
self.processing_thread.start()
def get_state(self) -> bool:
return self._enabled and (True if self._token else False)
@@ -143,55 +164,77 @@ class IyuuMsg(_PluginBase):
@eventmanager.register(EventType.NoticeMessage)
def send(self, event: Event):
"""
消息发送事件
消息发送事件,将消息加入队列
"""
if not self.get_state():
return
if not event.event_data:
if not self.get_state() or not event.event_data:
return
msg_body = event.event_data
# 渠道
channel = msg_body.get("channel")
if channel:
return
# 类型
msg_type: NotificationType = msg_body.get("type")
# 标题
title = msg_body.get("title")
# 文本
text = msg_body.get("text")
if not title and not text:
# 验证消息的有效性
if not msg_body.get("title") and not msg_body.get("text"):
logger.warn("标题和内容不能同时为空")
return
if (msg_type and self._msgtypes
and msg_type.name not in self._msgtypes):
logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
return
# 将消息加入队列
self.message_queue.put(msg_body)
logger.info("消息已加入队列等待发送")
try:
sc_url = "https://iyuu.cn/%s.send?%s" % (self._token, urlencode({"text": title, "desp": text}))
res = RequestUtils().get_res(sc_url)
if res and res.status_code == 200:
ret_json = res.json()
errno = ret_json.get('errcode')
error = ret_json.get('errmsg')
if errno == 0:
logger.info("IYUU消息发送成功")
def process_queue(self):
"""
处理队列中的消息,按间隔时间发送
"""
while True:
if self.__event.is_set():
logger.info("消息发送线程正在退出...")
break
# 获取队列中的下一条消息
msg_body = self.message_queue.get()
# 检查是否满足发送间隔时间
current_time = time()
time_since_last_send = current_time - self.last_send_time
if time_since_last_send < self.send_interval:
sleep(self.send_interval - time_since_last_send)
# 处理消息内容
channel = msg_body.get("channel")
if channel:
continue
msg_type: NotificationType = msg_body.get("type")
title = msg_body.get("title")
text = msg_body.get("text")
# 检查消息类型是否已启用
if msg_type and self._msgtypes and msg_type.name not in self._msgtypes:
logger.info(f"消息类型 {msg_type.value} 未开启消息发送")
continue
# 尝试发送消息
try:
sc_url = "https://iyuu.cn/%s.send?%s" % (self._token, urlencode({"text": title, "desp": text}))
res = RequestUtils().get_res(sc_url)
if res and res.status_code == 200:
ret_json = res.json()
errno = ret_json.get('errcode')
error = ret_json.get('errmsg')
if errno == 0:
logger.info("IYUU消息发送成功")
# 更新上次发送时间
self.last_send_time = time()
else:
logger.warn(f"IYUU消息发送失败错误码{errno},错误原因:{error}")
elif res is not None:
logger.warn(f"IYUU消息发送失败错误码{res.status_code},错误原因:{res.reason}")
else:
logger.warn(f"IYUU消息发送失败错误码:{errno},错误原因:{error}")
elif res is not None:
logger.warn(f"IYUU消息发送失败错误码:{res.status_code},错误原因:{res.reason}")
else:
logger.warn("IYUU消息发送失败未获取到返回信息")
except Exception as msg_e:
logger.error(f"IYUU消息发送失败{str(msg_e)}")
logger.warn("IYUU消息发送失败未获取到返回信息")
except Exception as msg_e:
logger.error(f"IYUU消息发送失败{str(msg_e)}")
# 标记任务完成
self.message_queue.task_done()
def stop_service(self):
"""
退出插件
"""
pass
self.__event.set()

View File

@@ -20,7 +20,7 @@ class MediaServerMsg(_PluginBase):
# 插件图标
plugin_icon = "mediaplay.png"
# 插件版本
plugin_version = "1.2"
plugin_version = "1.3"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -40,6 +40,7 @@ class MediaServerMsg(_PluginBase):
# 私有属性
_enabled = False
_types = []
_webhook_msg_keys = {}
# 拼装消息内容
_webhook_actions = {
@@ -198,6 +199,13 @@ class MediaServerMsg(_PluginBase):
logger.info(f"未开启 {event_info.event} 类型的消息通知")
return
expiring_key = f"{event_info.item_id}-{event_info.client}-{event_info.user_name}"
# 过滤停止播放重复消息
if str(event_info.event) == "playback.stop" and expiring_key in self._webhook_msg_keys.keys():
# 刷新过期时间
self.__add_element(expiring_key)
return
# 消息标题
if event_info.item_type in ["TV", "SHOW"]:
message_title = f"{self._webhook_actions.get(event_info.event)}剧集 {event_info.item_name}"
@@ -255,10 +263,31 @@ class MediaServerMsg(_PluginBase):
else:
play_link = None
if str(event_info.event) == "playback.stop":
# 停止播放消息,添加到过期字典
self.__add_element(expiring_key)
if str(event_info.event) == "playback.start":
# 开始播放消息,删除过期字典
self.__remove_element(expiring_key)
# 发送消息
self.post_message(mtype=NotificationType.MediaServer,
title=message_title, text=message_content, image=image_url, link=play_link)
def __add_element(self, key, duration=600):
expiration_time = time.time() + duration
# 如果元素已经存在,更新其过期时间
self._webhook_msg_keys[key] = expiration_time
def __remove_element(self, key):
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if k != key}
def __get_elements(self):
current_time = time.time()
# 过滤掉过期的元素
self._webhook_msg_keys = {k: v for k, v in self._webhook_msg_keys.items() if v > current_time}
return list(self._webhook_msg_keys.keys())
def stop_service(self):
"""
退出插件

View File

@@ -29,7 +29,7 @@ class MediaSyncDel(_PluginBase):
# 插件图标
plugin_icon = "mediasyncdel.png"
# 插件版本
plugin_version = "1.7"
plugin_version = "1.7.1"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -1324,7 +1324,7 @@ class MediaSyncDel(_PluginBase):
downloader=downloader)
# 暂停辅种
else:
self.chain.stop_torrents(hashs=torrent, download=downloader)
self.chain.stop_torrents(hashs=torrent, downloader=downloader)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
# 处理辅种的辅种

View File

@@ -15,7 +15,7 @@ class MPServerStatus(_PluginBase):
# 插件图标
plugin_icon = "Duplicati_A.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -73,7 +73,21 @@ class MPServerStatus(_PluginBase):
}
def get_page(self) -> List[dict]:
pass
"""
获取插件页面
"""
if not self._enable:
return [
{
'component': 'div',
'text': '插件未启用',
'props': {
'class': 'text-center',
}
}
]
_, _, elements = self.get_dashboard()
return elements
def get_dashboard(self) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]:
"""

View File

@@ -11,11 +11,11 @@ class PushPlusMsg(_PluginBase):
# 插件名称
plugin_name = "PushPlus消息推送"
# 插件描述
plugin_desc = "支持使用PushPlus发送消息通知。"
plugin_desc = "支持使用PushPlus发送消息通知(需实名认证)"
# 插件图标
plugin_icon = "Pushplus_A.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "cheng"
# 作者主页
@@ -128,6 +128,27 @@ class PushPlusMsg(_PluginBase):
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'type': 'info',
'variant': 'tonal',
'text': '由于pushplus规则更新没有实名认证的用户无法发送消息所以需要用户自己去官网进行认证。官网地址:https://www.pushplus.plus'
}
}
]
}
]
}
]
}
], {

View File

@@ -497,7 +497,6 @@ class RemoveLink(_PluginBase):
self._transferhistory.delete(transfer_history.id)
logger.info(f"删除历史记录:{transfer_history.id}")
def delete_empty_folders(self, path):
"""
从指定路径开始,逐级向上层目录检测并删除空目录,直到遇到非空目录或到达指定监控目录为止
@@ -589,7 +588,7 @@ class RemoveLink(_PluginBase):
mtype=NotificationType.SiteMessage,
title=f"【清理硬链接】",
text=f"监控到删除源文件:[{file_path}]\n"
f"同步删除硬链接文件:[{path}]",
f"同步删除硬链接文件:[{path}]",
)
except Exception as e:
logger.error(

View File

@@ -14,8 +14,7 @@ from ruamel.yaml import CommentedMap
from app import schemas
from app.core.config import settings
from app.core.event import Event
from app.core.event import eventmanager
from app.core.event import Event, eventmanager
from app.db.models import PluginData
from app.db.site_oper import SiteOper
from app.helper.browser import PlaywrightHelper
@@ -43,7 +42,7 @@ class SiteStatistic(_PluginBase):
# 插件图标
plugin_icon = "statistic.png"
# 插件版本
plugin_version = "3.9.1"
plugin_version = "4.0.1"
# 插件作者
plugin_author = "lightolly"
# 作者主页
@@ -931,6 +930,12 @@ class SiteStatistic(_PluginBase):
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
def format_bonus(bonus):
    """
    Format a site bonus value as a thousands-grouped string with one decimal.

    :param bonus: raw bonus value (number or numeric string)
    :return: formatted string, or '0.0' when the value cannot be parsed
    """
    try:
        return f'{float(bonus):,.1f}'
    # float(None) raises TypeError, not ValueError — catch both so a
    # missing/odd bonus field degrades to '0.0' instead of crashing the page
    except (ValueError, TypeError):
        return '0.0'
# 获取数据
today, stattistic_data, yesterday_sites_data = self.__get_data()
if not stattistic_data:
@@ -995,7 +1000,7 @@ class SiteStatistic(_PluginBase):
},
{
'component': 'td',
'text': '{:,.1f}'.format(data.get('bonus') or 0)
'text': format_bonus(data.get('bonus') or 0)
},
{
'component': 'td',

View File

@@ -118,7 +118,7 @@ class NexusPhpSiteUserInfo(ISiteUserInfo):
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
return
bonus_match = re.search(r"mybonus.[\[\]:<>/a-zA-Z_\-=\"'\s#;.(使用魔力值豆]+\s*([\d,.]+)[<()&\s]", html_text)
bonus_match = re.search(r"mybonus.[\[\]:<>/a-zA-Z_\-=\"'\s#;.(使用&说明魔力值豆]+\s*([\d,.]+)[\[<()&\s]", html_text)
try:
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
@@ -340,6 +340,12 @@ class NexusPhpSiteUserInfo(ISiteUserInfo):
self.user_level = user_levels_text[0].xpath("string(.)").strip()
return
# 适配PTT用户等级
user_levels_text = html.xpath('//tr/td[text()="用户等级"]/following-sibling::td[1]/b/@title')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
return
user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
if not self.user_level and user_levels_text:
for user_level_text in user_levels_text:

View File

@@ -62,8 +62,8 @@ class TYemaSiteUserInfo(ISiteUserInfo):
self.user_level = user_info.get("level")
self.join_at = StringUtils.unify_datetime_str(user_info.get("registerTime"))
self.upload = user_info.get('uploadSize')
self.download = user_info.get('downloadSize')
self.upload = user_info.get('promotionUploadSize')
self.download = user_info.get('promotionDownloadSize')
self.ratio = round(self.upload / (self.download or 1), 2)
self.bonus = user_info.get("bonus")
self.message_unread = 0

View File

@@ -23,7 +23,7 @@ class SpeedLimiter(_PluginBase):
# 插件图标
plugin_icon = "Librespeed_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.3"
# 插件作者
plugin_author = "Shurelol"
# 作者主页
@@ -48,6 +48,7 @@ class SpeedLimiter(_PluginBase):
_noplay_up_speed: float = 0
_noplay_down_speed: float = 0
_bandwidth: float = 0
_reserved_bandwidth: float = 0
_allocation_ratio: str = ""
_auto_limit: bool = False
_limit_enabled: bool = False
@@ -55,6 +56,7 @@ class SpeedLimiter(_PluginBase):
_unlimited_ips = {}
# 当前限速状态
_current_state = ""
_exclude_path = ""
def init_plugin(self, config: dict = None):
# 读取配置
@@ -66,9 +68,15 @@ class SpeedLimiter(_PluginBase):
self._noplay_up_speed = float(config.get("noplay_up_speed")) if config.get("noplay_up_speed") else 0
self._noplay_down_speed = float(config.get("noplay_down_speed")) if config.get("noplay_down_speed") else 0
self._current_state = f"U:{self._noplay_up_speed},D:{self._noplay_down_speed}"
self._exclude_path = config.get("exclude_path")
try:
# 总带宽
self._bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000
self._reserved_bandwidth = int(float(config.get("reserved_bandwidth") or 0)) * 1000000
# 减去预留带宽
if self._reserved_bandwidth:
self._bandwidth -= self._reserved_bandwidth
# 自动限速开关
if self._bandwidth > 0:
self._auto_limit = True
@@ -316,6 +324,23 @@ class SpeedLimiter(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'reserved_bandwidth',
'label': '预留带宽(应对突发流量和额外开销)',
'placeholder': 'Mbps'
}
}
]
}
]
},
@@ -355,6 +380,23 @@ class SpeedLimiter(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'exclude_path',
'label': '不限速路径',
'placeholder': '包含该路径的媒体不限速,多个请换行'
}
}
]
}
]
}
@@ -371,7 +413,8 @@ class SpeedLimiter(_PluginBase):
"bandwidth": None,
"allocation_ratio": "",
"ipv4": "",
"ipv6": ""
"ipv6": "",
"exclude_path": ""
}
def get_page(self) -> List[dict]:
@@ -415,7 +458,9 @@ class SpeedLimiter(_PluginBase):
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Emby播放会话失败{str(e)}")
continue
@@ -429,6 +474,8 @@ class SpeedLimiter(_PluginBase):
# 未设置不限速范围则默认不限速内网ip
elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
logger.debug(f"当前播放内容:{session.get('NowPlayingItem').get('FileName')}"
f"比特率:{int(session.get('NowPlayingItem', {}).get('Bitrate') or 0)}")
total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
elif media_server == "jellyfin":
req_url = "[HOST]Sessions?api_key=[APIKEY]"
@@ -438,7 +485,8 @@ class SpeedLimiter(_PluginBase):
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
if not self.__path_execluded(session.get("NowPlayingItem").get("Path")):
playing_sessions.append(session)
except Exception as e:
logger.error(f"获取Jellyfin播放会话失败{str(e)}")
continue
@@ -481,6 +529,7 @@ class SpeedLimiter(_PluginBase):
total_bit_rate += int(session.get("bitrate") or 0)
if total_bit_rate:
logger.debug(f"比特率总计:{total_bit_rate}")
# 开启智能限速计算上传限速
if self._auto_limit:
play_up_speed = self.__calc_limit(total_bit_rate)
@@ -488,6 +537,7 @@ class SpeedLimiter(_PluginBase):
play_up_speed = self._play_up_speed
# 当前正在播放,开始限速
logger.debug(f"上传限速:{play_up_speed} KB/s")
self.__set_limiter(limit_type="播放", upload_limit=play_up_speed,
download_limit=self._play_down_speed)
else:
@@ -495,11 +545,24 @@ class SpeedLimiter(_PluginBase):
self.__set_limiter(limit_type="未播放", upload_limit=self._noplay_up_speed,
download_limit=self._noplay_down_speed)
def __path_execluded(self, path: str) -> bool:
"""
判断是否在不限速路径内
"""
if self._exclude_path:
exclude_paths = self._exclude_path.split("\n")
for exclude_path in exclude_paths:
if exclude_path in path:
logger.info(f"{path} 在不限速路径:{exclude_path} 内,跳过限速")
return True
return False
def __calc_limit(self, total_bit_rate: float) -> float:
"""
计算智能上传限速
"""
if not self._bandwidth:
# 当前总比特率大于总带宽,则设置为最低限速
if not self._bandwidth or total_bit_rate > self._bandwidth:
return 10
return round((self._bandwidth - total_bit_rate) / 8 / 1024, 2)
@@ -518,71 +581,67 @@ class SpeedLimiter(_PluginBase):
try:
cnt = 0
text = ""
for download in self._downloader:
if cnt != 0:
text = f"{text}\n===================="
text = f"{text}\n下载器:{download}"
upload_limit_final = upload_limit
if self._auto_limit and limit_type == "播放":
# 开启了播放智能限速
if len(self._downloader) == 1:
# 只有一个下载器
upload_limit = int(upload_limit)
upload_limit_final = int(upload_limit)
else:
# 多个下载器
if not self._allocation_ratio:
# 平均
upload_limit = int(upload_limit / len(self._downloader))
upload_limit_final = int(upload_limit / len(self._downloader))
else:
# 按比例
allocation_count = sum([int(i) for i in self._allocation_ratio.split(":")])
upload_limit = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count)
upload_limit_final = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count)
logger.debug(f"下载器:{download} 分配比例:{self._allocation_ratio.split(':')[cnt]}/{allocation_count} 分配上传限速:{upload_limit_final} KB/s")
cnt += 1
if upload_limit:
text = f"上传:{upload_limit} KB/s"
if upload_limit_final:
text = f"{text}\n上传:{upload_limit_final} KB/s"
else:
text = f"上传:未限速"
text = f"{text}\n上传:未限速"
if download_limit:
text = f"{text}\n下载:{download_limit} KB/s"
else:
text = f"{text}\n下载:未限速"
if str(download) == 'qbittorrent':
if self._qb:
self._qb.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Qbittorrent 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Qbittorrent 已取消限速"
)
self._qb.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit_final)
else:
if self._tr:
self._tr.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
# 发送通知
if self._notify:
title = "【播放限速】"
if upload_limit or download_limit:
subtitle = f"Transmission 开始{limit_type}限速"
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"{subtitle}\n{text}"
)
else:
self.post_message(
mtype=NotificationType.MediaServer,
title=title,
text=f"Transmission 已取消限速"
)
self._tr.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit_final)
# 发送通知
self._notify_message(text, bool(upload_limit or download_limit), limit_type)
except Exception as e:
logger.error(f"设置限速失败:{str(e)}")
def _notify_message(self, text: str, is_limit: bool, limit_type: str):
    """
    Send a notification describing the current speed-limit action.

    :param text: detail lines (per-downloader limits) to append when limiting
    :param is_limit: True when a limit was applied, False when it was lifted
    :param limit_type: human-readable trigger description (e.g. 播放/未播放)
    """
    # Notifications disabled — nothing to do
    if not self._notify:
        return
    if is_limit:
        body = f"{limit_type},开始限速\n{text}"
    else:
        body = f"{limit_type},取消限速"
    self.post_message(
        mtype=NotificationType.MediaServer,
        title="【播放限速】",
        text=body,
    )
@staticmethod
def __allow_access(allow_ips: dict, ip: str) -> bool:
"""

View File

@@ -0,0 +1,299 @@
import json
from datetime import datetime, timedelta
from hashlib import md5
from urllib.parse import urlparse
import pytz
from app.core.config import settings
from app.db.site_oper import SiteOper
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.utils.common import encrypt, decrypt
class SyncCookieCloud(_PluginBase):
    """
    Plugin that pushes the cookies of all configured MoviePilot sites into
    the local CookieCloud server's storage file, encrypted with the
    CookieCloud key/password derived AES key.
    """

    # 插件名称
    plugin_name = "同步CookieCloud"
    # 插件描述
    plugin_desc = "同步MoviePilot站点Cookie到本地CookieCloud。"
    # 插件图标
    plugin_icon = "Cookiecloud_A.png"
    # 插件版本
    plugin_version = "1.4"
    # 插件作者
    plugin_author = "thsrite"
    # 作者主页
    author_url = "https://github.com/thsrite"
    # 插件配置项ID前缀
    plugin_config_prefix = "synccookiecloud_"
    # 加载顺序
    plugin_order = 28
    # 可使用的用户级别
    auth_level = 1

    # --- private state ---
    # plugin enabled switch
    _enabled: bool = False
    # run once immediately, then reset
    _onlyonce: bool = False
    # cron expression for periodic sync
    _cron: str = ""
    # SiteOper instance (set in init_plugin)
    siteoper = None
    _scheduler: Optional[BackgroundScheduler] = None

    def init_plugin(self, config: dict = None):
        """
        Read configuration and (re)build the background scheduler.

        :param config: saved plugin configuration (enabled/onlyonce/cron)
        """
        self.siteoper = SiteOper()

        # stop any previously running jobs before re-configuring
        self.stop_service()

        if config:
            self._enabled = config.get("enabled")
            self._onlyonce = config.get("onlyonce")
            self._cron = config.get("cron")

        if self._enabled or self._onlyonce:
            # scheduler for this plugin's jobs
            self._scheduler = BackgroundScheduler(timezone=settings.TZ)

            # one-shot run requested: schedule a few seconds from now
            if self._onlyonce:
                logger.info(f"同步CookieCloud服务启动立即运行一次")
                self._scheduler.add_job(self.__sync_to_cookiecloud, 'date',
                                        run_date=datetime.now(
                                            tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                        name="同步CookieCloud")
                # reset the one-shot switch and persist it
                self._onlyonce = False
                self.__update_config()

            # periodic run per cron expression
            if self._cron:
                try:
                    self._scheduler.add_job(func=self.__sync_to_cookiecloud,
                                            trigger=CronTrigger.from_crontab(self._cron),
                                            name="同步CookieCloud")
                except Exception as err:
                    logger.error(f"定时任务配置错误:{err}")
                    # surface the bad cron expression to the user
                    self.systemmessage.put(f"执行周期配置错误:{err}")

            # start the scheduler only when at least one job was registered
            if self._scheduler.get_jobs():
                self._scheduler.print_jobs()
                self._scheduler.start()

    def __sync_to_cookiecloud(self):
        """
        Collect cookies from all sites and write them, encrypted, into the
        local CookieCloud storage file.
        """
        # all configured sites, ordered by priority
        sites = self.siteoper.list_order_by_pri()
        if not sites:
            return

        if not settings.COOKIECLOUD_ENABLE_LOCAL:
            logger.error('本地CookieCloud服务器未启用')
            return

        cookies = {}
        for site in sites:
            domain = urlparse(site.url).netloc
            cookie = site.cookie
            if not cookie:
                logger.error(f"站点 {domain} 无cookie跳过处理...")
                continue

            # parse the cookie header string into CookieCloud entries
            site_cookies = []
            for ck in cookie.split(";"):
                # split on the FIRST '=' only: cookie values (e.g. base64
                # tokens) frequently contain '=' and must not be truncated
                kv = ck.split("=", 1)
                if len(kv) < 2:
                    continue
                site_cookies.append({
                    "domain": domain,
                    # strip the leading space left by splitting on ';'
                    "name": kv[0].strip(),
                    "value": kv[1]
                })

            # store this site's cookies keyed by domain
            cookies[domain] = site_cookies

        if cookies:
            crypt_key = self._get_crypt_key()
            try:
                cookies = {'cookie_data': cookies}
                encrypted_data = encrypt(json.dumps(cookies).encode('utf-8'), crypt_key).decode('utf-8')
            except Exception as e:
                logger.error(f"CookieCloud加密失败{e}")
                return
            ck = {'encrypted': encrypted_data}
            cookie_path = settings.COOKIE_PATH / f"{settings.COOKIECLOUD_KEY}.json"
            cookie_path.write_bytes(json.dumps(ck).encode('utf-8'))
            logger.info(f"同步站点cookie到本地CookieCloud成功")
        else:
            logger.error(f"同步站点cookie到本地CookieCloud失败未获取到站点cookie")

    def __decrypted(self, encrypt_data: dict):
        """
        Decrypt local CookieCloud data.

        NOTE(review): the error paths return a ``({}, message)`` tuple while
        the success path returns the contents dict alone — callers must
        handle both shapes; confirm before relying on this helper.
        """
        encrypted = encrypt_data.get("encrypted")
        if not encrypted:
            return {}, "未获取到cookie密文"
        else:
            crypt_key = self._get_crypt_key()
            try:
                decrypted_data = decrypt(encrypted, crypt_key).decode('utf-8')
                result = json.loads(decrypted_data)
            except Exception as e:
                return {}, "cookie解密失败" + str(e)
        if not result:
            return {}, "cookie解密为空"
        if result.get("cookie_data"):
            contents = result.get("cookie_data")
        else:
            contents = result
        return contents

    @staticmethod
    def _get_crypt_key() -> bytes:
        """
        Derive the 16-byte CookieCloud AES key from the UUID (key) and
        password, per the CookieCloud protocol: md5(key + '-' + password)[:16].
        """
        md5_generator = md5()
        md5_generator.update(
            (str(settings.COOKIECLOUD_KEY).strip() + '-' + str(settings.COOKIECLOUD_PASSWORD).strip()).encode('utf-8'))
        return (md5_generator.hexdigest()[:16]).encode('utf-8')

    def __update_config(self):
        """Persist the current configuration values."""
        self.update_config({
            "enabled": self._enabled,
            "onlyonce": self._onlyonce,
            "cron": self._cron
        })

    def get_state(self) -> bool:
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the plugin configuration page.

        :return: (page component tree, default configuration values)
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'onlyonce',
                                            'label': '立即运行一次',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'cron',
                                            'label': '执行周期',
                                            'placeholder': '5位cron表达式留空自动'
                                        }
                                    }
                                ]
                            },
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                },
                                'content': [
                                    {
                                        'component': 'VAlert',
                                        'props': {
                                            'type': 'info',
                                            'variant': 'tonal',
                                            'text': '需要MoviePilot设定-站点启用本地CookieCloud服务器。'
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                ]
            }
        ], {
            "enabled": False,
            "onlyonce": False,
            "cron": "5 1 * * *",
        }

    def get_page(self) -> List[dict]:
        pass

    def stop_service(self):
        """
        Shut down the scheduler when the plugin is unloaded/reconfigured.
        """
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            logger.error("退出插件失败:%s" % str(e))

View File

@@ -22,7 +22,7 @@ class SyncDownloadFiles(_PluginBase):
# 插件图标
plugin_icon = "Youtube-dl_A.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.1.1"
# 插件作者
plugin_author = "thsrite"
# 作者主页
@@ -265,7 +265,7 @@ class SyncDownloadFiles(_PluginBase):
if last_sync_time:
# 获取种子时间
if dl_tpe == "qbittorrent":
torrent_date = time.gmtime(torrent.get("added_on")) # 将时间戳转换为时间元组
torrent_date = time.localtime(torrent.get("added_on")) # 将时间戳转换为时间元组
torrent_date = time.strftime("%Y-%m-%d %H:%M:%S", torrent_date) # 格式化时间
else:
torrent_date = torrent.added_date

View File

@@ -21,7 +21,7 @@ class TmdbWallpaper(_PluginBase):
# 插件图标
plugin_icon = "Macos_Sierra.png"
# 插件版本
plugin_version = "1.1"
plugin_version = "1.2"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -220,24 +220,30 @@ class TmdbWallpaper(_PluginBase):
"""
下载MoviePilot的登录壁纸到本地
"""
if not self._savepath:
return
if settings.WALLPAPER == "tmdb":
url = TmdbChain().get_random_wallpager()
filename = url.split("/")[-1]
else:
url = WebUtils.get_bing_wallpaper()
filename = f"{datetime.now().strftime('%Y%m%d')}.jpg"
# 下载壁纸
if url:
def __save_file(_url: str, _filename: str):
"""
保存文件
"""
try:
savepath = Path(self._savepath)
logger.info(f"下载壁纸:{url}")
r = RequestUtils().get_res(url)
logger.info(f"下载壁纸:{_url}")
r = RequestUtils().get_res(_url)
if r and r.status_code == 200:
with open(savepath / filename, "wb") as f:
with open(savepath / _filename, "wb") as f:
f.write(r.content)
except Exception as e:
logger.error(f"下载壁纸失败:{str(e)}")
if not self._savepath:
return
if settings.WALLPAPER == "tmdb":
urls = TmdbChain().get_trending_wallpapers() or []
for url in urls:
filename = url.split("/")[-1]
__save_file(url, filename)
else:
logger.error(f"获取壁纸地址失败")
url = WebUtils.get_bing_wallpaper()
if url:
filename = f"{datetime.now().strftime('%Y%m%d')}.jpg"
__save_file(url, filename)

View File

@@ -27,7 +27,7 @@ class TorrentTransfer(_PluginBase):
# 插件图标
plugin_icon = "seed.png"
# 插件版本
plugin_version = "1.4"
plugin_version = "1.6"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -55,19 +55,21 @@ class TorrentTransfer(_PluginBase):
_notify = False
_nolabels = None
_includelabels = None
_includecategory = None
_nopaths = None
_deletesource = False
_deleteduplicate = False
_fromtorrentpath = None
_autostart = False
_transferemptylabel = False
_add_torrent_tags = None
# 退出事件
_event = Event()
# 待检查种子清单
_recheck_torrents = {}
_is_recheck_running = False
# 任务标签
_torrent_tags = ["已整理", "转移做种"]
_torrent_tags = []
def init_plugin(self, config: dict = None):
self.torrent = TorrentHelper()
@@ -79,6 +81,7 @@ class TorrentTransfer(_PluginBase):
self._notify = config.get("notify")
self._nolabels = config.get("nolabels")
self._includelabels = config.get("includelabels")
self._includecategory = config.get("includecategory")
self._frompath = config.get("frompath")
self._topath = config.get("topath")
self._fromdownloader = config.get("fromdownloader")
@@ -89,6 +92,12 @@ class TorrentTransfer(_PluginBase):
self._nopaths = config.get("nopaths")
self._autostart = config.get("autostart")
self._transferemptylabel = config.get("transferemptylabel")
self._add_torrent_tags = config.get("add_torrent_tags")
if self._add_torrent_tags is None:
self._add_torrent_tags = "已整理,转移做种"
config["add_torrent_tags"] = self._add_torrent_tags
self.update_config(config=config)
self._torrent_tags = self._add_torrent_tags.strip().split(",") if self._add_torrent_tags else []
# 停止现有任务
self.stop_service()
@@ -97,14 +106,12 @@ class TorrentTransfer(_PluginBase):
if self.get_state() or self._onlyonce:
self.qb = Qbittorrent()
self.tr = Transmission()
# 检查配置
if self._fromtorrentpath and not Path(self._fromtorrentpath).exists():
logger.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}")
self.systemmessage.put(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}", title="自动转移做种")
return
if self._fromdownloader == self._todownloader:
logger.error(f"源下载器和目的下载器不能相同")
self.systemmessage.put(f"源下载器和目的下载器不能相同", title="自动转移做种")
if not self.__validate_config():
self._enabled = False
self._onlyonce = False
config["enabled"] = self._enabled
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
return
# 定时服务
@@ -121,24 +128,8 @@ class TorrentTransfer(_PluginBase):
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"enabled": self._enabled,
"onlyonce": self._onlyonce,
"cron": self._cron,
"notify": self._notify,
"nolabels": self._nolabels,
"includelabels": self._includelabels,
"frompath": self._frompath,
"topath": self._topath,
"fromdownloader": self._fromdownloader,
"todownloader": self._todownloader,
"deletesource": self._deletesource,
"deleteduplicate": self._deleteduplicate,
"fromtorrentpath": self._fromtorrentpath,
"nopaths": self._nopaths,
"autostart": self._autostart,
"transferemptylabel": self._transferemptylabel
})
config["onlyonce"] = self._onlyonce
self.update_config(config=config)
# 启动服务
if self._scheduler.get_jobs():
@@ -269,6 +260,39 @@ class TorrentTransfer(_PluginBase):
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'add_torrent_tags',
'label': '添加种子标签',
'placeholder': '已整理,转移做种'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'includecategory',
'label': '转移种子分类',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
@@ -282,7 +306,7 @@ class TorrentTransfer(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
'md': 6
},
'content': [
{
@@ -293,7 +317,7 @@ class TorrentTransfer(_PluginBase):
}
}
]
}
},
]
},
{
@@ -494,6 +518,7 @@ class TorrentTransfer(_PluginBase):
"cron": "",
"nolabels": "",
"includelabels": "",
"includecategory": "",
"frompath": "",
"topath": "",
"fromdownloader": "",
@@ -503,7 +528,8 @@ class TorrentTransfer(_PluginBase):
"fromtorrentpath": "",
"nopaths": "",
"autostart": True,
"transferemptylabel": False
"transferemptylabel": False,
"add_torrent_tags": "已整理,转移做种"
}
def get_page(self) -> List[dict]:
@@ -520,6 +546,21 @@ class TorrentTransfer(_PluginBase):
else:
return None
def __validate_config(self) -> bool:
    """
    Validate the transfer configuration, reporting any problem via the log
    and a system message.

    :return: True when the configuration is usable, False otherwise.
    """
    # The source downloader's torrent-file directory must exist
    source_path = self._fromtorrentpath
    if source_path and not Path(source_path).exists():
        logger.error(f"源下载器种子文件保存路径不存在:{source_path}")
        self.systemmessage.put(f"源下载器种子文件保存路径不存在:{source_path}", title="自动转移做种")
        return False
    # Source and destination downloaders must differ
    if self._fromdownloader == self._todownloader:
        logger.error(f"源下载器和目的下载器不能相同")
        self.systemmessage.put(f"源下载器和目的下载器不能相同", title="自动转移做种")
        return False
    return True
def __download(self, downloader: str, content: bytes,
save_path: str) -> Optional[str]:
"""
@@ -531,7 +572,7 @@ class TorrentTransfer(_PluginBase):
state = self.qb.add_torrent(content=content,
download_dir=save_path,
is_paused=True,
tag=["已整理", "转移做种", tag])
tag=self._torrent_tags + [tag])
if not state:
return None
else:
@@ -546,7 +587,7 @@ class TorrentTransfer(_PluginBase):
torrent = self.tr.add_torrent(content=content,
download_dir=save_path,
is_paused=True,
labels=["已整理", "转移做种"])
labels=self._torrent_tags)
if not torrent:
return None
else:
@@ -561,6 +602,9 @@ class TorrentTransfer(_PluginBase):
"""
logger.info("开始转移做种任务 ...")
if not self.__validate_config():
return
# 源下载器
downloader = self._fromdownloader
# 目的下载器
@@ -600,13 +644,20 @@ class TorrentTransfer(_PluginBase):
# 获取种子标签
torrent_labels = self.__get_label(torrent, downloader)
# 获取种子分类
torrent_category = self.__get_category(torrent, downloader)
# 种子为无标签,则进行规范化
is_torrent_labels_empty = torrent_labels == [''] or torrent_labels == [] or torrent_labels is None
if is_torrent_labels_empty:
torrent_labels = []
#根据设置决定是否转移无标签的种子
# 如果分类项存在数值,则进行判断
if self._includecategory:
# 排除未标记的分类
if torrent_category not in self._includecategory.split(','):
logger.info(f"种子 {hash_str} 不含有转移分类 {self._includecategory},跳过 ...")
continue
# 根据设置决定是否转移无标签的种子
if is_torrent_labels_empty:
if not self._transferemptylabel:
continue
@@ -724,6 +775,9 @@ class TorrentTransfer(_PluginBase):
and fastresume_trackers[0]:
# 重新赋值
torrent_main['announce'] = fastresume_trackers[0][0]
# 保留其他tracker避免单一tracker无法连接
if len(fastresume_trackers) > 1 or len(fastresume_trackers[0]) > 1:
torrent_main['announce-list'] = fastresume_trackers
# 替换种子文件路径
torrent_file = settings.TEMP_PATH / f"{torrent_item.get('hash')}.torrent"
# 编码并保存到临时文件
@@ -867,6 +921,18 @@ class TorrentTransfer(_PluginBase):
print(str(e))
return []
@staticmethod
def __get_category(torrent: Any, dl_type: str):
    """
    Get the category of a torrent (qBittorrent only; Transmission has no
    category concept, so other downloaders always yield "").

    :param torrent: torrent item returned by the downloader client
    :param dl_type: downloader type string ("qbittorrent" / "transmission")
    :return: stripped category name, or "" when absent or on error
    """
    # Only qBittorrent supports categories
    if dl_type != "qbittorrent":
        return ""
    try:
        # .get("category") may be None on uncategorised torrents — guard
        # before .strip() to avoid a noisy (swallowed) AttributeError
        return (torrent.get("category") or "").strip()
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __get_save_path(torrent: Any, dl_type: str):
"""

View File

@@ -15,12 +15,11 @@ from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from app import schemas
from app.chain.media import MediaChain
from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfoPath
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.log import logger
@@ -73,11 +72,11 @@ class VCBAnimeMonitor(_PluginBase):
# 插件名称
plugin_name = "整理VCB动漫压制组作品"
# 插件描述
plugin_desc = "提高部分VCB-Studio作品的识别准确率,将VCB-Studio的作品统一转移到指定目录同时进行刮削整理"
plugin_desc = "一款辅助整理&提高识别VCB-Stuido动漫压制组作品的插件"
# 插件图标
plugin_icon = "vcbmonitor.png"
# 插件版本
plugin_version = "1.8"
plugin_version = "1.8.2.2"
# 插件作者
plugin_author = "pixel@qingwa"
# 作者主页
@@ -91,7 +90,6 @@ class VCBAnimeMonitor(_PluginBase):
# 私有属性
_switch_ova = False
_high_mode = False
_torrents_path = None
new_save_path = None
qb = None
@@ -100,6 +98,7 @@ class VCBAnimeMonitor(_PluginBase):
downloadhis = None
transferchian = None
tmdbchain = None
mediaChain = None
_observer = []
_enabled = False
_notify = False
@@ -126,6 +125,7 @@ class VCBAnimeMonitor(_PluginBase):
self.transferhis = TransferHistoryOper()
self.downloadhis = DownloadHistoryOper()
self.transferchian = TransferChain()
self.mediaChain = MediaChain()
self.tmdbchain = TmdbChain()
# 清空配置
self._dirconf = {}
@@ -145,7 +145,6 @@ class VCBAnimeMonitor(_PluginBase):
self._size = config.get("size") or 0
self._scrape = config.get("scrape")
self._switch_ova = config.get("ova")
self._high_mode = config.get("high_mode")
self._torrents_path = config.get("torrents_path") or ""
# 停止现有任务
@@ -164,13 +163,16 @@ class VCBAnimeMonitor(_PluginBase):
return
# 启用种子目录监控
if self._torrents_path is not None and Path(self._torrents_path).exists() and self._enabled:
if self._torrents_path and Path(self._torrents_path).exists() and self._enabled:
# 只取第一个目录作为新的保存
first_path = monitor_dirs[0]
if SystemUtils.is_windows():
self.new_save_path = first_path.split(':')[0] + ":" + first_path.split(':')[1]
else:
self.new_save_path = first_path.split(':')[0]
try:
first_path = monitor_dirs[0]
if SystemUtils.is_windows():
self.new_save_path = first_path.split(':')[0] + ":" + first_path.split(':')[1]
else:
self.new_save_path = first_path.split(':')[0]
except Exception:
logger.error(f"目录保存失败,请检查输入目录是否合法")
# print(self.new_save_path)
try:
observer = Observer()
@@ -181,7 +183,7 @@ class VCBAnimeMonitor(_PluginBase):
observer.start()
logger.info(f"{self._torrents_path} 的种子目录监控服务启动开启监控新增的VCB-Studio种子文件")
except Exception as e:
logger.error(f"{self._torrents_path} 启动种子目录监控失败:{str(e)}")
logger.debug(f"{self._torrents_path} 启动种子目录监控失败:{str(e)}")
else:
logger.info("种子目录为空不转移qb中正在下载的VCB-Studio文件")
@@ -224,7 +226,8 @@ class VCBAnimeMonitor(_PluginBase):
try:
if target_path and target_path.is_relative_to(Path(mon_path)):
logger.warn(f"{target_path} 是监控目录 {mon_path} 的子目录,无法监控")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控", title="整理VCB动漫压制组作品")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控",
title="整理VCB动漫压制组作品")
continue
except Exception as e:
logger.debug(str(e))
@@ -290,7 +293,6 @@ class VCBAnimeMonitor(_PluginBase):
"size": self._size,
"scrape": self._scrape,
"ova": self._switch_ova,
"high_mode": self._high_mode,
"torrents_path": self._torrents_path
})
@@ -376,33 +378,56 @@ class VCBAnimeMonitor(_PluginBase):
logger.debug(f"{event_path} 不是媒体文件")
return
# 判断是不是蓝光目录
bluray_flag = False
if re.search(r"BDMV[/\\]STREAM", event_path, re.IGNORECASE):
bluray_flag = True
# 截取BDMV前面的路径
blurray_dir = event_path[:event_path.find("BDMV")]
file_path = Path(blurray_dir)
logger.info(f"{event_path} 是蓝光目录,更正文件路径为:{str(file_path)}")
# 查询历史记录,已转移的不处理
if self.transferhis.get_by_src(str(file_path)):
logger.info(f"{file_path} 已整理过")
return
# 元数据
if file_path.parent.name == "SPs":
logger.warn("位于SPs目录下,跳过处理")
if file_path.parent.name.lower() in ["sps", "scans", "cds", "previews", "extras"]:
logger.warn("位于特典或其他特殊目录下,跳过处理")
return
remeta = ReMeta(ova_switch=self._switch_ova, high_performance=self._high_mode)
if 'VCB-Studio' not in file_path.stem.strip():
logger.warn("不属于VCB的作品不处理")
return
remeta = ReMeta(ova_switch=self._switch_ova)
file_meta = remeta.handel_file(file_path=file_path)
if file_meta:
if not file_meta.name:
logger.error(f"{file_path.name} 无法识别有效信息")
return
if remeta.is_special and not self._switch_ova:
if remeta.is_ova and not self._switch_ova:
logger.warn(f"{file_path.name} 为OVA资源未开启OVA开关不处理")
return
if remeta.is_special and self._switch_ova:
logger.info(f"{file_path.name} 为OVA资源,开始处理")
if self.get_data(key=f"OVA_{file_meta.title}") is not None:
ova_history_ep = int(self.get_data(key=f"OVA_{file_meta.title}")) + 1
file_meta.begin_episode = ova_history_ep
self.save_data(key=f"OVA_{file_meta.title}", value=ova_history_ep)
if remeta.is_ova and self._switch_ova:
logger.info(f"{file_path.name} 为OVA资源,开始历史记录处理")
ova_history_ep_list = self.get_data(file_meta.title)
if ova_history_ep_list and isinstance(ova_history_ep_list, list):
ep = file_meta.begin_episode
if ep in ova_history_ep_list:
for i in range(1, 100):
if ep + i not in ova_history_ep_list:
ova_history_ep_list.append(ep + i)
file_meta.begin_episode = ep + i
logger.info(
f"{file_path.name} 为OVA资源,历史记录中已存在,自动识别为第{ep + i}")
break
else:
ova_history_ep_list.append(ep)
self.save_data(file_meta.title, ova_history_ep_list)
else:
file_meta.begin_episode = 1
self.save_data(key=f"OVA_{file_meta.title}", value=1)
self.save_data(file_meta.title, [file_meta.begin_episode])
else:
return
@@ -418,14 +443,23 @@ class VCBAnimeMonitor(_PluginBase):
# 根据父路径获取下载历史
download_history = None
# 按文件全路径查询
download_file = self.downloadhis.get_file_by_fullpath(str(file_path))
if download_file:
download_history = self.downloadhis.get_by_hash(download_file.download_hash)
if bluray_flag:
# 蓝光原盘,按目录名查询
# FIXME 理论上DownloadHistory表中的path应该是全路径但实际表中登记的数据只有目录名暂按目录名查询
download_history = self.downloadhis.get_by_path(file_path.name)
else:
# 按文件全路径查询
download_file = self.downloadhis.get_file_by_fullpath(str(file_path))
if download_file:
download_history = self.downloadhis.get_by_hash(download_file.download_hash)
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta,
tmdbid=download_history.tmdbid if download_history else None)
if download_history and download_history.tmdbid:
mediainfo: MediaInfo = self.mediaChain.recognize_media(mtype=MediaType(download_history.type),
tmdbid=download_history.tmdbid,
doubanid=download_history.doubanid)
else:
mediainfo: MediaInfo = self.mediaChain.recognize_by_meta(file_meta)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{file_meta.name}')
@@ -615,13 +649,13 @@ class VCBAnimeMonitor(_PluginBase):
if not torrent_path.exists():
return
# 只处理刚刚添加的种子也就是获取正在下载的种子
logger.info(f"开始转移qb中正在下载的VCB资源,转移目录为:{self.new_save_path}")
# 等待种子文件下载完成
time.sleep(5)
with lock:
torrents = self.qb.get_downloading_torrents()
for torrent in torrents:
if "VCB-Studio" in torrent.name:
logger.info(f"开始转移qb中正在下载的VCB资源,转移目录为:{self.new_save_path}")
# 原本存在的暂停的种子不处理
if torrent.state_enum == qbittorrentapi.TorrentState.PAUSED_DOWNLOAD:
continue
@@ -813,22 +847,6 @@ class VCBAnimeMonitor(_PluginBase):
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'high_mode',
'label': '高性能处理模式',
}
}
]
},
{
'component': 'VCol',
'props': {
@@ -983,7 +1001,7 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'model': 'monitor_dirs',
'label': '监控目录',
'rows': 5,
'rows': 4,
'placeholder': '每一行一个目录,支持以下几种配置方式,转移方式支持 move、copy、link、softlink、rclone_copy、rclone_move\n'
'监控目录\n'
'监控目录#转移方式\n'
@@ -1031,8 +1049,10 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'type': 'info',
'variant': 'tonal',
'text': '核心用法与目录同步插件相同不同点在于只识别处理VCB-Studio资源,\n'
'不处理SPs目录下的文件,OVA/OAD集数根据入库顺序累加命名,不保证与TMDB集数匹配'
'text': '核心用法与目录同步插件相同不同点在于只识别处理VCB-Studio资源'
'默认不处理SPs、CDs、SCans目录下的文件OVA/OAD集数暂时根据入库顺序累加命名'
'因此不保证与TMDB集数匹配。部分季度以罗马音音译为名的作品暂时无法识别出准确季度。'
'有想法有问题欢迎点击插件作者主页提issue'
}
}
]
@@ -1053,9 +1073,9 @@ class VCBAnimeMonitor(_PluginBase):
'props': {
'type': 'info',
'variant': 'tonal',
'text': '最佳使用方式监控目录单独设置一个作为保存VCB-Studio资源的目录,\n'
'填入监控种子目录,开启后会将正在QB(仅支持QB)下载器内的VCB-Studio资源转移到监控目录实现自动整理('
'仅支持第一个监控目录),\n'
'text': '最佳使用方式监控目录单独设置一个作为保存VCB-Studio资源的目录'
'填入监控种子目录开启后会将正在QB(仅支持QB)下载器内正在下载的VCB-Studio资源转移到监控目录实现自动整理('
'仅支持第一个监控目录)'
'监控种子目录为空则不转移文件'
}
}
@@ -1077,7 +1097,6 @@ class VCBAnimeMonitor(_PluginBase):
"cron": "",
"size": 0,
"ova": False,
"high_mode": False,
"torrents_path": "",
}

View File

@@ -1,5 +1,6 @@
import concurrent
import re
from dataclasses import dataclass
from pathlib import Path
from typing import List
from app.chain.media import MediaChain
@@ -8,196 +9,276 @@ from app.core.metainfo import MetaInfoPath
from app.log import logger
from app.schemas import MediaType
season_patterns = [
{"pattern": re.compile(r"S(\d+)$", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(\d+)$", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(\d+)(st|nd|rd|th)?\s*season", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r"(.*) ?\s*season (\d+)", re.IGNORECASE), "group": 2},
{"pattern": re.compile(r"\s(II|III|IV|V|VI|VII|VIII|IX|X)$", re.IGNORECASE), "group": "1"}
]
episode_patterns = [
{"pattern": re.compile(r"(\d+)\((\d+)\)", re.IGNORECASE), "group": 2},
{"pattern": re.compile(r"(\d+)", re.IGNORECASE), "group": 1},
{"pattern": re.compile(r'(\d+)v\d+', re.IGNORECASE), "group": 1},
]
def roman_to_int(s) -> int:
"""
:param s: 罗马数字字符串
罗马数字转整数
"""
roman_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
total = 0
prev_value = 0
ova_patterns = [
re.compile(r".*?(OVA|OAD).*?", re.IGNORECASE),
re.compile(r"\d+\.5"),
re.compile(r"00")
]
for char in reversed(s): # 反向遍历罗马数字字符串
current_value = roman_dict[char]
if current_value >= prev_value:
total += current_value # 如果当前值大于等于前一个值,加上当前值
else:
total -= current_value # 如果当前值小于前一个值,减去当前值
prev_value = current_value
final_season_patterns = [
re.compile('final season', re.IGNORECASE),
re.compile('The Final', re.IGNORECASE),
re.compile(r'\sFinal')
]
return total
movie_patterns = [
re.compile("Movie", re.IGNORECASE),
re.compile("the Movie", re.IGNORECASE),
]
@dataclass
class VCBMetaBase:
# 转化为小写后的原始文件名称 (不含后缀)
original_title: str = ""
# 解析后不包含季度和集数的标题
title: str = ""
# 类型:TV / Movie (默认TV)
type: str = "TV"
# 可能含有季度的标题,一级解析后的标题
season_title: str = ""
# 可能含有集数的字符串列表
ep_title: List[str] = None
# 识别出来的季度
season: int = None
# 识别出来的集数
ep: int = None
# 是否是OVA/OAD
is_ova: bool = False
# TMDB ID
tmdb_id: int = None
blocked_words = ["vcb-studio", "360p", "480p", "720p", "1080p", "2160p", "hdr", "x265", "x264", "aac", "flac"]
class ReMeta:
# 解析之后的标题:
title: str = None
# 识别出来的集数
ep: int = None
# 识别出来的季度
season: int = None
# 特殊季识别开关
is_special = False
# OVA/OAD识别开关
ova_switch: bool = False
# 高性能处理开关
high_performance = False
season_patterns = [
{"pattern": re.compile(r"S(\d+)$"), "group": 1},
{"pattern": re.compile(r"(\d+)$"), "group": 1},
{"pattern": re.compile(r"(\d+)(st|nd|rd|th)?\s*[Ss][Ee][Aa][Ss][Oo][Nn]"), "group": 1},
{"pattern": re.compile(r"(.*) ?\s*[Ss][Ee][Aa][Ss][Oo][Nn] (\d+)"), "group": 2},
{"pattern": re.compile(r"\s(II|III|IV|V|VI|VII|VIII|IX|X)$"), "group": "1"}
]
episode_patterns = [
{"pattern": re.compile(r"\[(\d+)\((\d+)\)]"), "group": 2},
{"pattern": re.compile(r"\[(\d+)]"), "group": 1},
{"pattern": re.compile(r'\[(\d+)v\d+]'), "group": 1},
]
_ova_patterns = [re.compile(r"\[.*?(OVA|OAD).*?]"),
re.compile(r"\[\d+\.5]"),
re.compile(r"\[00\]")]
final_season_patterns = [re.compile('final season', re.IGNORECASE),
re.compile('The Final', re.IGNORECASE),
re.compile(r'\sFinal')
]
# 自定义添加的季度正则表达式
_custom_season_patterns = []
def __init__(self, ova_switch: bool = False, high_performance: bool = False):
def __init__(self, ova_switch: bool = False, custom_season_patterns: list[dict] = None):
self.meta = None
# TODO:自定义季度匹配规则
self.custom_season_patterns = custom_season_patterns
self.season_patterns = season_patterns
self.ova_switch = ova_switch
self.high_performance = high_performance
self.vcb_meta = VCBMetaBase()
self.is_ova = False
def is_tv(self, title: str) -> bool:
"""
判断是否是TV
"""
if title.count("[") != 4 and title.count("]") != 4:
self.vcb_meta.type = "Movie"
self.vcb_meta.title = re.sub(r'\[.*?\]', '', title).strip()
return False
return True
def handel_file(self, file_path: Path):
file_name = file_path.stem.strip().lower()
self.vcb_meta.original_title = file_name
if not self.is_tv(file_name):
logger.warn(
"不符合VCB-Studio的剧集命名规范归类为电影,跳过剧集模块处理。注意:年份较为久远的作品可能在此会判断错误")
self.parse_movie()
else:
self.tv_mode()
self.is_ova = self.vcb_meta.is_ova
meta = MetaInfoPath(file_path)
self.title = meta.title
self.title = Path(self.title).stem.strip()
if 'VCB-Studio' not in meta.title:
logger.warn("不属于VCB的作品不处理")
return None
if meta.title.count("[") != 4 and meta.title.count("]") != 4:
# 可能是电影,电影只有三组[],因此去除所有[]后只剩下电影名
logger.warn("不符合VCB-Studio的剧集命名规范跳过剧集模块处理交给默认处理逻辑")
meta.title = re.sub(r'\[.*?\]', '', meta.title).strip()
meta.en_name = meta.title
return meta
split_title: List[str] | None = self.split_season_ep(self.title)
if split_title:
self.handle_season_ep(split_title)
if self.season is not None:
meta.begin_season = self.season
else:
logger.warn("未识别出季度,默认处理逻辑返回第一季")
if self.ep is not None:
meta.begin_episode = self.ep
else:
logger.warn("未识别出集数,默认处理逻辑返回第一集")
meta.title = self.title
meta.en_name = self.title
logger.info(f"识别出季度为{self.season},集数为{self.ep},标题为:{self.title}")
meta.title = self.vcb_meta.title
meta.en_name = self.vcb_meta.title
if self.vcb_meta.type == "Movie":
meta.type = MediaType.MOVIE
else:
meta.type = MediaType.TV
if self.vcb_meta.ep is not None:
meta.begin_episode = self.vcb_meta.ep
if self.vcb_meta.season is not None:
meta.begin_season = self.vcb_meta.season
if self.vcb_meta.tmdb_id is not None:
meta.tmdbid = self.vcb_meta.tmdb_id
return meta
# 分离季度部分和集数部分
def split_season_ep(self, pre_title: str):
split_ep = re.findall(r"(\[.*?])", pre_title)[1]
if not split_ep:
logger.warn("未识别出集数位置信息,结束识别!")
return None
split_title = re.sub(r"\[.*?\]", "", pre_title).strip()
logger.info(f"分离出包含季度的部分:{split_title} \n 分离出包含集数的部分: {split_ep}")
return [split_title, split_ep]
def split_season_ep(self):
# 把所有的[] 里面的内容获取出来,不需要[]本身
self.vcb_meta.ep_title = re.findall(r'\[(.*?)\]', self.vcb_meta.original_title)
# 去除所有[]后只剩下剧名
self.vcb_meta.season_title = re.sub(r"\[.*?\]", "", self.vcb_meta.original_title).strip()
if self.vcb_meta.ep_title:
self.culling_blocked_words()
logger.info(
f"分离出包含可能季度的内容部分:{self.vcb_meta.season_title} | 可能包含集数的内容部分: {self.vcb_meta.ep_title}")
self.vcb_meta.title = self.vcb_meta.season_title
if not self.vcb_meta.ep_title:
self.vcb_meta.title = self.vcb_meta.season_title
logger.warn("未识别出可能存在集数位置的信息,跳过剩余识别步骤!")
def handle_season_ep(self, title: List[str]):
if self.high_performance:
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
title_season_result = executor.submit(self.handle_season, title[0])
ep_result = executor.submit(self.re_ep, title[1], )
try:
title_season_result = title_season_result.result() # Blocks until the task is complete.
ep_result = ep_result.result() # Blocks until the task is complete.
except Exception as exc:
print('Generated an exception: %s' % exc)
else:
title_season_result = self.handle_season(title[0])
ep_result = self.re_ep(title[1])
self.title = title_season_result["title"]
is_ova = ep_result["is_ova"]
if ep_result["ep"] is not None:
self.ep = ep_result["ep"]
if title_season_result["season"]:
self.season = title_season_result["season"]
if is_ova:
self.season = 0
self.is_special = True
def tv_mode(self):
logger.info("开始分离季度和集数部分")
self.split_season_ep()
if not self.vcb_meta.ep_title:
return
self.parse_season()
self.parse_episode()
# 处理季度
def handle_season(self, pre_title: str) -> dict:
title_season = {"title": pre_title, "season": 1}
for season_pattern in self.season_patterns:
pattern = season_pattern["pattern"]
group = season_pattern["group"]
match = pattern.search(pre_title)
def parse_season(self):
"""
从标题中解析季度
"""
flag = False
for pattern in season_patterns:
match = pattern["pattern"].search(self.vcb_meta.season_title)
if match:
if type(group) == str:
title_season["season"] = roman_to_int(match.group(int(group)))
title_season["title"] = re.sub(pattern, "", pre_title).strip()
if isinstance(pattern["group"], int):
self.vcb_meta.season = int(match.group(pattern["group"]))
else:
title_season["season"] = int(match.group(group))
title_season["title"] = re.sub(pattern, "", pre_title).strip()
return title_season
for final_season_pattern in self.final_season_patterns:
match = final_season_pattern.search(pre_title)
if match:
logger.info("识别出最终季度,开始处理!")
title_season["title"] = re.sub(final_season_pattern, "", pre_title).strip()
title_season["season"] = self.handle_final_season(title=pre_title)
break
return title_season
self.vcb_meta.season = self.roman_to_int(match.group(pattern["group"]))
# 匹配成功后,标题中去除季度信息
self.vcb_meta.title = pattern["pattern"].sub("", self.vcb_meta.season_title).strip
logger.info(f"识别出季度为{self.vcb_meta.season}")
return
logger.info(f"正常匹配季度失败开始匹配ova/oad/最终季度")
if not flag:
# 匹配是否为最终季
for pattern in final_season_patterns:
if pattern.search(self.vcb_meta.season_title):
logger.info("命中到最终季匹配规则")
self.vcb_meta.title = pattern.sub("", self.vcb_meta.season_title).strip()
self.handle_final_season()
return
logger.info("未识别出最终季度开始匹配OVA/OAD")
# 匹配是否为OVA/OAD
if "ova" in self.vcb_meta.season_title or "oad" in self.vcb_meta.season_title:
logger.info("季度部分命中到OVA/OAD匹配规则")
if self.ova_switch:
logger.info("开启OVA/OAD处理逻辑")
self.vcb_meta.is_ova = True
for pattern in ova_patterns:
if pattern.search(self.vcb_meta.season_title):
self.vcb_meta.title = pattern.sub("", self.vcb_meta.season_title).strip()
self.vcb_meta.title = re.sub("ova|oad", "", self.vcb_meta.season_title).strip()
self.vcb_meta.season = 0
return
logger.warn("未识别出季度,默认处理逻辑返回第一季")
self.vcb_meta.title = self.vcb_meta.season_title
self.vcb_meta.season = 1
# 处理存在“Final”字样命名的季度
def handle_final_season(self, title: str) -> int | None:
medias = MediaChain().search(title=title)[1]
if not medias:
logger.warn("没有找到对应的媒体信息!")
return
# 根据类型进行过滤只取类型是电视剧和动漫的media
medias = [media for media in medias if media.type == MediaType.TV]
if not medias:
logger.warn("没有找到动漫或电视剧的媒体信息!")
return
media = sorted(medias, key=lambda x: x.popularity, reverse=True)[0]
media_tmdb_id = media.tmdb_id
seasons_info = TmdbChain().tmdb_seasons(tmdbid=media_tmdb_id)
if seasons_info is None:
logger.warn("无法获取最终季")
else:
logger.info(f"获取到最终季,季度为{len(seasons_info)}")
return len(seasons_info)
def parse_episode(self):
"""
从标题中解析集数
"""
# 从ep_title中剔除不相关的内容之后只剩下存在集数的字符串
ep = self.vcb_meta.ep_title[0]
for pattern in episode_patterns:
match = pattern["pattern"].search(ep)
if match:
self.vcb_meta.ep = int(match.group(pattern["group"]))
logger.info(f"识别出集数为{self.vcb_meta.ep}")
return
# 直接进入判断是否为OVA/OAD
for pattern in ova_patterns:
if pattern.search(ep):
self.vcb_meta.is_ova = True
# 直接获取数字
self.vcb_meta.ep = int(re.search(r"\d+", ep).group()) or 1
logger.info(f"OVA模式下识别出集数为{self.vcb_meta.ep}")
self.vcb_meta.season = 0
return
def re_ep(self, ep_title: str, ) -> dict:
def culling_blocked_words(self):
"""
# 集数匹配处理模块
:param ep_title: 从title解析出的集数,ep_title固定格式[集数]
1.先判断是否存在OVA/OAD,形如:[OVA],[12(OVA)],[12.5]这种形式都是属于OVA/OAD交给处理OVA模块处理
2.集数通常有两种情况一种:[12]直接性,另一种:[12(24)],这一种应该去括号内的为集数
:return: 集数(int)
从ep_title中剔除不相关的内容
"""
ep_ova = {"ep": None, "is_ova": False}
for ova_pattern in self._ova_patterns:
match = ova_pattern.search(ep_title)
if match:
ep_ova["is_ova"] = True
ep_ova["ep"] = 1
return ep_ova
for ep_pattern in self.episode_patterns:
pattern = ep_pattern["pattern"]
group = ep_pattern["group"]
match = pattern.search(ep_title)
if match:
ep_ova["ep"] = int(match.group(group))
return ep_ova
return ep_ova
blocked_set = set(blocked_words) # 将阻止词列表转换为集合
result = [ep for ep in self.vcb_meta.ep_title if not any(word in ep for word in blocked_set)]
self.vcb_meta.ep_title = result
def handle_final_season(self):
_, medias = MediaChain().search(title=self.vcb_meta.title)
if not medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
filter_medias = [media for media in medias if media.type == MediaType.TV]
if not filter_medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
medias = [media for media in filter_medias if media.popularity or media.vote_average]
if not medias:
logger.warning("匹配到最终季时无法找到对应的媒体信息季度返回默认值1")
self.vcb_meta.season = 1
return
# 获取欢迎度最高或者评分最高的媒体
medias_sorted = sorted(medias, key=lambda x: x.popularity or x.vote_average, reverse=True)[0]
self.vcb_meta.tmdb_id = medias_sorted.tmdb_id
if medias_sorted.tmdb_id:
seasons_info = TmdbChain().tmdb_seasons(tmdbid=medias_sorted.tmdb_id)
if seasons_info:
self.vcb_meta.season = len(seasons_info)
logger.info(f"获取到最终季度,季度为{self.vcb_meta.season}")
return
logger.warning("无法获取到最终季度信息季度返回默认值1")
self.vcb_meta.season = 1
def parse_movie(self):
logger.info("开始尝试剧场版模式解析")
for pattern in movie_patterns:
if pattern.search(self.vcb_meta.title):
logger.info("命中剧场版匹配规则,加上剧场版标识辅助识别")
self.vcb_meta.type = "Movie"
self.vcb_meta.title = pattern.sub("", self.vcb_meta.title).strip()
self.vcb_meta.title = self.vcb_meta.title
return
def find_ova_episode(self):
"""
搜索OVA的集数
TODO:模糊匹配OVA的集数
"""
pass
@staticmethod
def roman_to_int(s) -> int:
"""
:param s: 罗马数字字符串
罗马数字转整数
"""
roman_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
total = 0
prev_value = 0
for char in reversed(s): # 反向遍历罗马数字字符串
current_value = roman_dict[char]
if current_value >= prev_value:
total += current_value # 如果当前值大于等于前一个值,加上当前值
else:
total -= current_value # 如果当前值小于前一个值,减去当前值
prev_value = current_value
return total
# if __name__ == '__main__':
# ReMeta(
# ova_switch=True,
# ).handel_file(Path(
# r"[Airota&Nekomoe kissaten&VCB-Studio] Yuru Camp [Heya Camp EP00][Ma10p_1080p][x265_flac].mkv"))

View File

@@ -1,9 +1,11 @@
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
from app.utils.http import RequestUtils
from typing import Any, List, Dict, Tuple
from app.log import logger
class WebHook(_PluginBase):
@@ -14,7 +16,7 @@ class WebHook(_PluginBase):
# 插件图标
plugin_icon = "webhook.png"
# 插件版本
plugin_version = "1.0"
plugin_version = "1.1"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
@@ -134,6 +136,9 @@ class WebHook(_PluginBase):
if not self._enabled or not self._webhook_url:
return
if not event or not event.event_type:
return
def __to_dict(_event):
"""
递归将对象转换为字典
@@ -159,21 +164,27 @@ class WebHook(_PluginBase):
else:
return str(_event)
version = getattr(settings, "VERSION_FLAG", "v1")
event_type = event.event_type if version == "v1" else event.event_type.value
event_info = {
"type": event.event_type,
"type": event_type,
"data": __to_dict(event.event_data)
}
if self._method == 'POST':
ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info)
else:
ret = RequestUtils().get_res(self._webhook_url, params=event_info)
if ret:
logger.info("发送成功:%s" % self._webhook_url)
elif ret is not None:
logger.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}")
else:
logger.error("发送失败,未获取到返回信息")
try:
if self._method == 'POST':
ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info)
else:
ret = RequestUtils().get_res(self._webhook_url, params=event_info)
if ret:
logger.info(f"发送成功:{self._webhook_url}")
elif ret is not None:
logger.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}")
else:
logger.error("发送失败,未获取到返回信息")
except Exception as e:
logger.error(f"发送请求时发生异常:{e}")
def stop_service(self):
"""

View File

@@ -75,7 +75,7 @@ class DoubanHelper:
response = RequestUtils(headers=self.headers).get_res(url)
if not response.status_code == 200:
logger.error(f"搜索 {title} 失败 状态码:{response.status_code}")
return None
return None, None, None
# self.headers["Cookie"] = response.cookies
soup = BeautifulSoup(response.text.encode('utf-8'), 'lxml')
title_divs = soup.find_all("div", class_="title")

View File

@@ -31,7 +31,7 @@ class ZvideoHelper(_PluginBase):
# 插件图标
plugin_icon = "zvideo.png"
# 插件版本
plugin_version = "1.3"
plugin_version = "1.4"
# 插件作者
plugin_author = "DzAvril"
# 作者主页