diff --git a/README.md b/README.md index 7aa629d..2057cd4 100644 --- a/README.md +++ b/README.md @@ -506,3 +506,32 @@ def get_dashboard(self, key: str, **kwargs) -> Optional[Tuple[Dict[str, Any], Di } ``` - 新增加的插件请配置在`package.json`中的末尾,这样可被识别为最新增加,可用于用户排序。 + +### 10. 如何开发V2版本的插件以及实现插件多版本兼容? +- 将插件代码放置在`plugins.v2`文件夹,将插件的定义放置在`package.v2.json`中,可实现该插件仅 MoviePilot V2 版本可见 +- 如V1版本插件实际在V2版本可用,或在插件中主动兼容了V1和V2版本,则可在`package.json`中定义 `"v2": true`属性,以便在 MoviePilot V2 版本插件市场中显示 + +```json +{ + "CustomSites": { + "name": "自定义站点", + "description": "增加自定义站点为签到和统计使用。", + "labels": "站点", + "version": "1.0", + "icon": "world.png", + "author": "lightolly", + "level": 2, + "v2": true + } +} +``` + +- MoviePilot V2中 Settings 模块中新增了`VERSION_FLAG`属性,V2版本值为`v2`,可通过以下代码判断当前的版本,以便在插件中兼容处理: + +```python +from app.core.config import settings +if hasattr(settings, 'VERSION_FLAG'): + version = settings.VERSION_FLAG # V2 +else: + version = "v1" +``` \ No newline at end of file diff --git a/package.v2.json b/package.v2.json new file mode 100644 index 0000000..bdf7828 --- /dev/null +++ b/package.v2.json @@ -0,0 +1,14 @@ +{ + "SiteStatistic": { + "name": "站点数据统计", + "description": "站点统计数据图表。", + "labels": "站点,仪表板", + "version": "1.0.0", + "icon": "statistic.png", + "author": "lightolly,jxxghp", + "level": 2, + "history": { + "v1.0.0": "MoviePilot V2 版本站点数据统计插件" + } + } +} \ No newline at end of file diff --git a/plugins.v2/sitestatistic/__init__.py b/plugins.v2/sitestatistic/__init__.py new file mode 100644 index 0000000..2c5622a --- /dev/null +++ b/plugins.v2/sitestatistic/__init__.py @@ -0,0 +1,1504 @@ +import json +import re +import warnings +from datetime import datetime, timedelta +from multiprocessing.dummy import Pool as ThreadPool +from threading import Lock +from typing import Optional, Any, List, Dict, Tuple + +import pytz +import requests +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.cron import CronTrigger +from ruamel.yaml import 
CommentedMap + +from app import schemas +from app.core.config import settings +from app.core.event import Event, eventmanager +from app.db.models import PluginData +from app.db.site_oper import SiteOper +from app.helper.browser import PlaywrightHelper +from app.helper.module import ModuleHelper +from app.helper.sites import SitesHelper +from app.log import logger +from app.plugins import _PluginBase +from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo +from app.schemas.types import EventType, NotificationType +from app.utils.http import RequestUtils +from app.utils.object import ObjectUtils +from app.utils.string import StringUtils +from app.utils.timer import TimerUtils + +warnings.filterwarnings("ignore", category=FutureWarning) + +lock = Lock() + + +class SiteStatistic(_PluginBase): + # 插件名称 + plugin_name = "站点数据统计" + # 插件描述 + plugin_desc = "站点统计数据图表。" + # 插件图标 + plugin_icon = "statistic.png" + # 插件版本 + plugin_version = "1.0.0" + # 插件作者 + plugin_author = "lightolly,jxxghp" + # 作者主页 + author_url = "https://github.com/lightolly" + # 插件配置项ID前缀 + plugin_config_prefix = "sitestatistic_" + # 加载顺序 + plugin_order = 1 + # 可使用的用户级别 + auth_level = 2 + + # 私有属性 + sites = None + siteoper = None + _scheduler: Optional[BackgroundScheduler] = None + _last_update_time: Optional[datetime] = None + _sites_data: dict = {} + _site_schema: List[ISiteUserInfo] = None + + # 配置属性 + _enabled: bool = False + _onlyonce: bool = False + _sitemsg: bool = True + _cron: str = "" + _notify: bool = False + _queue_cnt: int = 5 + _remove_failed: bool = False + _statistic_type: str = None + _statistic_sites: list = [] + _dashboard_type: str = "today" + + def init_plugin(self, config: dict = None): + self.sites = SitesHelper() + self.siteoper = SiteOper() + # 停止现有任务 + self.stop_service() + + # 配置 + if config: + self._enabled = config.get("enabled") + self._onlyonce = config.get("onlyonce") + self._cron = config.get("cron") + self._notify = config.get("notify") + self._sitemsg = 
config.get("sitemsg") + self._queue_cnt = config.get("queue_cnt") + self._remove_failed = config.get("remove_failed") + self._statistic_type = config.get("statistic_type") or "all" + self._statistic_sites = config.get("statistic_sites") or [] + self._dashboard_type = config.get("dashboard_type") or "today" + + # 过滤掉已删除的站点 + all_sites = [site.id for site in self.siteoper.list_order_by_pri()] + [site.get("id") for site in + self.__custom_sites()] + self._statistic_sites = [site_id for site_id in all_sites if site_id in self._statistic_sites] + self.__update_config() + + if self._enabled or self._onlyonce: + # 加载模块 + self._site_schema = ModuleHelper.load('app.plugins.sitestatistic.siteuserinfo', + filter_func=lambda _, obj: hasattr(obj, 'schema')) + + self._site_schema.sort(key=lambda x: x.order) + # 站点上一次更新时间 + self._last_update_time = None + # 站点数据 + self._sites_data = {} + + # 立即运行一次 + if self._onlyonce: + # 定时服务 + self._scheduler = BackgroundScheduler(timezone=settings.TZ) + logger.info(f"站点数据统计服务启动,立即运行一次") + self._scheduler.add_job(self.refresh_all_site_data, 'date', + run_date=datetime.now( + tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3) + ) + # 关闭一次性开关 + self._onlyonce = False + + # 保存配置 + self.__update_config() + + # 启动任务 + if self._scheduler.get_jobs(): + self._scheduler.print_jobs() + self._scheduler.start() + + def get_state(self) -> bool: + return self._enabled + + @staticmethod + def get_command() -> List[Dict[str, Any]]: + """ + 定义远程控制命令 + :return: 命令关键字、事件、描述、附带数据 + """ + return [{ + "cmd": "/site_statistic", + "event": EventType.PluginAction, + "desc": "站点数据统计", + "category": "站点", + "data": { + "action": "site_statistic" + } + }] + + def get_api(self) -> List[Dict[str, Any]]: + """ + 获取插件API + [{ + "path": "/xx", + "endpoint": self.xxx, + "methods": ["GET", "POST"], + "summary": "API说明" + }] + """ + return [{ + "path": "/refresh_by_domain", + "endpoint": self.refresh_by_domain, + "methods": ["GET"], + "summary": "刷新站点数据", + "description": 
"刷新对应域名的站点数据", + }] + + def get_service(self) -> List[Dict[str, Any]]: + """ + 注册插件公共服务 + [{ + "id": "服务ID", + "name": "服务名称", + "trigger": "触发器:cron/interval/date/CronTrigger.from_crontab()", + "func": self.xxx, + "kwargs": {} # 定时器参数 + }] + """ + if self._enabled and self._cron: + return [{ + "id": "SiteStatistic", + "name": "站点数据统计服务", + "trigger": CronTrigger.from_crontab(self._cron), + "func": self.refresh_all_site_data, + "kwargs": {} + }] + elif self._enabled: + triggers = TimerUtils.random_scheduler(num_executions=1, + begin_hour=0, + end_hour=1, + min_interval=1, + max_interval=60) + ret_jobs = [] + for trigger in triggers: + ret_jobs.append({ + "id": f"SiteStatistic|{trigger.hour}:{trigger.minute}", + "name": "站点数据统计服务", + "trigger": "cron", + "func": self.refresh_all_site_data, + "kwargs": { + "hour": trigger.hour, + "minute": trigger.minute + } + }) + return ret_jobs + return [] + + def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: + """ + 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 + """ + # 站点的可选项(内置站点 + 自定义站点) + customSites = self.__custom_sites() + + site_options = ([{"title": site.name, "value": site.id} + for site in self.siteoper.list_order_by_pri()] + + [{"title": site.get("name"), "value": site.get("id")} + for site in customSites]) + + return [ + { + 'component': 'VForm', + 'content': [ + { + 'component': 'VRow', + 'content': [ + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 4 + }, + 'content': [ + { + 'component': 'VSwitch', + 'props': { + 'model': 'enabled', + 'label': '启用插件', + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 4 + }, + 'content': [ + { + 'component': 'VSwitch', + 'props': { + 'model': 'notify', + 'label': '发送通知', + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 4 + }, + 'content': [ + { + 'component': 'VSwitch', + 'props': { + 'model': 'onlyonce', + 'label': '立即运行一次', + } + } + ] + } + ] + }, + { + 'component': 'VRow', + 'content': [ + { + 'component': 
'VCol', + 'props': { + 'cols': 12, + 'md': 3 + }, + 'content': [ + { + 'component': 'VTextField', + 'props': { + 'model': 'cron', + 'label': '执行周期', + 'placeholder': '5位cron表达式,留空自动' + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 3 + }, + 'content': [ + { + 'component': 'VTextField', + 'props': { + 'model': 'queue_cnt', + 'label': '队列数量' + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 3 + }, + 'content': [ + { + 'component': 'VSelect', + 'props': { + 'model': 'statistic_type', + 'label': '统计类型', + 'items': [ + {'title': '全量', 'value': 'all'}, + {'title': '增量', 'value': 'add'} + ] + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 3 + }, + 'content': [ + { + 'component': 'VSelect', + 'props': { + 'model': 'dashboard_type', + 'label': '仪表板组件', + 'items': [ + {'title': '今日数据', 'value': 'today'}, + {'title': '汇总数据', 'value': 'total'}, + {'title': '所有数据', 'value': 'all'} + ] + } + } + ] + } + ] + }, + { + 'component': 'VRow', + 'content': [ + { + 'component': 'VCol', + 'content': [ + { + 'component': 'VSelect', + 'props': { + 'chips': True, + 'multiple': True, + 'model': 'statistic_sites', + 'label': '统计站点', + 'items': site_options + } + } + ] + } + ] + }, + { + 'component': 'VRow', + 'content': [ + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 4 + }, + 'content': [ + { + 'component': 'VSwitch', + 'props': { + 'model': 'sitemsg', + 'label': '站点未读消息', + } + } + ] + }, + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 4 + }, + 'content': [ + { + 'component': 'VSwitch', + 'props': { + 'model': 'remove_failed', + 'label': '移除失效站点', + } + } + ] + }, + ] + } + ] + } + ], { + "enabled": False, + "onlyonce": False, + "notify": True, + "sitemsg": True, + "cron": "5 1 * * *", + "queue_cnt": 5, + "remove_failed": False, + "statistic_type": "all", + "statistic_sites": [], + "dashboard_type": 'today' + } + + def __get_data(self) -> Tuple[str, dict, dict]: + """ + 
获取今天的日期、今天的站点数据、昨天的站点数据 + """ + # 最近一天的签到数据 + stattistic_data: Dict[str, Dict[str, Any]] = {} + # 昨天数据 + yesterday_sites_data: Dict[str, Dict[str, Any]] = {} + # 获取最近所有数据 + data_list: List[PluginData] = self.get_data(key=None) + if not data_list: + return "", {}, {} + # 取key符合日期格式的数据 + data_list = [data for data in data_list if re.match(r"\d{4}-\d{2}-\d{2}", data.key)] + # 按日期倒序排序 + data_list.sort(key=lambda x: x.key, reverse=True) + # 今天的日期 + today = data_list[0].key + # 数据按时间降序排序 + datas = [json.loads(data.value) for data in data_list if ObjectUtils.is_obj(data.value)] + if len(data_list) > 0: + stattistic_data = datas[0] + if len(data_list) > 1: + yesterday_sites_data = datas[1] + + # 数据按时间降序排序 + stattistic_data = dict(sorted(stattistic_data.items(), + key=lambda item: item[1].get('upload') or 0, + reverse=True)) + return today, stattistic_data, yesterday_sites_data + + @staticmethod + def __get_total_elements(today: str, stattistic_data: dict, yesterday_sites_data: dict, + dashboard: str = "today") -> List[dict]: + """ + 获取统计元素 + """ + + def __gb(value: int) -> float: + """ + 转换为GB,保留1位小数 + """ + if not value: + return 0 + return round(float(value) / 1024 / 1024 / 1024, 1) + + def __sub_dict(d1: dict, d2: dict) -> dict: + """ + 计算两个字典相同Key值的差值(如果值为数字),返回新字典 + """ + if not d1: + return {} + if not d2: + return d1 + d = {k: int(d1.get(k)) - int(d2.get(k)) for k in d1 + if k in d2 and str(d1.get(k)).isdigit() and str(d2.get(k)).isdigit()} + # 把小于0的数据变成0 + for k, v in d.items(): + if str(v).isdigit() and int(v) < 0: + d[k] = 0 + return d + + if dashboard in ['total', 'all']: + # 总上传量 + total_upload = sum([int(data.get("upload")) + for data in stattistic_data.values() if data.get("upload")]) + # 总下载量 + total_download = sum([int(data.get("download")) + for data in stattistic_data.values() if data.get("download")]) + # 总做种数 + total_seed = sum([int(data.get("seeding")) + for data in stattistic_data.values() if data.get("seeding")]) + # 总做种体积 + total_seed_size = 
sum([int(data.get("seeding_size")) + for data in stattistic_data.values() if data.get("seeding_size")]) + + total_elements = [ + # 总上传量 + { + 'component': 'VCol', + 'props': { + 'cols': 6, + 'md': 3 + }, + 'content': [ + { + 'component': 'VCard', + 'props': { + 'variant': 'tonal', + }, + 'content': [ + { + 'component': 'VCardText', + 'props': { + 'class': 'd-flex align-center', + }, + 'content': [ + { + 'component': 'VAvatar', + 'props': { + 'rounded': True, + 'variant': 'text', + 'class': 'me-3' + }, + 'content': [ + { + 'component': 'VImg', + 'props': { + 'src': '/plugin_icon/upload.png' + } + } + ] + }, + { + 'component': 'div', + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-caption' + }, + 'text': '总上传量' + }, + { + 'component': 'div', + 'props': { + 'class': 'd-flex align-center flex-wrap' + }, + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-h6' + }, + 'text': StringUtils.str_filesize(total_upload) + } + ] + } + ] + } + ] + } + ] + }, + ] + }, + # 总下载量 + { + 'component': 'VCol', + 'props': { + 'cols': 6, + 'md': 3, + }, + 'content': [ + { + 'component': 'VCard', + 'props': { + 'variant': 'tonal', + }, + 'content': [ + { + 'component': 'VCardText', + 'props': { + 'class': 'd-flex align-center', + }, + 'content': [ + { + 'component': 'VAvatar', + 'props': { + 'rounded': True, + 'variant': 'text', + 'class': 'me-3' + }, + 'content': [ + { + 'component': 'VImg', + 'props': { + 'src': '/plugin_icon/download.png' + } + } + ] + }, + { + 'component': 'div', + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-caption' + }, + 'text': '总下载量' + }, + { + 'component': 'div', + 'props': { + 'class': 'd-flex align-center flex-wrap' + }, + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-h6' + }, + 'text': StringUtils.str_filesize(total_download) + } + ] + } + ] + } + ] + } + ] + }, + ] + }, + # 总做种数 + { + 'component': 'VCol', + 'props': { + 'cols': 6, + 'md': 3 + }, + 'content': [ + { + 
'component': 'VCard', + 'props': { + 'variant': 'tonal', + }, + 'content': [ + { + 'component': 'VCardText', + 'props': { + 'class': 'd-flex align-center', + }, + 'content': [ + { + 'component': 'VAvatar', + 'props': { + 'rounded': True, + 'variant': 'text', + 'class': 'me-3' + }, + 'content': [ + { + 'component': 'VImg', + 'props': { + 'src': '/plugin_icon/seed.png' + } + } + ] + }, + { + 'component': 'div', + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-caption' + }, + 'text': '总做种数' + }, + { + 'component': 'div', + 'props': { + 'class': 'd-flex align-center flex-wrap' + }, + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-h6' + }, + 'text': f'{"{:,}".format(total_seed)}' + } + ] + } + ] + } + ] + } + ] + }, + ] + }, + # 总做种体积 + { + 'component': 'VCol', + 'props': { + 'cols': 6, + 'md': 3 + }, + 'content': [ + { + 'component': 'VCard', + 'props': { + 'variant': 'tonal', + }, + 'content': [ + { + 'component': 'VCardText', + 'props': { + 'class': 'd-flex align-center', + }, + 'content': [ + { + 'component': 'VAvatar', + 'props': { + 'rounded': True, + 'variant': 'text', + 'class': 'me-3' + }, + 'content': [ + { + 'component': 'VImg', + 'props': { + 'src': '/plugin_icon/database.png' + } + } + ] + }, + { + 'component': 'div', + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-caption' + }, + 'text': '总做种体积' + }, + { + 'component': 'div', + 'props': { + 'class': 'd-flex align-center flex-wrap' + }, + 'content': [ + { + 'component': 'span', + 'props': { + 'class': 'text-h6' + }, + 'text': StringUtils.str_filesize(total_seed_size) + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + else: + total_elements = [] + + if dashboard in ["today", "all"]: + # 计算增量数据集 + inc_data = {} + for site, data in stattistic_data.items(): + inc = __sub_dict(data, yesterday_sites_data.get(site)) + if inc: + inc_data[site] = inc + # 今日上传 + uploads = {k: v for k, v in inc_data.items() if v.get("upload")} + # 今日上传站点 + 
upload_sites = [site for site in uploads.keys()] + # 今日上传数据 + upload_datas = [__gb(data.get("upload")) for data in uploads.values()] + # 今日上传总量 + today_upload = round(sum(upload_datas), 2) + # 今日下载 + downloads = {k: v for k, v in inc_data.items() if v.get("download")} + # 今日下载站点 + download_sites = [site for site in downloads.keys()] + # 今日下载数据 + download_datas = [__gb(data.get("download")) for data in downloads.values()] + # 今日下载总量 + today_download = round(sum(download_datas), 2) + # 今日上传下载元素 + today_elements = [ + # 上传量图表 + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 6 + }, + 'content': [ + { + 'component': 'VApexChart', + 'props': { + 'height': 300, + 'options': { + 'chart': { + 'type': 'pie', + }, + 'labels': upload_sites, + 'title': { + 'text': f'今日上传({today})共 {today_upload} GB' + }, + 'legend': { + 'show': True + }, + 'plotOptions': { + 'pie': { + 'expandOnClick': False + } + }, + 'noData': { + 'text': '暂无数据' + } + }, + 'series': upload_datas + } + } + ] + }, + # 下载量图表 + { + 'component': 'VCol', + 'props': { + 'cols': 12, + 'md': 6 + }, + 'content': [ + { + 'component': 'VApexChart', + 'props': { + 'height': 300, + 'options': { + 'chart': { + 'type': 'pie', + }, + 'labels': download_sites, + 'title': { + 'text': f'今日下载({today})共 {today_download} GB' + }, + 'legend': { + 'show': True + }, + 'plotOptions': { + 'pie': { + 'expandOnClick': False + } + }, + 'noData': { + 'text': '暂无数据' + } + }, + 'series': download_datas + } + } + ] + } + ] + else: + today_elements = [] + # 合并返回 + return total_elements + today_elements + + def get_dashboard(self) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]: + """ + 获取插件仪表盘页面,需要返回:1、仪表板col配置字典;2、仪表板页面元素配置json(含数据);3、全局配置(自动刷新等) + 1、col配置参考: + { + "cols": 12, "md": 6 + } + 2、页面配置使用Vuetify组件拼装,参考:https://vuetifyjs.com/ + 3、全局配置参考: + { + "refresh": 10 // 自动刷新时间,单位秒 + } + """ + # 列配置 + cols = { + "cols": 12 + } + # 全局配置 + attrs = {} + # 获取数据 + today, stattistic_data, yesterday_sites_data = 
self.__get_data() + # 汇总 + # 站点统计 + elements = [ + { + 'component': 'VRow', + 'content': self.__get_total_elements( + today=today, + stattistic_data=stattistic_data, + yesterday_sites_data=yesterday_sites_data, + dashboard=self._dashboard_type + ) + } + ] + return cols, attrs, elements + + def get_page(self) -> List[dict]: + """ + 拼装插件详情页面,需要返回页面配置,同时附带数据 + """ + + def format_bonus(bonus): + try: + return f'{float(bonus):,.1f}' + except ValueError: + return '0.0' + + # 获取数据 + today, stattistic_data, yesterday_sites_data = self.__get_data() + if not stattistic_data: + return [ + { + 'component': 'div', + 'text': '暂无数据', + 'props': { + 'class': 'text-center', + } + } + ] + + # 站点统计 + site_totals = self.__get_total_elements( + today=today, + stattistic_data=stattistic_data, + yesterday_sites_data=yesterday_sites_data, + dashboard='all' + ) + + # 站点数据明细 + site_trs = [ + { + 'component': 'tr', + 'props': { + 'class': 'text-sm' + }, + 'content': [ + { + 'component': 'td', + 'props': { + 'class': 'whitespace-nowrap break-keep text-high-emphasis' + }, + 'text': site + }, + { + 'component': 'td', + 'text': data.get("username") + }, + { + 'component': 'td', + 'text': data.get("user_level") + }, + { + 'component': 'td', + 'props': { + 'class': 'text-success' + }, + 'text': StringUtils.str_filesize(data.get("upload")) + }, + { + 'component': 'td', + 'props': { + 'class': 'text-error' + }, + 'text': StringUtils.str_filesize(data.get("download")) + }, + { + 'component': 'td', + 'text': data.get('ratio') + }, + { + 'component': 'td', + 'text': format_bonus(data.get('bonus') or 0) + }, + { + 'component': 'td', + 'text': data.get('seeding') + }, + { + 'component': 'td', + 'text': StringUtils.str_filesize(data.get('seeding_size')) + } + ] + } for site, data in stattistic_data.items() if not data.get("err_msg") + ] + + # 拼装页面 + return [ + { + 'component': 'VRow', + 'content': site_totals + [ + # 各站点数据明细 + { + 'component': 'VCol', + 'props': { + 'cols': 12, + }, + 'content': [ + { + 
'component': 'VTable', + 'props': { + 'hover': True + }, + 'content': [ + { + 'component': 'thead', + 'content': [ + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '站点' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '用户名' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '用户等级' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '上传量' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '下载量' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '分享率' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '魔力值' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '做种数' + }, + { + 'component': 'th', + 'props': { + 'class': 'text-start ps-4' + }, + 'text': '做种体积' + } + ] + }, + { + 'component': 'tbody', + 'content': site_trs + } + ] + } + ] + } + ] + } + ] + + def stop_service(self): + """ + 退出插件 + """ + try: + if self._scheduler: + self._scheduler.remove_all_jobs() + if self._scheduler.running: + self._scheduler.shutdown() + self._scheduler = None + except Exception as e: + logger.error("退出插件失败:%s" % str(e)) + + def __build_class(self, html_text: str) -> Any: + for site_schema in self._site_schema: + try: + if site_schema.match(html_text): + return site_schema + except Exception as e: + logger.error(f"站点匹配失败 {str(e)}") + return None + + def build(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]: + """ + 构建站点信息 + """ + site_name = site_info.get("name") + site_cookie = site_info.get("cookie") + apikey = site_info.get("apikey") + token = site_info.get("token") + if not site_cookie and not apikey and not token: + return None + url = site_info.get("url") + proxy = site_info.get("proxy") + ua = site_info.get("ua") + # 会话管理 + with requests.Session() as session: + proxies = settings.PROXY if 
proxy else None + proxy_server = settings.PROXY_SERVER if proxy else None + render = site_info.get("render") + logger.debug(f"站点 {site_name} url={url},site_cookie={site_cookie},ua={ua},api_key={apikey},token={token},proxy={proxy}") + if render: + # 演染模式 + html_text = PlaywrightHelper().get_page_source(url=url, + cookies=site_cookie, + ua=ua, + proxies=proxy_server) + else: + # 普通模式 + res = RequestUtils(cookies=site_cookie, + session=session, + ua=ua, + proxies=proxies + ).get_res(url=url) + if res and res.status_code == 200: + if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE): + res.encoding = "utf-8" + else: + res.encoding = res.apparent_encoding + html_text = res.text + # 第一次登录反爬 + if html_text.find("title") == -1: + i = html_text.find("window.location") + if i == -1: + return None + tmp_url = url + html_text[i:html_text.find(";")] \ + .replace("\"", "") \ + .replace("+", "") \ + .replace(" ", "") \ + .replace("window.location=", "") + res = RequestUtils(cookies=site_cookie, + session=session, + ua=ua, + proxies=proxies + ).get_res(url=tmp_url) + if res and res.status_code == 200: + if "charset=utf-8" in res.text or "charset=UTF-8" in res.text: + res.encoding = "UTF-8" + else: + res.encoding = res.apparent_encoding + html_text = res.text + if not html_text: + return None + elif res is not None: + logger.error("站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code)) + return None + else: + logger.error("站点 %s 无法访问:%s" % (site_name, url)) + return None + + # 兼容假首页情况,假首页通常没有 schemas.Response: + """ + 刷新一个站点数据,可由API调用 + """ + if apikey != settings.API_TOKEN: + return schemas.Response(success=False, message="API密钥错误") + site_info = self.sites.get_indexer(domain) + if site_info: + site_data = self.__refresh_site_data(site_info) + if site_data: + return schemas.Response( + success=True, + message=f"站点 {domain} 刷新成功", + data=site_data.to_dict() + ) + return schemas.Response( + success=False, + message=f"站点 {domain} 刷新数据失败,未获取到数据" + ) + return 
schemas.Response( + success=False, + message=f"站点 {domain} 不存在" + ) + + def __refresh_site_data(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]: + """ + 更新单个site 数据信息 + :param site_info: + :return: + """ + site_name = site_info.get('name') + site_url = site_info.get('url') + if not site_url: + return None + unread_msg_notify = True + try: + site_user_info: ISiteUserInfo = self.build(site_info=site_info) + if site_user_info: + logger.debug(f"站点 {site_name} 开始以 {site_user_info.site_schema()} 模型解析") + # 开始解析 + site_user_info.parse() + logger.debug(f"站点 {site_name} 解析完成") + + # 获取不到数据时,仅返回错误信息,不做历史数据更新 + if site_user_info.err_msg: + self._sites_data.update({site_name: {"err_msg": site_user_info.err_msg}}) + return None + + if self._sitemsg: + # 发送通知,存在未读消息 + self.__notify_unread_msg(site_name, site_user_info, unread_msg_notify) + + # 分享率接近1时,发送消息提醒 + if site_user_info.ratio and float(site_user_info.ratio) < 1: + self.post_message(mtype=NotificationType.SiteMessage, + title=f"【站点分享率低预警】", + text=f"站点 {site_user_info.site_name} 分享率 {site_user_info.ratio},请注意!") + + self._sites_data.update( + { + site_name: { + "upload": site_user_info.upload, + "username": site_user_info.username, + "user_level": site_user_info.user_level, + "join_at": site_user_info.join_at, + "download": site_user_info.download, + "ratio": site_user_info.ratio, + "seeding": site_user_info.seeding, + "seeding_size": site_user_info.seeding_size, + "leeching": site_user_info.leeching, + "bonus": site_user_info.bonus, + "url": site_url, + "err_msg": site_user_info.err_msg, + "message_unread": site_user_info.message_unread, + "updated_at": datetime.now().strftime('%Y-%m-%d') + } + }) + return site_user_info + + except Exception as e: + import traceback + logger.error(f"站点 {site_name} 获取流量数据失败:{str(e)}") + logger.error(traceback.format_exc()) + return None + + def __notify_unread_msg(self, site_name: str, site_user_info: ISiteUserInfo, unread_msg_notify: bool): + if site_user_info.message_unread <= 
0: + return + if self._sites_data.get(site_name, {}).get('message_unread') == site_user_info.message_unread: + return + if not unread_msg_notify: + return + + # 解析出内容,则发送内容 + if len(site_user_info.message_unread_contents) > 0: + for head, date, content in site_user_info.message_unread_contents: + msg_title = f"【站点 {site_user_info.site_name} 消息】" + msg_text = f"时间:{date}\n标题:{head}\n内容:\n{content}" + self.post_message(mtype=NotificationType.SiteMessage, title=msg_title, text=msg_text) + else: + self.post_message(mtype=NotificationType.SiteMessage, + title=f"站点 {site_user_info.site_name} 收到 " + f"{site_user_info.message_unread} 条新消息,请登陆查看") + + @eventmanager.register(EventType.PluginAction) + def refresh(self, event: Event): + """ + 刷新站点数据 + """ + if event: + event_data = event.event_data + if not event_data or event_data.get("action") != "site_statistic": + return + logger.info("收到命令,开始刷新站点数据 ...") + self.post_message(channel=event.event_data.get("channel"), + title="开始刷新站点数据 ...", + userid=event.event_data.get("user")) + self.refresh_all_site_data() + if event: + self.post_message(channel=event.event_data.get("channel"), + title="站点数据刷新完成!", userid=event.event_data.get("user")) + + def refresh_all_site_data(self): + """ + 多线程刷新站点下载上传量,默认间隔6小时 + """ + if not self.sites.get_indexers(): + return + + logger.info("开始刷新站点数据 ...") + + with lock: + + all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites() + # 没有指定站点,默认使用全部站点 + if not self._statistic_sites: + refresh_sites = all_sites + else: + refresh_sites = [site for site in all_sites if + site.get("id") in self._statistic_sites] + if not refresh_sites: + return + + # 将数据初始化为前一天,筛选站点 + yesterday_sites_data = {} + today_date = datetime.now().strftime('%Y-%m-%d') + if self._statistic_type == "add" or not self._remove_failed: + if last_update_time := self.get_data("last_update_time"): + yesterday_sites_data = self.get_data(last_update_time) or {} + + if not 
self._remove_failed and yesterday_sites_data: + site_names = [site.get("name") for site in refresh_sites] + self._sites_data = {k: v for k, v in yesterday_sites_data.items() if k in site_names} + + # 并发刷新 + with ThreadPool(min(len(refresh_sites), int(self._queue_cnt or 5))) as p: + p.map(self.__refresh_site_data, refresh_sites) + + # 通知刷新完成 + if self._notify: + messages = {} + # 总上传 + incUploads = 0 + # 总下载 + incDownloads = 0 + + for rand, site in enumerate(self._sites_data.keys()): + upload = int(self._sites_data[site].get("upload") or 0) + download = int(self._sites_data[site].get("download") or 0) + updated_date = self._sites_data[site].get("updated_at") + + if self._statistic_type == "add" and yesterday_sites_data.get(site): + upload -= int(yesterday_sites_data[site].get("upload") or 0) + download -= int(yesterday_sites_data[site].get("download") or 0) + + if updated_date and updated_date != today_date: + updated_date = f"({updated_date})" + else: + updated_date = "" + + if upload > 0 or download > 0: + incUploads += upload + incDownloads += download + messages[upload + (rand / 1000)] = ( + f"【{site}】{updated_date}\n" + + f"上传量:{StringUtils.str_filesize(upload)}\n" + + f"下载量:{StringUtils.str_filesize(download)}\n" + + "————————————" + ) + + if incDownloads or incUploads: + sorted_messages = [messages[key] for key in sorted(messages.keys(), reverse=True)] + sorted_messages.insert(0, f"【汇总】\n" + f"总上传:{StringUtils.str_filesize(incUploads)}\n" + f"总下载:{StringUtils.str_filesize(incDownloads)}\n" + f"————————————") + self.post_message(mtype=NotificationType.SiteMessage, + title="站点数据统计", text="\n".join(sorted_messages)) + + # 保存数据 + self.save_data(today_date, self._sites_data) + + # 更新时间 + self.save_data("last_update_time", today_date) + + self.eventmanager.send_event(etype=EventType.PluginAction, data={ + "action": "sitestatistic_refresh_complete" + }) + + logger.info("站点数据刷新完成") + + def __custom_sites(self) -> List[Any]: + custom_sites = [] + custom_sites_config = 
self.get_config("CustomSites") + if custom_sites_config and custom_sites_config.get("enabled"): + custom_sites = custom_sites_config.get("sites") + return custom_sites + + def __update_config(self): + self.update_config({ + "enabled": self._enabled, + "onlyonce": self._onlyonce, + "cron": self._cron, + "notify": self._notify, + "sitemsg": self._sitemsg, + "queue_cnt": self._queue_cnt, + "remove_failed": self._remove_failed, + "statistic_type": self._statistic_type, + "statistic_sites": self._statistic_sites, + "dashboard_type": self._dashboard_type + }) + + @eventmanager.register(EventType.SiteDeleted) + def site_deleted(self, event): + """ + 删除对应站点选中 + """ + site_id = event.event_data.get("site_id") + config = self.get_config() + if config: + statistic_sites = config.get("statistic_sites") + if statistic_sites: + if isinstance(statistic_sites, str): + statistic_sites = [statistic_sites] + + # 删除对应站点 + if site_id: + statistic_sites = [site for site in statistic_sites if int(site) != int(site_id)] + else: + # 清空 + statistic_sites = [] + + # 若无站点,则停止 + if len(statistic_sites) == 0: + self._enabled = False + + self._statistic_sites = statistic_sites + # 保存配置 + self.__update_config() diff --git a/plugins.v2/sitestatistic/siteuserinfo/__init__.py b/plugins.v2/sitestatistic/siteuserinfo/__init__.py new file mode 100644 index 0000000..92da174 --- /dev/null +++ b/plugins.v2/sitestatistic/siteuserinfo/__init__.py @@ -0,0 +1,428 @@ +# -*- coding: utf-8 -*- +import json +import re +from abc import ABCMeta, abstractmethod +from enum import Enum +from typing import Optional +from urllib.parse import urljoin, urlsplit + +from requests import Session + +from app.core.config import settings +from app.helper.cloudflare import under_challenge +from app.log import logger +from app.utils.http import RequestUtils +from app.utils.site import SiteUtils + +SITE_BASE_ORDER = 1000 + + +# 站点框架 +class SiteSchema(Enum): + DiscuzX = "Discuz!" 
+ Gazelle = "Gazelle" + Ipt = "IPTorrents" + NexusPhp = "NexusPhp" + NexusProject = "NexusProject" + NexusRabbit = "NexusRabbit" + NexusHhanclub = "NexusHhanclub" + NexusAudiences = "NexusAudiences" + SmallHorse = "Small Horse" + Unit3d = "Unit3d" + TorrentLeech = "TorrentLeech" + FileList = "FileList" + TNode = "TNode" + MTorrent = "MTorrent" + Yema = "Yema" + + +class ISiteUserInfo(metaclass=ABCMeta): + # 站点模版 + schema = SiteSchema.NexusPhp + # 站点解析时判断顺序,值越小越先解析 + order = SITE_BASE_ORDER + # 请求模式 cookie/apikey + request_mode = "cookie" + + def __init__(self, site_name: str, + url: str, + site_cookie: str, + apikey: str, + token: str, + index_html: str, + session: Session = None, + ua: str = None, + emulate: bool = False, + proxy: bool = None): + super().__init__() + # 站点信息 + self.site_name = None + self.site_url = None + self.apikey = apikey + self.token = token + # 用户信息 + self.username = None + self.userid = None + # 未读消息 + self.message_unread = 0 + self.message_unread_contents = [] + + # 流量信息 + self.upload = 0 + self.download = 0 + self.ratio = 0 + + # 种子信息 + self.seeding = 0 + self.leeching = 0 + self.uploaded = 0 + self.completed = 0 + self.incomplete = 0 + self.seeding_size = 0 + self.leeching_size = 0 + self.uploaded_size = 0 + self.completed_size = 0 + self.incomplete_size = 0 + # 做种人数, 种子大小 + self.seeding_info = [] + + # 用户详细信息 + self._user_basic_page = None + self._user_basic_params = None + self._user_basic_headers = None + self.user_level = None + self.join_at = None + self.bonus = 0.0 + + # 错误信息 + self.err_msg = None + # 内部数据 + self._addition_headers = None + + # 站点页面 + self._brief_page = "index.php" + self._user_detail_page = "userdetails.php?id=" + self._user_detail_params = None + self._user_detail_headers = None + self._user_traffic_page = "index.php" + self._user_traffic_params = None + self._user_traffic_headers = None + self._user_mail_unread_page = "messages.php?action=viewmailbox&box=1&unread=yes" + self._sys_mail_unread_page = 
"messages.php?action=viewmailbox&box=-2&unread=yes" + self._mail_unread_params = None + self._mail_unread_headers = None + self._mail_content_params = None + self._mail_content_headers = None + self._torrent_seeding_page = "getusertorrentlistajax.php?userid=" + self._torrent_seeding_params = None + self._torrent_seeding_headers = None + + split_url = urlsplit(url) + self.site_name = site_name + self.site_url = url + self.site_domain = split_url.netloc + self._base_url = f"{split_url.scheme}://{split_url.netloc}" + self._site_cookie = site_cookie + self._index_html = index_html + self._session = session if session else None + self._ua = ua + + self._emulate = emulate + self._proxy = proxy + + def site_schema(self) -> SiteSchema: + """ + 站点解析模型 + :return: 站点解析模型 + """ + return self.schema + + @classmethod + def match(cls, html_text: str) -> bool: + """ + 是否匹配当前解析模型 + :param html_text: 站点首页html + :return: 是否匹配 + """ + pass + + def parse(self): + """ + 解析站点信息 + :return: + """ + # 检查是否已经登录 + if not self._parse_logged_in(self._index_html): + return + # 解析站点页面 + self._parse_site_page(self._index_html) + # 解析用户基础信息 + if self._user_basic_page: + self._parse_user_base_info( + self._get_page_content( + url=urljoin(self._base_url, self._user_basic_page), + params=self._user_basic_params, + headers=self._user_basic_headers + ) + ) + else: + self._parse_user_base_info(self._index_html) + # 解析用户详细信息 + if self._user_detail_page: + self._parse_user_detail_info( + self._get_page_content( + url=urljoin(self._base_url, self._user_detail_page), + params=self._user_detail_params, + headers=self._user_detail_headers + ) + ) + # 解析用户未读消息 + self._pase_unread_msgs() + # 解析用户上传、下载、分享率等信息 + if self._user_traffic_page: + self._parse_user_traffic_info( + self._get_page_content( + url=urljoin(self._base_url, self._user_traffic_page), + params=self._user_traffic_params, + headers=self._user_traffic_headers + ) + ) + # 解析用户做种信息 + self._parse_seeding_pages() + self.seeding_info = 
json.dumps(self.seeding_info) + + def _pase_unread_msgs(self): + """ + 解析所有未读消息标题和内容 + :return: + """ + unread_msg_links = [] + if self.message_unread > 0: + links = {self._user_mail_unread_page, self._sys_mail_unread_page} + for link in links: + if not link: + continue + msg_links = [] + next_page = self._parse_message_unread_links( + self._get_page_content( + url=urljoin(self._base_url, link), + params=self._mail_unread_params, + headers=self._mail_unread_headers + ), + msg_links) + while next_page: + next_page = self._parse_message_unread_links( + self._get_page_content( + url=urljoin(self._base_url, next_page), + params=self._mail_unread_params, + headers=self._mail_unread_headers + ), + msg_links + ) + unread_msg_links.extend(msg_links) + # 重新更新未读消息数(99999表示有消息但数量未知) + if self.message_unread == 99999: + self.message_unread = len(unread_msg_links) + # 解析未读消息内容 + for msg_link in unread_msg_links: + logger.debug(f"{self.site_name} 信息链接 {msg_link}") + head, date, content = self._parse_message_content( + self._get_page_content( + urljoin(self._base_url, msg_link), + params=self._mail_content_params, + headers=self._mail_content_headers + ) + ) + logger.debug(f"{self.site_name} 标题 {head} 时间 {date} 内容 {content}") + self.message_unread_contents.append((head, date, content)) + + def _parse_seeding_pages(self): + """ + 解析做种页面 + """ + if self._torrent_seeding_page: + # 第一页 + next_page = self._parse_user_torrent_seeding_info( + self._get_page_content( + url=urljoin(self._base_url, self._torrent_seeding_page), + params=self._torrent_seeding_params, + headers=self._torrent_seeding_headers + ) + ) + + # 其他页处理 + while next_page is not None and next_page is not False: + next_page = self._parse_user_torrent_seeding_info( + self._get_page_content( + url=urljoin(urljoin(self._base_url, self._torrent_seeding_page), next_page), + params=self._torrent_seeding_params, + headers=self._torrent_seeding_headers + ), + multi_page=True) + + @staticmethod + def 
_prepare_html_text(html_text): + """ + 处理掉HTML中的干扰部分 + """ + return re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_text)) + + @abstractmethod + def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: + """ + 获取未阅读消息链接 + :param html_text: + :return: + """ + pass + + def _get_page_content(self, url: str, params: dict = None, headers: dict = None): + """ + :param url: 网页地址 + :param params: post参数 + :param headers: 额外的请求头 + :return: + """ + req_headers = None + proxies = settings.PROXY if self._proxy else None + if self._ua or headers or self._addition_headers: + req_headers = { + "User-Agent": f"{self._ua}" + } + + if headers: + req_headers.update(headers) + else: + req_headers.update({ + "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", + }) + if self._addition_headers: + req_headers.update(self._addition_headers) + + if self.request_mode == "apikey": + # 使用apikey请求,通过请求头传递 + cookie = None + session = None + else: + # 使用cookie请求 + cookie = self._site_cookie + session = self._session + + if params: + if req_headers.get("Content-Type") == "application/json": + res = RequestUtils(cookies=cookie, + session=session, + timeout=60, + proxies=proxies, + headers=req_headers).post_res(url=url, json=params) + else: + res = RequestUtils(cookies=cookie, + session=session, + timeout=60, + proxies=proxies, + headers=req_headers).post_res(url=url, data=params) + else: + res = RequestUtils(cookies=cookie, + session=session, + timeout=60, + proxies=proxies, + headers=req_headers).get_res(url=url) + if res is not None and res.status_code in (200, 500, 403): + if req_headers and "application/json" in str(req_headers.get("Accept")): + return json.dumps(res.json()) + else: + # 如果cloudflare 有防护,尝试使用浏览器仿真 + if under_challenge(res.text): + logger.warn( + f"{self.site_name} 检测到Cloudflare,请更新Cookie和UA") + return "" + if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE): + res.encoding = "utf-8" + else: + res.encoding = 
res.apparent_encoding + return res.text + + return "" + + @abstractmethod + def _parse_site_page(self, html_text: str): + """ + 解析站点相关信息页面 + :param html_text: + :return: + """ + pass + + @abstractmethod + def _parse_user_base_info(self, html_text: str): + """ + 解析用户基础信息 + :param html_text: + :return: + """ + pass + + def _parse_logged_in(self, html_text): + """ + 解析用户是否已经登陆 + :param html_text: + :return: True/False + """ + logged_in = SiteUtils.is_logged_in(html_text) + if not logged_in: + self.err_msg = "未检测到已登陆,请检查cookies是否过期" + logger.warn(f"{self.site_name} 未登录,跳过后续操作") + + return logged_in + + @abstractmethod + def _parse_user_traffic_info(self, html_text: str): + """ + 解析用户的上传,下载,分享率等信息 + :param html_text: + :return: + """ + pass + + @abstractmethod + def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: + """ + 解析用户的做种相关信息 + :param html_text: + :param multi_page: 是否多页数据 + :return: 下页地址 + """ + pass + + @abstractmethod + def _parse_user_detail_info(self, html_text: str): + """ + 解析用户的详细信息 + 加入时间/等级/魔力值等 + :param html_text: + :return: + """ + pass + + @abstractmethod + def _parse_message_content(self, html_text): + """ + 解析短消息内容 + :param html_text: + :return: head: message, date: time, content: message content + """ + pass + + def to_dict(self): + """ + 转化为字典 + """ + attributes = [ + attr for attr in dir(self) + if not callable(getattr(self, attr)) and not attr.startswith("_") + ] + return { + attr: getattr(self, attr).value + if isinstance(getattr(self, attr), SiteSchema) + else getattr(self, attr) for attr in attributes + } diff --git a/plugins.v2/sitestatistic/siteuserinfo/discuz.py b/plugins.v2/sitestatistic/siteuserinfo/discuz.py new file mode 100644 index 0000000..03fbb81 --- /dev/null +++ b/plugins.v2/sitestatistic/siteuserinfo/discuz.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +import re +from typing import Optional + +from lxml import etree + +from app.plugins.sitestatistic.siteuserinfo import 
ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class DiscuzUserInfo(ISiteUserInfo):
    """User-statistics parser for Discuz!-based sites."""
    schema = SiteSchema.DiscuzX
    order = SITE_BASE_ORDER + 10

    @classmethod
    def match(cls, html_text: str) -> bool:
        # Match on the Discuz! footer signature in the rendered text
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Powered by Discuz!' in printable_text

    def _parse_user_base_info(self, html_text: str):
        # Locate the first profile link carrying "&uid=" to extract user id/name
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        user_info = html.xpath('//a[contains(@href, "&uid=")]')
        if user_info:
            user_id_match = re.search(r"&uid=(\d+)", user_info[0].attrib['href'])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                # NOTE(review): f-string has no placeholders; plain string would do
                self._torrent_seeding_page = f"forum.php?&mod=torrents&cat_5up=on"
                self._user_detail_page = user_info[0].attrib['href']
                self.username = user_info[0].text.strip()

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date, user level, bonus.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level (last usergroup link)
        user_levels_text = html.xpath('//a[contains(@href, "usergroup")]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[-1].strip()

        # Join date
        join_at_text = html.xpath('//li[em[text()="注册时间"]]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())

        # Share ratio
        # NOTE(review): this assigns the share-ratio value to self.bonus, and the
        # "积分" (points) block below then overwrites it — looks like this should
        # set self.ratio instead. Kept as committed; TODO confirm upstream.
        ratio_text = html.xpath('//li[contains(.//text(), "分享率")]//text()')
        if ratio_text:
            ratio_match = re.search(r"\(([\d,.]+)\)", ratio_text[0])
            if ratio_match and ratio_match.group(1).strip():
                self.bonus = StringUtils.str_float(ratio_match.group(1))

        # Bonus points
        bouns_text = html.xpath('//li[em[text()="积分"]]/text()')
        if bouns_text:
            self.bonus = StringUtils.str_float(bouns_text[0].strip())

        # Uploaded total
        upload_text = html.xpath('//li[em[contains(text(),"上传量")]]/text()')
        if upload_text:
            self.upload = StringUtils.num_filesize(upload_text[0].strip().split('/')[-1])

        # Downloaded total
        download_text = html.xpath('//li[em[contains(text(),"下载量")]]/text()')
        if download_text:
            self.download = StringUtils.num_filesize(download_text[0].strip().split('/')[-1])

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Seeding info.
        :param html_text:
        :param multi_page: whether this is a continuation page
        :return: next page URL
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col = 3
        seeders_col = 4
        # Locate the "size" column by its header image
        if html.xpath('//tr[position()=1]/td[.//img[@class="size"] and .//img[@alt="size"]]'):
            size_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="size"] '
                                      'and .//img[@alt="size"]]/preceding-sibling::td')) + 1
        # Locate the "seeders" column by its header image
        if html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] and .//img[@alt="seeders"]]'):
            seeders_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] '
                                         'and .//img[@alt="seeders"]]/preceding-sibling::td')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Next page link, if present
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        # Traffic is parsed in _parse_user_detail_info for Discuz sites
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None
diff --git a/plugins.v2/sitestatistic/siteuserinfo/file_list.py b/plugins.v2/sitestatistic/siteuserinfo/file_list.py
new file mode 100644
index 0000000..9bf6f31
--- /dev/null
+++ b/plugins.v2/sitestatistic/siteuserinfo/file_list.py
@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class FileListSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for FileList sites."""
    schema = SiteSchema.FileList
    order = SITE_BASE_ORDER + 50

    @classmethod
    def match(cls, html_text: str) -> bool:
        # Match on the FileList footer signature in the rendered text
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Powered by FileList' in printable_text

    def _parse_site_page(self, html_text: str):
        # Discover the user-detail and seeding-list URLs from the index page
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)

        self._torrent_seeding_page = f"snatchlist.php?id={self.userid}&action=torrents&type=seeding"

    def _parse_user_base_info(self, html_text: str):
        # Username is the text of the userdetails link for our userid
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
        if ret:
            self.username = str(ret[0])

    def _parse_user_traffic_info(self, html_text: str):
        """
        Upload / download / share ratio [seeding count / bonus]
        — all parsed from the detail page instead.
        :param html_text:
        :return:
        """
        return

    def _parse_user_detail_info(self, html_text: str):
        # Traffic, ratio, level, join date and bonus all live on the detail page
        html_text = self._prepare_html_text(html_text)
        html =
etree.HTML(html_text)

        upload_html = html.xpath('//table//tr/td[text()="Uploaded"]/following-sibling::td//text()')
        if upload_html:
            self.upload = StringUtils.num_filesize(upload_html[0])
        download_html = html.xpath('//table//tr/td[text()="Downloaded"]/following-sibling::td//text()')
        if download_html:
            self.download = StringUtils.num_filesize(download_html[0])

        # Share ratio (forced to 0 when nothing has been downloaded)
        ratio_html = html.xpath('//table//tr/td[text()="Share ratio"]/following-sibling::td//text()')
        if ratio_html:
            share_ratio = StringUtils.str_float(ratio_html[0])
            self.ratio = 0 if self.download == 0 else share_ratio

        # "Seed bonus" row carries seeding count and size at fixed offsets
        seed_html = html.xpath('//table//tr/td[text()="Seed bonus"]/following-sibling::td//text()')
        if seed_html:
            self.seeding = StringUtils.str_int(seed_html[1])
            self.seeding_size = StringUtils.num_filesize(seed_html[3])

        user_level_html = html.xpath('//table//tr/td[text()="Class"]/following-sibling::td//text()')
        if user_level_html:
            self.user_level = user_level_html[0].strip()

        # Join date (text before the parenthesised relative time)
        join_at_html = html.xpath('//table//tr/td[contains(text(), "Join")]/following-sibling::td//text()')
        if join_at_html:
            join_at = (join_at_html[0].split("("))[0].strip()
            self.join_at = StringUtils.unify_datetime_str(join_at)

        bonus_html = html.xpath('//a[contains(@href, "shop.php")]')
        if bonus_html:
            self.bonus = StringUtils.str_float(bonus_html[0].xpath("string(.)").strip())
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Seeding info.
        :param html_text:
        :param multi_page: whether this is a continuation page
        :return: next page URL
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col = 6
        seeders_col = 7

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//table/tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//table/tr[position()>1]/td[{seeders_col}]')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        # NOTE(review): totals deliberately left commented out — seeding count
        # and size come from the "Seed bonus" row in _parse_user_detail_info.
        # self.seeding += page_seeding
        # self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # FileList seeding list is a single page
        next_page = None

        return next_page

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None
diff --git a/plugins.v2/sitestatistic/siteuserinfo/gazelle.py b/plugins.v2/sitestatistic/siteuserinfo/gazelle.py
new file mode 100644
index 0000000..ae2de5e
--- /dev/null
+++ b/plugins.v2/sitestatistic/siteuserinfo/gazelle.py
@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class GazelleSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for Gazelle-based sites."""
    schema = SiteSchema.Gazelle
    order = SITE_BASE_ORDER

    @classmethod
    def match(cls, html_text: str) -> bool:
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""

        return "Powered by Gazelle" in printable_text or "DIC Music" in printable_text

    def _parse_user_base_info(self, html_text: str):
        # User id/name from the first user.php?id= profile link
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        tmps = html.xpath('//a[contains(@href, "user.php?id=")]')
        if tmps:
            user_id_match = re.search(r"user.php\?id=(\d+)", tmps[0].attrib['href'])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
                self._user_detail_page = f"user.php?id={self.userid}"
                self.username =
tmps[0].text.strip()

        # Uploaded total: header widget first, stats list as fallback
        tmps = html.xpath('//*[@id="header-uploaded-value"]/@data-value')
        if tmps:
            self.upload = StringUtils.num_filesize(tmps[0])
        else:
            tmps = html.xpath('//li[@id="stats_seeding"]/span/text()')
            if tmps:
                self.upload = StringUtils.num_filesize(tmps[0])

        # Downloaded total: header widget first, stats list as fallback
        tmps = html.xpath('//*[@id="header-downloaded-value"]/@data-value')
        if tmps:
            self.download = StringUtils.num_filesize(tmps[0])
        else:
            tmps = html.xpath('//li[@id="stats_leeching"]/span/text()')
            if tmps:
                self.download = StringUtils.num_filesize(tmps[0])

        # Ratio is computed, not scraped
        self.ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)

        # Bonus points: tooltip first, link text as fallback
        tmps = html.xpath('//a[contains(@href, "bonus.php")]/@data-tooltip')
        if tmps:
            bonus_match = re.search(r"([\d,.]+)", tmps[0])
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
        else:
            tmps = html.xpath('//a[contains(@href, "bonus.php")]')
            if tmps:
                bonus_text = tmps[0].xpath("string(.)")
                bonus_match = re.search(r"([\d,.]+)", bonus_text)
                if bonus_match and bonus_match.group(1).strip():
                    self.bonus = StringUtils.str_float(bonus_match.group(1))

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date, user level.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level: data-value attribute first, text list as fallback
        user_levels_text = html.xpath('//*[@id="class-value"]/@data-value')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
        else:
            user_levels_text = html.xpath('//li[contains(text(), "用户等级")]/text()')
            if user_levels_text:
                self.user_level = user_levels_text[0].split(':')[1].strip()

        # Join date: data-value attribute first, stats box as fallback
        join_at_text = html.xpath('//*[@id="join-date-value"]/@data-value')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
        else:
            join_at_text = html.xpath(
                '//div[contains(@class, "box_userinfo_stats")]//li[contains(text(), "加入时间")]/span/text()')
            if join_at_text:
                self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Seeding info.
        :param html_text:
        :param multi_page: whether this is a continuation page
        :return: next page URL
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col = 3
        # Derive the size column from the table width
        if html.xpath('//table[contains(@id, "torrent")]//tr[1]/td'):
            size_col = len(html.xpath('//table[contains(@id, "torrent")]//tr[1]/td')) - 3
        # Seeders column sits two cells to the right of size
        seeders_col = size_col + 2

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{seeders_col}]/text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        if multi_page:
            # Continuation page: accumulate
            self.seeding += page_seeding
            self.seeding_size += page_seeding_size
            self.seeding_info.extend(page_seeding_info)
        else:
            # First page: only set values that are still empty
            if not self.seeding:
                self.seeding = page_seeding
            if not self.seeding_size:
                self.seeding_size = page_seeding_size
            if not self.seeding_info:
                self.seeding_info = page_seeding_info

        # Next page link, if present
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "Next") or contains(.//text(), "下一页")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        # TODO
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None
diff --git
a/plugins.v2/sitestatistic/siteuserinfo/ipt_project.py b/plugins.v2/sitestatistic/siteuserinfo/ipt_project.py
new file mode 100644
index 0000000..9eeb217
--- /dev/null
+++ b/plugins.v2/sitestatistic/siteuserinfo/ipt_project.py
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class IptSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for IPTorrents."""
    schema = SiteSchema.Ipt
    order = SITE_BASE_ORDER + 35

    @classmethod
    def match(cls, html_text: str) -> bool:
        return 'IPTorrents' in html_text

    def _parse_user_base_info(self, html_text: str):
        # Username / user id from the /u/ profile link, stats from the header bar
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        tmps = html.xpath('//a[contains(@href, "/u/")]//text()')
        tmps_id = html.xpath('//a[contains(@href, "/u/")]/@href')
        if tmps:
            self.username = str(tmps[-1])
        if tmps_id:
            user_id_match = re.search(r"/u/(\d+)", tmps_id[0])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                self._user_detail_page = f"user.php?u={self.userid}"
                self._torrent_seeding_page = f"peers?u={self.userid}"

        # The header "stats" div packs all figures into one element
        tmps = html.xpath('//div[@class = "stats"]/div/div')
        if tmps:
            self.upload = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[1]).strip())
            self.download = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[2]).strip())
            self.seeding = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[0])
            self.leeching = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[1])
            # "-" means no ratio yet; treat as 0
            self.ratio = StringUtils.str_float(str(tmps[0].xpath('span/text()')[0]).strip().replace('-', '0'))
            self.bonus = StringUtils.str_float(tmps[0].xpath('a')[3].xpath('text()')[0])

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        # User level and join date from the profile table
        html = etree.HTML(html_text)
        if not html:
            return

        user_levels_text = html.xpath('//tr/th[text()="Class"]/following-sibling::td[1]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()

        # Join date (drop the parenthesised relative time)
        join_at_text = html.xpath('//tr/th[text()="Join date"]/following-sibling::td[1]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0])

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        # Seeding rows sit between the "Seeders" and "Leechers" header rows
        html = etree.HTML(html_text)
        if not html:
            return
        # seeding start
        seeding_end_pos = 3
        if html.xpath('//tr/td[text() = "Leechers"]'):
            seeding_end_pos = len(html.xpath('//tr/td[text() = "Leechers"]/../preceding-sibling::tr')) + 1
            seeding_end_pos = seeding_end_pos - 3

        page_seeding = 0
        page_seeding_size = 0
        seeding_torrents = html.xpath('//tr/td[text() = "Seeders"]/../following-sibling::tr/td[position()=6]/text()')
        if seeding_torrents:
            # NOTE(review): this counts the row offset, not len(seeding_torrents);
            # looks suspicious but kept as committed — TODO confirm upstream.
            page_seeding = seeding_end_pos
            for per_size in seeding_torrents[:seeding_end_pos]:
                # Strip the parenthesised size annotation when present
                if '(' in per_size and ')' in per_size:
                    per_size = per_size.split('(')[-1]
                    per_size = per_size.split(')')[0]

                page_seeding_size += StringUtils.num_filesize(per_size)

        self.seeding = page_seeding
        self.seeding_size = page_seeding_size

    def _parse_user_traffic_info(self, html_text: str):
        # TODO
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None
diff --git a/plugins.v2/sitestatistic/siteuserinfo/mtorrent.py b/plugins.v2/sitestatistic/siteuserinfo/mtorrent.py
new file mode 100644
index 0000000..8c999d5
--- /dev/null
+++ b/plugins.v2/sitestatistic/siteuserinfo/mtorrent.py
@@ -0,0 +1,200 @@
# -*- coding: utf-8 -*-
import json
from typing import Optional, Tuple
from urllib.parse import urljoin

from lxml import etree

from app.log import logger
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo,
SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class MTorrentSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for M-Team (JSON API, apikey auth)."""
    schema = SiteSchema.MTorrent
    order = SITE_BASE_ORDER + 60
    request_mode = "apikey"

    # Mapping of M-Team numeric role ids to level names
    MTeam_sysRoleList = {
        "1": "User",
        "2": "Power User",
        "3": "Elite User",
        "4": "Crazy User",
        "5": "Insane User",
        "6": "Veteran User",
        "7": "Extreme User",
        "8": "Ultimate User",
        "9": "Nexus Master",
        "10": "VIP",
        "11": "Retiree",
        "12": "Uploader",
        "13": "Moderator",
        "14": "Administrator",
        "15": "Sysop",
        "16": "Staff",
        "17": "Offer memberStaff",
        "18": "Bet memberStaff",
    }

    @classmethod
    def match(cls, html_text: str) -> bool:
        # Match on the page title
        html = etree.HTML(html_text)
        if not html:
            return False
        if html.xpath("//title/text()") and "M-Team" in html.xpath("//title/text()")[0]:
            return True
        return False

    def _parse_site_page(self, html_text: str):
        """
        Configure API endpoints (M-Team serves everything from api.<domain>).
        """
        # Switch the base URL to the API host
        self._base_url = f"https://api.{StringUtils.get_url_domain(self._base_url)}"
        self._user_traffic_page = None
        self._user_detail_page = None
        self._user_basic_page = "api/member/profile"
        self._user_basic_params = {
            "uid": self.userid
        }
        self._sys_mail_unread_page = None
        self._user_mail_unread_page = "api/msg/search"
        self._mail_unread_params = {
            "keyword": "",
            "box": "-2",
            "type": "pageNumber",
            "pageSize": 100
        }
        self._torrent_seeding_page = "api/member/getUserTorrentList"
        self._torrent_seeding_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/plain, */*"
        }
        # The API key travels in a header on every request
        self._addition_headers = {
            "x-api-key": self.apikey,
        }

    def _parse_logged_in(self, html_text):
        """
        Login check is skipped for the API flow (the apikey either works or
        the API calls fail); to be improved later.
        :param html_text:
        :return:
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user info; traffic and detail parsing are merged in here
        because the profile endpoint returns everything at once.
        """
        if not html_text:
            return None
        detail = json.loads(html_text)
        if not detail or detail.get("code") != "0":
            return
        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = self.MTeam_sysRoleList.get(user_info.get("role") or "1")
        self.join_at = user_info.get("memberStatus", {}).get("createdDate")

        self.upload = int(user_info.get("memberCount", {}).get("uploaded") or '0')
        self.download = int(user_info.get("memberCount", {}).get("downloaded") or '0')
        self.ratio = user_info.get("memberCount", {}).get("shareRate") or 0
        self.bonus = user_info.get("memberCount", {}).get("bonus") or 0
        # Messages must be parsed but the count is unknown -> sentinel 99999
        self.message_unread = 99999

        self._torrent_seeding_params = {
            "pageNumber": 1,
            "pageSize": 200,
            "type": "SEEDING",
            "userid": self.userid
        }

    def _parse_user_traffic_info(self, html_text: str):
        """
        Traffic info is covered by _parse_user_base_info.
        """
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Detail info is covered by _parse_user_base_info.
        """
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse one page of the seeding-torrents API response.
        """
        if not html_text:
            return None
        seeding_info = json.loads(html_text)
        if not seeding_info or seeding_info.get("code") != "0":
            return None
        torrents = seeding_info.get("data", {}).get("data", [])
        page_seeding_size = 0
        page_seeding_info = []
        for info in torrents:
            torrent = info.get("torrent", {})
            size = int(torrent.get("size") or '0')
            seeders = int(torrent.get("source") or '0')
            page_seeding_size += size
            page_seeding_info.append([seeders, size])
        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Query the total seeder count to decide whether more pages remain
        seeder_count = 0
        try:
            result = self._get_page_content(
                url=urljoin(self._base_url, "api/tracker/myPeerStatus"),
                params={"uid": self.userid},
            )
            if result:
                seeder_info = json.loads(result)
                seeder_count = int(seeder_info.get("data", {}).get("seeder") or 0)
        except Exception as e:
            logger.error(f"获取做种数失败:
{str(e)}") + if not seeder_count: + return None + if self.seeding >= seeder_count: + return None + # 还有下一页 + self._torrent_seeding_params["pageNumber"] += 1 + return "" + + def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: + """ + 解析未读消息链接,这里直接读出详情 + """ + if not html_text: + return None + messages_info = json.loads(html_text) + if not messages_info or messages_info.get("code") != "0": + return None + messages = messages_info.get("data", {}).get("data", []) + for message in messages: + if not message.get("unread"): + continue + head = message.get("title") + date = message.get("createdDate") + content = message.get("context") + if head and date and content: + self.message_unread_contents.append((head, date, content)) + # 设置已读 + self._get_page_content( + url=urljoin(self._base_url, f"api/msg/markRead"), + params={"msgId": message.get("id")} + ) + # 是否存在下页数据 + return None + + def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]: + """ + 解析消息内容 + """ + pass diff --git a/plugins.v2/sitestatistic/siteuserinfo/nexus_audiences.py b/plugins.v2/sitestatistic/siteuserinfo/nexus_audiences.py new file mode 100644 index 0000000..304dc26 --- /dev/null +++ b/plugins.v2/sitestatistic/siteuserinfo/nexus_audiences.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +from urllib.parse import urljoin + +from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema +from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo + + +class NexusAudiencesSiteUserInfo(NexusPhpSiteUserInfo): + schema = SiteSchema.NexusAudiences + order = SITE_BASE_ORDER + 5 + + @classmethod + def match(cls, html_text: str) -> bool: + return 'audiences.me' in html_text + + def _parse_site_page(self, html_text: str): + super()._parse_site_page(html_text) + self._torrent_seeding_page = f"usertorrentlist.php?userid={self.userid}&type=seeding" + + def _parse_seeding_pages(self): + 
self._torrent_seeding_headers = {"Referer": urljoin(self._base_url, self._user_detail_page)} + super()._parse_seeding_pages() diff --git a/plugins.v2/sitestatistic/siteuserinfo/nexus_hhanclub.py b/plugins.v2/sitestatistic/siteuserinfo/nexus_hhanclub.py new file mode 100644 index 0000000..c85c96d --- /dev/null +++ b/plugins.v2/sitestatistic/siteuserinfo/nexus_hhanclub.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +import re + +from lxml import etree + +from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema +from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo +from app.utils.string import StringUtils + + +class NexusHhanclubSiteUserInfo(NexusPhpSiteUserInfo): + schema = SiteSchema.NexusHhanclub + order = SITE_BASE_ORDER + 20 + + @classmethod + def match(cls, html_text: str) -> bool: + return 'hhanclub.top' in html_text + + def _parse_user_traffic_info(self, html_text): + super()._parse_user_traffic_info(html_text) + + html_text = self._prepare_html_text(html_text) + html = etree.HTML(html_text) + + # 上传、下载、分享率 + upload_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", + html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[4]/text()')[0]) + download_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", + html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[5]/text()')[0]) + ratio_match = re.search(r"分享率][::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", + html.xpath('//*[@id="user-info-panel"]/div[2]/div[1]/div[1]/div/text()')[0]) + + # 计算分享率 + self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0 + self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0 + # 优先使用页面上的分享率 + calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3) + self.ratio = StringUtils.str_float(ratio_match.group(1)) if ( + ratio_match and ratio_match.group(1).strip()) else calc_ratio + + def 
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.log import logger
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class NexusPhpSiteUserInfo(ISiteUserInfo):
    """Generic parser for NexusPHP-based trackers; acts as the fallback schema."""

    schema = SiteSchema.NexusPhp
    order = SITE_BASE_ORDER * 2

    @classmethod
    def match(cls, html_text: str) -> bool:
        """
        Fallback matcher: any site not claimed by a more specific parser is
        handled as NexusPHP.
        :param html_text: landing page HTML (unused)
        :return: always True
        """
        return True

    def _parse_site_page(self, html_text: str):
        """Locate the user-detail page and the AJAX seeding list from the index page."""
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)
            self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
        else:
            # No numeric id visible yet - remember the bare link; the id and the
            # seeding page are recovered later in _fixup_torrent_seeding_page().
            user_detail = re.search(r"(userdetails)", html_text)
            if user_detail and user_detail.group().strip():
                self._user_detail_page = user_detail.group().strip().lstrip('/')
                self.userid = None
                self._torrent_seeding_page = None

    def _parse_message_unread(self, html_text):
        """
        Parse the number of unread private messages.
        :param html_text: page HTML containing the messages link
        :return: None (sets self.message_unread)
        """
        html = etree.HTML(html_text)
        if not html:
            return

        message_labels = html.xpath('//a[@href="messages.php"]/..')
        message_labels.extend(html.xpath('//a[contains(@href, "messages.php")]/..'))
        if message_labels:
            message_text = message_labels[0].xpath("string(.)")

            logger.debug(f"{self.site_name} 消息原始信息 {message_text}")
            message_unread_match = re.findall(r"[^Date](信息箱\s*|\(|你有\xa0)(\d+)", message_text)

            if message_unread_match and len(message_unread_match[-1]) == 2:
                self.message_unread = StringUtils.str_int(message_unread_match[-1][1])
            elif message_text.isdigit():
                self.message_unread = StringUtils.str_int(message_text)

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user info (username). Traffic and unread counts are parsed
        from the same page here to avoid extra requests.
        """
        self._parse_user_traffic_info(html_text)
        self._user_traffic_page = None

        self._parse_message_unread(html_text)

        html = etree.HTML(html_text)
        if not html:
            return

        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//b//text()')
        if ret:
            self.username = str(ret[0])
            return
        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
        if ret:
            self.username = str(ret[0])
            # Fix: return here as the sibling branches do, so the generic
            # <strong> lookup below cannot overwrite the id-specific match.
            return

        ret = html.xpath('//a[contains(@href, "userdetails")]//strong//text()')
        if ret:
            self.username = str(ret[0])
            return

    def _parse_user_traffic_info(self, html_text):
        """
        Parse traffic figures (upload/download/ratio/leeching) and bonus points
        from free-form page text via regex.
        """
        html_text = self._prepare_html_text(html_text)
        upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                 re.IGNORECASE)
        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
        download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                   re.IGNORECASE)
        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
        ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        # Computed ratio as a fallback when the page does not show one
        calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
        # Prefer the ratio printed on the page
        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
                ratio_match and ratio_match.group(1).strip()) else calc_ratio
        leeching_match = re.search(r"(Torrents leeching|下载中)[\u4E00-\u9FA5\D\s]+(\d+)[\s\S]+<", html_text)
        self.leeching = StringUtils.str_int(leeching_match.group(2)) if leeching_match and leeching_match.group(
            2).strip() else 0
        html = etree.HTML(html_text)
        # UCoin sites report bonus as gold/silver/copper coins
        has_ucoin, self.bonus = self._parse_ucoin(html)
        if has_ucoin:
            return
        tmps = html.xpath('//a[contains(@href,"mybonus")]/text()') if html else None
        if tmps:
            bonus_text = str(tmps[0]).strip()
            bonus_match = re.search(r"([\d,.]+)", bonus_text)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
                return
        bonus_match = re.search(r"mybonus.[\[\]::<>/a-zA-Z_\-=\"'\s#;.(使用&说明魔力值豆]+\s*([\d,.]+)[\[<()&\s]",
                                html_text)
        try:
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
                return
            bonus_match = re.search(r"[魔力值|\]][\[\]::<>/a-zA-Z_\-=\"'\s#;]+\s*([\d,.]+|\"[\d,.]+\")[<>()&\s]",
                                    html_text,
                                    flags=re.S)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1).strip('"'))
        except Exception as err:
            logger.error(f"{self.site_name} 解析魔力值出错, 错误信息: {str(err)}")

    @staticmethod
    def _parse_ucoin(html):
        """
        Parse UCoin balances, normalising everything to copper coins.
        :param html: parsed lxml tree (may be None)
        :return: (found, bonus_in_copper)
        """
        if html:
            gold, silver, copper = None, None, None

            golds = html.xpath('//span[@class = "ucoin-symbol ucoin-gold"]//text()')
            if golds:
                gold = StringUtils.str_float(str(golds[-1]))
            silvers = html.xpath('//span[@class = "ucoin-symbol ucoin-silver"]//text()')
            if silvers:
                silver = StringUtils.str_float(str(silvers[-1]))
            coppers = html.xpath('//span[@class = "ucoin-symbol ucoin-copper"]//text()')
            if coppers:
                copper = StringUtils.str_float(str(coppers[-1]))
            if gold or silver or copper:
                gold = gold if gold else 0
                silver = silver if silver else 0
                copper = copper if copper else 0
                # 1 gold = 100 silver = 10000 copper
                return True, gold * 100 * 100 + silver * 100 + copper
        return False, 0.0

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse one page of seeding statistics.
        :param html_text: seeding-list HTML (or AJAX fragment)
        :param multi_page: whether data spans multiple pages
        :return: URL of the next page, or None when done
        """
        html = etree.HTML(str(html_text).replace(r'\/', '/'))
        if not html:
            return None

        # When the first page offers an expanded link, switch to it
        seeding_url_text = html.xpath('//a[contains(@href,"torrents.php") '
                                      'and contains(@href,"seeding")]/@href')
        if multi_page is False and seeding_url_text and seeding_url_text[0].strip():
            self._torrent_seeding_page = seeding_url_text[0].strip()
            return self._torrent_seeding_page

        size_col = 3
        seeders_col = 4
        # Locate the size column
        size_col_xpath = '//tr[position()=1]/' \
                         'td[(img[@class="size"] and img[@alt="size"])' \
                         ' or (text() = "大小")' \
                         ' or (a/img[@class="size" and @alt="size"])]'
        if html.xpath(size_col_xpath):
            size_col = len(html.xpath(f'{size_col_xpath}/preceding-sibling::td')) + 1
        # Locate the seeders column
        seeders_col_xpath = '//tr[position()=1]/' \
                            'td[(img[@class="seeders"] and img[@alt="seeders"])' \
                            ' or (text() = "在做种")' \
                            ' or (a/img[@class="seeders" and @alt="seeders"])]'
        if html.xpath(seeders_col_xpath):
            seeders_col = len(html.xpath(f'{seeders_col_xpath}/preceding-sibling::td')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        # Restrict to table.torrents when the page has one
        table_class = '//table[@class="torrents"]' if html.xpath('//table[@class="torrents"]') else ''
        seeding_sizes = html.xpath(f'{table_class}//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]/b/a/text()')
        if not seeding_seeders:
            seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]//text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Is there a next page?
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") '
                                    'or contains(.//text(), ">")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()
            # Fix: userid may be None (see _parse_site_page fallback) - the bare
            # `in` test raised TypeError; only patch the URL when an id is known.
            if self.userid and self.userid not in next_page:
                next_page = f'{next_page}&userid={self.userid}&type=seeding'

        return next_page

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info from the detail page: join date, level, and a
        second chance at the seeding totals.
        :param html_text: user-detail page HTML
        :return: None
        """
        html = etree.HTML(html_text)
        if not html:
            return

        self._get_user_level(html)

        self._fixup_traffic_info(html)

        # Join date
        join_at_text = html.xpath(
            '//tr/td[text()="加入日期" or text()="注册日期" or *[text()="加入日期"]]/following-sibling::td[1]//text()'
            '|//div/b[text()="加入日期"]/../text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip())

        # Seeding size & count: retried here when the seeding page yielded nothing
        seeding_sizes = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                   'table[tr[1][td[4 and text()="尺寸"]]]//tr[position()>1]/td[4]')
        seeding_seeders = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                     'table[tr[1][td[5 and text()="做种者"]]]//tr[position()>1]/td[5]//text()')
        tmp_seeding = len(seeding_sizes)
        tmp_seeding_size = 0
        tmp_seeding_info = []
        for i in range(0, len(seeding_sizes)):
            size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
            seeders = StringUtils.str_int(seeding_seeders[i])

            tmp_seeding_size += size
            tmp_seeding_info.append([seeders, size])

        if not self.seeding_size:
            self.seeding_size = tmp_seeding_size
        if not self.seeding:
            self.seeding = tmp_seeding
        if not self.seeding_info:
            self.seeding_info = tmp_seeding_info

        # Sites that print aggregate seeding statistics as text
        seeding_sizes = html.xpath('//tr/td[text()="做种统计"]/following-sibling::td[1]//text()')
        if seeding_sizes:
            seeding_match = re.search(r"总做种数:\s+(\d+)", seeding_sizes[0], re.IGNORECASE)
            seeding_size_match = re.search(r"总做种体积:\s+([\d,.\s]+[KMGTPI]*B)", seeding_sizes[0], re.IGNORECASE)
            tmp_seeding = StringUtils.str_int(seeding_match.group(1)) if (
                    seeding_match and seeding_match.group(1)) else 0
            tmp_seeding_size = StringUtils.num_filesize(
                seeding_size_match.group(1).strip()) if seeding_size_match else 0
            if not self.seeding_size:
                self.seeding_size = tmp_seeding_size
            if not self.seeding:
                self.seeding = tmp_seeding

        self._fixup_torrent_seeding_page(html)

    def _fixup_torrent_seeding_page(self, html):
        """
        Fix up the seeding-list URL using hints found on the detail page.
        :param html: parsed lxml tree of the detail page
        :return: None
        """
        # Dedicated seeding page
        seeding_url_text = html.xpath('//a[contains(@href,"getusertorrentlist.php") '
                                      'and contains(@href,"seeding")]/@href')
        if seeding_url_text:
            self._torrent_seeding_page = seeding_url_text[0].strip()
        # Recover the user id from the inline JS call
        seeding_url_text = html.xpath('//a[contains(@href, "javascript: getusertorrentlistajax") '
                                      'and contains(@href,"seeding")]/@href')
        csrf_text = html.xpath('//meta[@name="x-csrf"]/@content')
        if not self._torrent_seeding_page and seeding_url_text:
            user_js = re.search(r"javascript: getusertorrentlistajax\(\s*'(\d+)", seeding_url_text[0])
            if user_js and user_js.group(1).strip():
                self.userid = user_js.group(1).strip()
                self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
        elif seeding_url_text and csrf_text:
            if csrf_text[0].strip():
                # Plain literal (was an f-string with no placeholders)
                self._torrent_seeding_page = "ajax_getusertorrentlist.php"
                self._torrent_seeding_params = {'userid': self.userid, 'type': 'seeding',
                                                'csrf': csrf_text[0].strip()}

        # Category-based seeding mode - temporarily disabled
        # seeding_url_text = html.xpath('//tr/td[text()="当前做种"]/following-sibling::td[1]'
        #                               '/table//td/a[contains(@href,"seeding")]/@href')
        # if seeding_url_text:
        #     self._torrent_seeding_page = seeding_url_text

    def _get_user_level(self, html):
        # Level: prefer the image title in the same row; otherwise fall back to cell text.
        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级" or *[text()="等级"]]/'
                                      'following-sibling::td[1]/img[1]/@title')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
            return

        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and not(img)]'
                                      '|//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and img[not(@title)]]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        # PTT-style layout
        user_levels_text = html.xpath('//tr/td[text()="用户等级"]/following-sibling::td[1]/b/@title')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
            return

        # Last resort: "[Level]" embedded in the userdetails link text
        user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
        if not self.user_level and user_levels_text:
            for user_level_text in user_levels_text:
                user_level_match = re.search(r"\[(.*)]", user_level_text)
                if user_level_match and user_level_match.group(1).strip():
                    self.user_level = user_level_match.group(1).strip()
                    break

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Collect links to unread messages; returns the next page URL if any."""
        html = etree.HTML(html_text)
        if not html:
            return None

        message_links = html.xpath('//tr[not(./td/img[@alt="Read"])]/td/a[contains(@href, "viewmessage")]/@href')
        msg_links.extend(message_links)
        # Is there a next page?
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_message_content(self, html_text):
        """Parse a single message page into (title, date, content)."""
        html = etree.HTML(html_text)
        if not html:
            return None, None, None
        # Title
        message_head_text = None
        message_head = html.xpath('//h1/text()'
                                  '|//div[@class="layui-card-header"]/span[1]/text()')
        if message_head:
            message_head_text = message_head[-1].strip()

        # Message date
        message_date_text = None
        message_date = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[2]/td[2]'
                                  '|//div[@class="layui-card-header"]/span[2]/span[2]')
        if message_date:
            message_date_text = message_date[0].xpath("string(.)").strip()

        # Message body
        message_content_text = None
        message_content = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[3]/td'
                                     '|//div[contains(@class,"layui-card-body")]')
        if message_content:
            message_content_text = message_content[0].xpath("string(.)").strip()

        return message_head_text, message_date_text, message_content_text

    def _fixup_traffic_info(self, html):
        # Fix up bonus from the detail page when the index page had none
        if not self.bonus:
            bonus_text = html.xpath('//tr/td[text()="魔力值" or text()="猫粮"]/following-sibling::td[1]/text()')
            if bonus_text:
                self.bonus = StringUtils.str_float(bonus_text[0].strip())
# -*- coding: utf-8 -*-
import json
from typing import Optional

from lxml import etree

from app.log import logger
from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema
from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo


class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
    """NexusPHP variant ("Style by Rabbit") that serves the seeding list as JSON."""

    schema = SiteSchema.NexusRabbit
    order = SITE_BASE_ORDER + 5

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Identify the site by the "Style by Rabbit" credit in the rendered text."""
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)")
        return 'Style by Rabbit' in printable_text

    def _parse_site_page(self, html_text: str):
        """Use the JSON endpoint that returns the entire seeding list in one call."""
        super()._parse_site_page(html_text)
        self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}"
        self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"}

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse seeding statistics from the JSON response.
        :param html_text: JSON body
        :param multi_page: whether data spans multiple pages (unused - one request returns all)
        :return: next page URL, always None here
        """
        try:
            torrents = json.loads(html_text).get('data')
        except Exception as e:
            logger.error(f"解析做种信息失败: {str(e)}")
            return None
        # Fix: "data" may be null/absent - the original crashed with
        # TypeError on len(None).
        if not torrents:
            return None

        page_seeding_size = 0
        page_seeding_info = []
        for torrent in torrents:
            seeders = int(torrent.get('seeders', 0))
            size = int(torrent.get('size', 0))
            # Reuse the parsed size (it was previously converted twice)
            page_seeding_size += size
            page_seeding_info.append([seeders, size])

        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)
        # The single request returns the full list - no further pages
        return None
# -*- coding: utf-8 -*-
import json
import re
from typing import Optional

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class TNodeSiteUserInfo(ISiteUserInfo):
    """User-info parser for sites built on the TNode framework (JSON API based)."""

    schema = SiteSchema.TNode
    order = SITE_BASE_ORDER + 60

    @classmethod
    def match(cls, html_text: str) -> bool:
        """TNode sites advertise themselves in the page footer."""
        return 'Powered By TNode' in html_text

    def _parse_site_page(self, html_text: str):
        """Extract the CSRF token and set up the JSON API endpoints."""
        html_text = self._prepare_html_text(html_text)

        # The CSRF token lives in a meta tag; every API call must echo it back
        # in the X-CSRF-TOKEN header.
        # NOTE(review): the original regex literal was corrupted (empty pattern,
        # which would make group(1) raise). This is the conventional TNode meta
        # tag - confirm against a live page.
        csrf_token = re.search(r'<meta name="x-csrf-token" content="(.+?)">', html_text)
        if csrf_token:
            self._addition_headers = {'X-CSRF-TOKEN': csrf_token.group(1)}
            self._user_detail_page = "api/user/getMainInfo"
            self._torrent_seeding_page = "api/user/listTorrentActivity?id=&type=seeding&page=1&size=20000"

    def _parse_logged_in(self, html_text):
        """
        Determine whether login succeeded.
        Detection is skipped for now (always True); to be optimized later.
        :param html_text:
        :return: True
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        # The detail API fills in the real username later; fall back to the id.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text: str):
        # Traffic figures arrive with the detail API response; nothing to do here.
        pass

    def _parse_user_detail_info(self, html_text: str):
        """Parse api/user/getMainInfo JSON: identity, traffic, bonus, unread counts."""
        detail = json.loads(html_text)
        if detail.get("status") != 200:
            return

        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = user_info.get("class", {}).get("name")
        self.join_at = user_info.get("regTime", 0)
        self.join_at = StringUtils.unify_datetime_str(str(self.join_at))

        # Fix: default missing/null traffic values to 0 so the ratio
        # comparison/division below cannot raise TypeError.
        self.upload = user_info.get("upload") or 0
        self.download = user_info.get("download") or 0
        self.ratio = 0 if self.download <= 0 else round(self.upload / self.download, 3)
        self.bonus = user_info.get("bonus")

        self.message_unread = user_info.get("unreadAdmin", 0) + user_info.get("unreadInbox", 0) + user_info.get(
            "unreadSystem", 0)

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the user's seeding list from the JSON response.
        """
        seeding_info = json.loads(html_text)
        if seeding_info.get("status") != 200:
            return None

        torrents = seeding_info.get("data", {}).get("torrents", [])

        page_seeding_size = 0
        page_seeding_info = []
        for torrent in torrents:
            size = torrent.get("size", 0)
            seeders = torrent.get("seeding", 0)

            page_seeding_size += size
            page_seeding_info.append([seeders, size])

        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # size=20000 fetches everything in one request - no pagination
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        """
        Message endpoints (not implemented yet):
        system   api/message/listSystem?page=1&size=20
        inbox    api/message/listInbox?page=1&size=20
        admin    api/message/listAdmin?page=1&size=20
        :param html_text:
        :return: (title, date, content), all None
        """
        return None, None, None
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class TorrentLeechSiteUserInfo(ISiteUserInfo):
    """User-info parser for TorrentLeech-style sites."""

    schema = SiteSchema.TorrentLeech
    order = SITE_BASE_ORDER + 40

    @classmethod
    def match(cls, html_text: str) -> bool:
        """A page is handled here when it mentions TorrentLeech."""
        return 'TorrentLeech' in html_text

    def _parse_site_page(self, html_text: str):
        """Derive the profile, traffic and seeding URLs from the landing page."""
        html_text = self._prepare_html_text(html_text)

        profile = re.search(r"/profile/([^/]+)/", html_text)
        if profile and profile.group().strip():
            self._user_detail_page = profile.group().strip().lstrip('/')
            self.userid = profile.group(1)
            self._user_traffic_page = f"profile/{self.userid}/view"
            self._torrent_seeding_page = f"profile/{self.userid}/seeding"

    def _parse_user_base_info(self, html_text: str):
        # TorrentLeech exposes no separate display name; reuse the profile id.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse upload/download/ratio plus level, join date and bonus points
        from the profile view page.
        :param html_text:
        :return:
        """
        html = etree.HTML(self._prepare_html_text(html_text))

        def first_node(xpath: str):
            # Return the first matching text node, or None when absent
            nodes = html.xpath(xpath)
            return nodes[0] if nodes else None

        uploaded = first_node('//div[contains(@class,"profile-uploaded")]//span/text()')
        if uploaded is not None:
            self.upload = StringUtils.num_filesize(uploaded)
        downloaded = first_node('//div[contains(@class,"profile-downloaded")]//span/text()')
        if downloaded is not None:
            self.download = StringUtils.num_filesize(downloaded)
        share_ratio = first_node('//div[contains(@class,"profile-ratio")]//span/text()')
        if share_ratio is not None:
            self.ratio = StringUtils.str_float(share_ratio.replace('∞', '0'))

        level = first_node('//table[contains(@class, "profileViewTable")]'
                           '//tr/td[text()="Class"]/following-sibling::td/text()')
        if level is not None:
            self.user_level = level.strip()

        joined = first_node('//table[contains(@class, "profileViewTable")]'
                            '//tr/td[text()="Registration date"]/following-sibling::td/text()')
        if joined is not None:
            self.join_at = StringUtils.unify_datetime_str(joined.strip())

        points = first_node('//span[contains(@class, "total-TL-points")]/text()')
        if points is not None:
            self.bonus = StringUtils.str_float(points.strip())

    def _parse_user_detail_info(self, html_text: str):
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Accumulate seeding count/size from the seeding table.
        :param html_text:
        :param multi_page: whether data spans multiple pages
        :return: next page URL (always None - the list is returned at once)
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col, seeders_col = 2, 7

        size_cells = html.xpath(f'//tbody/tr/td[{size_col}]')
        seeder_cells = html.xpath(f'//tbody/tr/td[{seeders_col}]/text()')
        if size_cells and seeder_cells:
            rows = [
                (StringUtils.str_int(seeder_cells[i]),
                 StringUtils.num_filesize(size_cells[i].xpath("string(.)").strip()))
                for i in range(len(size_cells))
            ]
            self.seeding += len(size_cells)
            self.seeding_size += sum(size for _, size in rows)
            self.seeding_info.extend([seeders, size] for seeders, size in rows)

        # No pagination on this endpoint
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None
# -*- coding: utf-8 -*-
import re
from typing import Optional

from lxml import etree

from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
from app.utils.string import StringUtils


class Unit3dSiteUserInfo(ISiteUserInfo):
    """User-info parser for UNIT3D-based sites."""

    schema = SiteSchema.Unit3d
    order = SITE_BASE_ORDER + 15

    @classmethod
    def match(cls, html_text: str) -> bool:
        # UNIT3D pages always ship the framework's unit3d.js bundle
        return "unit3d.js" in html_text

    def _parse_user_base_info(self, html_text: str):
        """Pick the username out of the settings link and read bonus points."""
        html = etree.HTML(self._prepare_html_text(html_text))

        settings_links = html.xpath('//a[contains(@href, "/users/") and contains(@href, "settings")]/@href')
        if settings_links:
            name_match = re.search(r"/users/(.+)/settings", settings_links[0])
            if name_match and name_match.group().strip():
                self.username = name_match.group(1)
                self._torrent_seeding_page = f"/users/{self.username}/active?perPage=100&client=&seeding=include"
                self._user_detail_page = f"/users/{self.username}"

        earnings_links = html.xpath('//a[contains(@href, "bonus/earnings")]')
        if earnings_links:
            earnings_text = earnings_links[0].xpath("string(.)")
            amount = re.search(r"([\d,.]+)", earnings_text)
            if amount and amount.group(1).strip():
                self.bonus = StringUtils.str_float(amount.group(1))

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: level and registration date.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level badge
        badge_texts = html.xpath('//div[contains(@class, "content")]//span[contains(@class, "badge-user")]/text()')
        if badge_texts:
            self.user_level = badge_texts[0].strip()

        # Registration date (zh-CN / zh-TW / en labels)
        joined_texts = html.xpath('//div[contains(@class, "content")]//h4[contains(text(), "注册日期") '
                                  'or contains(text(), "註冊日期") '
                                  'or contains(text(), "Registration date")]/text()')
        if joined_texts:
            self.join_at = StringUtils.unify_datetime_str(
                joined_texts[0].replace('注册日期', '').replace('註冊日期', '').replace('Registration date', ''))

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse one page of seeding statistics.
        :param html_text:
        :param multi_page: whether data spans multiple pages
        :return: next page URL, or None
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col = 9
        seeders_col = 2
        # Locate the size column from the header row
        if html.xpath('//thead//th[contains(@class,"size")]'):
            size_col = len(html.xpath('//thead//th[contains(@class,"size")][1]/preceding-sibling::th')) + 1
        # Locate the seeders column from the header row
        if html.xpath('//thead//th[contains(@class,"seeders")]'):
            seeders_col = len(html.xpath('//thead//th[contains(@class,"seeders")]/preceding-sibling::th')) + 1

        size_cells = html.xpath(f'//tr[position()]/td[{size_col}]')
        seeder_cells = html.xpath(f'//tr[position()]/td[{seeders_col}]')
        if size_cells and seeder_cells:
            parsed = [
                (StringUtils.str_int(seeder_cells[i].xpath("string(.)").strip()),
                 StringUtils.num_filesize(size_cells[i].xpath("string(.)").strip()))
                for i in range(len(size_cells))
            ]
            self.seeding += len(size_cells)
            self.seeding_size += sum(size for _, size in parsed)
            self.seeding_info.extend([seeders, size] for seeders, size in parsed)

        # Next page: the sibling after the active pagination entry
        next_page = None
        siblings = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
        if siblings and len(siblings) > 1:
            page_num = siblings[0].xpath("string(.)").strip()
            if page_num.isdigit():
                next_page = f"{self._torrent_seeding_page}&page={page_num}"

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        """Parse upload/download/ratio figures out of the free-form page text."""
        html_text = self._prepare_html_text(html_text)
        up_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                             re.IGNORECASE)
        self.upload = StringUtils.num_filesize(up_match.group(1).strip()) if up_match else 0
        down_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                               re.IGNORECASE)
        self.download = StringUtils.num_filesize(down_match.group(1).strip()) if down_match else 0
        share_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        self.ratio = StringUtils.str_float(share_match.group(1)) if (
                share_match and share_match.group(1).strip()) else 0.0

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        return None

    def _parse_message_content(self, html_text):
        return None, None, None