mirror of
https://github.com/d0zingcat/MoviePilot-Plugins.git
synced 2026-05-13 23:16:47 +00:00
fix SiteStatistic
This commit is contained in:
@@ -3,12 +3,12 @@
|
||||
"name": "站点数据统计",
|
||||
"description": "站点统计数据图表。",
|
||||
"labels": "站点,仪表板",
|
||||
"version": "1.0.0",
|
||||
"version": "1.0.1",
|
||||
"icon": "statistic.png",
|
||||
"author": "lightolly,jxxghp",
|
||||
"level": 2,
|
||||
"history": {
|
||||
"v1.0.0": "MoviePilot V2 版本站点数据统计插件"
|
||||
"v1.0.1": "MoviePilot V2 版本站点数据统计插件"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,33 +1,19 @@
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import warnings
|
||||
from datetime import datetime, timedelta
|
||||
from multiprocessing.dummy import Pool as ThreadPool
|
||||
from threading import Lock
|
||||
from typing import Optional, Any, List, Dict, Tuple
|
||||
|
||||
import pytz
|
||||
import requests
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
from ruamel.yaml import CommentedMap
|
||||
|
||||
from app import schemas
|
||||
from app.chain.site import SiteChain
|
||||
from app.core.config import settings
|
||||
from app.core.event import Event, eventmanager
|
||||
from app.db.models import PluginData
|
||||
from app.db.models.siteuserdata import SiteUserData
|
||||
from app.db.site_oper import SiteOper
|
||||
from app.helper.browser import PlaywrightHelper
|
||||
from app.helper.module import ModuleHelper
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.log import logger
|
||||
from app.plugins import _PluginBase
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo
|
||||
from app.schemas.types import EventType, NotificationType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.object import ObjectUtils
|
||||
from app.schemas.types import EventType
|
||||
from app.utils.string import StringUtils
|
||||
from app.utils.timer import TimerUtils
|
||||
|
||||
warnings.filterwarnings("ignore", category=FutureWarning)
|
||||
|
||||
@@ -42,7 +28,7 @@ class SiteStatistic(_PluginBase):
|
||||
# 插件图标
|
||||
plugin_icon = "statistic.png"
|
||||
# 插件版本
|
||||
plugin_version = "1.0.0"
|
||||
plugin_version = "1.0.1"
|
||||
# 插件作者
|
||||
plugin_author = "lightolly,jxxghp"
|
||||
# 作者主页
|
||||
@@ -54,29 +40,17 @@ class SiteStatistic(_PluginBase):
|
||||
# 可使用的用户级别
|
||||
auth_level = 2
|
||||
|
||||
# 私有属性
|
||||
sites = None
|
||||
siteoper = None
|
||||
_scheduler: Optional[BackgroundScheduler] = None
|
||||
_last_update_time: Optional[datetime] = None
|
||||
_sites_data: dict = {}
|
||||
_site_schema: List[ISiteUserInfo] = None
|
||||
|
||||
# 配置属性
|
||||
siteoper = None
|
||||
siteshelper = None
|
||||
_enabled: bool = False
|
||||
_onlyonce: bool = False
|
||||
_sitemsg: bool = True
|
||||
_cron: str = ""
|
||||
_notify: bool = False
|
||||
_queue_cnt: int = 5
|
||||
_remove_failed: bool = False
|
||||
_statistic_type: str = None
|
||||
_statistic_sites: list = []
|
||||
_dashboard_type: str = "today"
|
||||
|
||||
def init_plugin(self, config: dict = None):
|
||||
self.sites = SitesHelper()
|
||||
self.siteoper = SiteOper()
|
||||
self.siteshelper = SitesHelper()
|
||||
|
||||
# 停止现有任务
|
||||
self.stop_service()
|
||||
|
||||
@@ -84,51 +58,12 @@ class SiteStatistic(_PluginBase):
|
||||
if config:
|
||||
self._enabled = config.get("enabled")
|
||||
self._onlyonce = config.get("onlyonce")
|
||||
self._cron = config.get("cron")
|
||||
self._notify = config.get("notify")
|
||||
self._sitemsg = config.get("sitemsg")
|
||||
self._queue_cnt = config.get("queue_cnt")
|
||||
self._remove_failed = config.get("remove_failed")
|
||||
self._statistic_type = config.get("statistic_type") or "all"
|
||||
self._statistic_sites = config.get("statistic_sites") or []
|
||||
self._dashboard_type = config.get("dashboard_type") or "today"
|
||||
|
||||
# 过滤掉已删除的站点
|
||||
all_sites = [site.id for site in self.siteoper.list_order_by_pri()] + [site.get("id") for site in
|
||||
self.__custom_sites()]
|
||||
self._statistic_sites = [site_id for site_id in all_sites if site_id in self._statistic_sites]
|
||||
self.__update_config()
|
||||
|
||||
if self._enabled or self._onlyonce:
|
||||
# 加载模块
|
||||
self._site_schema = ModuleHelper.load('app.plugins.sitestatistic.siteuserinfo',
|
||||
filter_func=lambda _, obj: hasattr(obj, 'schema'))
|
||||
|
||||
self._site_schema.sort(key=lambda x: x.order)
|
||||
# 站点上一次更新时间
|
||||
self._last_update_time = None
|
||||
# 站点数据
|
||||
self._sites_data = {}
|
||||
|
||||
# 立即运行一次
|
||||
if self._onlyonce:
|
||||
# 定时服务
|
||||
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
|
||||
logger.info(f"站点数据统计服务启动,立即运行一次")
|
||||
self._scheduler.add_job(self.refresh_all_site_data, 'date',
|
||||
run_date=datetime.now(
|
||||
tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3)
|
||||
)
|
||||
# 关闭一次性开关
|
||||
self._onlyonce = False
|
||||
|
||||
# 保存配置
|
||||
self.__update_config()
|
||||
|
||||
# 启动任务
|
||||
if self._scheduler.get_jobs():
|
||||
self._scheduler.print_jobs()
|
||||
self._scheduler.start()
|
||||
SiteChain().refresh_userdata()
|
||||
|
||||
def get_state(self) -> bool:
|
||||
return self._enabled
|
||||
@@ -168,57 +103,12 @@ class SiteStatistic(_PluginBase):
|
||||
}]
|
||||
|
||||
def get_service(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
注册插件公共服务
|
||||
[{
|
||||
"id": "服务ID",
|
||||
"name": "服务名称",
|
||||
"trigger": "触发器:cron/interval/date/CronTrigger.from_crontab()",
|
||||
"func": self.xxx,
|
||||
"kwargs": {} # 定时器参数
|
||||
}]
|
||||
"""
|
||||
if self._enabled and self._cron:
|
||||
return [{
|
||||
"id": "SiteStatistic",
|
||||
"name": "站点数据统计服务",
|
||||
"trigger": CronTrigger.from_crontab(self._cron),
|
||||
"func": self.refresh_all_site_data,
|
||||
"kwargs": {}
|
||||
}]
|
||||
elif self._enabled:
|
||||
triggers = TimerUtils.random_scheduler(num_executions=1,
|
||||
begin_hour=0,
|
||||
end_hour=1,
|
||||
min_interval=1,
|
||||
max_interval=60)
|
||||
ret_jobs = []
|
||||
for trigger in triggers:
|
||||
ret_jobs.append({
|
||||
"id": f"SiteStatistic|{trigger.hour}:{trigger.minute}",
|
||||
"name": "站点数据统计服务",
|
||||
"trigger": "cron",
|
||||
"func": self.refresh_all_site_data,
|
||||
"kwargs": {
|
||||
"hour": trigger.hour,
|
||||
"minute": trigger.minute
|
||||
}
|
||||
})
|
||||
return ret_jobs
|
||||
return []
|
||||
pass
|
||||
|
||||
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
|
||||
"""
|
||||
拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构
|
||||
"""
|
||||
# 站点的可选项(内置站点 + 自定义站点)
|
||||
customSites = self.__custom_sites()
|
||||
|
||||
site_options = ([{"title": site.name, "value": site.id}
|
||||
for site in self.siteoper.list_order_by_pri()]
|
||||
+ [{"title": site.get("name"), "value": site.get("id")}
|
||||
for site in customSites])
|
||||
|
||||
return [
|
||||
{
|
||||
'component': 'VForm',
|
||||
@@ -242,22 +132,6 @@ class SiteStatistic(_PluginBase):
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 4
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VSwitch',
|
||||
'props': {
|
||||
'model': 'notify',
|
||||
'label': '发送通知',
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
@@ -283,60 +157,7 @@ class SiteStatistic(_PluginBase):
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 3
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VTextField',
|
||||
'props': {
|
||||
'model': 'cron',
|
||||
'label': '执行周期',
|
||||
'placeholder': '5位cron表达式,留空自动'
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 3
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VTextField',
|
||||
'props': {
|
||||
'model': 'queue_cnt',
|
||||
'label': '队列数量'
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 3
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VSelect',
|
||||
'props': {
|
||||
'model': 'statistic_type',
|
||||
'label': '统计类型',
|
||||
'items': [
|
||||
{'title': '全量', 'value': 'all'},
|
||||
{'title': '增量', 'value': 'add'}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 3
|
||||
'md': 6
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
@@ -354,112 +175,47 @@ class SiteStatistic(_PluginBase):
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VRow',
|
||||
'content': [
|
||||
{
|
||||
'component': 'VCol',
|
||||
'content': [
|
||||
{
|
||||
'component': 'VSelect',
|
||||
'props': {
|
||||
'chips': True,
|
||||
'multiple': True,
|
||||
'model': 'statistic_sites',
|
||||
'label': '统计站点',
|
||||
'items': site_options
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VRow',
|
||||
'content': [
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 4
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VSwitch',
|
||||
'props': {
|
||||
'model': 'sitemsg',
|
||||
'label': '站点未读消息',
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
'component': 'VCol',
|
||||
'props': {
|
||||
'cols': 12,
|
||||
'md': 4
|
||||
},
|
||||
'content': [
|
||||
{
|
||||
'component': 'VSwitch',
|
||||
'props': {
|
||||
'model': 'remove_failed',
|
||||
'label': '移除失效站点',
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
], {
|
||||
"enabled": False,
|
||||
"onlyonce": False,
|
||||
"notify": True,
|
||||
"sitemsg": True,
|
||||
"cron": "5 1 * * *",
|
||||
"queue_cnt": 5,
|
||||
"remove_failed": False,
|
||||
"statistic_type": "all",
|
||||
"statistic_sites": [],
|
||||
"dashboard_type": 'today'
|
||||
}
|
||||
|
||||
def __get_data(self) -> Tuple[str, dict, dict]:
|
||||
def __get_data(self) -> Tuple[str, List[SiteUserData], List[SiteUserData]]:
|
||||
"""
|
||||
获取今天的日期、今天的站点数据、昨天的站点数据
|
||||
"""
|
||||
# 最近一天的签到数据
|
||||
stattistic_data: Dict[str, Dict[str, Any]] = {}
|
||||
# 最近一天的数据
|
||||
stattistic_data: List[SiteUserData] = []
|
||||
# 昨天数据
|
||||
yesterday_sites_data: Dict[str, Dict[str, Any]] = {}
|
||||
yesterday_sites_data: List[SiteUserData] = []
|
||||
# 获取最近所有数据
|
||||
data_list: List[PluginData] = self.get_data(key=None)
|
||||
data_list: List[SiteUserData] = self.siteoper.get_userdata()
|
||||
if not data_list:
|
||||
return "", {}, {}
|
||||
# 取key符合日期格式的数据
|
||||
data_list = [data for data in data_list if re.match(r"\d{4}-\d{2}-\d{2}", data.key)]
|
||||
return "", [], []
|
||||
# 每个日期只保留最后一条数据
|
||||
data_list = list({data.updated_day: data for data in data_list}.values())
|
||||
# 按日期倒序排序
|
||||
data_list.sort(key=lambda x: x.key, reverse=True)
|
||||
data_list.sort(key=lambda x: x.updated_day, reverse=True)
|
||||
# 今天的日期
|
||||
today = data_list[0].key
|
||||
# 数据按时间降序排序
|
||||
datas = [json.loads(data.value) for data in data_list if ObjectUtils.is_obj(data.value)]
|
||||
today = time.strftime('%Y-%m-%d', time.localtime())
|
||||
if len(data_list) > 0:
|
||||
stattistic_data = datas[0]
|
||||
today = data_list[0].updated_day
|
||||
stattistic_data = [data for data in data_list if data.updated_day == today]
|
||||
if len(data_list) > 1:
|
||||
yesterday_sites_data = datas[1]
|
||||
yestoday = data_list[1].updated_day
|
||||
yesterday_sites_data = [data for data in data_list if data.updated_day == yestoday]
|
||||
|
||||
# 今日数据按数据量降序排序
|
||||
stattistic_data.sort(key=lambda x: x.upload, reverse=True)
|
||||
|
||||
# 数据按时间降序排序
|
||||
stattistic_data = dict(sorted(stattistic_data.items(),
|
||||
key=lambda item: item[1].get('upload') or 0,
|
||||
reverse=True))
|
||||
return today, stattistic_data, yesterday_sites_data
|
||||
|
||||
@staticmethod
|
||||
def __get_total_elements(today: str, stattistic_data: dict, yesterday_sites_data: dict,
|
||||
def __get_total_elements(today: str, stattistic_data: List[SiteUserData], yesterday_sites_data: List[SiteUserData],
|
||||
dashboard: str = "today") -> List[dict]:
|
||||
"""
|
||||
获取统计元素
|
||||
@@ -473,7 +229,7 @@ class SiteStatistic(_PluginBase):
|
||||
return 0
|
||||
return round(float(value) / 1024 / 1024 / 1024, 1)
|
||||
|
||||
def __sub_dict(d1: dict, d2: dict) -> dict:
|
||||
def __sub_data(d1: dict, d2: dict) -> dict:
|
||||
"""
|
||||
计算两个字典相同Key值的差值(如果值为数字),返回新字典
|
||||
"""
|
||||
@@ -491,17 +247,13 @@ class SiteStatistic(_PluginBase):
|
||||
|
||||
if dashboard in ['total', 'all']:
|
||||
# 总上传量
|
||||
total_upload = sum([int(data.get("upload"))
|
||||
for data in stattistic_data.values() if data.get("upload")])
|
||||
total_upload = sum([data.upload for data in stattistic_data if data.upload])
|
||||
# 总下载量
|
||||
total_download = sum([int(data.get("download"))
|
||||
for data in stattistic_data.values() if data.get("download")])
|
||||
total_download = sum([data.download for data in stattistic_data if data.download])
|
||||
# 总做种数
|
||||
total_seed = sum([int(data.get("seeding"))
|
||||
for data in stattistic_data.values() if data.get("seeding")])
|
||||
total_seed = sum([data.seeding for data in stattistic_data if data.seeding])
|
||||
# 总做种体积
|
||||
total_seed_size = sum([int(data.get("seeding_size"))
|
||||
for data in stattistic_data.values() if data.get("seeding_size")])
|
||||
total_seed_size = sum([data.seeding_size for data in stattistic_data if data.seeding_size])
|
||||
|
||||
total_elements = [
|
||||
# 总上传量
|
||||
@@ -787,10 +539,15 @@ class SiteStatistic(_PluginBase):
|
||||
if dashboard in ["today", "all"]:
|
||||
# 计算增量数据集
|
||||
inc_data = {}
|
||||
for site, data in stattistic_data.items():
|
||||
inc = __sub_dict(data, yesterday_sites_data.get(site))
|
||||
for data in stattistic_data:
|
||||
yesterday_datas = [yd for yd in yesterday_sites_data if yd.domain == data.domain]
|
||||
if yesterday_datas:
|
||||
yesterday_data = yesterday_datas[0]
|
||||
else:
|
||||
yesterday_data = None
|
||||
inc = __sub_data(data.to_dict(), yesterday_data.to_dict() if yesterday_data else None)
|
||||
if inc:
|
||||
inc_data[site] = inc
|
||||
inc_data[data.name] = inc
|
||||
# 今日上传
|
||||
uploads = {k: v for k, v in inc_data.items() if v.get("upload")}
|
||||
# 今日上传站点
|
||||
@@ -889,7 +646,7 @@ class SiteStatistic(_PluginBase):
|
||||
# 合并返回
|
||||
return total_elements + today_elements
|
||||
|
||||
def get_dashboard(self) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]:
|
||||
def get_dashboard(self, key: str, **kwargs) -> Optional[Tuple[Dict[str, Any], Dict[str, Any], List[dict]]]:
|
||||
"""
|
||||
获取插件仪表盘页面,需要返回:1、仪表板col配置字典;2、仪表板页面元素配置json(含数据);3、全局配置(自动刷新等)
|
||||
1、col配置参考:
|
||||
@@ -970,48 +727,48 @@ class SiteStatistic(_PluginBase):
|
||||
'props': {
|
||||
'class': 'whitespace-nowrap break-keep text-high-emphasis'
|
||||
},
|
||||
'text': site
|
||||
'text': data.name
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': data.get("username")
|
||||
'text': data.username
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': data.get("user_level")
|
||||
'text': data.user_level
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'props': {
|
||||
'class': 'text-success'
|
||||
},
|
||||
'text': StringUtils.str_filesize(data.get("upload"))
|
||||
'text': StringUtils.str_filesize(data.upload)
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'props': {
|
||||
'class': 'text-error'
|
||||
},
|
||||
'text': StringUtils.str_filesize(data.get("download"))
|
||||
'text': StringUtils.str_filesize(data.download)
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': data.get('ratio')
|
||||
'text': data.ratio
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': format_bonus(data.get('bonus') or 0)
|
||||
'text': format_bonus(data.bonus or 0)
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': data.get('seeding')
|
||||
'text': data.seeding
|
||||
},
|
||||
{
|
||||
'component': 'td',
|
||||
'text': StringUtils.str_filesize(data.get('seeding_size'))
|
||||
'text': StringUtils.str_filesize(data.seeding_size)
|
||||
}
|
||||
]
|
||||
} for site, data in stattistic_data.items() if not data.get("err_msg")
|
||||
} for data in stattistic_data
|
||||
]
|
||||
|
||||
# 拼装页面
|
||||
@@ -1113,239 +870,7 @@ class SiteStatistic(_PluginBase):
|
||||
]
|
||||
|
||||
def stop_service(self):
|
||||
"""
|
||||
退出插件
|
||||
"""
|
||||
try:
|
||||
if self._scheduler:
|
||||
self._scheduler.remove_all_jobs()
|
||||
if self._scheduler.running:
|
||||
self._scheduler.shutdown()
|
||||
self._scheduler = None
|
||||
except Exception as e:
|
||||
logger.error("退出插件失败:%s" % str(e))
|
||||
|
||||
def __build_class(self, html_text: str) -> Any:
|
||||
for site_schema in self._site_schema:
|
||||
try:
|
||||
if site_schema.match(html_text):
|
||||
return site_schema
|
||||
except Exception as e:
|
||||
logger.error(f"站点匹配失败 {str(e)}")
|
||||
return None
|
||||
|
||||
def build(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]:
|
||||
"""
|
||||
构建站点信息
|
||||
"""
|
||||
site_name = site_info.get("name")
|
||||
site_cookie = site_info.get("cookie")
|
||||
apikey = site_info.get("apikey")
|
||||
token = site_info.get("token")
|
||||
if not site_cookie and not apikey and not token:
|
||||
return None
|
||||
url = site_info.get("url")
|
||||
proxy = site_info.get("proxy")
|
||||
ua = site_info.get("ua")
|
||||
# 会话管理
|
||||
with requests.Session() as session:
|
||||
proxies = settings.PROXY if proxy else None
|
||||
proxy_server = settings.PROXY_SERVER if proxy else None
|
||||
render = site_info.get("render")
|
||||
logger.debug(f"站点 {site_name} url={url},site_cookie={site_cookie},ua={ua},api_key={apikey},token={token},proxy={proxy}")
|
||||
if render:
|
||||
# 演染模式
|
||||
html_text = PlaywrightHelper().get_page_source(url=url,
|
||||
cookies=site_cookie,
|
||||
ua=ua,
|
||||
proxies=proxy_server)
|
||||
else:
|
||||
# 普通模式
|
||||
res = RequestUtils(cookies=site_cookie,
|
||||
session=session,
|
||||
ua=ua,
|
||||
proxies=proxies
|
||||
).get_res(url=url)
|
||||
if res and res.status_code == 200:
|
||||
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
|
||||
res.encoding = "utf-8"
|
||||
else:
|
||||
res.encoding = res.apparent_encoding
|
||||
html_text = res.text
|
||||
# 第一次登录反爬
|
||||
if html_text.find("title") == -1:
|
||||
i = html_text.find("window.location")
|
||||
if i == -1:
|
||||
return None
|
||||
tmp_url = url + html_text[i:html_text.find(";")] \
|
||||
.replace("\"", "") \
|
||||
.replace("+", "") \
|
||||
.replace(" ", "") \
|
||||
.replace("window.location=", "")
|
||||
res = RequestUtils(cookies=site_cookie,
|
||||
session=session,
|
||||
ua=ua,
|
||||
proxies=proxies
|
||||
).get_res(url=tmp_url)
|
||||
if res and res.status_code == 200:
|
||||
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
|
||||
res.encoding = "UTF-8"
|
||||
else:
|
||||
res.encoding = res.apparent_encoding
|
||||
html_text = res.text
|
||||
if not html_text:
|
||||
return None
|
||||
elif res is not None:
|
||||
logger.error("站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code))
|
||||
return None
|
||||
else:
|
||||
logger.error("站点 %s 无法访问:%s" % (site_name, url))
|
||||
return None
|
||||
|
||||
# 兼容假首页情况,假首页通常没有 <link rel="search" 属性
|
||||
if '"search"' not in html_text and '"csrf-token"' not in html_text:
|
||||
# 排除掉单页面应用,单页面应用首页包含一个 div 容器
|
||||
if not re.search(r"id=\"?root\"?", res.text, re.IGNORECASE):
|
||||
res = RequestUtils(cookies=site_cookie,
|
||||
session=session,
|
||||
ua=ua,
|
||||
proxies=proxies
|
||||
).get_res(url=url + "/index.php")
|
||||
if res and res.status_code == 200:
|
||||
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
|
||||
res.encoding = "utf-8"
|
||||
else:
|
||||
res.encoding = res.apparent_encoding
|
||||
html_text = res.text
|
||||
if not html_text:
|
||||
return None
|
||||
elif res is not None:
|
||||
logger.error(f"站点 {site_name} 连接失败,状态码:{res.status_code}")
|
||||
return None
|
||||
else:
|
||||
logger.error(f"站点 {site_name} 无法访问:{url}")
|
||||
return None
|
||||
# 解析站点类型
|
||||
if html_text:
|
||||
site_schema = self.__build_class(html_text)
|
||||
if not site_schema:
|
||||
logger.error(f"站点 {site_name} 无法识别站点类型,可能是由于插件代码不全,请尝试强制重装插件以确保代码完整")
|
||||
return None
|
||||
return site_schema(
|
||||
site_name=site_name,
|
||||
url=url,
|
||||
site_cookie=site_cookie,
|
||||
apikey=apikey,
|
||||
token=token,
|
||||
index_html=html_text,
|
||||
session=session,
|
||||
ua=ua,
|
||||
proxy=proxy)
|
||||
return None
|
||||
|
||||
def refresh_by_domain(self, domain: str, apikey: str) -> schemas.Response:
|
||||
"""
|
||||
刷新一个站点数据,可由API调用
|
||||
"""
|
||||
if apikey != settings.API_TOKEN:
|
||||
return schemas.Response(success=False, message="API密钥错误")
|
||||
site_info = self.sites.get_indexer(domain)
|
||||
if site_info:
|
||||
site_data = self.__refresh_site_data(site_info)
|
||||
if site_data:
|
||||
return schemas.Response(
|
||||
success=True,
|
||||
message=f"站点 {domain} 刷新成功",
|
||||
data=site_data.to_dict()
|
||||
)
|
||||
return schemas.Response(
|
||||
success=False,
|
||||
message=f"站点 {domain} 刷新数据失败,未获取到数据"
|
||||
)
|
||||
return schemas.Response(
|
||||
success=False,
|
||||
message=f"站点 {domain} 不存在"
|
||||
)
|
||||
|
||||
def __refresh_site_data(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]:
|
||||
"""
|
||||
更新单个site 数据信息
|
||||
:param site_info:
|
||||
:return:
|
||||
"""
|
||||
site_name = site_info.get('name')
|
||||
site_url = site_info.get('url')
|
||||
if not site_url:
|
||||
return None
|
||||
unread_msg_notify = True
|
||||
try:
|
||||
site_user_info: ISiteUserInfo = self.build(site_info=site_info)
|
||||
if site_user_info:
|
||||
logger.debug(f"站点 {site_name} 开始以 {site_user_info.site_schema()} 模型解析")
|
||||
# 开始解析
|
||||
site_user_info.parse()
|
||||
logger.debug(f"站点 {site_name} 解析完成")
|
||||
|
||||
# 获取不到数据时,仅返回错误信息,不做历史数据更新
|
||||
if site_user_info.err_msg:
|
||||
self._sites_data.update({site_name: {"err_msg": site_user_info.err_msg}})
|
||||
return None
|
||||
|
||||
if self._sitemsg:
|
||||
# 发送通知,存在未读消息
|
||||
self.__notify_unread_msg(site_name, site_user_info, unread_msg_notify)
|
||||
|
||||
# 分享率接近1时,发送消息提醒
|
||||
if site_user_info.ratio and float(site_user_info.ratio) < 1:
|
||||
self.post_message(mtype=NotificationType.SiteMessage,
|
||||
title=f"【站点分享率低预警】",
|
||||
text=f"站点 {site_user_info.site_name} 分享率 {site_user_info.ratio},请注意!")
|
||||
|
||||
self._sites_data.update(
|
||||
{
|
||||
site_name: {
|
||||
"upload": site_user_info.upload,
|
||||
"username": site_user_info.username,
|
||||
"user_level": site_user_info.user_level,
|
||||
"join_at": site_user_info.join_at,
|
||||
"download": site_user_info.download,
|
||||
"ratio": site_user_info.ratio,
|
||||
"seeding": site_user_info.seeding,
|
||||
"seeding_size": site_user_info.seeding_size,
|
||||
"leeching": site_user_info.leeching,
|
||||
"bonus": site_user_info.bonus,
|
||||
"url": site_url,
|
||||
"err_msg": site_user_info.err_msg,
|
||||
"message_unread": site_user_info.message_unread,
|
||||
"updated_at": datetime.now().strftime('%Y-%m-%d')
|
||||
}
|
||||
})
|
||||
return site_user_info
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.error(f"站点 {site_name} 获取流量数据失败:{str(e)}")
|
||||
logger.error(traceback.format_exc())
|
||||
return None
|
||||
|
||||
def __notify_unread_msg(self, site_name: str, site_user_info: ISiteUserInfo, unread_msg_notify: bool):
|
||||
if site_user_info.message_unread <= 0:
|
||||
return
|
||||
if self._sites_data.get(site_name, {}).get('message_unread') == site_user_info.message_unread:
|
||||
return
|
||||
if not unread_msg_notify:
|
||||
return
|
||||
|
||||
# 解析出内容,则发送内容
|
||||
if len(site_user_info.message_unread_contents) > 0:
|
||||
for head, date, content in site_user_info.message_unread_contents:
|
||||
msg_title = f"【站点 {site_user_info.site_name} 消息】"
|
||||
msg_text = f"时间:{date}\n标题:{head}\n内容:\n{content}"
|
||||
self.post_message(mtype=NotificationType.SiteMessage, title=msg_title, text=msg_text)
|
||||
else:
|
||||
self.post_message(mtype=NotificationType.SiteMessage,
|
||||
title=f"站点 {site_user_info.site_name} 收到 "
|
||||
f"{site_user_info.message_unread} 条新消息,请登陆查看")
|
||||
pass
|
||||
|
||||
@eventmanager.register(EventType.PluginAction)
|
||||
def refresh(self, event: Event):
|
||||
@@ -1360,145 +885,31 @@ class SiteStatistic(_PluginBase):
|
||||
self.post_message(channel=event.event_data.get("channel"),
|
||||
title="开始刷新站点数据 ...",
|
||||
userid=event.event_data.get("user"))
|
||||
self.refresh_all_site_data()
|
||||
SiteChain().refresh_userdatas()
|
||||
if event:
|
||||
self.post_message(channel=event.event_data.get("channel"),
|
||||
title="站点数据刷新完成!", userid=event.event_data.get("user"))
|
||||
|
||||
def refresh_all_site_data(self):
|
||||
def refresh_by_domain(self, domain: str, apikey: str) -> schemas.Response:
|
||||
"""
|
||||
多线程刷新站点下载上传量,默认间隔6小时
|
||||
刷新一个站点数据,可由API调用
|
||||
"""
|
||||
if not self.sites.get_indexers():
|
||||
return
|
||||
|
||||
logger.info("开始刷新站点数据 ...")
|
||||
|
||||
with lock:
|
||||
|
||||
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites()
|
||||
# 没有指定站点,默认使用全部站点
|
||||
if not self._statistic_sites:
|
||||
refresh_sites = all_sites
|
||||
else:
|
||||
refresh_sites = [site for site in all_sites if
|
||||
site.get("id") in self._statistic_sites]
|
||||
if not refresh_sites:
|
||||
return
|
||||
|
||||
# 将数据初始化为前一天,筛选站点
|
||||
yesterday_sites_data = {}
|
||||
today_date = datetime.now().strftime('%Y-%m-%d')
|
||||
if self._statistic_type == "add" or not self._remove_failed:
|
||||
if last_update_time := self.get_data("last_update_time"):
|
||||
yesterday_sites_data = self.get_data(last_update_time) or {}
|
||||
|
||||
if not self._remove_failed and yesterday_sites_data:
|
||||
site_names = [site.get("name") for site in refresh_sites]
|
||||
self._sites_data = {k: v for k, v in yesterday_sites_data.items() if k in site_names}
|
||||
|
||||
# 并发刷新
|
||||
with ThreadPool(min(len(refresh_sites), int(self._queue_cnt or 5))) as p:
|
||||
p.map(self.__refresh_site_data, refresh_sites)
|
||||
|
||||
# 通知刷新完成
|
||||
if self._notify:
|
||||
messages = {}
|
||||
# 总上传
|
||||
incUploads = 0
|
||||
# 总下载
|
||||
incDownloads = 0
|
||||
|
||||
for rand, site in enumerate(self._sites_data.keys()):
|
||||
upload = int(self._sites_data[site].get("upload") or 0)
|
||||
download = int(self._sites_data[site].get("download") or 0)
|
||||
updated_date = self._sites_data[site].get("updated_at")
|
||||
|
||||
if self._statistic_type == "add" and yesterday_sites_data.get(site):
|
||||
upload -= int(yesterday_sites_data[site].get("upload") or 0)
|
||||
download -= int(yesterday_sites_data[site].get("download") or 0)
|
||||
|
||||
if updated_date and updated_date != today_date:
|
||||
updated_date = f"({updated_date})"
|
||||
else:
|
||||
updated_date = ""
|
||||
|
||||
if upload > 0 or download > 0:
|
||||
incUploads += upload
|
||||
incDownloads += download
|
||||
messages[upload + (rand / 1000)] = (
|
||||
f"【{site}】{updated_date}\n"
|
||||
+ f"上传量:{StringUtils.str_filesize(upload)}\n"
|
||||
+ f"下载量:{StringUtils.str_filesize(download)}\n"
|
||||
+ "————————————"
|
||||
)
|
||||
|
||||
if incDownloads or incUploads:
|
||||
sorted_messages = [messages[key] for key in sorted(messages.keys(), reverse=True)]
|
||||
sorted_messages.insert(0, f"【汇总】\n"
|
||||
f"总上传:{StringUtils.str_filesize(incUploads)}\n"
|
||||
f"总下载:{StringUtils.str_filesize(incDownloads)}\n"
|
||||
f"————————————")
|
||||
self.post_message(mtype=NotificationType.SiteMessage,
|
||||
title="站点数据统计", text="\n".join(sorted_messages))
|
||||
|
||||
# 保存数据
|
||||
self.save_data(today_date, self._sites_data)
|
||||
|
||||
# 更新时间
|
||||
self.save_data("last_update_time", today_date)
|
||||
|
||||
self.eventmanager.send_event(etype=EventType.PluginAction, data={
|
||||
"action": "sitestatistic_refresh_complete"
|
||||
})
|
||||
|
||||
logger.info("站点数据刷新完成")
|
||||
|
||||
def __custom_sites(self) -> List[Any]:
|
||||
custom_sites = []
|
||||
custom_sites_config = self.get_config("CustomSites")
|
||||
if custom_sites_config and custom_sites_config.get("enabled"):
|
||||
custom_sites = custom_sites_config.get("sites")
|
||||
return custom_sites
|
||||
|
||||
def __update_config(self):
|
||||
self.update_config({
|
||||
"enabled": self._enabled,
|
||||
"onlyonce": self._onlyonce,
|
||||
"cron": self._cron,
|
||||
"notify": self._notify,
|
||||
"sitemsg": self._sitemsg,
|
||||
"queue_cnt": self._queue_cnt,
|
||||
"remove_failed": self._remove_failed,
|
||||
"statistic_type": self._statistic_type,
|
||||
"statistic_sites": self._statistic_sites,
|
||||
"dashboard_type": self._dashboard_type
|
||||
})
|
||||
|
||||
@eventmanager.register(EventType.SiteDeleted)
|
||||
def site_deleted(self, event):
|
||||
"""
|
||||
删除对应站点选中
|
||||
"""
|
||||
site_id = event.event_data.get("site_id")
|
||||
config = self.get_config()
|
||||
if config:
|
||||
statistic_sites = config.get("statistic_sites")
|
||||
if statistic_sites:
|
||||
if isinstance(statistic_sites, str):
|
||||
statistic_sites = [statistic_sites]
|
||||
|
||||
# 删除对应站点
|
||||
if site_id:
|
||||
statistic_sites = [site for site in statistic_sites if int(site) != int(site_id)]
|
||||
else:
|
||||
# 清空
|
||||
statistic_sites = []
|
||||
|
||||
# 若无站点,则停止
|
||||
if len(statistic_sites) == 0:
|
||||
self._enabled = False
|
||||
|
||||
self._statistic_sites = statistic_sites
|
||||
# 保存配置
|
||||
self.__update_config()
|
||||
if apikey != settings.API_TOKEN:
|
||||
return schemas.Response(success=False, message="API密钥错误")
|
||||
site_info = self.siteshelper.get_indexer(domain)
|
||||
if site_info:
|
||||
site_data = SiteChain().refresh_userdata(site=site_info)
|
||||
if site_data:
|
||||
return schemas.Response(
|
||||
success=True,
|
||||
message=f"站点 {domain} 刷新成功",
|
||||
data=site_data.dict()
|
||||
)
|
||||
return schemas.Response(
|
||||
success=False,
|
||||
message=f"站点 {domain} 刷新数据失败,未获取到数据"
|
||||
)
|
||||
return schemas.Response(
|
||||
success=False,
|
||||
message=f"站点 {domain} 不存在"
|
||||
)
|
||||
|
||||
@@ -1,428 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
import re
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
from urllib.parse import urljoin, urlsplit
|
||||
|
||||
from requests import Session
|
||||
|
||||
from app.core.config import settings
|
||||
from app.helper.cloudflare import under_challenge
|
||||
from app.log import logger
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.site import SiteUtils
|
||||
|
||||
SITE_BASE_ORDER = 1000
|
||||
|
||||
|
||||
# 站点框架
|
||||
class SiteSchema(Enum):
|
||||
DiscuzX = "Discuz!"
|
||||
Gazelle = "Gazelle"
|
||||
Ipt = "IPTorrents"
|
||||
NexusPhp = "NexusPhp"
|
||||
NexusProject = "NexusProject"
|
||||
NexusRabbit = "NexusRabbit"
|
||||
NexusHhanclub = "NexusHhanclub"
|
||||
NexusAudiences = "NexusAudiences"
|
||||
SmallHorse = "Small Horse"
|
||||
Unit3d = "Unit3d"
|
||||
TorrentLeech = "TorrentLeech"
|
||||
FileList = "FileList"
|
||||
TNode = "TNode"
|
||||
MTorrent = "MTorrent"
|
||||
Yema = "Yema"
|
||||
|
||||
|
||||
class ISiteUserInfo(metaclass=ABCMeta):
    """
    Abstract base class for per-site user-statistics scrapers.

    Subclasses implement the ``_parse_*`` hooks for one site framework
    (NexusPhp, Gazelle, ...); :meth:`parse` drives the overall flow:
    login check -> site page -> basic info -> detail info -> unread
    messages -> traffic -> seeding pages.
    """

    # Site framework (schema) this parser handles
    schema = SiteSchema.NexusPhp
    # Probe order when auto-detecting the schema: lower values are tried first
    order = SITE_BASE_ORDER
    # Request mode: "cookie" or "apikey"
    request_mode = "cookie"

    def __init__(self, site_name: str,
                 url: str,
                 site_cookie: str,
                 apikey: str,
                 token: str,
                 index_html: str,
                 session: Session = None,
                 ua: str = None,
                 emulate: bool = False,
                 proxy: bool = None):
        """
        :param site_name: display name of the site
        :param url: site base URL
        :param site_cookie: cookie string used when request_mode == "cookie"
        :param apikey: API key used when request_mode == "apikey"
        :param token: site auth token
        :param index_html: pre-fetched HTML of the site's index page
        :param session: optional requests Session reused for cookie requests
        :param ua: User-Agent string sent with every request
        :param emulate: whether browser emulation was used to fetch pages
        :param proxy: whether to route requests through the configured proxy
        """
        super().__init__()
        # Site identity
        self.site_name = None
        self.site_url = None
        self.apikey = apikey
        self.token = token
        # User identity
        self.username = None
        self.userid = None
        # Unread messages (count and list of (head, date, content) tuples)
        self.message_unread = 0
        self.message_unread_contents = []

        # Traffic counters
        self.upload = 0
        self.download = 0
        self.ratio = 0

        # Torrent counters
        self.seeding = 0
        self.leeching = 0
        self.uploaded = 0
        self.completed = 0
        self.incomplete = 0
        self.seeding_size = 0
        self.leeching_size = 0
        self.uploaded_size = 0
        self.completed_size = 0
        self.incomplete_size = 0
        # Per-torrent [seeders, size] pairs (JSON-serialized at end of parse())
        self.seeding_info = []

        # User detail page state
        self._user_basic_page = None
        self._user_basic_params = None
        self._user_basic_headers = None
        self.user_level = None
        self.join_at = None
        self.bonus = 0.0

        # Last error message, if any
        self.err_msg = None
        # Extra headers merged into every request (e.g. x-api-key)
        self._addition_headers = None

        # Default page paths; subclasses override in _parse_site_page()
        self._brief_page = "index.php"
        self._user_detail_page = "userdetails.php?id="
        self._user_detail_params = None
        self._user_detail_headers = None
        self._user_traffic_page = "index.php"
        self._user_traffic_params = None
        self._user_traffic_headers = None
        self._user_mail_unread_page = "messages.php?action=viewmailbox&box=1&unread=yes"
        self._sys_mail_unread_page = "messages.php?action=viewmailbox&box=-2&unread=yes"
        self._mail_unread_params = None
        self._mail_unread_headers = None
        self._mail_content_params = None
        self._mail_content_headers = None
        self._torrent_seeding_page = "getusertorrentlistajax.php?userid="
        self._torrent_seeding_params = None
        self._torrent_seeding_headers = None

        split_url = urlsplit(url)
        self.site_name = site_name
        self.site_url = url
        self.site_domain = split_url.netloc
        self._base_url = f"{split_url.scheme}://{split_url.netloc}"
        self._site_cookie = site_cookie
        self._index_html = index_html
        self._session = session if session else None
        self._ua = ua

        self._emulate = emulate
        self._proxy = proxy

    def site_schema(self) -> SiteSchema:
        """
        Return the site framework (schema) handled by this parser.
        :return: the parser's SiteSchema
        """
        return self.schema

    @classmethod
    def match(cls, html_text: str) -> bool:
        """
        Check whether this parser matches the given site.
        :param html_text: HTML of the site's index page
        :return: True if this parser can handle the site
        """
        pass

    def parse(self):
        """
        Parse all site/user information, populating the instance attributes.
        :return:
        """
        # Bail out early if the cookie is not logged in
        if not self._parse_logged_in(self._index_html):
            return
        # Let the subclass adjust page paths / headers for this site
        self._parse_site_page(self._index_html)
        # Basic user info: either from a dedicated page or the index page
        if self._user_basic_page:
            self._parse_user_base_info(
                self._get_page_content(
                    url=urljoin(self._base_url, self._user_basic_page),
                    params=self._user_basic_params,
                    headers=self._user_basic_headers
                )
            )
        else:
            self._parse_user_base_info(self._index_html)
        # Detailed user info (level, join date, bonus ...)
        if self._user_detail_page:
            self._parse_user_detail_info(
                self._get_page_content(
                    url=urljoin(self._base_url, self._user_detail_page),
                    params=self._user_detail_params,
                    headers=self._user_detail_headers
                )
            )
        # Unread messages
        self._pase_unread_msgs()
        # Upload / download / share ratio
        if self._user_traffic_page:
            self._parse_user_traffic_info(
                self._get_page_content(
                    url=urljoin(self._base_url, self._user_traffic_page),
                    params=self._user_traffic_params,
                    headers=self._user_traffic_headers
                )
            )
        # Seeding pages (may span multiple pages)
        self._parse_seeding_pages()
        # Serialize [seeders, size] pairs for storage
        self.seeding_info = json.dumps(self.seeding_info)

    def _pase_unread_msgs(self):
        """
        Collect every unread message's title, date and content.
        :return:
        """
        unread_msg_links = []
        if self.message_unread > 0:
            # Check both the user mailbox and the system mailbox
            links = {self._user_mail_unread_page, self._sys_mail_unread_page}
            for link in links:
                if not link:
                    continue
                msg_links = []
                next_page = self._parse_message_unread_links(
                    self._get_page_content(
                        url=urljoin(self._base_url, link),
                        params=self._mail_unread_params,
                        headers=self._mail_unread_headers
                    ),
                    msg_links)
                # Follow pagination until no "next page" is reported
                while next_page:
                    next_page = self._parse_message_unread_links(
                        self._get_page_content(
                            url=urljoin(self._base_url, next_page),
                            params=self._mail_unread_params,
                            headers=self._mail_unread_headers
                        ),
                        msg_links
                    )
                unread_msg_links.extend(msg_links)
        # 99999 is a sentinel meaning "some unread, count unknown" -- replace
        # it with the number of links actually found
        if self.message_unread == 99999:
            self.message_unread = len(unread_msg_links)
        # Fetch and parse each unread message's content
        for msg_link in unread_msg_links:
            logger.debug(f"{self.site_name} 信息链接 {msg_link}")
            head, date, content = self._parse_message_content(
                self._get_page_content(
                    urljoin(self._base_url, msg_link),
                    params=self._mail_content_params,
                    headers=self._mail_content_headers
                )
            )
            logger.debug(f"{self.site_name} 标题 {head} 时间 {date} 内容 {content}")
            self.message_unread_contents.append((head, date, content))

    def _parse_seeding_pages(self):
        """
        Parse the (possibly paginated) seeding-torrents listing.
        """
        if self._torrent_seeding_page:
            # First page
            next_page = self._parse_user_torrent_seeding_info(
                self._get_page_content(
                    url=urljoin(self._base_url, self._torrent_seeding_page),
                    params=self._torrent_seeding_params,
                    headers=self._torrent_seeding_headers
                )
            )

            # Remaining pages: the hook returns the next page's URL, or
            # None/False when exhausted
            while next_page is not None and next_page is not False:
                next_page = self._parse_user_torrent_seeding_info(
                    self._get_page_content(
                        url=urljoin(urljoin(self._base_url, self._torrent_seeding_page), next_page),
                        params=self._torrent_seeding_params,
                        headers=self._torrent_seeding_headers
                    ),
                    multi_page=True)

    @staticmethod
    def _prepare_html_text(html_text):
        """
        Strip noise (pixel sizes and #NNN fragments) that would confuse
        the regex/XPath based parsers.
        """
        return re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_text))

    @abstractmethod
    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Extract links to unread messages, appending them to msg_links.
        :param html_text:
        :return: next page URL, or None when there is no further page
        """
        pass

    def _get_page_content(self, url: str, params: dict = None, headers: dict = None):
        """
        Fetch a page, honoring the configured request mode and proxy.
        :param url: page URL
        :param params: POST parameters (a GET is issued when absent)
        :param headers: extra request headers
        :return: response text ("" on failure)
        """
        req_headers = None
        proxies = settings.PROXY if self._proxy else None
        if self._ua or headers or self._addition_headers:
            req_headers = {
                "User-Agent": f"{self._ua}"
            }

            if headers:
                req_headers.update(headers)
            else:
                req_headers.update({
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                })
            if self._addition_headers:
                req_headers.update(self._addition_headers)

        if self.request_mode == "apikey":
            # API-key mode: credentials travel in headers, no cookie/session
            cookie = None
            session = None
        else:
            # Cookie mode
            cookie = self._site_cookie
            session = self._session

        if params:
            # Fix: req_headers can be None when no UA/extra headers are
            # configured -- guard before .get() to avoid AttributeError
            if req_headers and req_headers.get("Content-Type") == "application/json":
                res = RequestUtils(cookies=cookie,
                                   session=session,
                                   timeout=60,
                                   proxies=proxies,
                                   headers=req_headers).post_res(url=url, json=params)
            else:
                res = RequestUtils(cookies=cookie,
                                   session=session,
                                   timeout=60,
                                   proxies=proxies,
                                   headers=req_headers).post_res(url=url, data=params)
        else:
            res = RequestUtils(cookies=cookie,
                               session=session,
                               timeout=60,
                               proxies=proxies,
                               headers=req_headers).get_res(url=url)
        # 500/403 are accepted too: some sites return user pages with
        # these codes while still carrying usable content
        if res is not None and res.status_code in (200, 500, 403):
            if req_headers and "application/json" in str(req_headers.get("Accept")):
                return json.dumps(res.json())
            else:
                # Cloudflare challenge pages need browser emulation instead
                if under_challenge(res.text):
                    logger.warn(
                        f"{self.site_name} 检测到Cloudflare,请更新Cookie和UA")
                    return ""
                if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
                    res.encoding = "utf-8"
                else:
                    res.encoding = res.apparent_encoding
                return res.text

        return ""

    @abstractmethod
    def _parse_site_page(self, html_text: str):
        """
        Parse the site index page and configure per-site page paths/headers.
        :param html_text:
        :return:
        """
        pass

    @abstractmethod
    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user info (username, userid, page paths).
        :param html_text:
        :return:
        """
        pass

    def _parse_logged_in(self, html_text):
        """
        Check whether the user is logged in; records err_msg when not.
        :param html_text:
        :return: True/False
        """
        logged_in = SiteUtils.is_logged_in(html_text)
        if not logged_in:
            self.err_msg = "未检测到已登陆,请检查cookies是否过期"
            logger.warn(f"{self.site_name} 未登录,跳过后续操作")

        return logged_in

    @abstractmethod
    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse upload, download and share-ratio figures.
        :param html_text:
        :return:
        """
        pass

    @abstractmethod
    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding-torrents listing.
        :param html_text:
        :param multi_page: True when this is a continuation page
        :return: next page URL, or None when there is no further page
        """
        pass

    @abstractmethod
    def _parse_user_detail_info(self, html_text: str):
        """
        Parse detailed user info: join date, level, bonus points, etc.
        :param html_text:
        :return:
        """
        pass

    @abstractmethod
    def _parse_message_content(self, html_text):
        """
        Parse one private message.
        :param html_text:
        :return: (head, date, content) tuple
        """
        pass

    def to_dict(self):
        """
        Serialize all public attributes to a dict (SiteSchema values are
        converted to their string value).
        """
        attributes = [
            attr for attr in dir(self)
            if not callable(getattr(self, attr)) and not attr.startswith("_")
        ]
        return {
            attr: getattr(self, attr).value
            if isinstance(getattr(self, attr), SiteSchema)
            else getattr(self, attr) for attr in attributes
        }
|
||||
@@ -1,139 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class DiscuzUserInfo(ISiteUserInfo):
    """User-statistics parser for sites built on the Discuz! forum framework."""

    schema = SiteSchema.DiscuzX
    order = SITE_BASE_ORDER + 10

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Match by the 'Powered by Discuz!' footer text on the index page."""
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Powered by Discuz!' in printable_text

    def _parse_user_base_info(self, html_text: str):
        """Extract userid/username from the first profile link (&uid=N)."""
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        user_info = html.xpath('//a[contains(@href, "&uid=")]')
        if user_info:
            user_id_match = re.search(r"&uid=(\d+)", user_info[0].attrib['href'])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                # Fixed listing URL (no placeholders despite the f-string)
                self._torrent_seeding_page = f"forum.php?&mod=torrents&cat_5up=on"
                self._user_detail_page = user_info[0].attrib['href']
                self.username = user_info[0].text.strip()

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date, level, bonus, traffic.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level (last usergroup link wins)
        user_levels_text = html.xpath('//a[contains(@href, "usergroup")]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[-1].strip()

        # Join date
        join_at_text = html.xpath('//li[em[text()="注册时间"]]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())

        # Share ratio
        # NOTE(review): this branch writes self.bonus, not self.ratio, and is
        # then overwritten by the bonus-points branch below -- confirm intent
        ratio_text = html.xpath('//li[contains(.//text(), "分享率")]//text()')
        if ratio_text:
            ratio_match = re.search(r"\(([\d,.]+)\)", ratio_text[0])
            if ratio_match and ratio_match.group(1).strip():
                self.bonus = StringUtils.str_float(ratio_match.group(1))

        # Bonus points
        bouns_text = html.xpath('//li[em[text()="积分"]]/text()')
        if bouns_text:
            self.bonus = StringUtils.str_float(bouns_text[0].strip())

        # Uploaded (value after the last '/')
        upload_text = html.xpath('//li[em[contains(text(),"上传量")]]/text()')
        if upload_text:
            self.upload = StringUtils.num_filesize(upload_text[0].strip().split('/')[-1])

        # Downloaded (value after the last '/')
        download_text = html.xpath('//li[em[contains(text(),"下载量")]]/text()')
        if download_text:
            self.download = StringUtils.num_filesize(download_text[0].strip().split('/')[-1])

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding-torrents listing.
        :param html_text:
        :param multi_page: True when this is a continuation page
        :return: next page URL, or None when there is no further page
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Default column positions; overridden below when the header row
        # carries size/seeders marker images
        size_col = 3
        seeders_col = 4
        # Locate the "size" column by its header image
        if html.xpath('//tr[position()=1]/td[.//img[@class="size"] and .//img[@alt="size"]]'):
            size_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="size"] '
                                      'and .//img[@alt="size"]]/preceding-sibling::td')) + 1
        # Locate the "seeders" column by its header image
        if html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] and .//img[@alt="seeders"]]'):
            seeders_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] '
                                         'and .//img[@alt="seeders"]]/preceding-sibling::td')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        # Accumulate across pages
        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Pagination: follow the "next page" link if present
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        # Traffic is parsed from the detail page instead; nothing to do here
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        # Unread-message parsing not supported for Discuz! sites
        return None

    def _parse_message_content(self, html_text):
        # Message-content parsing not supported for Discuz! sites
        return None, None, None
|
||||
@@ -1,127 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class FileListSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for sites built on the FileList framework."""

    schema = SiteSchema.FileList
    order = SITE_BASE_ORDER + 50

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Match by the 'Powered by FileList' footer text on the index page."""
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Powered by FileList' in printable_text

    def _parse_site_page(self, html_text: str):
        """Derive the user-detail and seeding-list page URLs from the index page."""
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)

        self._torrent_seeding_page = f"snatchlist.php?id={self.userid}&action=torrents&type=seeding"

    def _parse_user_base_info(self, html_text: str):
        """Extract the username from the profile link matching our userid."""
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
        if ret:
            self.username = str(ret[0])

    def _parse_user_traffic_info(self, html_text: str):
        """
        Upload/download/ratio are parsed from the detail page instead.
        :param html_text:
        :return:
        """
        return

    def _parse_user_detail_info(self, html_text: str):
        """Parse traffic, ratio, seeding stats, level, join date and bonus."""
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        upload_html = html.xpath('//table//tr/td[text()="Uploaded"]/following-sibling::td//text()')
        if upload_html:
            self.upload = StringUtils.num_filesize(upload_html[0])
        download_html = html.xpath('//table//tr/td[text()="Downloaded"]/following-sibling::td//text()')
        if download_html:
            self.download = StringUtils.num_filesize(download_html[0])

        ratio_html = html.xpath('//table//tr/td[text()="Share ratio"]/following-sibling::td//text()')
        if ratio_html:
            share_ratio = StringUtils.str_float(ratio_html[0])
            # A ratio with zero downloads is meaningless; report 0 instead
            self.ratio = 0 if self.download == 0 else share_ratio

        # NOTE(review): positional indexing [1]/[3] into the Seed bonus cell
        # is layout-coupled -- verify against a live page if parsing breaks
        seed_html = html.xpath('//table//tr/td[text()="Seed bonus"]/following-sibling::td//text()')
        if seed_html:
            self.seeding = StringUtils.str_int(seed_html[1])
            self.seeding_size = StringUtils.num_filesize(seed_html[3])

        user_level_html = html.xpath('//table//tr/td[text()="Class"]/following-sibling::td//text()')
        if user_level_html:
            self.user_level = user_level_html[0].strip()

        join_at_html = html.xpath('//table//tr/td[contains(text(), "Join")]/following-sibling::td//text()')
        if join_at_html:
            # Drop the trailing "(... ago)" part before normalizing
            join_at = (join_at_html[0].split("("))[0].strip()
            self.join_at = StringUtils.unify_datetime_str(join_at)

        bonus_html = html.xpath('//a[contains(@href, "shop.php")]')
        if bonus_html:
            self.bonus = StringUtils.str_float(bonus_html[0].xpath("string(.)").strip())
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding-torrents listing (single page only).
        :param html_text:
        :param multi_page: True when this is a continuation page
        :return: next page URL, or None when there is no further page
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Fixed column positions in the snatchlist table
        size_col = 6
        seeders_col = 7

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//table/tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//table/tr[position()>1]/td[{seeders_col}]')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        # Counts come from the detail page; only the per-torrent list is kept
        # self.seeding += page_seeding
        # self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Single-page listing: no pagination
        next_page = None

        return next_page

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        # Unread-message parsing not supported for FileList sites
        return None

    def _parse_message_content(self, html_text):
        # Message-content parsing not supported for FileList sites
        return None, None, None
|
||||
@@ -1,163 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class GazelleSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for sites built on the Gazelle framework."""

    schema = SiteSchema.Gazelle
    order = SITE_BASE_ORDER

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Match by the 'Powered by Gazelle' footer or the DIC Music brand."""
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""

        return "Powered by Gazelle" in printable_text or "DIC Music" in printable_text

    def _parse_user_base_info(self, html_text: str):
        """Extract userid/username, traffic, ratio and bonus from the index page."""
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        tmps = html.xpath('//a[contains(@href, "user.php?id=")]')
        if tmps:
            user_id_match = re.search(r"user.php\?id=(\d+)", tmps[0].attrib['href'])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
                self._user_detail_page = f"user.php?id={self.userid}"
                self.username = tmps[0].text.strip()

        # Uploaded: prefer the header data-value, fall back to the stats list
        tmps = html.xpath('//*[@id="header-uploaded-value"]/@data-value')
        if tmps:
            self.upload = StringUtils.num_filesize(tmps[0])
        else:
            tmps = html.xpath('//li[@id="stats_seeding"]/span/text()')
            if tmps:
                self.upload = StringUtils.num_filesize(tmps[0])

        # Downloaded: same two-step fallback
        tmps = html.xpath('//*[@id="header-downloaded-value"]/@data-value')
        if tmps:
            self.download = StringUtils.num_filesize(tmps[0])
        else:
            tmps = html.xpath('//li[@id="stats_leeching"]/span/text()')
            if tmps:
                self.download = StringUtils.num_filesize(tmps[0])

        # Compute ratio locally; 0 when nothing was downloaded
        self.ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)

        # Bonus: tooltip attribute first, visible link text as fallback
        tmps = html.xpath('//a[contains(@href, "bonus.php")]/@data-tooltip')
        if tmps:
            bonus_match = re.search(r"([\d,.]+)", tmps[0])
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
        else:
            tmps = html.xpath('//a[contains(@href, "bonus.php")]')
            if tmps:
                bonus_text = tmps[0].xpath("string(.)")
                bonus_match = re.search(r"([\d,.]+)", bonus_text)
                if bonus_match and bonus_match.group(1).strip():
                    self.bonus = StringUtils.str_float(bonus_match.group(1))

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date and level.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level: data-value attribute first, text fallback
        user_levels_text = html.xpath('//*[@id="class-value"]/@data-value')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
        else:
            user_levels_text = html.xpath('//li[contains(text(), "用户等级")]/text()')
            if user_levels_text:
                self.user_level = user_levels_text[0].split(':')[1].strip()

        # Join date: data-value attribute first, stats-box text fallback
        join_at_text = html.xpath('//*[@id="join-date-value"]/@data-value')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
        else:
            join_at_text = html.xpath(
                '//div[contains(@class, "box_userinfo_stats")]//li[contains(text(), "加入时间")]/span/text()')
            if join_at_text:
                self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding-torrents listing.
        :param html_text:
        :param multi_page: True when this is a continuation page
        :return: next page URL, or None when there is no further page
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Column positions derived from the header row width
        # NOTE(review): the "- 3" / "+ 2" offsets are layout-coupled
        # assumptions about the Gazelle torrent table -- verify on breakage
        size_col = 3
        # Locate the size column
        if html.xpath('//table[contains(@id, "torrent")]//tr[1]/td'):
            size_col = len(html.xpath('//table[contains(@id, "torrent")]//tr[1]/td')) - 3
        # Seeders column sits two cells to the right of size
        seeders_col = size_col + 2

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{seeders_col}]/text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        if multi_page:
            # Continuation page: accumulate
            self.seeding += page_seeding
            self.seeding_size += page_seeding_size
            self.seeding_info.extend(page_seeding_info)
        else:
            # First page: only set values not already populated elsewhere
            if not self.seeding:
                self.seeding = page_seeding
            if not self.seeding_size:
                self.seeding_size = page_seeding_size
            if not self.seeding_info:
                self.seeding_info = page_seeding_info

        # Pagination: follow the "Next" link if present
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "Next") or contains(.//text(), "下一页")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        # TODO
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        # Unread-message parsing not supported for Gazelle sites
        return None

    def _parse_message_content(self, html_text):
        # Message-content parsing not supported for Gazelle sites
        return None, None, None
|
||||
@@ -1,93 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class IptSiteUserInfo(ISiteUserInfo):
    """User-statistics parser for IPTorrents."""

    schema = SiteSchema.Ipt
    order = SITE_BASE_ORDER + 35

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Match by the literal 'IPTorrents' string anywhere in the page."""
        return 'IPTorrents' in html_text

    def _parse_user_base_info(self, html_text: str):
        """Extract userid/username and the stats block from the index page."""
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        tmps = html.xpath('//a[contains(@href, "/u/")]//text()')
        tmps_id = html.xpath('//a[contains(@href, "/u/")]/@href')
        if tmps:
            self.username = str(tmps[-1])
        if tmps_id:
            user_id_match = re.search(r"/u/(\d+)", tmps_id[0])
            if user_id_match and user_id_match.group().strip():
                self.userid = user_id_match.group(1)
                self._user_detail_page = f"user.php?u={self.userid}"
                self._torrent_seeding_page = f"peers?u={self.userid}"

        # NOTE(review): positional span/a indexing below is layout-coupled
        # to the IPTorrents stats widget -- verify on breakage
        tmps = html.xpath('//div[@class = "stats"]/div/div')
        if tmps:
            self.upload = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[1]).strip())
            self.download = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[2]).strip())
            self.seeding = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[0])
            self.leeching = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[1])
            # '-' means no ratio yet; treat it as 0
            self.ratio = StringUtils.str_float(str(tmps[0].xpath('span/text()')[0]).strip().replace('-', '0'))
            self.bonus = StringUtils.str_float(tmps[0].xpath('a')[3].xpath('text()')[0])

    def _parse_site_page(self, html_text: str):
        # TODO
        pass

    def _parse_user_detail_info(self, html_text: str):
        """Parse level and join date from the user-detail table."""
        html = etree.HTML(html_text)
        if not html:
            return

        user_levels_text = html.xpath('//tr/th[text()="Class"]/following-sibling::td[1]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()

        # Join date (strip the trailing "(... ago)" part)
        join_at_text = html.xpath('//tr/th[text()="Join date"]/following-sibling::td[1]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0])

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """Parse the peers page: count seeding torrents and sum their sizes."""
        html = etree.HTML(html_text)
        if not html:
            return
        # The seeding section runs from the "Seeders" header row up to the
        # "Leechers" header row; compute how many rows belong to it
        seeding_end_pos = 3
        if html.xpath('//tr/td[text() = "Leechers"]'):
            seeding_end_pos = len(html.xpath('//tr/td[text() = "Leechers"]/../preceding-sibling::tr')) + 1
            seeding_end_pos = seeding_end_pos - 3

        page_seeding = 0
        page_seeding_size = 0
        seeding_torrents = html.xpath('//tr/td[text() = "Seeders"]/../following-sibling::tr/td[position()=6]/text()')
        if seeding_torrents:
            page_seeding = seeding_end_pos
            for per_size in seeding_torrents[:seeding_end_pos]:
                # Sizes may appear as "x (y)"; take the parenthesized value
                if '(' in per_size and ')' in per_size:
                    per_size = per_size.split('(')[-1]
                    per_size = per_size.split(')')[0]

                page_seeding_size += StringUtils.num_filesize(per_size)

        self.seeding = page_seeding
        self.seeding_size = page_seeding_size

    def _parse_user_traffic_info(self, html_text: str):
        # TODO
        pass

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        # Unread-message parsing not supported for IPTorrents
        return None

    def _parse_message_content(self, html_text):
        # Message-content parsing not supported for IPTorrents
        return None, None, None
|
||||
@@ -1,200 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.log import logger
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class MTorrentSiteUserInfo(ISiteUserInfo):
    """User-data parser for M-Team sites.

    Unlike the NexusPhp family, M-Team exposes a JSON REST API, so every
    "page" configured here is an API endpoint and responses are decoded
    with json.loads. Requests authenticate via an x-api-key header.
    """

    schema = SiteSchema.MTorrent
    order = SITE_BASE_ORDER + 60
    # Authenticate with an API key instead of cookies
    request_mode = "apikey"

    # Mapping of role id (as a string) to user level name
    MTeam_sysRoleList = {
        "1": "User",
        "2": "Power User",
        "3": "Elite User",
        "4": "Crazy User",
        "5": "Insane User",
        "6": "Veteran User",
        "7": "Extreme User",
        "8": "Ultimate User",
        "9": "Nexus Master",
        "10": "VIP",
        "11": "Retiree",
        "12": "Uploader",
        "13": "Moderator",
        "14": "Administrator",
        "15": "Sysop",
        "16": "Staff",
        "17": "Offer memberStaff",
        "18": "Bet memberStaff",
    }

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Return True when the page <title> contains "M-Team"."""
        html = etree.HTML(html_text)
        if not html:
            return False
        if html.xpath("//title/text()") and "M-Team" in html.xpath("//title/text()")[0]:
            return True
        return False

    def _parse_site_page(self, html_text: str):
        """
        Configure the API endpoints used by the remaining parse steps.
        """
        # Switch the base URL to the site's dedicated API host
        self._base_url = f"https://api.{StringUtils.get_url_domain(self._base_url)}"
        self._user_traffic_page = None
        self._user_detail_page = None
        self._user_basic_page = "api/member/profile"
        self._user_basic_params = {
            "uid": self.userid
        }
        self._sys_mail_unread_page = None
        self._user_mail_unread_page = "api/msg/search"
        self._mail_unread_params = {
            "keyword": "",
            "box": "-2",
            "type": "pageNumber",
            "pageSize": 100
        }
        self._torrent_seeding_page = "api/member/getUserTorrentList"
        self._torrent_seeding_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/plain, */*"
        }
        self._addition_headers = {
            "x-api-key": self.apikey,
        }

    def _parse_logged_in(self, html_text):
        """
        Whether login succeeded (normally judged by presence of user info).
        Detection is skipped for now and always reports success, to be
        refined later.
        :param html_text:
        :return:
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user info. Traffic and detail parsing are merged into
        this step, so _parse_user_traffic_info/_parse_user_detail_info are
        no-ops for this schema.
        """
        if not html_text:
            return None
        detail = json.loads(html_text)
        # API success is signalled by code == "0"
        if not detail or detail.get("code") != "0":
            return
        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = self.MTeam_sysRoleList.get(user_info.get("role") or "1")
        self.join_at = user_info.get("memberStatus", {}).get("createdDate")

        self.upload = int(user_info.get("memberCount", {}).get("uploaded") or '0')
        self.download = int(user_info.get("memberCount", {}).get("downloaded") or '0')
        self.ratio = user_info.get("memberCount", {}).get("shareRate") or 0
        self.bonus = user_info.get("memberCount", {}).get("bonus") or 0
        # Message count is unknown up front; sentinel forces message parsing
        self.message_unread = 99999

        # Initial paging parameters for the seeding-list endpoint
        self._torrent_seeding_params = {
            "pageNumber": 1,
            "pageSize": 200,
            "type": "SEEDING",
            "userid": self.userid
        }

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse user traffic info — merged into _parse_user_base_info.
        """
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse user detail info — merged into _parse_user_base_info.
        """
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse one page of seeding torrents.
        Returns "" when another page should be fetched, otherwise None.
        """
        if not html_text:
            return None
        seeding_info = json.loads(html_text)
        if not seeding_info or seeding_info.get("code") != "0":
            return None
        torrents = seeding_info.get("data", {}).get("data", [])
        page_seeding_size = 0
        page_seeding_info = []
        for info in torrents:
            torrent = info.get("torrent", {})
            size = int(torrent.get("size") or '0')
            # NOTE(review): the "source" field is used as the seeder count here
            # — confirm against the M-Team API schema.
            seeders = int(torrent.get("source") or '0')
            page_seeding_size += StringUtils.num_filesize(per_size)
            page_seeding_size += size
            page_seeding_info.append([seeders, size])
        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Query the total seeding count to decide whether more pages remain
        seeder_count = 0
        try:
            result = self._get_page_content(
                url=urljoin(self._base_url, "api/tracker/myPeerStatus"),
                params={"uid": self.userid},
            )
            if result:
                seeder_info = json.loads(result)
                seeder_count = int(seeder_info.get("data", {}).get("seeder") or 0)
        except Exception as e:
            logger.error(f"获取做种数失败: {str(e)}")
        if not seeder_count:
            return None
        if self.seeding >= seeder_count:
            return None
        # Another page remains: bump the page number for the next request
        self._torrent_seeding_params["pageNumber"] += 1
        return ""

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Parse unread messages. Contents are read directly from the JSON
        payload (no per-message links), so msg_links is left untouched.
        """
        if not html_text:
            return None
        messages_info = json.loads(html_text)
        if not messages_info or messages_info.get("code") != "0":
            return None
        messages = messages_info.get("data", {}).get("data", [])
        for message in messages:
            if not message.get("unread"):
                continue
            head = message.get("title")
            date = message.get("createdDate")
            content = message.get("context")
            if head and date and content:
                self.message_unread_contents.append((head, date, content))
            # Mark the message as read on the server
            self._get_page_content(
                url=urljoin(self._base_url, f"api/msg/markRead"),
                params={"msgId": message.get("id")}
            )
        # No further pages
        return None

    def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Parse a single message's content — not used for this schema.
        """
        pass
|
||||
@@ -1,22 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema
|
||||
from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
|
||||
|
||||
|
||||
class NexusAudiencesSiteUserInfo(NexusPhpSiteUserInfo):
    """NexusPhp variant for the Audiences site (audiences.me)."""

    schema = SiteSchema.NexusAudiences
    order = SITE_BASE_ORDER + 5

    @classmethod
    def match(cls, html_text: str) -> bool:
        """The landing page embeds the 'audiences.me' domain name."""
        return html_text.find('audiences.me') != -1

    def _parse_site_page(self, html_text: str):
        """Use the non-ajax seeding-list endpoint instead of the default one."""
        super()._parse_site_page(html_text)
        seeding_page = "usertorrentlist.php?userid={}&type=seeding".format(self.userid)
        self._torrent_seeding_page = seeding_page

    def _parse_seeding_pages(self):
        """Seeding pages require a Referer header pointing at the user detail page."""
        referer = urljoin(self._base_url, self._user_detail_page)
        self._torrent_seeding_headers = {"Referer": referer}
        super()._parse_seeding_pages()
|
||||
@@ -1,61 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema
|
||||
from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class NexusHhanclubSiteUserInfo(NexusPhpSiteUserInfo):
    """NexusPhp variant for hhanclub.top, whose stats live in a custom info panel."""

    schema = SiteSchema.NexusHhanclub
    order = SITE_BASE_ORDER + 20

    @classmethod
    def match(cls, html_text: str) -> bool:
        """The page embeds the 'hhanclub.top' domain name."""
        return 'hhanclub.top' in html_text

    def _parse_user_traffic_info(self, html_text):
        """Parse upload / download / ratio from the site-specific user info panel."""
        super()._parse_user_traffic_info(html_text)

        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        # Upload, download and share ratio sit at fixed positions in the
        # #user-info-panel element — fragile if the site layout changes.
        upload_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)",
                                 html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[4]/text()')[0])
        download_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)",
                                   html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[5]/text()')[0])
        ratio_match = re.search(r"分享率][::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)",
                                html.xpath('//*[@id="user-info-panel"]/div[2]/div[1]/div[1]/div/text()')[0])

        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
        # Compute a fallback ratio from upload/download,
        # but prefer the ratio displayed on the page when present
        calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
                ratio_match and ratio_match.group(1).strip()) else calc_ratio

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date and level.
        :param html_text:
        :return:
        """
        super()._parse_user_detail_info(html_text)

        html = etree.HTML(html_text)
        if not html:
            return
        # Join date
        join_at_text = html.xpath('//*[@id="mainContent"]/div/div[2]/div[4]/div[3]/span[2]/text()[1]')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip())

    def _get_user_level(self, html):
        # The level is rendered as an image whose @title holds the level name
        super()._get_user_level(html)
        user_level_path = html.xpath('//*[@id="mainContent"]/div/div[2]/div[2]/div[4]/span[2]/img/@title')
        if user_level_path:
            self.user_level = user_level_path[0]
|
||||
@@ -1,404 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.log import logger
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class NexusPhpSiteUserInfo(ISiteUserInfo):
    """Default parser for NexusPhp-based sites; also the fallback for unknown sites."""

    schema = SiteSchema.NexusPhp
    order = SITE_BASE_ORDER * 2

    @classmethod
    def match(cls, html_text: str) -> bool:
        """
        NexusPhp is the default schema, so this always matches.
        :param html_text:
        :return:
        """
        return True

    def _parse_site_page(self, html_text: str):
        """Locate the user-detail link to obtain the user id and seeding-list URL."""
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)
            self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
        else:
            # Fall back to a bare "userdetails" link with no numeric id
            user_detail = re.search(r"(userdetails)", html_text)
            if user_detail and user_detail.group().strip():
                self._user_detail_page = user_detail.group().strip().lstrip('/')
                self.userid = None
                self._torrent_seeding_page = None

    def _parse_message_unread(self, html_text):
        """
        Parse the number of unread private messages.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return

        message_labels = html.xpath('//a[@href="messages.php"]/..')
        message_labels.extend(html.xpath('//a[contains(@href, "messages.php")]/..'))
        if message_labels:
            message_text = message_labels[0].xpath("string(.)")

            logger.debug(f"{self.site_name} 消息原始信息 {message_text}")
            # Matches e.g. "信息箱 3", "(3", "你有 3" — the count is group 2
            message_unread_match = re.findall(r"[^Date](信息箱\s*|\(|你有\xa0)(\d+)", message_text)

            if message_unread_match and len(message_unread_match[-1]) == 2:
                self.message_unread = StringUtils.str_int(message_unread_match[-1][1])
            elif message_text.isdigit():
                self.message_unread = StringUtils.str_int(message_text)

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user info.
        """
        # Merged parsing to avoid extra request round-trips
        self._parse_user_traffic_info(html_text)
        self._user_traffic_page = None

        self._parse_message_unread(html_text)

        html = etree.HTML(html_text)
        if not html:
            return

        # Username: try the bold userdetails link for this user id first
        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//b//text()')
        if ret:
            self.username = str(ret[0])
            return
        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
        if ret:
            self.username = str(ret[0])

        ret = html.xpath('//a[contains(@href, "userdetails")]//strong//text()')
        if ret:
            self.username = str(ret[0])
            return

    def _parse_user_traffic_info(self, html_text):
        """
        Parse user traffic info: upload, download, ratio, leeching, bonus.
        """
        html_text = self._prepare_html_text(html_text)
        upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                 re.IGNORECASE)
        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
        download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                   re.IGNORECASE)
        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
        ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        # Compute a fallback ratio from upload/download
        calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
        # Prefer the ratio displayed on the page
        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
                ratio_match and ratio_match.group(1).strip()) else calc_ratio
        leeching_match = re.search(r"(Torrents leeching|下载中)[\u4E00-\u9FA5\D\s]+(\d+)[\s\S]+<", html_text)
        self.leeching = StringUtils.str_int(leeching_match.group(2)) if leeching_match and leeching_match.group(
            2).strip() else 0
        html = etree.HTML(html_text)
        # UCoin sites: bonus comes from the coin widget and we are done
        has_ucoin, self.bonus = self._parse_ucoin(html)
        if has_ucoin:
            return
        tmps = html.xpath('//a[contains(@href,"mybonus")]/text()') if html else None
        if tmps:
            bonus_text = str(tmps[0]).strip()
            bonus_match = re.search(r"([\d,.]+)", bonus_text)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
                return
        # Fallback: scrape the bonus straight out of the raw HTML
        bonus_match = re.search(r"mybonus.[\[\]::<>/a-zA-Z_\-=\"'\s#;.(使用&说明魔力值豆]+\s*([\d,.]+)[\[<()&\s]", html_text)
        try:
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))
                return
            bonus_match = re.search(r"[魔力值|\]][\[\]::<>/a-zA-Z_\-=\"'\s#;]+\s*([\d,.]+|\"[\d,.]+\")[<>()&\s]",
                                    html_text,
                                    flags=re.S)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1).strip('"'))
        except Exception as err:
            logger.error(f"{self.site_name} 解析魔力值出错, 错误信息: {str(err)}")

    @staticmethod
    def _parse_ucoin(html):
        """
        Parse UCoin balances, normalised to copper (1 gold = 100 silver = 10000 copper).
        :param html:
        :return: (found, total_in_copper)
        """
        if html:
            gold, silver, copper = None, None, None

            golds = html.xpath('//span[@class = "ucoin-symbol ucoin-gold"]//text()')
            if golds:
                gold = StringUtils.str_float(str(golds[-1]))
            silvers = html.xpath('//span[@class = "ucoin-symbol ucoin-silver"]//text()')
            if silvers:
                silver = StringUtils.str_float(str(silvers[-1]))
            coppers = html.xpath('//span[@class = "ucoin-symbol ucoin-copper"]//text()')
            if coppers:
                copper = StringUtils.str_float(str(coppers[-1]))
            if gold or silver or copper:
                gold = gold if gold else 0
                silver = silver if silver else 0
                copper = copper if copper else 0
                return True, gold * 100 * 100 + silver * 100 + copper
        return False, 0.0

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse seeding info.
        :param html_text:
        :param multi_page: whether data spans multiple pages
        :return: next page URL, or None when done
        """
        html = etree.HTML(str(html_text).replace(r'\/', '/'))
        if not html:
            return None

        # The landing page may carry an extended seeding link — use it instead
        seeding_url_text = html.xpath('//a[contains(@href,"torrents.php") '
                                      'and contains(@href,"seeding")]/@href')
        if multi_page is False and seeding_url_text and seeding_url_text[0].strip():
            self._torrent_seeding_page = seeding_url_text[0].strip()
            return self._torrent_seeding_page

        size_col = 3
        seeders_col = 4
        # Locate the size column by header markers
        size_col_xpath = '//tr[position()=1]/' \
                         'td[(img[@class="size"] and img[@alt="size"])' \
                         ' or (text() = "大小")' \
                         ' or (a/img[@class="size" and @alt="size"])]'
        if html.xpath(size_col_xpath):
            size_col = len(html.xpath(f'{size_col_xpath}/preceding-sibling::td')) + 1
        # Locate the seeders column by header markers
        seeders_col_xpath = '//tr[position()=1]/' \
                            'td[(img[@class="seeders"] and img[@alt="seeders"])' \
                            ' or (text() = "在做种")' \
                            ' or (a/img[@class="seeders" and @alt="seeders"])]'
        if html.xpath(seeders_col_xpath):
            seeders_col = len(html.xpath(f'{seeders_col_xpath}/preceding-sibling::td')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        # Scope to table[@class="torrents"] when that table exists
        table_class = '//table[@class="torrents"]' if html.xpath('//table[@class="torrents"]') else ''
        seeding_sizes = html.xpath(f'{table_class}//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]/b/a/text()')
        if not seeding_seeders:
            seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]//text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        # Accumulate across pages
        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Does a next page exist?
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()
            # fix up page url
            # NOTE(review): assumes self.userid is a str here — a None userid
            # would raise TypeError on the "in" test; confirm callers.
            if self.userid not in next_page:
                next_page = f'{next_page}&userid={self.userid}&type=seeding'

        return next_page

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user info: join date and level; also re-derives seeding
        stats when the seeding page did not provide them.
        :param html_text:
        :return:
        """
        html = etree.HTML(html_text)
        if not html:
            return

        self._get_user_level(html)

        self._fixup_traffic_info(html)

        # Join date
        join_at_text = html.xpath(
            '//tr/td[text()="加入日期" or text()="注册日期" or *[text()="加入日期"]]/following-sibling::td[1]//text()'
            '|//div/b[text()="加入日期"]/../text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip())

        # Seeding size & count — retried here when the seeding page
        # didn't yield them
        seeding_sizes = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                   'table[tr[1][td[4 and text()="尺寸"]]]//tr[position()>1]/td[4]')
        seeding_seeders = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                     'table[tr[1][td[5 and text()="做种者"]]]//tr[position()>1]/td[5]//text()')
        tmp_seeding = len(seeding_sizes)
        tmp_seeding_size = 0
        tmp_seeding_info = []
        for i in range(0, len(seeding_sizes)):
            size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
            seeders = StringUtils.str_int(seeding_seeders[i])

            tmp_seeding_size += size
            tmp_seeding_info.append([seeders, size])

        # Only fill in values the seeding page left empty
        if not self.seeding_size:
            self.seeding_size = tmp_seeding_size
        if not self.seeding:
            self.seeding = tmp_seeding
        if not self.seeding_info:
            self.seeding_info = tmp_seeding_info

        seeding_sizes = html.xpath('//tr/td[text()="做种统计"]/following-sibling::td[1]//text()')
        if seeding_sizes:
            seeding_match = re.search(r"总做种数:\s+(\d+)", seeding_sizes[0], re.IGNORECASE)
            seeding_size_match = re.search(r"总做种体积:\s+([\d,.\s]+[KMGTPI]*B)", seeding_sizes[0], re.IGNORECASE)
            tmp_seeding = StringUtils.str_int(seeding_match.group(1)) if (
                    seeding_match and seeding_match.group(1)) else 0
            tmp_seeding_size = StringUtils.num_filesize(
                seeding_size_match.group(1).strip()) if seeding_size_match else 0
        if not self.seeding_size:
            self.seeding_size = tmp_seeding_size
        if not self.seeding:
            self.seeding = tmp_seeding

        self._fixup_torrent_seeding_page(html)

    def _fixup_torrent_seeding_page(self, html):
        """
        Fix up the seeding-list page URL from links found on the detail page.
        :param html:
        :return:
        """
        # Dedicated seeding page
        seeding_url_text = html.xpath('//a[contains(@href,"getusertorrentlist.php") '
                                      'and contains(@href,"seeding")]/@href')
        if seeding_url_text:
            self._torrent_seeding_page = seeding_url_text[0].strip()
        # Extract the user id from a JS call when no direct link exists
        seeding_url_text = html.xpath('//a[contains(@href, "javascript: getusertorrentlistajax") '
                                      'and contains(@href,"seeding")]/@href')
        csrf_text = html.xpath('//meta[@name="x-csrf"]/@content')
        if not self._torrent_seeding_page and seeding_url_text:
            user_js = re.search(r"javascript: getusertorrentlistajax\(\s*'(\d+)", seeding_url_text[0])
            if user_js and user_js.group(1).strip():
                self.userid = user_js.group(1).strip()
                self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
        elif seeding_url_text and csrf_text:
            if csrf_text[0].strip():
                self._torrent_seeding_page \
                    = f"ajax_getusertorrentlist.php"
                self._torrent_seeding_params = {'userid': self.userid, 'type': 'seeding', 'csrf': csrf_text[0].strip()}

        # Per-category seeding mode
        # temporarily disabled
        # seeding_url_text = html.xpath('//tr/td[text()="当前做种"]/following-sibling::td[1]'
        #                               '/table//td/a[contains(@href,"seeding")]/@href')
        # if seeding_url_text:
        #     self._torrent_seeding_page = seeding_url_text

    def _get_user_level(self, html):
        # Level cell on the same row: prefer the image @title, else the text
        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级" or *[text()="等级"]]/'
                                      'following-sibling::td[1]/img[1]/@title')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
            return

        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and not(img)]'
                                      '|//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and img[not(@title)]]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        # PTT-style user level
        user_levels_text = html.xpath('//tr/td[text()="用户等级"]/following-sibling::td[1]/b/@title')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
            return

        # Last resort: level in square brackets of the userdetails link text
        user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
        if not self.user_level and user_levels_text:
            for user_level_text in user_levels_text:
                user_level_match = re.search(r"\[(.*)]", user_level_text)
                if user_level_match and user_level_match.group(1).strip():
                    self.user_level = user_level_match.group(1).strip()
                    break

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Collect unread-message links into msg_links; return next page URL if any."""
        html = etree.HTML(html_text)
        if not html:
            return None

        message_links = html.xpath('//tr[not(./td/img[@alt="Read"])]/td/a[contains(@href, "viewmessage")]/@href')
        msg_links.extend(message_links)
        # Does a next page exist?
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
        if next_page_text:
            next_page = next_page_text[-1].strip()

        return next_page

    def _parse_message_content(self, html_text):
        """Parse a single message page into (head, date, content)."""
        html = etree.HTML(html_text)
        if not html:
            return None, None, None
        # Title
        message_head_text = None
        message_head = html.xpath('//h1/text()'
                                  '|//div[@class="layui-card-header"]/span[1]/text()')
        if message_head:
            message_head_text = message_head[-1].strip()

        # Message date
        message_date_text = None
        message_date = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[2]/td[2]'
                                  '|//div[@class="layui-card-header"]/span[2]/span[2]')
        if message_date:
            message_date_text = message_date[0].xpath("string(.)").strip()

        # Message body
        message_content_text = None
        message_content = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[3]/td'
                                     '|//div[contains(@class,"layui-card-body")]')
        if message_content:
            message_content_text = message_content[0].xpath("string(.)").strip()

        return message_head_text, message_date_text, message_content_text

    def _fixup_traffic_info(self, html):
        # fixup bonus
        if not self.bonus:
            bonus_text = html.xpath('//tr/td[text()="魔力值" or text()="猫粮"]/following-sibling::td[1]/text()')
            if bonus_text:
                self.bonus = StringUtils.str_float(bonus_text[0].strip())
|
||||
@@ -1,24 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema
|
||||
from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
|
||||
|
||||
|
||||
class NexusProjectSiteUserInfo(NexusPhpSiteUserInfo):
    """NexusPhp variant for the Nexus Project site."""

    schema = SiteSchema.NexusProject
    order = SITE_BASE_ORDER + 25

    @classmethod
    def match(cls, html_text: str) -> bool:
        """The page body contains the literal 'Nexus Project'."""
        return html_text.find('Nexus Project') != -1

    def _parse_site_page(self, html_text: str):
        """Locate the user-detail link and derive the seeding-list URL from it."""
        html_text = self._prepare_html_text(html_text)

        matched = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if matched is not None and matched.group().strip():
            self._user_detail_page = matched.group().strip().lstrip('/')
            self.userid = matched.group(1)

        self._torrent_seeding_page = "viewusertorrents.php?id={}&show=seeding".format(self.userid)
|
||||
@@ -1,57 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.log import logger
|
||||
from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema
|
||||
from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
|
||||
|
||||
|
||||
class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
    """NexusPhp variant for sites 'Style by Rabbit', which serve the seeding list as JSON."""

    schema = SiteSchema.NexusRabbit
    order = SITE_BASE_ORDER + 5

    @classmethod
    def match(cls, html_text: str) -> bool:
        """Match on the 'Style by Rabbit' credit in the rendered page text."""
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Style by Rabbit' in printable_text

    def _parse_site_page(self, html_text: str):
        """Request the whole seeding list as JSON in one oversized page."""
        super()._parse_site_page(html_text)
        self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}"
        self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"}

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse seeding info from the JSON payload.
        :param html_text: JSON body returned by the seeding endpoint
        :param multi_page: whether data spans multiple pages (unused: one request fetches all)
        :return: next page URL — always None, everything is fetched at once
        """
        try:
            # Guard against a missing/null "data" key: .get('data') can return
            # None, which previously crashed len(torrents) below with TypeError.
            torrents = json.loads(html_text).get('data') or []
        except Exception as e:
            logger.error(f"解析做种信息失败: {str(e)}")
            return

        page_seeding_size = 0
        page_seeding_info = []

        page_seeding = len(torrents)
        for torrent in torrents:
            seeders = int(torrent.get('seeders', 0))
            size = int(torrent.get('size', 0))
            page_seeding_size += size

            page_seeding_info.append([seeders, size])

        # Accumulate into the shared counters
        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)
|
||||
@@ -1,110 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class SmallHorseSiteUserInfo(ISiteUserInfo):
    """Parser for Small Horse (Gazelle-style layout; stats live in <ul class="stats">)."""

    schema = SiteSchema.SmallHorse
    order = SITE_BASE_ORDER + 30

    @classmethod
    def match(cls, html_text: str) -> bool:
        """The page body contains the literal 'Small Horse'."""
        return 'Small Horse' in html_text

    def _parse_site_page(self, html_text: str):
        """Derive user id, detail page, traffic page and seeding-list URL."""
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"user.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)
            self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
            self._user_traffic_page = f"user.php?id={self.userid}"

    def _parse_user_base_info(self, html_text: str):
        # The first link to user.php carries the username
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        ret = html.xpath('//a[contains(@href, "user.php")]//text()')
        if ret:
            self.username = str(ret[0])

    def _parse_user_traffic_info(self, html_text: str):
        """
        Upload / download / ratio [seeding count / bonus].
        NOTE(review): the list indexes below are position-dependent on the
        stats panel layout — verify against a live page before changing.
        :param html_text:
        :return:
        """
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        tmps = html.xpath('//ul[@class = "stats nobullet"]')
        if tmps:
            if tmps[1].xpath("li") and tmps[1].xpath("li")[0].xpath("span//text()"):
                self.join_at = StringUtils.unify_datetime_str(tmps[1].xpath("li")[0].xpath("span//text()")[0])
            self.upload = StringUtils.num_filesize(str(tmps[1].xpath("li")[2].xpath("text()")[0]).split(":")[1].strip())
            self.download = StringUtils.num_filesize(
                str(tmps[1].xpath("li")[3].xpath("text()")[0]).split(":")[1].strip())
            if tmps[1].xpath("li")[4].xpath("span//text()"):
                # '∞' (infinite ratio) is mapped to 0
                self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[4].xpath("span//text()")[0]).replace('∞', '0'))
            else:
                self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
            self.bonus = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
            self.user_level = str(tmps[3].xpath("li")[0].xpath("text()")[0]).split(":")[1].strip()
            self.leeching = StringUtils.str_int(
                (tmps[4].xpath("li")[6].xpath("text()")[0]).split(":")[1].replace("[", ""))

    def _parse_user_detail_info(self, html_text: str):
        """Nothing extra to parse; all fields come from the traffic page."""
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse seeding info.
        :param html_text:
        :param multi_page: whether data spans multiple pages
        :return: next page URL, or None when done
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Fixed column positions in #torrent_table
        size_col = 6
        seeders_col = 8

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{seeders_col}]')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        # Accumulate across pages
        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Pagination: the <li> after the active one holds the next page number
        next_page = None
        next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
        if next_pages and len(next_pages) > 1:
            page_num = next_pages[0].xpath("string(.)").strip()
            if page_num.isdigit():
                next_page = f"{self._torrent_seeding_page}&page={page_num}"

        return next_page

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Message links are not supported for this schema."""
        return None

    def _parse_message_content(self, html_text):
        """Message-content parsing is not supported for this schema."""
        return None, None, None
|
||||
@@ -1,103 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class TNodeSiteUserInfo(ISiteUserInfo):
    """Site user statistics parser for sites powered by the TNode framework (JSON API based)."""

    schema = SiteSchema.TNode
    order = SITE_BASE_ORDER + 60

    @classmethod
    def match(cls, html_text: str) -> bool:
        """A page belongs to this schema when it carries the TNode banner."""
        return 'Powered By TNode' in html_text

    def _parse_site_page(self, html_text: str):
        """
        Extract the CSRF token from the landing page and configure the API endpoints.

        :param html_text: raw HTML of the site index page
        """
        html_text = self._prepare_html_text(html_text)

        # <meta name="x-csrf-token" content="fd169876a7b4846f3a7a16fcd5cccf8d">
        csrf_token = re.search(r'<meta name="x-csrf-token" content="(.+?)">', html_text)
        if csrf_token:
            # All subsequent API calls must carry the CSRF header
            self._addition_headers = {'X-CSRF-TOKEN': csrf_token.group(1)}
            self._user_detail_page = "api/user/getMainInfo"
            self._torrent_seeding_page = "api/user/listTorrentActivity?id=&type=seeding&page=1&size=20000"

    def _parse_logged_in(self, html_text):
        """
        Login-state detection via user info is skipped for now (to be refined later);
        always report success.

        :param html_text: page content (unused)
        :return: True
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        # The detail API fills in the real username; fall back to the user id here.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text: str):
        """Traffic data comes from the detail API; nothing to parse here."""
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse the JSON returned by api/user/getMainInfo: identity, level,
        registration time, traffic, bonus and unread-message counters.
        """
        detail = json.loads(html_text)
        if detail.get("status") != 200:
            return

        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        # "class" may be present but null in the payload - guard before .get()
        self.user_level = (user_info.get("class") or {}).get("name")
        self.join_at = user_info.get("regTime", 0)
        self.join_at = StringUtils.unify_datetime_str(str(self.join_at))

        # upload/download may be missing or null - default to 0 so the ratio
        # comparison below cannot raise a TypeError
        self.upload = user_info.get("upload") or 0
        self.download = user_info.get("download") or 0
        self.ratio = 0 if self.download <= 0 else round(self.upload / self.download, 3)
        self.bonus = user_info.get("bonus")

        self.message_unread = user_info.get("unreadAdmin", 0) + user_info.get("unreadInbox", 0) + user_info.get(
            "unreadSystem", 0)

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding-activity JSON and accumulate seeding count/size.

        :param html_text: JSON body of the seeding-activity API response
        :param multi_page: whether the data spans multiple pages (unused; one request fetches all)
        :return: next page address, always None here
        """
        seeding_info = json.loads(html_text)
        if seeding_info.get("status") != 200:
            return None

        torrents = seeding_info.get("data", {}).get("torrents", [])

        page_seeding_size = 0
        page_seeding_info = []
        for torrent in torrents:
            size = torrent.get("size", 0)
            seeders = torrent.get("seeding", 0)

            page_seeding_size += size
            page_seeding_info.append([seeders, size])

        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # No pagination: the request asks for size=20000 in one go
        next_page = None

        return next_page

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Unread-message links are not parsed for this schema."""
        return None

    def _parse_message_content(self, html_text):
        """
        Message endpoints (not implemented):
        system messages  api/message/listSystem?page=1&size=20
        inbox messages   api/message/listInbox?page=1&size=20
        admin messages   api/message/listAdmin?page=1&size=20

        :param html_text: API response body (unused)
        :return: (head, date, content) - all None
        """
        return None, None, None
||||
@@ -1,109 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class TorrentLeechSiteUserInfo(ISiteUserInfo):
    """Site user statistics parser for TorrentLeech."""

    schema = SiteSchema.TorrentLeech
    order = SITE_BASE_ORDER + 40

    @classmethod
    def match(cls, html_text: str) -> bool:
        """A page belongs to this schema when it mentions TorrentLeech."""
        return 'TorrentLeech' in html_text

    def _parse_site_page(self, html_text: str):
        """Derive the user id and the profile/seeding page URLs from the index page."""
        html_text = self._prepare_html_text(html_text)

        profile_match = re.search(r"/profile/([^/]+)/", html_text)
        if not profile_match or not profile_match.group().strip():
            return
        self._user_detail_page = profile_match.group().strip().lstrip('/')
        self.userid = profile_match.group(1)
        self._user_traffic_page = f"profile/{self.userid}/view"
        self._torrent_seeding_page = f"profile/{self.userid}/seeding"

    def _parse_user_base_info(self, html_text: str):
        # The profile URL only exposes the user id; reuse it as the display name.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse upload / download / ratio plus class, registration date and
        bonus points from the profile view page.

        :param html_text: raw HTML of the profile page
        """
        html = etree.HTML(self._prepare_html_text(html_text))

        uploaded = html.xpath('//div[contains(@class,"profile-uploaded")]//span/text()')
        if uploaded:
            self.upload = StringUtils.num_filesize(uploaded[0])

        downloaded = html.xpath('//div[contains(@class,"profile-downloaded")]//span/text()')
        if downloaded:
            self.download = StringUtils.num_filesize(downloaded[0])

        ratio_texts = html.xpath('//div[contains(@class,"profile-ratio")]//span/text()')
        if ratio_texts:
            # An infinite ratio is rendered as '∞'; map it to 0 before parsing
            self.ratio = StringUtils.str_float(ratio_texts[0].replace('∞', '0'))

        level_texts = html.xpath('//table[contains(@class, "profileViewTable")]'
                                 '//tr/td[text()="Class"]/following-sibling::td/text()')
        if level_texts:
            self.user_level = level_texts[0].strip()

        joined_texts = html.xpath('//table[contains(@class, "profileViewTable")]'
                                  '//tr/td[text()="Registration date"]/following-sibling::td/text()')
        if joined_texts:
            self.join_at = StringUtils.unify_datetime_str(joined_texts[0].strip())

        bonus_texts = html.xpath('//span[contains(@class, "total-TL-points")]/text()')
        if bonus_texts:
            self.bonus = StringUtils.str_float(bonus_texts[0].strip())

    def _parse_user_detail_info(self, html_text: str):
        """The traffic page already covers all details; nothing to do here."""
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding table and accumulate seeding count / total size.

        :param html_text: HTML of the seeding listing
        :param multi_page: whether the data spans multiple pages
        :return: next page address (everything is listed on one page, so None)
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        size_col = 2
        seeders_col = 7

        rows = []
        total_size = 0
        torrent_count = 0
        size_cells = html.xpath(f'//tbody/tr/td[{size_col}]')
        seeder_texts = html.xpath(f'//tbody/tr/td[{seeders_col}]/text()')
        if size_cells and seeder_texts:
            torrent_count = len(size_cells)
            for idx, size_cell in enumerate(size_cells):
                torrent_size = StringUtils.num_filesize(size_cell.xpath("string(.)").strip())
                torrent_seeders = StringUtils.str_int(seeder_texts[idx])
                total_size += torrent_size
                rows.append([torrent_seeders, torrent_size])

        self.seeding += torrent_count
        self.seeding_size += total_size
        self.seeding_info.extend(rows)

        # Single-page listing: nothing more to fetch
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Messages are not parsed for this schema."""
        return None

    def _parse_message_content(self, html_text):
        """Messages are not parsed for this schema."""
        return None, None, None
@@ -1,130 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class Unit3dSiteUserInfo(ISiteUserInfo):
    """Site user statistics parser for UNIT3D-framework sites."""

    schema = SiteSchema.Unit3d
    order = SITE_BASE_ORDER + 15

    @classmethod
    def match(cls, html_text: str) -> bool:
        """A page belongs to this schema when it loads unit3d.js."""
        return "unit3d.js" in html_text

    def _parse_user_base_info(self, html_text: str):
        """
        Parse the username, detail/seeding page URLs and bonus points
        from the site index page.
        """
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        # The settings link encodes the username: /users/<name>/settings
        tmps = html.xpath('//a[contains(@href, "/users/") and contains(@href, "settings")]/@href')
        if tmps:
            user_name_match = re.search(r"/users/(.+)/settings", tmps[0])
            if user_name_match and user_name_match.group().strip():
                self.username = user_name_match.group(1)
                self._torrent_seeding_page = f"/users/{self.username}/active?perPage=100&client=&seeding=include"
                self._user_detail_page = f"/users/{self.username}"

        # Bonus points are shown in the "bonus/earnings" link text
        tmps = html.xpath('//a[contains(@href, "bonus/earnings")]')
        if tmps:
            bonus_text = tmps[0].xpath("string(.)")
            bonus_match = re.search(r"([\d,.]+)", bonus_text)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))

    def _parse_site_page(self, html_text: str):
        # TODO: page URLs are currently derived in _parse_user_base_info instead
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse extra user information: user level and join time.

        :param html_text: raw HTML of the user detail page
        :return: None
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level badge
        user_levels_text = html.xpath('//div[contains(@class, "content")]//span[contains(@class, "badge-user")]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()

        # Registration date; the label is localized (注册日期 / 註冊日期 / Registration date)
        join_at_text = html.xpath('//div[contains(@class, "content")]//h4[contains(text(), "注册日期") '
                                  'or contains(text(), "註冊日期") '
                                  'or contains(text(), "Registration date")]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(
                join_at_text[0].replace('注册日期', '').replace('註冊日期', '').replace('Registration date', ''))

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse seeding information and accumulate totals.

        :param html_text: HTML of the active-torrents page
        :param multi_page: whether the data spans multiple pages
        :return: address of the next page, if any
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Fallback column positions, overridden by header inspection below
        size_col = 9
        seeders_col = 2
        # Locate the size column from the table header, if present
        if html.xpath('//thead//th[contains(@class,"size")]'):
            size_col = len(html.xpath('//thead//th[contains(@class,"size")][1]/preceding-sibling::th')) + 1
        # Locate the seeders column from the table header, if present
        if html.xpath('//thead//th[contains(@class,"seeders")]'):
            seeders_col = len(html.xpath('//thead//th[contains(@class,"seeders")]/preceding-sibling::th')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//tr[position()]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//tr[position()]/td[{seeders_col}]')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Check whether a following page exists in the pagination bar
        next_page = None
        next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
        if next_pages and len(next_pages) > 1:
            page_num = next_pages[0].xpath("string(.)").strip()
            if page_num.isdigit():
                next_page = f"{self._torrent_seeding_page}&page={page_num}"

        return next_page

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse upload / download totals and share ratio from free-form page text
        using localized-label regexes (Simplified/Traditional Chinese labels).
        """
        html_text = self._prepare_html_text(html_text)
        upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                 re.IGNORECASE)
        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
        download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                   re.IGNORECASE)
        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
        ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
                ratio_match and ratio_match.group(1).strip()) else 0.0

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """Messages are not parsed for this schema."""
        return None

    def _parse_message_content(self, html_text):
        """Messages are not parsed for this schema."""
        return None, None, None
@@ -1,113 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from app.log import logger
|
||||
from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class TYemaSiteUserInfo(ISiteUserInfo):
    """Site user statistics parser for YemaPT (JSON API based)."""

    schema = SiteSchema.Yema
    order = SITE_BASE_ORDER + 60

    @classmethod
    def match(cls, html_text: str) -> bool:
        """A page belongs to this schema when its title is YemaPT."""
        return '<title>YemaPT</title>' in html_text

    def _parse_site_page(self, html_text: str):
        """
        Configure the API endpoints used to fetch user data.
        """
        self._user_traffic_page = None
        self._user_detail_page = None
        self._user_basic_page = "api/consumer/fetchSelfDetail"
        self._user_basic_params = {}
        self._sys_mail_unread_page = None
        self._user_mail_unread_page = None
        self._mail_unread_params = {}
        self._torrent_seeding_page = "/api/userTorrent/fetchSeedTorrentInfo"
        self._torrent_seeding_params = {
            # The parameter itself is meaningless, but this API must be called with POST
            "status": "seeding"
        }
        self._torrent_seeding_headers = {}
        self._addition_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/plain, */*",
        }

    def _parse_logged_in(self, html_text):
        """
        Login-state detection via user info is skipped for now (to be refined later);
        always report success.

        :param html_text: page content (unused)
        :return: True
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user information; _parse_user_traffic_info and
        _parse_user_detail_info are merged into this step.

        :param html_text: JSON body of the fetchSelfDetail API response
        """
        if not html_text:
            return None
        detail = json.loads(html_text)
        if not detail or not detail.get("success"):
            return
        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("name")
        self.user_level = user_info.get("level")
        self.join_at = StringUtils.unify_datetime_str(user_info.get("registerTime"))

        # Traffic figures may be missing or null - default to 0 so the ratio
        # computation below cannot raise a TypeError
        self.upload = user_info.get('promotionUploadSize') or 0
        self.download = user_info.get('promotionDownloadSize') or 0
        self.ratio = round(self.upload / (self.download or 1), 2)
        self.bonus = user_info.get("bonus")
        self.message_unread = 0

    def _parse_user_traffic_info(self, html_text: str):
        """Traffic info is parsed in _parse_user_base_info; nothing to do."""
        pass

    def _parse_user_detail_info(self, html_text: str):
        """Detail info is parsed in _parse_user_base_info; nothing to do."""
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        """
        Parse the seeding summary JSON and accumulate seeding count/size.

        :param html_text: JSON body of the fetchSeedTorrentInfo API response
        :param multi_page: whether the data spans multiple pages (unused)
        :return: next page address, always None here
        """
        if not html_text:
            return None
        seeding_info = json.loads(html_text)
        if not seeding_info or not seeding_info.get("success") or not seeding_info.get("data"):
            return None

        torrents = seeding_info.get("data")

        # "num"/"fileSize" may be null - treat missing values as 0 so "+=" cannot fail
        self.seeding += torrents.get("num") or 0
        self.seeding_size += torrents.get("fileSize") or 0

        # No pagination for this endpoint
        next_page = None

        return next_page

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Unread-message links are not parsed; details would be read out directly here.
        """
        pass

    def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Parse message content (not implemented). Candidate endpoints:
        system messages  api/message/listSystem?page=1&size=20
        inbox messages   api/message/listInbox?page=1&size=20
        admin messages   api/message/listAdmin?page=1&size=20

        :param html_text: API response body (unused)
        :return: implicitly None (head/date/content not produced)
        """
        pass
||||
Reference in New Issue
Block a user