From 13023141bc6f406c4c8147b3bacbcdf9761301a3 Mon Sep 17 00:00:00 2001 From: madrays <87717138@qq.com> Date: Thu, 29 May 2025 00:46:11 +0800 Subject: [PATCH 1/7] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E7=BC=93=E5=AD=98?= =?UTF-8?q?=E7=AE=A1=E7=90=86=E9=A1=B5=E9=9D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/endpoints/system.py | 429 +++++++++++++++++++++++++++++++++++- 1 file changed, 427 insertions(+), 2 deletions(-) diff --git a/app/api/endpoints/system.py b/app/api/endpoints/system.py index 963abf3cd..bf11e86c0 100644 --- a/app/api/endpoints/system.py +++ b/app/api/endpoints/system.py @@ -5,13 +5,14 @@ from collections import deque from datetime import datetime from pathlib import Path -from typing import Optional, Union, Annotated +from typing import Optional, Union, Annotated, List import aiofiles import pillow_avif # noqa 用于自动注册AVIF支持 from PIL import Image from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response from fastapi.responses import StreamingResponse +from pydantic import BaseModel from app import schemas from app.chain.search import SearchChain @@ -32,7 +33,7 @@ from app.log import logger from app.monitor import Monitor from app.scheduler import Scheduler -from app.schemas.types import SystemConfigKey +from app.schemas.types import SystemConfigKey, MediaType from app.utils.crypto import HashUtils from app.utils.http import RequestUtils from app.utils.security import SecurityUtils @@ -518,3 +519,427 @@ def run_scheduler2(jobid: str, Scheduler().start(jobid) return schemas.Response(success=True) + + +@router.get("/sites/mapping", summary="获取站点域名到名称的映射", response_model=schemas.Response) +def get_sites_mapping(_: User = Depends(get_current_active_superuser)): + """ + 获取站点域名到名称的映射关系 + """ + try: + from app.db.site_oper import SiteOper + + site_oper = SiteOper() + sites = site_oper.list() + + mapping = {} + for site in sites: + mapping[site.domain] = site.name + + return schemas.Response(success=True, data=mapping) + except Exception as e: + logger.error(f"获取站点映射失败:{str(e)}") + return schemas.Response(success=False, message=f"获取映射失败:{str(e)}") + + +@router.get("/cache/torrents", summary="获取种子缓存", response_model=schemas.Response) +def get_torrents_cache(_: User = Depends(get_current_active_superuser)): + """ + 获取当前种子缓存数据 + """ + from app.chain.torrents import TorrentsChain + + torrents_chain = TorrentsChain() + + # 获取spider和rss两种缓存 + spider_cache = torrents_chain.get_torrents("spider") + rss_cache = torrents_chain.get_torrents("rss") + + # 统计信息 + spider_count = sum(len(torrents) for torrents in spider_cache.values()) + rss_count = sum(len(torrents) for torrents in rss_cache.values()) + + # 转换为前端需要的格式 + spider_data = [] + for domain, contexts in spider_cache.items(): + for context in contexts: + torrent_hash = HashUtils.md5(f"{context.torrent_info.title}{context.torrent_info.description}") + spider_data.append({ + "hash": torrent_hash, + "domain": domain, + "title": context.torrent_info.title, + "description": context.torrent_info.description, + "size": context.torrent_info.size, + "pubdate": context.torrent_info.pubdate, + "site_name": context.torrent_info.site_name, + "media_name": context.media_info.title if context.media_info else "", + "media_year": context.media_info.year if context.media_info else "", + "media_type": context.media_info.type if context.media_info else "", + "season_episode": context.meta_info.season_episode if context.meta_info else "", + "resource_term": 
context.meta_info.resource_term if context.meta_info else "", + "enclosure": context.torrent_info.enclosure, + "page_url": context.torrent_info.page_url, + "poster_path": context.media_info.get_poster_image() if context.media_info else "", + "backdrop_path": context.media_info.get_backdrop_image() if context.media_info else "" + }) + + rss_data = [] + for domain, contexts in rss_cache.items(): + for context in contexts: + torrent_hash = HashUtils.md5(f"{context.torrent_info.title}{context.torrent_info.description}") + rss_data.append({ + "hash": torrent_hash, + "domain": domain, + "title": context.torrent_info.title, + "description": context.torrent_info.description, + "size": context.torrent_info.size, + "pubdate": context.torrent_info.pubdate, + "site_name": context.torrent_info.site_name, + "media_name": context.media_info.title if context.media_info else "", + "media_year": context.media_info.year if context.media_info else "", + "media_type": context.media_info.type if context.media_info else "", + "season_episode": context.meta_info.season_episode if context.meta_info else "", + "resource_term": context.meta_info.resource_term if context.meta_info else "", + "enclosure": context.torrent_info.enclosure, + "page_url": context.torrent_info.page_url, + "poster_path": context.media_info.get_poster_image() if context.media_info else "", + "backdrop_path": context.media_info.get_backdrop_image() if context.media_info else "" + }) + + return schemas.Response(success=True, data={ + "spider": { + "count": spider_count, + "sites": len(spider_cache), + "data": spider_data + }, + "rss": { + "count": rss_count, + "sites": len(rss_cache), + "data": rss_data + } + }) + + +@router.post("/cache/torrents/refresh", summary="刷新种子缓存", response_model=schemas.Response) +def refresh_torrents_cache(cache_type: str = "auto", _: User = Depends(get_current_active_superuser)): + """ + 刷新种子缓存 + :param cache_type: 缓存类型 auto/spider/rss + """ + from app.chain.torrents import TorrentsChain + + torrents_chain = TorrentsChain() + + try: + if cache_type == "auto": + cache_type = None + + result = torrents_chain.refresh(stype=cache_type, sites=None) + + # 统计刷新结果 + total_count = sum(len(torrents) for torrents in result.values()) + sites_count = len(result) + + return schemas.Response(success=True, message=f"缓存刷新完成,共刷新 {sites_count} 个站点,{total_count} 个种子") + except Exception as e: + logger.error(f"刷新种子缓存失败:{str(e)}") + return schemas.Response(success=False, message=f"刷新失败:{str(e)}") + + +@router.delete("/cache/torrents", summary="清理种子缓存", response_model=schemas.Response) +def clear_torrents_cache(_: User = Depends(get_current_active_superuser)): + """ + 清理所有种子缓存 + """ + from app.chain.torrents import TorrentsChain + + torrents_chain = TorrentsChain() + + try: + torrents_chain.clear_torrents() + return schemas.Response(success=True, message="种子缓存清理完成") + except Exception as e: + logger.error(f"清理种子缓存失败:{str(e)}") + return schemas.Response(success=False, message=f"清理失败:{str(e)}") + + +@router.get("/cache/torrents/stats", summary="获取种子缓存统计", response_model=schemas.Response) +def get_torrents_cache_stats(_: User = Depends(get_current_active_superuser)): + """ + 获取种子缓存统计信息 + """ + from app.chain.torrents import TorrentsChain + + torrents_chain = TorrentsChain() + + # 获取缓存配置 + cache_limit = settings.CACHE_CONF.get("torrents", 100) + refresh_limit = settings.CACHE_CONF.get("refresh", 30) + + # 获取缓存数据 + spider_cache = torrents_chain.get_torrents("spider") + rss_cache = torrents_chain.get_torrents("rss") + + # 统计各站点缓存情况 + 
spider_stats = [] + for domain, contexts in spider_cache.items(): + spider_stats.append({ + "domain": domain, + "count": len(contexts), + "latest_date": max([ctx.torrent_info.pubdate for ctx in contexts if ctx.torrent_info.pubdate], default="") + }) + + rss_stats = [] + for domain, contexts in rss_cache.items(): + rss_stats.append({ + "domain": domain, + "count": len(contexts), + "latest_date": max([ctx.torrent_info.pubdate for ctx in contexts if ctx.torrent_info.pubdate], default="") + }) + + return schemas.Response(success=True, data={ + "config": { + "cache_limit": cache_limit, + "refresh_limit": refresh_limit, + "current_mode": settings.SUBSCRIBE_MODE + }, + "spider": { + "total_count": sum(len(torrents) for torrents in spider_cache.values()), + "sites_count": len(spider_cache), + "sites": spider_stats + }, + "rss": { + "total_count": sum(len(torrents) for torrents in rss_cache.values()), + "sites_count": len(rss_cache), + "sites": rss_stats + } + }) + + +@router.delete("/cache/torrents/{cache_type}/{domain}/{torrent_hash}", summary="删除指定种子缓存", response_model=schemas.Response) +def delete_torrent_cache(cache_type: str, domain: str, torrent_hash: str, + _: User = Depends(get_current_active_superuser)): + """ + 删除指定的种子缓存 + :param cache_type: 缓存类型 spider/rss + :param domain: 站点域名 + :param torrent_hash: 种子hash(使用title+description的md5) + """ + from app.chain.torrents import TorrentsChain + from app.utils.crypto import HashUtils + + torrents_chain = TorrentsChain() + + try: + # 获取当前缓存 + cache_data = torrents_chain.get_torrents(cache_type) + + if domain not in cache_data: + return schemas.Response(success=False, message=f"站点 {domain} 缓存不存在") + + # 查找并删除指定种子 + original_count = len(cache_data[domain]) + cache_data[domain] = [ + context for context in cache_data[domain] + if HashUtils.md5(f"{context.torrent_info.title}{context.torrent_info.description}") != torrent_hash + ] + + if len(cache_data[domain]) == original_count: + return schemas.Response(success=False, message="未找到指定的种子") + + # 保存更新后的缓存 + if cache_type == "spider": + torrents_chain.save_cache(cache_data, torrents_chain._spider_file) + else: + torrents_chain.save_cache(cache_data, torrents_chain._rss_file) + + return schemas.Response(success=True, message="种子删除成功") + except Exception as e: + logger.error(f"删除种子缓存失败:{str(e)}") + return schemas.Response(success=False, message=f"删除失败:{str(e)}") + + +@router.post("/cache/torrents/{cache_type}/{domain}/{torrent_hash}/reidentify", summary="重新识别种子", response_model=schemas.Response) +def reidentify_torrent_cache(cache_type: str, domain: str, torrent_hash: str, + tmdbid: Optional[int] = None, doubanid: Optional[str] = None, + _: User = Depends(get_current_active_superuser)): + """ + 重新识别指定的种子 + :param cache_type: 缓存类型 spider/rss + :param domain: 站点域名 + :param torrent_hash: 种子hash(使用title+description的md5) + :param tmdbid: 手动指定的TMDB ID + :param doubanid: 手动指定的豆瓣ID + """ + from app.chain.torrents import TorrentsChain + from app.chain.media import MediaChain + from app.core.metainfo import MetaInfo + from app.core.context import MediaInfo + from app.utils.crypto import HashUtils + from app.schemas.types import MediaType + + torrents_chain = TorrentsChain() + media_chain = MediaChain() + + try: + # 获取当前缓存 + cache_data = torrents_chain.get_torrents(cache_type) + + if domain not in cache_data: + return schemas.Response(success=False, message=f"站点 {domain} 缓存不存在") + + # 查找指定种子 + target_context = None + for context in cache_data[domain]: + if 
HashUtils.md5(f"{context.torrent_info.title}{context.torrent_info.description}") == torrent_hash: + target_context = context + break + + if not target_context: + return schemas.Response(success=False, message="未找到指定的种子") + + # 重新识别 + if tmdbid or doubanid: + # 手动指定媒体信息 + if tmdbid: + # 先尝试电影类型 + tmdbinfo = media_chain.tmdb_info(tmdbid=tmdbid, mtype=MediaType.MOVIE) + if not tmdbinfo: + # 再尝试电视剧类型 + tmdbinfo = media_chain.tmdb_info(tmdbid=tmdbid, mtype=MediaType.TV) + + if tmdbinfo: + mediainfo = MediaInfo() + mediainfo.set_tmdb_info(tmdbinfo) + else: + mediainfo = None + else: + # 先尝试电影类型 + doubaninfo = media_chain.douban_info(doubanid=doubanid, mtype=MediaType.MOVIE) + if not doubaninfo: + # 再尝试电视剧类型 + doubaninfo = media_chain.douban_info(doubanid=doubanid, mtype=MediaType.TV) + + if doubaninfo: + mediainfo = MediaInfo() + mediainfo.set_douban_info(doubaninfo) + else: + mediainfo = None + else: + # 自动重新识别 + meta = MetaInfo(title=target_context.torrent_info.title, + subtitle=target_context.torrent_info.description) + mediainfo = media_chain.recognize_by_meta(meta) + + if not mediainfo: + # 创建空的媒体信息 + mediainfo = MediaInfo() + else: + # 清理多余数据 + mediainfo.clear() + + # 更新上下文中的媒体信息 + target_context.media_info = mediainfo + + # 保存更新后的缓存 + if cache_type == "spider": + torrents_chain.save_cache(cache_data, torrents_chain._spider_file) + else: + torrents_chain.save_cache(cache_data, torrents_chain._rss_file) + + return schemas.Response(success=True, message="重新识别完成", data={ + "media_name": mediainfo.title if mediainfo else "", + "media_year": mediainfo.year if mediainfo else "", + "media_type": mediainfo.type.value if mediainfo and mediainfo.type else "" + }) + except Exception as e: + logger.error(f"重新识别种子失败:{str(e)}") + return schemas.Response(success=False, message=f"重新识别失败:{str(e)}") + + +@router.get("/cache/images/stats", summary="获取图片缓存统计", response_model=schemas.Response) +def get_images_cache_stats(_: User = Depends(get_current_active_superuser)): + """ + 获取图片缓存统计信息 + """ + import os + from pathlib import Path + + try: + images_cache_path = settings.CACHE_PATH / "images" + + if not images_cache_path.exists(): + return schemas.Response(success=True, data={ + "total_files": 0, + "total_size": 0, + "cache_enabled": settings.GLOBAL_IMAGE_CACHE + }) + + total_files = 0 + total_size = 0 + + # 递归统计所有图片文件 + for root, dirs, files in os.walk(images_cache_path): + for file in files: + file_path = Path(root) / file + if file_path.suffix.lower() in settings.SECURITY_IMAGE_SUFFIXES: + total_files += 1 + try: + total_size += file_path.stat().st_size + except (OSError, IOError): + continue + + return schemas.Response(success=True, data={ + "total_files": total_files, + "total_size": total_size, + "cache_enabled": settings.GLOBAL_IMAGE_CACHE, + "cache_path": str(images_cache_path) + }) + except Exception as e: + logger.error(f"获取图片缓存统计失败:{str(e)}") + return schemas.Response(success=False, message=f"获取统计失败:{str(e)}") + + +@router.delete("/cache/images", summary="清理图片缓存", response_model=schemas.Response) +def clear_images_cache(_: User = Depends(get_current_active_superuser)): + """ + 清理所有图片缓存 + """ + try: + from app.utils.system import SystemUtils + + images_cache_path = settings.CACHE_PATH / "images" + + if not images_cache_path.exists(): + return schemas.Response(success=True, message="图片缓存目录不存在") + + # 清理图片缓存目录 + cleared_count = SystemUtils.clear(images_cache_path, days=0) + + return schemas.Response(success=True, message=f"图片缓存清理完成,清理了 {cleared_count} 个文件") + except Exception as e: + 
logger.error(f"清理图片缓存失败:{str(e)}") + return schemas.Response(success=False, message=f"清理失败:{str(e)}") + + +@router.post("/cache/images/clean", summary="清理过期图片缓存", response_model=schemas.Response) +def clean_expired_images_cache(days: int = 7, _: User = Depends(get_current_active_superuser)): + """ + 清理过期的图片缓存 + :param days: 保留天数,默认7天 + """ + try: + from app.utils.system import SystemUtils + + images_cache_path = settings.CACHE_PATH / "images" + + if not images_cache_path.exists(): + return schemas.Response(success=True, message="图片缓存目录不存在") + + # 清理过期图片缓存 + cleared_count = SystemUtils.clear(images_cache_path, days=days) + + return schemas.Response(success=True, message=f"过期图片缓存清理完成,清理了 {cleared_count} 个文件") + except Exception as e: + logger.error(f"清理过期图片缓存失败:{str(e)}") + return schemas.Response(success=False, message=f"清理失败:{str(e)}") From 8761c82afeb0393ae1a5f56efbc410da5112a4a0 Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 07:14:42 +0800 Subject: [PATCH 2/7] =?UTF-8?q?fix=20TVDB=E4=BB=A3=E7=90=86=E4=B8=8ESSL?= =?UTF-8?q?=E6=A0=A1=E9=AA=8C=20#4356?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/modules/thetvdb/__init__.py | 6 +++++- app/modules/thetvdb/tvdb_v4_official.py | 27 ++++++++++++++++++------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/app/modules/thetvdb/__init__.py b/app/modules/thetvdb/__init__.py index 4ad07a165..04a6a3ffa 100644 --- a/app/modules/thetvdb/__init__.py +++ b/app/modules/thetvdb/__init__.py @@ -18,7 +18,9 @@ def _initialize_tvdb_session(self) -> None: 创建或刷新 TVDB 登录会话 """ try: - self.tvdb = tvdb_v4_official.TVDB(apikey=settings.TVDB_V4_API_KEY, pin=settings.TVDB_V4_API_PIN) + self.tvdb = tvdb_v4_official.TVDB(apikey=settings.TVDB_V4_API_KEY, + pin=settings.TVDB_V4_API_PIN, + proxy=settings.PROXY) except Exception as e: logger.error(f"TVDB 登录失败: {str(e)}") @@ -75,6 +77,8 @@ def test(self) -> Tuple[bool, str]: """ 测试模块连接性 """ + if not self.tvdb: + return False, "TheTVDB 连接失败" try: self._handle_tvdb_call(self.tvdb.get_series, 81189) return True, "" diff --git a/app/modules/thetvdb/tvdb_v4_official.py b/app/modules/thetvdb/tvdb_v4_official.py index be5ab4d86..7dccdfabd 100644 --- a/app/modules/thetvdb/tvdb_v4_official.py +++ b/app/modules/thetvdb/tvdb_v4_official.py @@ -4,6 +4,7 @@ __version__ = "1.0.12" import json +import ssl import string import urllib import urllib.request @@ -12,16 +13,21 @@ class Auth: - def __init__(self, url, apikey, pin=""): + def __init__(self, url, apikey, pin="", proxy=None): loginInfo = {"apikey": apikey} if pin != "": loginInfo["pin"] = pin loginInfoBytes = json.dumps(loginInfo, indent=2).encode("utf-8") + if proxy: + proxy_handler = urllib.request.ProxyHandler(proxy) + opener = urllib.request.build_opener(proxy_handler) + urllib.request.install_opener(opener) req = urllib.request.Request(url, data=loginInfoBytes) req.add_header("Content-Type", "application/json") try: - with urllib.request.urlopen(req, data=loginInfoBytes) as response: + context = ssl._create_unverified_context() + with urllib.request.urlopen(req, context=context, data=loginInfoBytes) as response: res = json.load(response) self.token = res["data"]["token"] except HTTPError as e: @@ -33,17 +39,24 @@ def get_token(self): class Request: - def __init__(self, auth_token): + def __init__(self, auth_token, proxy=None): self.auth_token = auth_token self.links = None + self.proxy = proxy def make_request(self, url, if_modified_since=None): + """Makes a request to the given URL and returns 
the data""" + if self.proxy: + proxy_handler = urllib.request.ProxyHandler(self.proxy) + opener = urllib.request.build_opener(proxy_handler) + urllib.request.install_opener(opener) req = urllib.request.Request(url) req.add_header("Authorization", "Bearer {}".format(self.auth_token)) if if_modified_since: req.add_header("If-Modified-Since", "{}".format(if_modified_since)) try: - with urllib.request.urlopen(req) as response: + context = ssl._create_unverified_context() + with urllib.request.urlopen(req, context=context) as response: res = json.load(response) except HTTPError as e: try: @@ -87,12 +100,12 @@ def construct( class TVDB: - def __init__(self, apikey: str, pin=""): + def __init__(self, apikey: str, pin="", proxy=None): self.url = Url() login_url = self.url.construct("login") - self.auth = Auth(login_url, apikey, pin) + self.auth = Auth(login_url, apikey, pin, proxy) auth_token = self.auth.get_token() - self.request = Request(auth_token) + self.request = Request(auth_token, proxy) def get_req_links(self) -> dict: return self.request.links From d5f7f1ba9171b2ef1932ed1af3c1ba01af642a20 Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 08:03:12 +0800 Subject: [PATCH 3/7] fix tvdb api --- app/modules/thetvdb/tvdb_v4_official.py | 116 ++++++++++++++---------- 1 file changed, 67 insertions(+), 49 deletions(-) diff --git a/app/modules/thetvdb/tvdb_v4_official.py b/app/modules/thetvdb/tvdb_v4_official.py index 7dccdfabd..dd1ba5872 100644 --- a/app/modules/thetvdb/tvdb_v4_official.py +++ b/app/modules/thetvdb/tvdb_v4_official.py @@ -4,12 +4,10 @@ __version__ = "1.0.12" import json -import ssl -import string -import urllib -import urllib.request +import urllib.parse from http import HTTPStatus -from urllib.error import HTTPError + +from app.utils.http import RequestUtils class Auth: @@ -18,21 +16,31 @@ def __init__(self, url, apikey, pin="", proxy=None): if pin != "": loginInfo["pin"] = pin - loginInfoBytes = json.dumps(loginInfo, indent=2).encode("utf-8") - if proxy: - proxy_handler = urllib.request.ProxyHandler(proxy) - opener = urllib.request.build_opener(proxy_handler) - urllib.request.install_opener(opener) - req = urllib.request.Request(url, data=loginInfoBytes) - req.add_header("Content-Type", "application/json") + loginInfoBytes = json.dumps(loginInfo, indent=2) + try: - context = ssl._create_unverified_context() - with urllib.request.urlopen(req, context=context, data=loginInfoBytes) as response: - res = json.load(response) + # 使用项目统一的RequestUtils类 + req_utils = RequestUtils(proxies=proxy, timeout=30) + response = req_utils.post_res( + url=url, + data=loginInfoBytes, + headers={"Content-Type": "application/json"} + ) + + if response and response.status_code == 200: + res = response.json() self.token = res["data"]["token"] - except HTTPError as e: - res = json.load(e) - raise Exception("Code:{}, {}".format(e, res["message"])) + else: + error_msg = f"登录失败,状态码: {response.status_code if response else 'None'}" + if response: + try: + error_data = response.json() + error_msg = f"Code: {response.status_code}, {error_data.get('message', '未知错误')}" + except: + error_msg = f"Code: {response.status_code}, 响应解析失败" + raise Exception(error_msg) + except Exception as e: + raise Exception(f"TVDB认证失败: {str(e)}") def get_token(self): return self.token @@ -46,36 +54,46 @@ def __init__(self, auth_token, proxy=None): def make_request(self, url, if_modified_since=None): """Makes a request to the given URL and returns the data""" - if self.proxy: - proxy_handler = 
urllib.request.ProxyHandler(self.proxy) - opener = urllib.request.build_opener(proxy_handler) - urllib.request.install_opener(opener) - req = urllib.request.Request(url) - req.add_header("Authorization", "Bearer {}".format(self.auth_token)) + headers = {"Authorization": f"Bearer {self.auth_token}"} if if_modified_since: - req.add_header("If-Modified-Since", "{}".format(if_modified_since)) + headers["If-Modified-Since"] = str(if_modified_since) + try: - context = ssl._create_unverified_context() - with urllib.request.urlopen(req, context=context) as response: - res = json.load(response) - except HTTPError as e: - try: - if e.code == HTTPStatus.NOT_MODIFIED: - return { - "code": HTTPStatus.NOT_MODIFIED.real, - "message": "Not-Modified", - } - res = json.load(e) - except: - res = {} - data = res.get("data", None) - if data is not None and res.get("status", "failure") != "failure": - self.links = res.get("links", None) - return data - msg = res.get("message", None) - if not msg: - msg = "UNKNOWN FAILURE" - raise ValueError("failed to get " + url + "\n " + str(msg)) + # 使用项目统一的RequestUtils类 + req_utils = RequestUtils(proxies=self.proxy, timeout=30) + response = req_utils.get_res(url=url, headers=headers) + + if response is None: + raise ValueError(f"failed to get {url}\n 网络连接失败") + + if response.status_code == HTTPStatus.NOT_MODIFIED: + return { + "code": HTTPStatus.NOT_MODIFIED.real, + "message": "Not-Modified", + } + + if response.status_code == 200: + res = response.json() + data = res.get("data", None) + if data is not None and res.get("status", "failure") != "failure": + self.links = res.get("links", None) + return data + + msg = res.get("message", "UNKNOWN FAILURE") + raise ValueError(f"failed to get {url}\n {str(msg)}") + else: + # 处理其他HTTP错误状态码 + try: + error_data = response.json() + msg = error_data.get("message", f"HTTP {response.status_code}") + except: + msg = f"HTTP {response.status_code}" + raise ValueError(f"failed to get {url}\n {str(msg)}") + + except Exception as e: + if isinstance(e, ValueError): + raise + raise ValueError(f"failed to get {url}\n {str(e)}") class Url: @@ -198,7 +216,7 @@ def get_series(self, id: int, meta=None, if_modified_since=None) -> dict: return self.request.make_request(url, if_modified_since) def get_series_by_slug( - self, slug: string, meta=None, if_modified_since=None + self, slug: str, meta=None, if_modified_since=None ) -> dict: """Returns a series dictionary""" url = self.url.construct("series/slug", slug, meta=meta) @@ -257,7 +275,7 @@ def get_movie(self, id: int, meta=None, if_modified_since=None) -> dict: return self.request.make_request(url, if_modified_since) def get_movie_by_slug( - self, slug: string, meta=None, if_modified_since=None + self, slug: str, meta=None, if_modified_since=None ) -> dict: """Returns a movie dictionary""" url = self.url.construct("movies/slug", slug, meta=meta) @@ -415,7 +433,7 @@ def get_list(self, id: int, meta=None, if_modified_since=None) -> dict: url = self.url.construct("lists", id, meta=meta) return self.request.make_request(url), if_modified_since - def get_list_by_slug(self, slug: string, meta=None, if_modified_since=None) -> dict: + def get_list_by_slug(self, slug: str, meta=None, if_modified_since=None) -> dict: """Returns a movie dictionary""" url = self.url.construct("lists/slug", slug, meta=meta) return self.request.make_request(url, if_modified_since) From da0343283a6eeb3a673b8e0454b4bb90751e4ce8 Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 08:16:54 +0800 Subject: [PATCH 4/7] 
=?UTF-8?q?=E6=94=AF=E6=8C=81=E5=9C=A8=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=E6=96=87=E4=BB=B6=E5=A4=B9=E4=B8=AD=E7=AE=A1=E7=90=86?= =?UTF-8?q?=E5=88=86=E8=BA=AB=E6=8F=92=E4=BB=B6=E7=9A=84=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E4=B8=8E=E7=A7=BB=E9=99=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/api/endpoints/plugin.py | 94 +++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/app/api/endpoints/plugin.py b/app/api/endpoints/plugin.py index 3c0135a36..c5b397d9e 100644 --- a/app/api/endpoints/plugin.py +++ b/app/api/endpoints/plugin.py @@ -500,6 +500,8 @@ def uninstall_plugin(plugin_id: str, plugin_manager.plugins.pop(plugin_id, None) except Exception as e: logger.error(f"删除插件分身目录 {plugin_base_dir} 失败: {str(e)}") + # 从插件文件夹中移除该插件 + _remove_plugin_from_folders(plugin_id) # 移除插件 plugin_manager.remove_plugin(plugin_id) return schemas.Response(success=True) @@ -523,9 +525,101 @@ def clone_plugin(plugin_id: str, ) if success: + # 插件分身创建成功后,处理插件文件夹:如果原插件在某个文件夹中,则将分身插件也添加到同一个文件夹中 + clone_id = f"{plugin_id}{clone_data.get('suffix', '').lower()}" + _add_clone_to_plugin_folder(plugin_id, clone_id) + return schemas.Response(success=True, message="插件分身创建成功") else: return schemas.Response(success=False, message=message) except Exception as e: logger.error(f"创建插件分身失败:{str(e)}") return schemas.Response(success=False, message=f"创建插件分身失败:{str(e)}") + + +def _add_clone_to_plugin_folder(original_plugin_id: str, clone_plugin_id: str): + """ + 将分身插件添加到原插件所在的文件夹中 + :param original_plugin_id: 原插件ID + :param clone_plugin_id: 分身插件ID + """ + try: + config_oper = SystemConfigOper() + # 获取插件文件夹配置 + folders = config_oper.get(SystemConfigKey.PluginFolders) or {} + + # 查找原插件所在的文件夹 + target_folder = None + for folder_name, folder_data in folders.items(): + if isinstance(folder_data, dict) and 'plugins' in folder_data: + # 新格式:{"plugins": [...], "order": ..., "icon": ...} + if original_plugin_id in folder_data['plugins']: + target_folder = folder_name + break + elif isinstance(folder_data, list): + # 旧格式:直接是插件列表 + if original_plugin_id in folder_data: + target_folder = folder_name + break + + # 如果找到了原插件所在的文件夹,则将分身插件也添加到该文件夹中 + if target_folder: + folder_data = folders[target_folder] + if isinstance(folder_data, dict) and 'plugins' in folder_data: + # 新格式 + if clone_plugin_id not in folder_data['plugins']: + folder_data['plugins'].append(clone_plugin_id) + logger.info(f"已将分身插件 {clone_plugin_id} 添加到文件夹 '{target_folder}' 中") + elif isinstance(folder_data, list): + # 旧格式 + if clone_plugin_id not in folder_data: + folder_data.append(clone_plugin_id) + logger.info(f"已将分身插件 {clone_plugin_id} 添加到文件夹 '{target_folder}' 中") + + # 保存更新后的文件夹配置 + config_oper.set(SystemConfigKey.PluginFolders, folders) + else: + logger.info(f"原插件 {original_plugin_id} 不在任何文件夹中,分身插件 {clone_plugin_id} 将保持独立") + + except Exception as e: + logger.error(f"处理插件文件夹时出错:{str(e)}") + # 文件夹处理失败不影响插件分身创建的整体流程 + + +def _remove_plugin_from_folders(plugin_id: str): + """ + 从所有文件夹中移除指定的插件 + :param plugin_id: 要移除的插件ID + """ + try: + config_oper = SystemConfigOper() + # 获取插件文件夹配置 + folders = config_oper.get(SystemConfigKey.PluginFolders) or {} + + # 标记是否有修改 + modified = False + + # 遍历所有文件夹,移除指定插件 + for folder_name, folder_data in folders.items(): + if isinstance(folder_data, dict) and 'plugins' in folder_data: + # 新格式:{"plugins": [...], "order": ..., "icon": ...} + if plugin_id in folder_data['plugins']: + folder_data['plugins'].remove(plugin_id) + logger.info(f"已从文件夹 '{folder_name}' 中移除插件 {plugin_id}") 
+ modified = True + elif isinstance(folder_data, list): + # 旧格式:直接是插件列表 + if plugin_id in folder_data: + folder_data.remove(plugin_id) + logger.info(f"已从文件夹 '{folder_name}' 中移除插件 {plugin_id}") + modified = True + + # 如果有修改,保存更新后的文件夹配置 + if modified: + config_oper.set(SystemConfigKey.PluginFolders, folders) + else: + logger.debug(f"插件 {plugin_id} 不在任何文件夹中,无需移除") + + except Exception as e: + logger.error(f"从文件夹中移除插件时出错:{str(e)}") + # 文件夹处理失败不影响插件卸载的整体流程 From 7ce57cc67a81bc993e3e6949cf63411c4e65b9c4 Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 08:22:45 +0800 Subject: [PATCH 5/7] fix --- app/api/endpoints/plugin.py | 5 +---- app/core/plugin.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/app/api/endpoints/plugin.py b/app/api/endpoints/plugin.py index c5b397d9e..56656f9cd 100644 --- a/app/api/endpoints/plugin.py +++ b/app/api/endpoints/plugin.py @@ -525,10 +525,7 @@ def clone_plugin(plugin_id: str, ) if success: - # 插件分身创建成功后,处理插件文件夹:如果原插件在某个文件夹中,则将分身插件也添加到同一个文件夹中 - clone_id = f"{plugin_id}{clone_data.get('suffix', '').lower()}" - _add_clone_to_plugin_folder(plugin_id, clone_id) - + _add_clone_to_plugin_folder(plugin_id, message) return schemas.Response(success=True, message="插件分身创建成功") else: return schemas.Response(success=False, message=message) diff --git a/app/core/plugin.py b/app/core/plugin.py index b5fe41b06..5d44796d1 100644 --- a/app/core/plugin.py +++ b/app/core/plugin.py @@ -1118,7 +1118,7 @@ def clone_plugin(self, plugin_id: str, suffix: str, name: str, description: str, logger.info(f"分身插件 {clone_id} 配置重新初始化完成") logger.info(f"插件分身 {clone_id} 创建成功") - return True, "插件分身创建成功" + return True, clone_id except Exception as e: logger.error(f"创建插件分身失败:{str(e)}") From 076e241056263a76d3178f56d2a1c6bcc63ef0ad Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 08:30:14 +0800 Subject: [PATCH 6/7] fix tvdb --- app/modules/thetvdb/tvdb_v4_official.py | 161 +++++++++++------------- 1 file changed, 70 insertions(+), 91 deletions(-) diff --git a/app/modules/thetvdb/tvdb_v4_official.py b/app/modules/thetvdb/tvdb_v4_official.py index dd1ba5872..1d6625cde 100644 --- a/app/modules/thetvdb/tvdb_v4_official.py +++ b/app/modules/thetvdb/tvdb_v4_official.py @@ -11,42 +11,47 @@ class Auth: + """TVDB认证类""" + def __init__(self, url, apikey, pin="", proxy=None): - loginInfo = {"apikey": apikey} + login_info = {"apikey": apikey} if pin != "": - loginInfo["pin"] = pin + login_info["pin"] = pin - loginInfoBytes = json.dumps(loginInfo, indent=2) + login_info_bytes = json.dumps(login_info, indent=2) try: # 使用项目统一的RequestUtils类 req_utils = RequestUtils(proxies=proxy, timeout=30) response = req_utils.post_res( url=url, - data=loginInfoBytes, + data=login_info_bytes, headers={"Content-Type": "application/json"} ) if response and response.status_code == 200: - res = response.json() - self.token = res["data"]["token"] + result = response.json() + self.token = result["data"]["token"] else: error_msg = f"登录失败,状态码: {response.status_code if response else 'None'}" if response: try: error_data = response.json() error_msg = f"Code: {response.status_code}, {error_data.get('message', '未知错误')}" - except: - error_msg = f"Code: {response.status_code}, 响应解析失败" + except Exception as err: + error_msg = f"Code: {response.status_code}, 响应解析失败:{err}" raise Exception(error_msg) except Exception as e: raise Exception(f"TVDB认证失败: {str(e)}") def get_token(self): + """获取认证token""" return self.token class Request: + """请求处理类""" + def __init__(self, auth_token, proxy=None): self.auth_token = 
auth_token self.links = None @@ -73,21 +78,21 @@ def make_request(self, url, if_modified_since=None): } if response.status_code == 200: - res = response.json() - data = res.get("data", None) - if data is not None and res.get("status", "failure") != "failure": - self.links = res.get("links", None) + result = response.json() + data = result.get("data", None) + if data is not None and result.get("status", "failure") != "failure": + self.links = result.get("links", None) return data - msg = res.get("message", "UNKNOWN FAILURE") + msg = result.get("message", "UNKNOWN FAILURE") raise ValueError(f"failed to get {url}\n {str(msg)}") else: # 处理其他HTTP错误状态码 try: error_data = response.json() msg = error_data.get("message", f"HTTP {response.status_code}") - except: - msg = f"HTTP {response.status_code}" + except Exception as err: + msg = f"HTTP {response.status_code} {err}" raise ValueError(f"failed to get {url}\n {str(msg)}") except Exception as e: @@ -97,12 +102,13 @@ def make_request(self, url, if_modified_since=None): class Url: + """URL构建类""" + def __init__(self): self.base_url = "https://api4.thetvdb.com/v4/" - def construct( - self, url_sect, url_id=None, url_subsect=None, url_lang=None, **query - ): + def construct(self, url_sect, url_id=None, url_subsect=None, url_lang=None, **kwargs): + """构建API URL""" url = self.base_url + url_sect if url_id: url += "/" + str(url_id) @@ -110,14 +116,16 @@ def construct( url += "/" + url_subsect if url_lang: url += "/" + url_lang - if query: - query = {var: val for var, val in query.items() if val is not None} - if query: - url += "?" + urllib.parse.urlencode(query) + if kwargs: + params = {var: val for var, val in kwargs.items() if val is not None} + if params: + url += "?" + urllib.parse.urlencode(params) return url class TVDB: + """TVDB API主类""" + def __init__(self, apikey: str, pin="", proxy=None): self.url = Url() login_url = self.url.construct("login") @@ -126,6 +134,7 @@ def __init__(self, apikey: str, pin="", proxy=None): self.request = Request(auth_token, proxy) def get_req_links(self) -> dict: + """获取请求链接""" return self.request.links def get_artwork_statuses(self, meta=None, if_modified_since=None) -> list: @@ -173,9 +182,7 @@ def get_award_category(self, id: int, meta=None, if_modified_since=None) -> dict url = self.url.construct("awards/categories", id, meta=meta) return self.request.make_request(url, if_modified_since) - def get_award_category_extended( - self, id: int, meta=None, if_modified_since=None - ) -> dict: + def get_award_category_extended(self, id: int, meta=None, if_modified_since=None) -> dict: """Returns an award category extended dictionary""" url = self.url.construct("awards/categories", id, "extended", meta=meta) return self.request.make_request(url, if_modified_since) @@ -215,51 +222,35 @@ def get_series(self, id: int, meta=None, if_modified_since=None) -> dict: url = self.url.construct("series", id, meta=meta) return self.request.make_request(url, if_modified_since) - def get_series_by_slug( - self, slug: str, meta=None, if_modified_since=None - ) -> dict: + def get_series_by_slug(self, slug: str, meta=None, if_modified_since=None) -> dict: """Returns a series dictionary""" url = self.url.construct("series/slug", slug, meta=meta) return self.request.make_request(url, if_modified_since) - def get_series_extended( - self, id: int, meta=None, short=False, if_modified_since=None - ) -> dict: + def get_series_extended(self, id: int, meta=None, short=False, if_modified_since=None) -> dict: """Returns a series extended dictionary""" 
url = self.url.construct("series", id, "extended", meta=meta, short=short) return self.request.make_request(url, if_modified_since) - def get_series_episodes( - self, - id: int, - season_type: str = "default", - page: int = 0, - lang: str = None, - meta=None, - if_modified_since=None, - **kwargs - ) -> dict: + def get_series_episodes(self, id: int, season_type: str = "default", page: int = 0, + lang: str = None, meta=None, if_modified_since=None, **kwargs) -> dict: """Returns a series episodes dictionary""" url = self.url.construct( "series", id, "episodes/" + season_type, lang, page=page, meta=meta, **kwargs ) return self.request.make_request(url, if_modified_since) - def get_series_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: + def get_series_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: """Returns a series translation dictionary""" url = self.url.construct("series", id, "translations", lang, meta=meta) return self.request.make_request(url, if_modified_since) - def get_series_artworks( - self, id: int, lang: str, type=None, if_modified_since=None - ) -> dict: + def get_series_artworks(self, id: int, lang: str, type=None, if_modified_since=None) -> dict: """Returns a series record with an artwork array""" url = self.url.construct("series", id, "artworks", lang=lang, type=type) return self.request.make_request(url, if_modified_since) - def get_series_nextAired(self, id: int, if_modified_since=None) -> dict: + def get_series_next_aired(self, id: int, if_modified_since=None) -> dict: """Returns a series extended dictionary""" url = self.url.construct("series", id, "nextAired") return self.request.make_request(url, if_modified_since) @@ -274,23 +265,17 @@ def get_movie(self, id: int, meta=None, if_modified_since=None) -> dict: url = self.url.construct("movies", id, meta=meta) return self.request.make_request(url, if_modified_since) - def get_movie_by_slug( - self, slug: str, meta=None, if_modified_since=None - ) -> dict: + def get_movie_by_slug(self, slug: str, meta=None, if_modified_since=None) -> dict: """Returns a movie dictionary""" url = self.url.construct("movies/slug", slug, meta=meta) return self.request.make_request(url, if_modified_since) - def get_movie_extended( - self, id: int, meta=None, short=False, if_modified_since=None - ) -> dict: + def get_movie_extended(self, id: int, meta=None, short=False, if_modified_since=None) -> dict: """Returns a movie extended dictionary""" url = self.url.construct("movies", id, "extended", meta=meta, short=short) return self.request.make_request(url, if_modified_since) - def get_movie_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: + def get_movie_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: """Returns a movie translation dictionary""" url = self.url.construct("movies", id, "translations", lang, meta=meta) return self.request.make_request(url, if_modified_since) @@ -315,9 +300,7 @@ def get_season_types(self, meta=None, if_modified_since=None) -> list: url = self.url.construct("seasons/types", meta=meta) return self.request.make_request(url, if_modified_since) - def get_season_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: + def get_season_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: """Returns a seasons translation dictionary""" url = self.url.construct("seasons", id, "translations", lang, meta=meta) return 
self.request.make_request(url, if_modified_since) @@ -337,16 +320,13 @@ def get_episode_extended(self, id: int, meta=None, if_modified_since=None) -> di url = self.url.construct("episodes", id, "extended", meta=meta) return self.request.make_request(url, if_modified_since) - def get_episode_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: + def get_episode_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: """Returns an episode translation dictionary""" url = self.url.construct("episodes", id, "translations", lang, meta=meta) return self.request.make_request(url, if_modified_since) - get_episodes_translation = ( - get_episode_translation # Support the old name of the function. - ) + # Support the old name of the function. + get_episodes_translation = get_episode_translation def get_all_genders(self, meta=None, if_modified_since=None) -> list: """Returns a list of genders""" @@ -383,10 +363,8 @@ def get_person_extended(self, id: int, meta=None, if_modified_since=None) -> dic url = self.url.construct("people", id, "extended", meta=meta) return self.request.make_request(url, if_modified_since) - def get_person_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: - """Returns an people translation dictionary""" + def get_person_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: + """Returns a people translation dictionary""" url = self.url.construct("people", id, "translations", lang, meta=meta) return self.request.make_request(url, if_modified_since) @@ -400,18 +378,24 @@ def get_people_types(self, meta=None, if_modified_since=None) -> list: url = self.url.construct("people/types", meta=meta) return self.request.make_request(url, if_modified_since) - get_all_people_types = get_people_types # Support the old function name + # Support the old function name + get_all_people_types = get_people_types def get_source_types(self, meta=None, if_modified_since=None) -> list: """Returns a list of source types""" url = self.url.construct("sources/types", meta=meta) return self.request.make_request(url, if_modified_since) - get_all_sourcetypes = get_source_types # Support the old function name + # Support the old function name + get_all_sourcetypes = get_source_types - # kwargs accepts args such as: page=2, action='update', type='artwork' def get_updates(self, since: int, **kwargs) -> list: - """Returns a list of updates""" + """Returns a list of updates + + Args: + since: Timestamp since when to get updates + **kwargs: Additional parameters such as page=2, action='update', type='artwork' + """ url = self.url.construct("updates", since=since, **kwargs) return self.request.make_request(url) @@ -426,32 +410,34 @@ def get_tag_option(self, id: int, meta=None, if_modified_since=None) -> dict: return self.request.make_request(url, if_modified_since) def get_all_lists(self, page=None, meta=None) -> dict: + """Returns all lists""" url = self.url.construct("lists", page=page, meta=meta) return self.request.make_request(url) def get_list(self, id: int, meta=None, if_modified_since=None) -> dict: + """Returns a list dictionary""" url = self.url.construct("lists", id, meta=meta) - return self.request.make_request(url), if_modified_since + return self.request.make_request(url, if_modified_since) def get_list_by_slug(self, slug: str, meta=None, if_modified_since=None) -> dict: - """Returns a movie dictionary""" + """Returns a list dictionary by slug""" url = 
self.url.construct("lists/slug", slug, meta=meta) return self.request.make_request(url, if_modified_since) def get_list_extended(self, id: int, meta=None, if_modified_since=None) -> dict: + """Returns an extended list dictionary""" url = self.url.construct("lists", id, "extended", meta=meta) - return self.request.make_request(url), if_modified_since + return self.request.make_request(url, if_modified_since) - def get_list_translation( - self, id: int, lang: str, meta=None, if_modified_since=None - ) -> dict: - """Returns an list translation dictionary""" + def get_list_translation(self, id: int, lang: str, meta=None, if_modified_since=None) -> dict: + """Returns a list translation dictionary""" url = self.url.construct("lists", id, "translations", lang, meta=meta) return self.request.make_request(url, if_modified_since) def get_inspiration_types(self, meta=None, if_modified_since=None) -> dict: + """Returns inspiration types""" url = self.url.construct("inspiration/types", meta=meta) - return self.request.make_request(url), if_modified_since + return self.request.make_request(url, if_modified_since) def search(self, query, **kwargs) -> list: """Returns a list of search results""" @@ -469,7 +455,7 @@ def get_tags(self, slug: str, if_modified_since=None) -> dict: return self.request.make_request(url, if_modified_since) def get_entities_types(self, if_modified_since=None) -> dict: - """Returns a entities types dictionary""" + """Returns an entities types dictionary""" url = self.url.construct("entities") return self.request.make_request(url, if_modified_since) @@ -484,13 +470,6 @@ def get_user(self) -> dict: return self.request.make_request(url) def get_user_favorites(self) -> dict: - """Returns a user info dictionary""" + """Returns a user favorites dictionary""" url = self.url.construct('user/favorites') return self.request.make_request(url) - - -if __name__ == "__main__": - tvdb = TVDB("ed2aa66b-7899-4677-92a7-67bc9ce3d93a") - query = "romance in the alley" - res = tvdb.search(query) - print(res) From 014b77c3c7543d95bf9c3800fec1fa7d8fb26212 Mon Sep 17 00:00:00 2001 From: jxxghp Date: Thu, 29 May 2025 08:30:31 +0800 Subject: [PATCH 7/7] v2.5.1-1 --- version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.py b/version.py index 9f9f6e89b..08ecbb0f6 100644 --- a/version.py +++ b/version.py @@ -1,2 +1,2 @@ -APP_VERSION = 'v2.5.1' -FRONTEND_VERSION = 'v2.5.1' +APP_VERSION = 'v2.5.1-1' +FRONTEND_VERSION = 'v2.5.1-1'