diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000..0384844
Binary files /dev/null and b/.DS_Store differ
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..dfe0770
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto
diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md
new file mode 100644
index 0000000..d098423
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug.md
@@ -0,0 +1,30 @@
+---
+name: 问题模板
+about: 如发现Bug,请按此模板提交issues,不按模板提交的问题将直接关闭。
+提交问题务必描述清楚、附上日志,描述不清导致无法理解和分析的问题也可能会被直接关闭。
+---
+
+## 你使用的 NAStool 是什么版本,什么环境?
+
+> NAStool 版本: vx.x.x
+>
+> 环境: docker or windows or Synology
+>
+
+## 你遇到什么问题了?
+
+> 描述一下你遇到的问题
+
+## 是否已经浏览过Issues、Wiki及TG公众号仍无法解决?
+
+> 请搜索Issues列表、查看wiki跟TG公众号的更新说明,已经解释过的问题不要重复提问
+
+
+## 你期望的结果
+
+> 描述一下你期望的结果
+
+## 给出程序界面截图、后台运行日志或配置文件
+
+> 如UI BUG请提供截图及配置文件截图
+> 其它问题提供后台日志,如为Docker请提供docker的日志
diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md
new file mode 100644
index 0000000..7c68cd7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature.md
@@ -0,0 +1,18 @@
+---
+name: 功能需求模板
+about: 如有新功能需要提交,请按此模板创建issues
+---
+
+## 你使用的 NAStool 是什么版本,什么环境?
+
+> NAStool 版本: vx.x.x
+>
+> 环境: docker or windows or synology
+
+## 你想要新增或者改进什么功能?
+
+> 你想要新增或者改进什么功能?
+
+## 这个功能有什么可以参考的资料吗?
+
+> 这个功能有什么可以参考的资料吗?是否可以列举一些,不要引用同类但商业化软件的任何内容.
diff --git a/.github/workflows/build-beta.yml b/.github/workflows/build-beta.yml
new file mode 100644
index 0000000..103bd5f
--- /dev/null
+++ b/.github/workflows/build-beta.yml
@@ -0,0 +1,54 @@
+name: Build NAStool Beta Image
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - dev
+ paths:
+ - version.py
+ - docker/Dockerfile.beta
+ - .github/workflows/build-beta.yml
+ - requirements.txt
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ name: Build Docker Image
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@master
+
+ -
+ name: Release version
+ id: release_version
+ run: |
+ app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
+ echo "app_version=$app_version" >> $GITHUB_ENV
+
+ -
+ name: Set Up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ -
+ name: Set Up Buildx
+ uses: docker/setup-buildx-action@v1
+
+ -
+ name: Login DockerHub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ -
+ name: Buildx
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: docker/Dockerfile.beta
+ platforms: |
+ linux/amd64
+ linux/arm64
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/nas-tools:${{ env.app_version }}-beta
\ No newline at end of file
diff --git a/.github/workflows/build-lite.yml b/.github/workflows/build-lite.yml
new file mode 100644
index 0000000..5c8c0b7
--- /dev/null
+++ b/.github/workflows/build-lite.yml
@@ -0,0 +1,54 @@
+name: Build NAStool Lite Image
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - master
+ paths:
+ - version.py
+ - docker/Dockerfile.lite
+ - .github/workflows/build-lite.yml
+ - requirements.txt
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ name: Build Docker Image
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@master
+
+ -
+ name: Release version
+ id: release_version
+ run: |
+ app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
+ echo "app_version=$app_version" >> $GITHUB_ENV
+
+ -
+ name: Set Up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ -
+ name: Set Up Buildx
+ uses: docker/setup-buildx-action@v1
+
+ -
+ name: Login DockerHub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ -
+ name: Build Lite Image
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: docker/Dockerfile.lite
+ platforms: |
+ linux/amd64
+ linux/arm64
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/nas-tools:${{ env.app_version }}-lite
\ No newline at end of file
diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml
new file mode 100644
index 0000000..1bb737d
--- /dev/null
+++ b/.github/workflows/build-windows.yml
@@ -0,0 +1,98 @@
+name: Build NAStool Windows
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - master
+ paths:
+ - version.py
+ - .github/workflows/build-windows.yml
+ - windows/**
+
+jobs:
+ Windows-build:
+ runs-on: windows-latest
+ steps:
+ - name: init Python 3.10.6
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10.6'
+ - name: install dependent packages
+ run: |
+ python -m pip install --upgrade pip
+ pip install wheel numpy==1.23.5 pyparsing==3.0.9 wxpython==4.2.0 pyinstaller==5.7.0
+ git clone --depth=1 -b master https://github.com/NAStool/nas-tools --recurse-submodule
+ cd nas-tools
+ pip install -r requirements.txt
+ echo ("NASTOOL_CONFIG=D:/a/nas-tools/nas-tools/nas-tools/config/config.yaml") >> $env:GITHUB_ENV
+ echo $env:NASTOOL_CONFIG
+ shell: pwsh
+ - name: package through pyinstaller
+ run: |
+ cd nas-tools
+ copy .\windows\rely\upx.exe c:\hostedtoolcache\windows\python\3.10.6\x64\Scripts
+ copy .\windows\rely\hook-cn2an.py c:\hostedtoolcache\windows\python\3.10.6\x64\lib\site-packages\pyinstaller\hooks
+ copy .\windows\rely\hook-zhconv.py c:\hostedtoolcache\windows\python\3.10.6\x64\lib\site-packages\pyinstaller\hooks
+ copy .\third_party.txt .\windows
+ copy .\windows\rely\template.jinja2 c:\hostedtoolcache\windows\Python\3.10.6\x64\lib\site-packages\setuptools\_vendor\pyparsing\diagram
+ xcopy .\web c:\hostedtoolcache\windows\python\3.10.6\x64\lib\site-packages\web\ /e
+ xcopy .\config c:\hostedtoolcache\windows\python\3.10.6\x64\lib\site-packages\config\ /e
+ xcopy .\db_scripts c:\hostedtoolcache\windows\python\3.10.6\x64\lib\site-packages\db_scripts\ /e
+ cd windows
+ pyinstaller nas-tools.spec
+ shell: pwsh
+ - name: upload windows file
+ uses: actions/upload-artifact@v3
+ with:
+ name: windows
+ path: D:/a/nas-tools/nas-tools/nas-tools/windows/dist/nas-tools.exe
+
+ Create-release_Send-message:
+ runs-on: ubuntu-latest
+ needs: [Windows-build]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Release version
+ id: release_version
+ run: |
+ app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
+ echo "app_version=$app_version" >> $GITHUB_ENV
+ - name: download exe and rename
+ uses: actions/download-artifact@v3
+ - name: get release_informations
+ shell: bash
+ run: |
+ pwd
+ mkdir releases
+ cd windows
+ mv nas-tools.exe /home/runner/work/nas-tools/nas-tools/releases/nastool_win_v${{ env.app_version }}.exe
+ pwd
+ - name: Create release
+ id: create_release
+ uses: actions/create-release@latest
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ tag_name: v${{ env.app_version }}
+ release_name: v${{ env.app_version }}
+ body: ${{ github.event.commits[0].message }}
+ draft: false
+ prerelease: false
+ - name: Upload release asset
+ uses: dwenegar/upload-release-assets@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ release_id: ${{ steps.create_release.outputs.id }}
+ assets_path: |
+ /home/runner/work/nas-tools/nas-tools/releases/
+ - name: Send telegram message (release informations)
+ uses: appleboy/telegram-action@master
+ with:
+ to: ${{ secrets.TELEGRAM_TO }}
+ token: ${{ secrets.TELEGRAM_TOKEN }}
+ format: markdown
+ message: |
+ *v${{ env.app_version }}*
+
+ ${{ github.event.commits[0].message }}
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..5cc9a27
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,55 @@
+name: Build NAStool Image
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - master
+ paths:
+ - version.py
+ - docker/Dockerfile
+ - docker/Dockerfile.lite
+ - .github/workflows/build.yml
+ - requirements.txt
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ name: Build Docker Image
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@master
+
+ -
+ name: Release version
+ id: release_version
+ run: |
+ app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
+ echo "app_version=$app_version" >> $GITHUB_ENV
+
+ -
+ name: Set Up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ -
+ name: Set Up Buildx
+ uses: docker/setup-buildx-action@v1
+
+ -
+ name: Login DockerHub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Build Image
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: docker/Dockerfile
+ platforms: |
+ linux/amd64
+ linux/arm64
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/nas-tools:latest
+ ${{ secrets.DOCKER_USERNAME }}/nas-tools:${{ env.app_version }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..72644b9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+__pycache__
+*.sock
+*.log
+*.pid
+test.py
+
+### IntelliJ IDEA ###
+.idea
+*.iws
+*.iml
+*.ipr
+out/
+gen/
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..c4b7027
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,18 @@
+[submodule "third_party/qbittorrent-api"]
+ path = third_party/qbittorrent-api
+ url = https://github.com/rmartin16/qbittorrent-api
+[submodule "third_party/transmission-rpc"]
+ path = third_party/transmission-rpc
+ url = https://github.com/Trim21/transmission-rpc
+[submodule "third_party/anitopy"]
+ path = third_party/anitopy
+ url = https://github.com/igorcmoura/anitopy
+[submodule "third_party/plexapi"]
+ path = third_party/plexapi
+ url = https://github.com/pkkid/python-plexapi
+[submodule "third_party/slack_bolt"]
+ path = third_party/slack_bolt
+ url = https://github.com/slackapi/bolt-python
+[submodule "third_party/feapder"]
+ path = third_party/feapder
+ url = https://github.com/jxxghp/feapder
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..0ad25db
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ ${Object.values(item)[0]}
+
").strip() if title else ""
+ content = content.replace("\n", "
").strip() if content else ""
+ self.__append_message_queue(level, title, content)
+
+ def __append_message_queue(self, level, title, content):
+ """
+ 将消息增加到队列
+ """
+ self._message_queue.appendleft({"level": level,
+ "title": title,
+ "content": content,
+ "time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))})
+
+ def get_system_messages(self, num=20, lst_time=None):
+ """
+ 查询系统消息
+ :param num: 条数
+ :param lst_time: 最后时间
+ """
+ if not lst_time:
+ return list(self._message_queue)[-num:]
+ else:
+ ret_messages = []
+ for message in list(self._message_queue):
+ if (datetime.datetime.strptime(message.get("time"), '%Y-%m-%d %H:%M:%S') - datetime.datetime.strptime(
+ lst_time, '%Y-%m-%d %H:%M:%S')).seconds > 0:
+ ret_messages.append(message)
+ else:
+ break
+ return ret_messages
diff --git a/app/rss.py b/app/rss.py
new file mode 100644
index 0000000..f22de7f
--- /dev/null
+++ b/app/rss.py
@@ -0,0 +1,628 @@
+import re
+import xml.dom.minidom
+from threading import Lock
+
+import log
+from app.downloader import Downloader
+from app.filter import Filter
+from app.helper import DbHelper
+from app.media import Media
+from app.media.meta import MetaInfo
+from app.sites import Sites
+from app.subscribe import Subscribe
+from app.utils import DomUtils, RequestUtils, StringUtils, ExceptionUtils, RssTitleUtils, Torrent
+from app.utils.types import MediaType, SearchType
+
+lock = Lock()
+
+
+class Rss:
+ _sites = []
+ filter = None
+ media = None
+ downloader = None
+ searcher = None
+ dbhelper = None
+ subscribe = None
+
+ def __init__(self):
+ self.media = Media()
+ self.downloader = Downloader()
+ self.sites = Sites()
+ self.filter = Filter()
+ self.dbhelper = DbHelper()
+ self.subscribe = Subscribe()
+ self.init_config()
+
+ def init_config(self):
+ self._sites = self.sites.get_sites(rss=True)
+
+ def rssdownload(self):
+ """
+ RSS订阅检索下载入口,由定时服务调用
+ """
+
+ if not self._sites:
+ return
+
+ with lock:
+ log.info("【Rss】开始RSS订阅...")
+
+ # 读取电影订阅
+ rss_movies = self.subscribe.get_subscribe_movies(state='R')
+ if not rss_movies:
+ log.warn("【Rss】没有正在订阅的电影")
+ else:
+ log.info("【Rss】电影订阅清单:%s"
+ % " ".join('%s' % info.get("name") for _, info in rss_movies.items()))
+ # 读取电视剧订阅
+ rss_tvs = self.subscribe.get_subscribe_tvs(state='R')
+ if not rss_tvs:
+ log.warn("【Rss】没有正在订阅的电视剧")
+ else:
+ log.info("【Rss】电视剧订阅清单:%s"
+ % " ".join('%s' % info.get("name") for _, info in rss_tvs.items()))
+ # 没有订阅退出
+ if not rss_movies and not rss_tvs:
+ return
+
+ # 获取有订阅的站点范围
+ check_sites = []
+ check_all = False
+ for rid, rinfo in rss_movies.items():
+ rss_sites = rinfo.get("rss_sites")
+ if not rss_sites:
+ check_all = True
+ break
+ else:
+ check_sites += rss_sites
+ if not check_all:
+ for rid, rinfo in rss_tvs.items():
+ rss_sites = rinfo.get("rss_sites")
+ if not rss_sites:
+ check_all = True
+ break
+ else:
+ check_sites += rss_sites
+ if check_all:
+ check_sites = []
+ else:
+ check_sites = list(set(check_sites))
+
+ # 匹配到的资源列表
+ rss_download_torrents = []
+ # 缺失的资源详情
+ rss_no_exists = {}
+ # 遍历站点资源
+ for site_info in self._sites:
+ if not site_info:
+ continue
+ # 站点名称
+ site_name = site_info.get("name")
+ # 没有订阅的站点中的不检索
+ if check_sites and site_name not in check_sites:
+ continue
+ # 站点rss链接
+ rss_url = site_info.get("rssurl")
+ if not rss_url:
+ log.info(f"【Rss】{site_name} 未配置rssurl,跳过...")
+ continue
+ site_cookie = site_info.get("cookie")
+ site_ua = site_info.get("ua")
+ # 是否解析种子详情
+ site_parse = site_info.get("parse")
+ # 是否使用代理
+ site_proxy = site_info.get("proxy")
+ # 使用的规则
+ site_fliter_rule = site_info.get("rule")
+ # 开始下载RSS
+ log.info(f"【Rss】正在处理:{site_name}")
+ if site_info.get("pri"):
+ site_order = 100 - int(site_info.get("pri"))
+ else:
+ site_order = 0
+ rss_acticles = self.parse_rssxml(rss_url)
+ if not rss_acticles:
+ log.warn(f"【Rss】{site_name} 未下载到数据")
+ continue
+ else:
+ log.info(f"【Rss】{site_name} 获取数据:{len(rss_acticles)}")
+ # 处理RSS结果
+ res_num = 0
+ for article in rss_acticles:
+ try:
+ # 种子名
+ title = article.get('title')
+ # 种子链接
+ enclosure = article.get('enclosure')
+ # 种子页面
+ page_url = article.get('link')
+ # 种子大小
+ size = article.get('size')
+ # 开始处理
+ log.info(f"【Rss】开始处理:{title}")
+ # 检查这个种子是不是下过了
+ if self.dbhelper.is_torrent_rssd(enclosure):
+ log.info(f"【Rss】{title} 已成功订阅过")
+ continue
+ # 识别种子名称,开始检索TMDB
+ media_info = MetaInfo(title=title)
+ cache_info = self.media.get_cache_info(media_info)
+ if cache_info.get("id"):
+ # 使用缓存信息
+ media_info.tmdb_id = cache_info.get("id")
+ media_info.type = cache_info.get("type")
+ media_info.title = cache_info.get("title")
+ media_info.year = cache_info.get("year")
+ else:
+ # 重新查询TMDB
+ media_info = self.media.get_media_info(title=title)
+ if not media_info:
+ log.warn(f"【Rss】{title} 无法识别出媒体信息!")
+ continue
+ elif not media_info.tmdb_info:
+ log.info(f"【Rss】{title} 识别为 {media_info.get_name()} 未匹配到TMDB媒体信息")
+ # 大小及种子页面
+ media_info.set_torrent_info(size=size,
+ page_url=page_url,
+ site=site_name,
+ site_order=site_order,
+ enclosure=enclosure)
+ # 检查种子是否匹配订阅,返回匹配到的订阅ID、是否洗版、总集数、上传因子、下载因子
+ match_flag, match_msg, match_info = self.check_torrent_rss(
+ media_info=media_info,
+ rss_movies=rss_movies,
+ rss_tvs=rss_tvs,
+ site_filter_rule=site_fliter_rule,
+ site_cookie=site_cookie,
+ site_parse=site_parse,
+ site_ua=site_ua,
+ site_proxy=site_proxy)
+ for msg in match_msg:
+ log.info(f"【Rss】{msg}")
+
+ # 未匹配
+ if not match_flag:
+ continue
+
+ # 非模糊匹配命中,检查本地情况,检查删除订阅
+ if not match_info.get("fuzzy_match"):
+ # 匹配到订阅,如没有TMDB信息则重新查询
+ if not media_info.tmdb_info and media_info.tmdb_id:
+ media_info.set_tmdb_info(self.media.get_tmdb_info(mtype=media_info.type,
+ tmdbid=media_info.tmdb_id))
+ if not media_info.tmdb_info:
+ continue
+ # 非洗版时检查本地是否存在
+ if not match_info.get("over_edition"):
+ if media_info.type == MediaType.MOVIE:
+ exist_flag, rss_no_exists, _ = self.downloader.check_exists_medias(
+ meta_info=media_info,
+ no_exists=rss_no_exists
+ )
+ else:
+ # 从登记薄中获取缺失剧集
+ season = 1
+ if match_info.get("season"):
+ season = int(str(match_info.get("season")).replace("S", ""))
+ # 设定的总集数
+ total_ep = match_info.get("total")
+ # 设定的开始集数
+ current_ep = match_info.get("current_ep")
+ # 表登记的缺失集数
+ episodes = self.subscribe.get_subscribe_tv_episodes(match_info.get("id"))
+ if episodes is None:
+ episodes = []
+ if current_ep:
+ episodes = list(range(int(current_ep), int(total_ep) + 1))
+ rss_no_exists[media_info.tmdb_id] = [
+ {
+ "season": season,
+ "episodes": episodes,
+ "total_episodes": total_ep
+ }
+ ]
+ else:
+ rss_no_exists[media_info.tmdb_id] = [
+ {
+ "season": season,
+ "episodes": episodes,
+ "total_episodes": total_ep
+ }
+ ]
+ # 检查本地媒体库情况
+ exist_flag, library_no_exists, _ = self.downloader.check_exists_medias(
+ meta_info=media_info,
+ total_ep={season: total_ep}
+ )
+ # 取交集做为缺失集
+ rss_no_exists = Torrent.get_intersection_episodes(target=rss_no_exists,
+ source=library_no_exists,
+ title=media_info.tmdb_id)
+ if rss_no_exists.get(media_info.tmdb_id):
+ log.info("【Rss】%s 订阅缺失季集:%s" % (
+ media_info.get_title_string(),
+ rss_no_exists.get(media_info.tmdb_id)
+ ))
+ # 本地已存在
+ if exist_flag:
+ continue
+ # 洗版模式
+ else:
+ # 洗版时季集不完整的资源不要
+ if media_info.type != MediaType.MOVIE \
+ and media_info.get_episode_list():
+ log.info(
+ f"【Rss】{media_info.get_title_string()}{media_info.get_season_string()} "
+ f"正在洗版,过滤掉季集不完整的资源:{title}"
+ )
+ continue
+ if not self.subscribe.check_subscribe_over_edition(
+ rtype=media_info.type,
+ rssid=match_info.get("id"),
+ res_order=match_info.get("res_order")):
+ log.info(
+ f"【Rss】{media_info.get_title_string()}{media_info.get_season_string()} "
+ f"正在洗版,跳过低优先级或同优先级资源:{title}"
+ )
+ continue
+ # 模糊匹配
+ else:
+ # 不做处理,直接下载
+ pass
+
+ # 设置种子信息
+ media_info.set_torrent_info(res_order=match_info.get("res_order"),
+ filter_rule=match_info.get("filter_rule"),
+ over_edition=match_info.get("over_edition"),
+ download_volume_factor=match_info.get("download_volume_factor"),
+ upload_volume_factor=match_info.get("upload_volume_factor"),
+ rssid=match_info.get("id"))
+ # 设置下载参数
+ media_info.set_download_info(download_setting=match_info.get("download_setting"),
+ save_path=match_info.get("save_path"))
+ # 插入数据库历史记录
+ self.dbhelper.insert_rss_torrents(media_info)
+ # 加入下载列表
+ if media_info not in rss_download_torrents:
+ rss_download_torrents.append(media_info)
+ res_num = res_num + 1
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ log.error("【Rss】处理RSS发生错误:%s" % str(e))
+ continue
+ log.info("【Rss】%s 处理结束,匹配到 %s 个有效资源" % (site_name, res_num))
+ log.info("【Rss】所有RSS处理结束,共 %s 个有效资源" % len(rss_download_torrents))
+ # 开始择优下载
+ self.download_rss_torrent(rss_download_torrents=rss_download_torrents,
+ rss_no_exists=rss_no_exists)
+
+ @staticmethod
+ def parse_rssxml(url):
+ """
+ 解析RSS订阅URL,获取RSS中的种子信息
+ :param url: RSS地址
+ :return: 种子信息列表
+ """
+ _special_title_sites = {
+ 'pt.keepfrds.com': RssTitleUtils.keepfriends_title
+ }
+
+ # 开始处理
+ ret_array = []
+ if not url:
+ return []
+ site_domain = StringUtils.get_url_domain(url)
+ try:
+ ret = RequestUtils().get_res(url)
+ if not ret:
+ return []
+ ret.encoding = ret.apparent_encoding
+ except Exception as e2:
+ ExceptionUtils.exception_traceback(e2)
+ log.console(str(e2))
+ return []
+ if ret:
+ ret_xml = ret.text
+ try:
+ # 解析XML
+ dom_tree = xml.dom.minidom.parseString(ret_xml)
+ rootNode = dom_tree.documentElement
+ items = rootNode.getElementsByTagName("item")
+ for item in items:
+ try:
+ # 标题
+ title = DomUtils.tag_value(item, "title", default="")
+ if not title:
+ continue
+ # 标题特殊处理
+ if site_domain and site_domain in _special_title_sites:
+ title = _special_title_sites.get(site_domain)(title)
+ # 描述
+ description = DomUtils.tag_value(item, "description", default="")
+ # 种子页面
+ link = DomUtils.tag_value(item, "link", default="")
+ # 种子链接
+ enclosure = DomUtils.tag_value(item, "enclosure", "url", default="")
+ if not enclosure and not link:
+ continue
+ # 部分RSS只有link没有enclosure
+ if not enclosure and link:
+ enclosure = link
+ link = None
+ # 大小
+ size = DomUtils.tag_value(item, "enclosure", "length", default=0)
+ if size and str(size).isdigit():
+ size = int(size)
+ else:
+ size = 0
+ # 发布日期
+ pubdate = DomUtils.tag_value(item, "pubDate", default="")
+ if pubdate:
+ # 转换为时间
+ pubdate = StringUtils.get_time_stamp(pubdate)
+ # 返回对象
+ tmp_dict = {'title': title,
+ 'enclosure': enclosure,
+ 'size': size,
+ 'description': description,
+ 'link': link,
+ 'pubdate': pubdate}
+ ret_array.append(tmp_dict)
+ except Exception as e1:
+ ExceptionUtils.exception_traceback(e1)
+ continue
+ except Exception as e2:
+ ExceptionUtils.exception_traceback(e2)
+ return ret_array
+ return ret_array
+
+ def check_torrent_rss(self,
+ media_info,
+ rss_movies,
+ rss_tvs,
+ site_filter_rule,
+ site_cookie,
+ site_parse,
+ site_ua,
+ site_proxy):
+ """
+ 判断种子是否命中订阅
+ :param media_info: 已识别的种子媒体信息
+ :param rss_movies: 电影订阅清单
+ :param rss_tvs: 电视剧订阅清单
+ :param site_filter_rule: 站点过滤规则
+ :param site_cookie: 站点的Cookie
+ :param site_parse: 是否解析种子详情
+ :param site_ua: 站点请求UA
+ :param site_proxy: 是否使用代理
+ :return: 匹配到的订阅ID、是否洗版、总集数、匹配规则的资源顺序、上传因子、下载因子,匹配的季(电视剧)
+ """
+ # 默认值
+ # 匹配状态 0不在订阅范围内 -1不符合过滤条件 1匹配
+ match_flag = False
+ # 匹配的rss信息
+ match_msg = []
+ match_rss_info = {}
+ # 上传因素
+ upload_volume_factor = None
+ # 下载因素
+ download_volume_factor = None
+ hit_and_run = False
+
+ # 匹配电影
+ if media_info.type == MediaType.MOVIE and rss_movies:
+ for rid, rss_info in rss_movies.items():
+ rss_sites = rss_info.get('rss_sites')
+ # 过滤订阅站点
+ if rss_sites and media_info.site not in rss_sites:
+ continue
+ # tmdbid或名称年份匹配
+ name = rss_info.get('name')
+ year = rss_info.get('year')
+ tmdbid = rss_info.get('tmdbid')
+ fuzzy_match = rss_info.get('fuzzy_match')
+ # 非模糊匹配
+ if not fuzzy_match:
+ # 有tmdbid时使用tmdbid匹配
+ if tmdbid and not tmdbid.startswith("DB:"):
+ if str(media_info.tmdb_id) != str(tmdbid):
+ continue
+ else:
+ # 豆瓣年份与tmdb取向不同
+ if year and str(media_info.year) not in [str(year),
+ str(int(year) + 1),
+ str(int(year) - 1)]:
+ continue
+ if name != media_info.title:
+ continue
+ # 模糊匹配
+ else:
+ # 匹配年份
+ if year and str(year) != str(media_info.year):
+ continue
+ # 匹配关键字或正则表达式
+ search_title = f"{media_info.org_string} {media_info.title} {media_info.year}"
+ if not re.search(name, search_title, re.I) and name not in search_title:
+ continue
+ # 媒体匹配成功
+ match_flag = True
+ match_rss_info = rss_info
+
+ break
+ # 匹配电视剧
+ elif rss_tvs:
+ # 匹配种子标题
+ for rid, rss_info in rss_tvs.items():
+ rss_sites = rss_info.get('rss_sites')
+ # 过滤订阅站点
+ if rss_sites and media_info.site not in rss_sites:
+ continue
+ # 有tmdbid时精确匹配
+ name = rss_info.get('name')
+ year = rss_info.get('year')
+ season = rss_info.get('season')
+ tmdbid = rss_info.get('tmdbid')
+ fuzzy_match = rss_info.get('fuzzy_match')
+ # 非模糊匹配
+ if not fuzzy_match:
+ if tmdbid and not tmdbid.startswith("DB:"):
+ if str(media_info.tmdb_id) != str(tmdbid):
+ continue
+ else:
+ # 匹配年份,年份可以为空
+ if year and str(year) != str(media_info.year):
+ continue
+ # 匹配名称
+ if name != media_info.title:
+ continue
+ # 匹配季,季可以为空
+ if season and season != media_info.get_season_string():
+ continue
+ # 模糊匹配
+ else:
+ # 匹配季,季可以为空
+ if season and season != "S00" and season != media_info.get_season_string():
+ continue
+ # 匹配年份
+ if year and str(year) != str(media_info.year):
+ continue
+ # 匹配关键字或正则表达式
+ search_title = f"{media_info.org_string} {media_info.title} {media_info.year}"
+ if not re.search(name, search_title, re.I) and name not in search_title:
+ continue
+ # 媒体匹配成功
+ match_flag = True
+ match_rss_info = rss_info
+ break
+ # 名称匹配成功,开始过滤
+ if match_flag:
+ # 解析种子详情
+ if site_parse:
+ # 检测Free
+ torrent_attr = self.sites.check_torrent_attr(torrent_url=media_info.page_url,
+ cookie=site_cookie,
+ ua=site_ua,
+ proxy=site_proxy)
+ if torrent_attr.get('2xfree'):
+ download_volume_factor = 0.0
+ upload_volume_factor = 2.0
+ elif torrent_attr.get('free'):
+ download_volume_factor = 0.0
+ upload_volume_factor = 1.0
+ else:
+ upload_volume_factor = 1.0
+ download_volume_factor = 1.0
+ if torrent_attr.get('hr'):
+ hit_and_run = True
+ # 设置属性
+ media_info.set_torrent_info(upload_volume_factor=upload_volume_factor,
+ download_volume_factor=download_volume_factor,
+ hit_and_run=hit_and_run)
+ # 订阅无过滤规则应用站点设置
+ filter_rule = match_rss_info.get('filter_rule') or site_filter_rule
+ filter_dict = {
+ "restype": match_rss_info.get('filter_restype'),
+ "pix": match_rss_info.get('filter_pix'),
+ "team": match_rss_info.get('filter_team'),
+ "rule": filter_rule
+ }
+ match_filter_flag, res_order, match_filter_msg = self.filter.check_torrent_filter(meta_info=media_info,
+ filter_args=filter_dict)
+ if not match_filter_flag:
+ match_msg.append(match_filter_msg)
+ return False, match_msg, match_rss_info
+ else:
+ match_msg.append("%s 识别为 %s %s 匹配订阅成功" % (
+ media_info.org_string,
+ media_info.get_title_string(),
+ media_info.get_season_episode_string()))
+ match_msg.append(f"种子描述:{media_info.subtitle}")
+ match_rss_info.update({
+ "res_order": res_order,
+ "filter_rule": filter_rule,
+ "upload_volume_factor": upload_volume_factor,
+ "download_volume_factor": download_volume_factor})
+ return True, match_msg, match_rss_info
+ else:
+ match_msg.append("%s 识别为 %s %s 不在订阅范围" % (
+ media_info.org_string,
+ media_info.get_title_string(),
+ media_info.get_season_episode_string()))
+ return False, match_msg, match_rss_info
+
+ def download_rss_torrent(self, rss_download_torrents, rss_no_exists):
+ """
+ 根据缺失情况以及匹配到的结果选择下载种子
+ """
+
+ if not rss_download_torrents:
+ return
+
+ finished_rss_torrents = []
+ updated_rss_torrents = []
+
+ def __finish_rss(download_item):
+ """
+ 完成订阅
+ """
+ if not download_item:
+ return
+ if not download_item.rssid \
+ or download_item.rssid in finished_rss_torrents:
+ return
+ finished_rss_torrents.append(download_item.rssid)
+ self.subscribe.finish_rss_subscribe(rssid=download_item.rssid,
+ media=download_item)
+
+ def __update_tv_rss(download_item, left_media):
+ """
+ 更新订阅集数
+ """
+ if not download_item or not left_media:
+ return
+ if not download_item.rssid \
+ or download_item.rssid in updated_rss_torrents:
+ return
+ updated_rss_torrents.append(download_item.rssid)
+ self.subscribe.update_subscribe_tv_lack(rssid=download_item.rssid,
+ media_info=download_item,
+ seasoninfo=left_media)
+
+ def __update_over_edition(download_item):
+ """
+ 更新洗版订阅
+ """
+ if not download_item:
+ return
+ if not download_item.rssid \
+ or download_item.rssid in updated_rss_torrents:
+ return
+ if download_item.get_episode_list():
+ return
+ updated_rss_torrents.append(download_item.rssid)
+ self.subscribe.update_subscribe_over_edition(rtype=download_item.type,
+ rssid=download_item.rssid,
+ media=download_item)
+
+ # 去重择优后开始添加下载
+ download_items, left_medias = self.downloader.batch_download(SearchType.RSS,
+ rss_download_torrents,
+ rss_no_exists)
+ # 批量删除订阅
+ if download_items:
+ for item in download_items:
+ if not item.rssid:
+ continue
+ if item.over_edition:
+ # 更新洗版订阅
+ __update_over_edition(item)
+ elif not left_medias or not left_medias.get(item.tmdb_id):
+ # 删除电视剧订阅
+ __finish_rss(item)
+ else:
+ # 更新电视剧缺失剧集
+ __update_tv_rss(item, left_medias.get(item.tmdb_id))
+ log.info("【Rss】实际下载了 %s 个资源" % len(download_items))
+ else:
+ log.info("【Rss】未下载到任何资源")
diff --git a/app/rsschecker.py b/app/rsschecker.py
new file mode 100644
index 0000000..8fda61a
--- /dev/null
+++ b/app/rsschecker.py
@@ -0,0 +1,662 @@
+import json
+import traceback
+
+import jsonpath
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.schedulers.background import BackgroundScheduler
+from lxml import etree
+
+import log
+from app.downloader import Downloader
+from app.filter import Filter
+from app.helper import DbHelper
+from app.media import Media
+from app.media.meta import MetaInfo
+from app.message import Message
+from app.searcher import Searcher
+from app.subscribe import Subscribe
+from app.utils import RequestUtils, StringUtils, ExceptionUtils
+from app.utils.commons import singleton
+from app.utils.types import MediaType, SearchType
+from config import Config
+
+
@singleton
class RssChecker(object):
    """
    Periodically fetches user-defined (custom) RSS feeds, parses them with
    configurable parsers and downloads or subscribes the matched media.
    """
    message = None
    searcher = None
    filter = None
    media = None
    filterrule = None
    downloader = None
    subscribe = None
    dbhelper = None

    # background scheduler running one job per enabled task
    _scheduler = None
    _rss_tasks = []
    _rss_parsers = []
    # task "uses" codes -> human-readable labels (shown in the UI)
    _site_users = {
        "D": "下载",
        "R": "订阅",
        "S": "搜索"
    }

    def __init__(self):
        self.init_config()
+
def init_config(self):
    """
    (Re)load parsers and tasks from the database, then restart the
    per-task RSS check jobs on a fresh BackgroundScheduler.
    """
    self.dbhelper = DbHelper()
    self.message = Message()
    self.searcher = Searcher()
    self.filter = Filter()
    self.media = Media()
    self.downloader = Downloader()
    self.subscribe = Subscribe()
    # tear down any previously running scheduler
    try:
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                self._scheduler.shutdown()
            self._scheduler = None
    except Exception as e:
        ExceptionUtils.exception_traceback(e)
    # load the parser list
    rss_parsers = self.dbhelper.get_userrss_parser()
    self._rss_parsers = []
    for rss_parser in rss_parsers:
        self._rss_parsers.append(
            {
                "id": rss_parser.ID,
                "name": rss_parser.NAME,
                "type": rss_parser.TYPE,
                "format": rss_parser.FORMAT,
                "params": rss_parser.PARAMS,
                "note": rss_parser.NOTE
            }
        )
    # load the task list
    rsstasks = self.dbhelper.get_userrss_tasks()
    self._rss_tasks = []
    for task in rsstasks:
        parser = self.get_userrss_parser(task.PARSER)
        if task.FILTER:
            filterrule = self.filter.get_rule_groups(groupid=task.FILTER)
        else:
            filterrule = {}
        # extra attributes are stored as a JSON blob in NOTE
        note = {}
        if task.NOTE:
            try:
                note = json.loads(task.NOTE)
            except Exception as e:
                print(str(e))
                note = {}
        save_path = note.get("save_path") or ""
        recognization = note.get("recognization") or "Y"
        self._rss_tasks.append({
            "id": task.ID,
            "name": task.NAME,
            "address": task.ADDRESS,
            "parser": task.PARSER,
            "parser_name": parser.get("name") if parser else "",
            "interval": task.INTERVAL,
            # legacy "S" (search) tasks are handled as subscriptions
            "uses": task.USES if task.USES != "S" else "R",
            "uses_text": self._site_users.get(task.USES),
            "include": task.INCLUDE,
            "exclude": task.EXCLUDE,
            "filter": task.FILTER,
            "filter_name": filterrule.get("name") if filterrule else "",
            "update_time": task.UPDATE_TIME,
            "counter": task.PROCESS_COUNT,
            "state": task.STATE,
            "save_path": task.SAVE_PATH or save_path,
            "download_setting": task.DOWNLOAD_SETTING or "",
            "recognization": task.RECOGNIZATION or recognization,
            "over_edition": task.OVER_EDITION or 0,
            "sites": json.loads(task.SITES) if task.SITES else {"rss_sites": [], "search_sites": []},
            "filter_args": json.loads(task.FILTER_ARGS)
            if task.FILTER_ARGS else {"restype": "", "pix": "", "team": ""},
        })
    if not self._rss_tasks:
        return
    # start one interval job per enabled task
    self._scheduler = BackgroundScheduler(timezone=Config().get_timezone(),
                                          executors={
                                              'default': ThreadPoolExecutor(30)
                                          })
    rss_flag = False
    for task in self._rss_tasks:
        if task.get("state") == "Y" and task.get("interval") and str(task.get("interval")).isdigit():
            rss_flag = True
            self._scheduler.add_job(func=self.check_task_rss,
                                    args=[task.get("id")],
                                    trigger='interval',
                                    seconds=int(task.get("interval")) * 60)
    if rss_flag:
        self._scheduler.print_jobs()
        self._scheduler.start()
        log.info("自定义订阅服务启动")
+
def get_rsstask_info(self, taskid=None):
    """
    Return the task dict for the given task id, or the full task list
    when no id is supplied. Unknown ids yield an empty dict.
    """
    if not taskid:
        return self._rss_tasks
    if str(taskid).isdigit():
        taskid = int(taskid)
    return next((task for task in self._rss_tasks
                 if task.get("id") == taskid), {})
+
def check_task_rss(self, taskid):
    """
    Process one custom RSS task; invoked by the background scheduler.
    Fetches and parses the feed, then depending on the task's "uses"
    code either downloads (D) or subscribes (R) the matched entries.
    :param taskid: id of the custom RSS task
    """
    if not taskid:
        return
    # items to download directly
    rss_download_torrents = []
    # items to subscribe
    rss_subscribe_torrents = []
    # items to search (currently unused; kept for the counter below)
    rss_search_torrents = []
    # task info
    taskinfo = self.get_rsstask_info(taskid)
    if not taskinfo:
        return
    rss_result = self.__parse_userrss_result(taskinfo)
    if len(rss_result) == 0:
        log.warn("【RssChecker】%s 未下载到数据" % taskinfo.get("name"))
        return
    else:
        log.info("【RssChecker】%s 获取数据:%s" % (taskinfo.get("name"), len(rss_result)))
    # walk the parsed RSS entries
    res_num = 0
    no_exists = {}
    for res in rss_result:
        try:
            # torrent title
            title = res.get('title')
            if not title:
                continue
            # torrent enclosure link
            enclosure = res.get('enclosure')
            # torrent detail page
            page_url = res.get('link')
            # torrent size
            size = StringUtils.str_filesize(res.get('size'))
            # year
            year = res.get('year')
            if year and len(year) > 4:
                year = year[:4]
            # media type
            mediatype = res.get('type')
            if mediatype:
                mediatype = MediaType.MOVIE if mediatype == "movie" else MediaType.TV

            log.info("【RssChecker】开始处理:%s" % title)

            # skip entries that were already processed
            meta_name = "%s %s" % (title, year) if year else title
            if self.dbhelper.is_userrss_finished(meta_name, enclosure):
                log.info("【RssChecker】%s 已处理过" % title)
                continue

            if taskinfo.get("uses") == "D":
                # recognize the torrent name and look it up on TMDB
                media_info = MetaInfo(title=meta_name,
                                      mtype=mediatype)
                cache_info = self.media.get_cache_info(media_info)
                if taskinfo.get("recognization") == "Y":
                    if cache_info.get("id"):
                        # cache hit -- reuse the cached identification
                        media_info.tmdb_id = cache_info.get("id")
                        media_info.type = cache_info.get("type")
                        media_info.title = cache_info.get("title")
                        media_info.year = cache_info.get("year")
                    else:
                        media_info = self.media.get_media_info(title=meta_name,
                                                               mtype=mediatype)
                        if not media_info:
                            log.warn("【RssChecker】%s 识别媒体信息出错!" % title)
                            continue
                        if not media_info.tmdb_info:
                            log.info("【RssChecker】%s 识别为 %s 未匹配到媒体信息" % (title, media_info.get_name()))
                            continue
                    # check whether the media already exists in the library
                    if media_info.type == MediaType.MOVIE:
                        exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info,
                                                                                      no_exists=no_exists)
                        if exist_flag:
                            log.info("【RssChecker】电影 %s 已存在" % media_info.get_title_string())
                            continue
                    else:
                        exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info,
                                                                                      no_exists=no_exists)
                        # the current episodes already exist -- skip
                        if exist_flag:
                            # everything already exists
                            if not no_exists or not no_exists.get(
                                    media_info.tmdb_id):
                                log.info("【RssChecker】电视剧 %s %s 已存在" % (
                                    media_info.get_title_string(), media_info.get_season_episode_string()))
                                continue
                        if no_exists.get(media_info.tmdb_id):
                            log.info("【RssChecker】%s 缺失季集:%s"
                                     % (media_info.get_title_string(), no_exists.get(media_info.tmdb_id)))
                # size and detail page
                media_info.set_torrent_info(size=size,
                                            page_url=page_url,
                                            site=taskinfo.get("name"),
                                            enclosure=enclosure)
                # check the torrent against the task's filter rules
                filter_args = {
                    "include": taskinfo.get("include"),
                    "exclude": taskinfo.get("exclude"),
                    "rule": taskinfo.get("filter")
                }
                match_flag, res_order, match_msg = self.filter.check_torrent_filter(meta_info=media_info,
                                                                                   filter_args=filter_args)
                # not matched
                if not match_flag:
                    log.info(f"【RssChecker】{match_msg}")
                    continue
                else:
                    # matched -- record the resource priority
                    media_info.set_torrent_info(res_order=res_order)
                    if taskinfo.get("recognization") == "Y":
                        log.info("【RssChecker】%s 识别为 %s %s 匹配成功" % (
                            title,
                            media_info.get_title_string(),
                            media_info.get_season_episode_string()))
                        # fill in the complete TMDB info
                        if not media_info.tmdb_info:
                            media_info.set_tmdb_info(self.media.get_tmdb_info(mtype=media_info.type,
                                                                              tmdbid=media_info.tmdb_id))
                        # store the TMDB info with the subscription task
                        if media_info.type != MediaType.MOVIE:
                            self.dbhelper.insert_userrss_mediainfos(taskid, media_info)
                    else:
                        log.info(f"【RssChecker】{title} 匹配成功")
                # add to the download list
                if not enclosure:
                    log.warn("【RssChecker】%s RSS报文中没有enclosure种子链接" % taskinfo.get("name"))
                    continue
                if media_info not in rss_download_torrents:
                    rss_download_torrents.append(media_info)
                    res_num = res_num + 1
            elif taskinfo.get("uses") == "R":
                media_info = MetaInfo(title=meta_name, mtype=mediatype)
                # subscription tasks only check include/exclude (rule -1)
                filter_args = {
                    "include": taskinfo.get("include"),
                    "exclude": taskinfo.get("exclude"),
                    "rule": -1

                }
                match_flag, _, match_msg = self.filter.check_torrent_filter(meta_info=media_info,
                                                                            filter_args=filter_args)
                # not matched
                if not match_flag:
                    log.info(f"【RssChecker】{match_msg}")
                    continue
                # add to the subscribe list
                self.dbhelper.insert_rss_torrents(media_info)
                if media_info not in rss_subscribe_torrents:
                    rss_subscribe_torrents.append(media_info)
                    res_num = res_num + 1
            else:
                continue
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            log.error("【RssChecker】处理RSS发生错误:%s - %s" % (str(e), traceback.format_exc()))
            continue
    log.info("【RssChecker】%s 处理结束,匹配到 %s 个有效资源" % (taskinfo.get("name"), res_num))
    # start the downloads
    if rss_download_torrents:
        for media in rss_download_torrents:
            ret, ret_msg = self.downloader.download(media_info=media,
                                                    download_dir=taskinfo.get("save_path"),
                                                    download_setting=taskinfo.get("download_setting"))
            if ret:
                self.message.send_download_message(in_from=SearchType.USERRSS,
                                                   can_item=media)
                # the download succeeded -- record it in the database
                self.dbhelper.insert_rss_torrents(media)
                # record the custom-RSS download history
                downloader = self.downloader.get_default_client_type().value
                if media.download_setting:
                    download_attr = self.downloader.get_download_setting(media.download_setting)
                    if download_attr.get("downloader"):
                        downloader = download_attr.get("downloader")
                self.dbhelper.insert_userrss_task_history(taskid, media.org_string, downloader)
            else:
                log.error("【RssChecker】添加下载任务 %s 失败:%s" % (
                    media.get_title_string(), ret_msg or "请检查下载任务是否已存在"))
                if ret_msg:
                    self.message.send_download_fail_message(media, ret_msg)
    # add the subscriptions
    if rss_subscribe_torrents:
        for media in rss_subscribe_torrents:
            code, msg, rss_media = self.subscribe.add_rss_subscribe(
                mtype=media.type,
                name=media.get_name(),
                year=media.year,
                season=media.begin_season,
                rss_sites=taskinfo.get("sites", {}).get("rss_sites"),
                search_sites=taskinfo.get("sites", {}).get("search_sites"),
                over_edition=True if taskinfo.get("over_edition") else False,
                filter_restype=taskinfo.get("filter_args", {}).get("restype"),
                filter_pix=taskinfo.get("filter_args", {}).get("pix"),
                filter_team=taskinfo.get("filter_args", {}).get("team"),
                filter_rule=taskinfo.get("filter"),
                save_path=taskinfo.get("save_path"),
                download_setting=taskinfo.get("download_setting"),
            )
            if rss_media and code == 0:
                self.message.send_rss_success_message(in_from=SearchType.USERRSS, media_info=rss_media)
            else:
                log.warn("【RssChecker】%s 添加订阅失败:%s" % (media.get_name(), msg))

    # update the task statistics
    counter = len(rss_download_torrents) + len(rss_subscribe_torrents) + len(rss_search_torrents)
    if counter:
        self.dbhelper.update_userrss_task_info(taskid, counter)
+
def __parse_userrss_result(self, taskinfo):
    """
    Download the RSS feed of a task and parse it into a list of dicts
    according to the task's parser configuration (XML via XPath, or JSON
    via jsonpath expressions).
    :param taskinfo: task dict as returned by get_rsstask_info
    :return: list of parsed entries; [] on any error
    """
    rss_parser = self.get_userrss_parser(taskinfo.get("parser"))
    if not rss_parser:
        log.error("【RssChecker】任务 %s 的解析配置不存在" % taskinfo.get("name"))
        return []
    if not rss_parser.get("format"):
        log.error("【RssChecker】任务 %s 的解析配置不正确" % taskinfo.get("name"))
        return []
    try:
        rss_parser_format = json.loads(rss_parser.get("format"))
    except Exception as e:
        ExceptionUtils.exception_traceback(e)
        log.error("【RssChecker】任务 %s 的解析配置不是合法的Json格式" % taskinfo.get("name"))
        return []
    # build the request url (optional extra query parameters)
    rss_url = taskinfo.get("address")
    if not rss_url:
        return []
    if rss_parser.get("params"):
        _dict = {
            "TMDBKEY": Config().get_config("app").get("rmt_tmdbkey")
        }
        try:
            param_url = rss_parser.get("params").format(**_dict)
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            log.error("【RssChecker】任务 %s 的解析配置附加参数不合法" % taskinfo.get("name"))
            return []
        rss_url = "%s?%s" % (rss_url, param_url) if rss_url.find("?") == -1 else "%s&%s" % (rss_url, param_url)
    # fetch the feed
    try:
        ret = RequestUtils().get_res(rss_url)
        if not ret:
            return []
        ret.encoding = ret.apparent_encoding
    except Exception as e2:
        ExceptionUtils.exception_traceback(e2)
        return []
    # parse the payload
    rss_result = []
    if rss_parser.get("type") == "XML":
        try:
            result_tree = etree.XML(ret.text.encode("utf-8"))
            item_list = result_tree.xpath(rss_parser_format.get("list")) or []
            for item in item_list:
                rss_item = {}
                for key, attr in rss_parser_format.get("item", {}).items():
                    if attr.get("path"):
                        if attr.get("namespaces"):
                            value = item.xpath("//ns:%s" % attr.get("path"),
                                               namespaces={"ns": attr.get("namespaces")})
                        else:
                            value = item.xpath(attr.get("path"))
                    elif attr.get("value"):
                        value = attr.get("value")
                    else:
                        continue
                    if value:
                        rss_item.update({key: value[0]})
                rss_result.append(rss_item)
        except Exception as err:
            ExceptionUtils.exception_traceback(err)
            log.error("【RssChecker】任务 %s 获取的订阅报文无法解析:%s" % (taskinfo.get("name"), str(err)))
            return []
    elif rss_parser.get("type") == "JSON":
        try:
            result_json = json.loads(ret.text)
        except Exception as err:
            ExceptionUtils.exception_traceback(err)
            log.error("【RssChecker】任务 %s 获取的订阅报文不是合法的Json格式:%s" % (taskinfo.get("name"), str(err)))
            return []
        # FIX: jsonpath.jsonpath() returns False when the expression matches
        # nothing; the previous code indexed [0] unconditionally and raised
        # "TypeError: 'bool' object is not subscriptable"
        list_match = jsonpath.jsonpath(result_json, rss_parser_format.get("list"))
        if not list_match:
            log.error("【RssChecker】任务 %s 获取的订阅报文list后不是列表" % taskinfo.get("name"))
            return []
        item_list = list_match[0]
        if not isinstance(item_list, list):
            log.error("【RssChecker】任务 %s 获取的订阅报文list后不是列表" % taskinfo.get("name"))
            return []
        for item in item_list:
            rss_item = {}
            for key, attr in rss_parser_format.get("item", {}).items():
                if attr.get("path"):
                    value = jsonpath.jsonpath(item, attr.get("path"))
                elif attr.get("value"):
                    value = attr.get("value")
                else:
                    continue
                if value:
                    rss_item.update({key: value[0]})
            rss_result.append(rss_item)
    return rss_result
+
def get_userrss_parser(self, pid=None):
    """
    Return the parser dict with the given id, or all parsers when no id
    is supplied. Unknown ids yield an empty dict.
    """
    if not pid:
        return self._rss_parsers
    for parser_info in self._rss_parsers:
        if parser_info.get("id") == int(pid):
            return parser_info
    return {}
+
def get_rss_articles(self, taskid):
    """
    Fetch and parse a custom RSS task for display only (no downloads).
    :param taskid: id of the custom RSS task
    :return: list of article dicts; None when the task does not exist
    """
    if not taskid:
        return
    # parsed article list
    rss_articles = []
    # task info
    taskinfo = self.get_rsstask_info(taskid)
    if not taskinfo:
        return
    rss_result = self.__parse_userrss_result(taskinfo)
    if len(rss_result) == 0:
        return []
    for res in rss_result:
        try:
            # torrent title
            title = res.get('title')
            if not title:
                continue
            # torrent enclosure link
            enclosure = res.get('enclosure')
            # torrent detail page
            link = res.get('link')
            # subtitle / description
            description = res.get('description')
            # torrent size
            size = StringUtils.str_filesize(res.get('size'))
            # publish date
            date = StringUtils.unify_datetime_str(res.get('date'))
            # year
            year = res.get('year')
            if year and len(year) > 4:
                year = year[:4]
            # check whether the article has been processed before
            meta_name = "%s %s" % (title, year) if year else title
            finish_flag = self.dbhelper.is_userrss_finished(meta_name, enclosure)
            # aggregate the fields
            params = {
                "title": title,
                "link": link,
                "enclosure": enclosure,
                "size": size,
                "description": description,
                "date": date,
                "finish_flag": finish_flag,
            }
            if params not in rss_articles:
                rss_articles.append(params)
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            log.error("【RssChecker】获取RSS报文发生错误:%s - %s" % (str(e), traceback.format_exc()))
    return rss_articles
+
def test_rss_articles(self, taskid, title):
    """
    Dry-run one RSS article title: recognize it, run the task's filter
    and check library existence, without downloading anything.
    :param taskid: id of the custom RSS task
    :param title: RSS article title
    :return: (media_info, match_flag, exist_flag); None when the task
             does not exist
    """
    # task info
    taskinfo = self.get_rsstask_info(taskid)
    if not taskinfo:
        return
    # recognize the torrent name and look it up on TMDB
    media_info = MetaInfo(title=title)
    cache_info = self.media.get_cache_info(media_info)
    if cache_info.get("id"):
        # cache hit -- reuse the cached identification
        media_info.tmdb_id = cache_info.get("id")
        media_info.type = cache_info.get("type")
        media_info.title = cache_info.get("title")
        media_info.year = cache_info.get("year")
    else:
        media_info = self.media.get_media_info(title=title)
        if not media_info:
            log.warn("【RssChecker】%s 识别媒体信息出错!" % title)
            # NOTE(review): media_info is None here yet execution continues
            # into check_torrent_filter below -- looks like a latent crash;
            # confirm intended behavior before fixing
    # check against the task's filter rules
    filter_args = {
        "include": taskinfo.get("include"),
        "exclude": taskinfo.get("exclude"),
        "rule": taskinfo.get("filter") if taskinfo.get("uses") == "D" else None
    }
    match_flag, res_order, match_msg = self.filter.check_torrent_filter(meta_info=media_info,
                                                                        filter_args=filter_args)
    # not matched
    if not match_flag:
        log.info(f"【RssChecker】{match_msg}")
    else:
        log.info("【RssChecker】%s 识别为 %s %s 匹配成功" % (
            title,
            media_info.get_title_string(),
            media_info.get_season_episode_string()))
        media_info.set_torrent_info(res_order=res_order)
    # check whether the media already exists in the library
    no_exists = {}
    exist_flag = False
    if not media_info.tmdb_id:
        log.info("【RssChecker】%s 识别为 %s 未匹配到媒体信息" % (title, media_info.get_name()))
    else:
        if media_info.type == MediaType.MOVIE:
            exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info,
                                                                          no_exists=no_exists)
            if exist_flag:
                log.info("【RssChecker】电影 %s 已存在" % media_info.get_title_string())
        else:
            exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info,
                                                                          no_exists=no_exists)
            if exist_flag:
                # everything already exists
                if not no_exists or not no_exists.get(
                        media_info.tmdb_id):
                    log.info("【RssChecker】电视剧 %s %s 已存在" % (
                        media_info.get_title_string(), media_info.get_season_episode_string()))
                if no_exists.get(media_info.tmdb_id):
                    log.info("【RssChecker】%s 缺失季集:%s"
                             % (media_info.get_title_string(), no_exists.get(media_info.tmdb_id)))
    return media_info, match_flag, exist_flag
+
def check_rss_articles(self, flag, articles):
    """
    Mark RSS articles as processed or unprocessed.
    :param flag: set_finished / set_unfinish
    :param articles: article dicts carrying title/enclosure
    :return: True on success, False on unknown flag or error
    """
    try:
        if flag == "set_finished":
            for item in articles:
                name = item.get("title")
                enclosure = item.get("enclosure")
                if not self.dbhelper.is_userrss_finished(name, enclosure):
                    self.dbhelper.simple_insert_rss_torrents(name, enclosure)
            return True
        if flag == "set_unfinish":
            for item in articles:
                self.dbhelper.simple_delete_rss_torrents(item.get("title"), item.get("enclosure"))
            return True
        return False
    except Exception as e:
        ExceptionUtils.exception_traceback(e)
        log.error("【RssChecker】设置RSS报文状态时发生错误:%s - %s" % (str(e), traceback.format_exc()))
        return False
+
def download_rss_articles(self, taskid, articles):
    """
    Download the given RSS articles of a custom task.
    :param taskid: id of the custom RSS task
    :param articles: article dicts carrying title/enclosure
    :return: True when every download was added, False on the first
             failure, None when the task does not exist
    """
    if not taskid:
        return
    # task info
    taskinfo = self.get_rsstask_info(taskid)
    if not taskinfo:
        return
    for article in articles:
        media = self.media.get_media_info(title=article.get("title"))
        # FIX: get_media_info may return None for unrecognizable titles;
        # the previous code crashed on media.set_torrent_info(...)
        if not media:
            log.warn("【RssChecker】%s 识别媒体信息出错!" % article.get("title"))
            return False
        media.set_torrent_info(enclosure=article.get("enclosure"))
        ret, ret_msg = self.downloader.download(media_info=media,
                                                download_dir=taskinfo.get("save_path"),
                                                download_setting=taskinfo.get("download_setting"))
        if ret:
            self.message.send_download_message(in_from=SearchType.USERRSS,
                                               can_item=media)
            # record the torrent in the database
            self.dbhelper.insert_rss_torrents(media)
            # record the custom-RSS download history
            downloader = self.downloader.get_default_client_type().value
            if taskinfo.get("download_setting"):
                download_attr = self.downloader.get_download_setting(taskinfo.get("download_setting"))
                if download_attr.get("downloader"):
                    downloader = download_attr.get("downloader")
            self.dbhelper.insert_userrss_task_history(taskid, media.org_string, downloader)
        else:
            log.error("【RssChecker】添加下载任务 %s 失败:%s" % (
                media.get_title_string(), ret_msg or "请检查下载任务是否已存在"))
            if ret_msg:
                self.message.send_download_fail_message(media, ret_msg)
            return False
    return True
+
def get_userrss_mediainfos(self):
    """
    Collect the MEDIAINFOS JSON lists of all custom RSS tasks into a
    single flat list.
    """
    mediainfos_all = []
    for task in self.dbhelper.get_userrss_tasks():
        infos = json.loads(task.MEDIAINFOS) if task.MEDIAINFOS else []
        if infos:
            mediainfos_all += infos
    return mediainfos_all
diff --git a/app/scheduler.py b/app/scheduler.py
new file mode 100644
index 0000000..4b09dea
--- /dev/null
+++ b/app/scheduler.py
@@ -0,0 +1,265 @@
+import datetime
+import math
+import random
+import traceback
+
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.schedulers.background import BackgroundScheduler
+
+import log
+from app.doubansync import DoubanSync
+from app.downloader import Downloader
+from app.helper import MetaHelper
+from app.mediaserver import MediaServer
+from app.rss import Rss
+from app.sites import Sites, SiteUserInfo, SiteSignin
+from app.subscribe import Subscribe
+from app.sync import Sync
+from app.utils import ExceptionUtils
+from app.utils.commons import singleton
+from config import PT_TRANSFER_INTERVAL, METAINFO_SAVE_INTERVAL, \
+ SYNC_TRANSFER_INTERVAL, RSS_CHECK_INTERVAL, REFRESH_PT_DATA_INTERVAL, \
+ RSS_REFRESH_TMDB_INTERVAL, META_DELETE_UNKNOWN_INTERVAL, REFRESH_WALLPAPER_INTERVAL, Config
+from web.backend.wallpaper import get_login_wallpaper
+
+
@singleton
class Scheduler:
    """
    Central background scheduler: reads the configuration and registers
    all periodic services (signin, transfer, RSS, sync, ...).
    """
    SCHEDULER = None
    _pt = None
    _douban = None
    _media = None

    def __init__(self):
        self.init_config()

    def init_config(self):
        # cache the configuration sections the jobs below rely on
        self._pt = Config().get_config('pt')
        self._media = Config().get_config('media')
        self._douban = Config().get_config('douban')
+
def run_service(self):
    """
    Read the configuration and start all scheduled services on a fresh
    BackgroundScheduler.
    """
    self.SCHEDULER = BackgroundScheduler(timezone=Config().get_timezone(),
                                         executors={
                                             'default': ThreadPoolExecutor(20)
                                         })
    if not self.SCHEDULER:
        return
    if self._pt:
        # site auto-signin; supports "HH:MM-HH:MM" (random in range),
        # "HH:MM" (fixed time) or a plain number of hours (interval)
        ptsignin_cron = str(self._pt.get('ptsignin_cron'))
        if ptsignin_cron:
            if '-' in ptsignin_cron:
                try:
                    time_range = ptsignin_cron.split("-")
                    start_time_range_str = time_range[0]
                    end_time_range_str = time_range[1]
                    start_time_range_array = start_time_range_str.split(":")
                    end_time_range_array = end_time_range_str.split(":")
                    start_hour = int(start_time_range_array[0])
                    start_minute = int(start_time_range_array[1])
                    end_hour = int(end_time_range_array[0])
                    end_minute = int(end_time_range_array[1])

                    def start_random_job():
                        # pick a random minute inside the configured window
                        task_time_count = random.randint(start_hour * 60 + start_minute, end_hour * 60 + end_minute)
                        self.start_data_site_signin_job(math.floor(task_time_count / 60), task_time_count % 60)

                    self.SCHEDULER.add_job(start_random_job,
                                           "cron",
                                           hour=start_hour,
                                           minute=start_minute)
                    log.info("站点自动签到服务时间范围随机模式启动,起始时间于%s:%s" % (
                        str(start_hour).rjust(2, '0'), str(start_minute).rjust(2, '0')))
                except Exception as e:
                    log.info("站点自动签到时间 时间范围随机模式 配置格式错误:%s %s" % (ptsignin_cron, str(e)))
            elif ptsignin_cron.find(':') != -1:
                try:
                    hour = int(ptsignin_cron.split(":")[0])
                    minute = int(ptsignin_cron.split(":")[1])
                except Exception as e:
                    log.info("站点自动签到时间 配置格式错误:%s" % str(e))
                    hour = minute = 0
                self.SCHEDULER.add_job(SiteSignin().signin,
                                       "cron",
                                       hour=hour,
                                       minute=minute)
                log.info("站点自动签到服务启动")
            else:
                try:
                    hours = float(ptsignin_cron)
                except Exception as e:
                    log.info("站点自动签到时间 配置格式错误:%s" % str(e))
                    hours = 0
                if hours:
                    self.SCHEDULER.add_job(SiteSignin().signin,
                                           "interval",
                                           hours=hours)
                    log.info("站点自动签到服务启动")

        # move completed downloads out of the download client
        pt_monitor = self._pt.get('pt_monitor')
        if pt_monitor:
            self.SCHEDULER.add_job(Downloader().transfer, 'interval', seconds=PT_TRANSFER_INTERVAL)
            log.info("下载文件转移服务启动")

        # RSS downloader
        pt_check_interval = self._pt.get('pt_check_interval')
        if pt_check_interval:
            if isinstance(pt_check_interval, str) and pt_check_interval.isdigit():
                pt_check_interval = int(pt_check_interval)
            else:
                try:
                    pt_check_interval = round(float(pt_check_interval))
                except Exception as e:
                    log.error("RSS订阅周期 配置格式错误:%s" % str(e))
                    pt_check_interval = 0
            if pt_check_interval:
                # enforce a 5-minute floor on the polling interval
                if pt_check_interval < 300:
                    pt_check_interval = 300
                self.SCHEDULER.add_job(Rss().rssdownload, 'interval', seconds=pt_check_interval)
                log.info("RSS订阅服务启动")

        # periodic search for all subscriptions
        search_rss_interval = self._pt.get('search_rss_interval')
        if search_rss_interval:
            if isinstance(search_rss_interval, str) and search_rss_interval.isdigit():
                search_rss_interval = int(search_rss_interval)
            else:
                try:
                    search_rss_interval = round(float(search_rss_interval))
                except Exception as e:
                    log.error("订阅定时搜索周期 配置格式错误:%s" % str(e))
                    search_rss_interval = 0
            if search_rss_interval:
                # enforce a 6-hour floor
                if search_rss_interval < 6:
                    search_rss_interval = 6
                self.SCHEDULER.add_job(Subscribe().subscribe_search_all, 'interval', hours=search_rss_interval)
                log.info("订阅定时搜索服务启动")

    # douban movie sync
    if self._douban:
        douban_interval = self._douban.get('interval')
        if douban_interval:
            if isinstance(douban_interval, str):
                if douban_interval.isdigit():
                    douban_interval = int(douban_interval)
                else:
                    try:
                        douban_interval = float(douban_interval)
                    except Exception as e:
                        log.info("豆瓣同步服务启动失败:%s" % str(e))
                        douban_interval = 0
            if douban_interval:
                self.SCHEDULER.add_job(DoubanSync().sync, 'interval', hours=douban_interval)
                log.info("豆瓣同步服务启动")

    # media-server library sync
    if self._media:
        mediasync_interval = self._media.get("mediasync_interval")
        if mediasync_interval:
            if isinstance(mediasync_interval, str):
                if mediasync_interval.isdigit():
                    mediasync_interval = int(mediasync_interval)
                else:
                    try:
                        mediasync_interval = round(float(mediasync_interval))
                    except Exception as e:
                        log.info("豆瓣同步服务启动失败:%s" % str(e))
                        mediasync_interval = 0
            if mediasync_interval:
                self.SCHEDULER.add_job(MediaServer().sync_mediaserver, 'interval', hours=mediasync_interval)
                log.info("媒体库同步服务启动")

    # periodic metadata save
    self.SCHEDULER.add_job(MetaHelper().save_meta_data, 'interval', seconds=METAINFO_SAVE_INTERVAL)

    # periodically move queued monitored files
    self.SCHEDULER.add_job(Sync().transfer_mon_files, 'interval', seconds=SYNC_TRANSFER_INTERVAL)

    # search the RSS queue
    self.SCHEDULER.add_job(Subscribe().subscribe_search, 'interval', seconds=RSS_CHECK_INTERVAL)

    # refresh site statistics (first run one minute after startup)
    self.SCHEDULER.add_job(SiteUserInfo().refresh_pt_date_now,
                           'interval',
                           hours=REFRESH_PT_DATA_INTERVAL,
                           next_run_time=datetime.datetime.now() + datetime.timedelta(minutes=1))

    # douban RSS -> TMDB: periodically refresh TMDB data
    self.SCHEDULER.add_job(Subscribe().refresh_rss_metainfo, 'interval', hours=RSS_REFRESH_TMDB_INTERVAL)

    # periodically purge unrecognized metadata from the cache
    self.SCHEDULER.add_job(MetaHelper().delete_unknown_meta, 'interval', hours=META_DELETE_UNKNOWN_INTERVAL)

    # periodically refresh the login wallpaper (first run immediately)
    self.SCHEDULER.add_job(get_login_wallpaper,
                           'interval',
                           hours=REFRESH_WALLPAPER_INTERVAL,
                           next_run_time=datetime.datetime.now())

    self.SCHEDULER.print_jobs()

    self.SCHEDULER.start()
+
def stop_service(self):
    """
    Stop the scheduler and drop all registered jobs; errors are only
    recorded, never raised.
    """
    try:
        scheduler = self.SCHEDULER
        if scheduler:
            scheduler.remove_all_jobs()
            scheduler.shutdown()
            self.SCHEDULER = None
    except Exception as e:
        ExceptionUtils.exception_traceback(e)
+
def start_data_site_signin_job(self, hour, minute):
    """
    Register a one-off site-signin job for today at hour:minute with a
    random second, so signins never fire exactly on the minute.
    :param hour: hour of day (0-23)
    :param minute: minute (0-59)
    """
    year = datetime.datetime.now().year
    month = datetime.datetime.now().month
    day = datetime.datetime.now().day
    # start from second 1 so the job is never on the exact minute boundary
    second = random.randint(1, 59)
    log.info("站点自动签到时间 即将在%s-%s-%s,%s:%s:%s签到" % (
        str(year), str(month), str(day), str(hour), str(minute), str(second)))
    # FIX: datetime.datetime() only accepts hour 0-23 and minute 0-59; the
    # previous bounds (hour > 24 / minute > 60) let 24:xx and xx:60 through
    # and crashed with ValueError when constructing the run date
    if hour < 0 or hour > 23:
        hour = -1
    if minute < 0 or minute > 59:
        minute = -1
    if hour < 0 or minute < 0:
        log.warn("站点自动签到时间 配置格式错误:不启动任务")
        return
    self.SCHEDULER.add_job(SiteSignin().signin,
                           "date",
                           run_date=datetime.datetime(year, month, day, hour, minute, second))
+
+
def run_scheduler():
    """
    Launch all periodic services; failures are logged, never raised.
    """
    try:
        service = Scheduler()
        service.run_service()
    except Exception as err:
        log.error("启动定时服务失败:%s - %s" % (str(err), traceback.format_exc()))
+
+
def stop_scheduler():
    """
    Shut down the periodic services; failures are logged at debug level.
    """
    try:
        service = Scheduler()
        service.stop_service()
    except Exception as err:
        log.debug("停止定时服务失败:%s" % str(err))
+
+
def restart_scheduler():
    """
    Restart the scheduled services by stopping and starting them again.
    """
    stop_scheduler()
    run_scheduler()
diff --git a/app/searcher.py b/app/searcher.py
new file mode 100644
index 0000000..f4a828c
--- /dev/null
+++ b/app/searcher.py
@@ -0,0 +1,181 @@
+import log
+from app.helper import DbHelper
+from app.indexer import Indexer
+from config import Config
+from app.message import Message
+from app.downloader import Downloader
+from app.media import Media
+from app.helper import ProgressHelper
+from app.utils.types import SearchType
+
+
class Searcher:
    """
    Drives indexer searches and hands matched resources to the downloader.
    """
    downloader = None
    media = None
    message = None
    indexer = None
    progress = None
    dbhelper = None

    # whether to auto-download after a remote (WeChat/Telegram) search
    _search_auto = True

    def __init__(self):
        self.downloader = Downloader()
        self.media = Media()
        self.message = Message()
        self.progress = ProgressHelper()
        self.dbhelper = DbHelper()
        self.indexer = Indexer()
        self.init_config()

    def init_config(self):
        # read the auto-download switch from the [pt] config section
        self._search_auto = Config().get_config("pt").get('search_auto', True)
+
def search_medias(self,
                  key_word: [str, list],
                  filter_args: dict,
                  match_media=None,
                  in_from: SearchType = None):
    """
    Search media through the indexers by keyword.
    :param key_word: search keyword(s); must not be empty
    :param filter_args: filter conditions
    :param match_media: media info to match the results against
    :param in_from: search source/channel
    :return: list of matched resources; [] when no keyword or no indexer
    """
    if not key_word:
        return []
    if not self.indexer:
        return []
    return self.indexer.search_by_keyword(key_word=key_word,
                                          filter_args=filter_args,
                                          match_media=match_media,
                                          in_from=in_from)
+
def search_one_media(self, media_info,
                     in_from: SearchType,
                     no_exists: dict,
                     sites: list = None,
                     filters: dict = None,
                     user_name=None):
    """
    Search for and download exactly one resource; used for precise
    search-and-download triggered from WeChat, Telegram or douban.
    :param media_info: recognized media info
    :param in_from: search source/channel
    :param no_exists: missing-episode info
    :param sites: sites to search
    :param filters: filter conditions; no filtering when empty
    :param user_name: requesting user name
    :return: the media info when everything requested was downloaded, else None;
             the season/episode info still missing after downloading (for TV);
             the number of search results;
             the number of downloads (None when auto-download is disabled)
    """
    if not media_info:
        return None, {}, 0, 0
    # reset the progress counter
    self.progress.start('search')
    # seasons to search
    if media_info.begin_season is None:
        search_season = None
    else:
        search_season = media_info.get_season_list()
    # episodes to search
    search_episode = media_info.get_episode_list()
    if search_episode and not search_season:
        search_season = [1]
    # filter conditions
    filter_args = {"season": search_season,
                   "episode": search_episode,
                   "year": media_info.year,
                   "type": media_info.type,
                   "site": sites,
                   "seeders": True}
    if filters:
        filter_args.update(filters)
    if media_info.keyword:
        # search with the raw keyword directly
        first_search_name = media_info.keyword
        second_search_name = None
    else:
        # chinese name
        if media_info.cn_name:
            search_cn_name = media_info.cn_name
        else:
            search_cn_name = media_info.title
        # english name
        search_en_name = None
        if media_info.en_name:
            search_en_name = media_info.en_name
        else:
            if media_info.original_language == "en":
                search_en_name = media_info.original_title
            else:
                # use a standalone Media object to avoid changing the TMDB language
                en_title = Media().get_tmdb_en_title(media_info)
                if en_title:
                    search_en_name = en_title
        # names for the two search passes
        second_search_name = None
        if Config().get_config("laboratory").get("search_en_title"):
            if search_en_name:
                first_search_name = search_en_name
                second_search_name = search_cn_name
            else:
                first_search_name = search_cn_name
        else:
            first_search_name = search_cn_name
            if search_en_name:
                second_search_name = search_en_name
    # start searching
    log.info("【Searcher】开始检索 %s ..." % first_search_name)
    media_list = self.search_medias(key_word=first_search_name,
                                    filter_args=filter_args,
                                    match_media=media_info,
                                    in_from=in_from)
    # retry with the alternative name
    if len(media_list) == 0 \
            and second_search_name \
            and second_search_name != first_search_name:
        log.info("【Searcher】%s 未检索到资源,尝试通过 %s 重新检索 ..." % (first_search_name, second_search_name))
        media_list = self.search_medias(key_word=second_search_name,
                                        filter_args=filter_args,
                                        match_media=media_info,
                                        in_from=in_from)

    if len(media_list) == 0:
        log.info("【Searcher】%s 未搜索到任何资源" % second_search_name)
        return None, no_exists, 0, 0
    else:
        if in_from in self.message.get_search_types():
            # persist the search records
            self.dbhelper.delete_all_search_torrents()
            # sort the results by title / resource priority / site / seeders
            media_list = sorted(media_list, key=lambda x: "%s%s%s%s" % (str(x.title).ljust(100, ' '),
                                                                        str(x.res_order).rjust(3, '0'),
                                                                        str(x.site_order).rjust(3, '0'),
                                                                        str(x.seeders).rjust(10, '0')),
                                reverse=True)
            # insert into the database
            self.dbhelper.insert_search_results(media_list)
        # return without downloading when auto-download is disabled
        if not self._search_auto:
            return None, no_exists, len(media_list), None
        # pick the best candidates and download
        download_items, left_medias = self.downloader.batch_download(in_from=in_from,
                                                                     media_list=media_list,
                                                                     need_tvs=no_exists,
                                                                     user_name=user_name)
        # report whether everything requested was downloaded
        if not download_items:
            log.info("【Searcher】%s 未下载到资源" % media_info.title)
            return None, left_medias, len(media_list), 0
        else:
            log.info("【Searcher】实际下载了 %s 个资源" % len(download_items))
            # something still missing -- not complete
            if left_medias:
                return None, left_medias, len(media_list), len(download_items)
            # everything downloaded
            else:
                return download_items[0], no_exists, len(media_list), len(download_items)
diff --git a/app/sites/__init__.py b/app/sites/__init__.py
new file mode 100644
index 0000000..8394b0a
--- /dev/null
+++ b/app/sites/__init__.py
@@ -0,0 +1,4 @@
+from app.sites.site_userinfo import SiteUserInfo
+from .sites import Sites
+from .site_cookie import SiteCookie
+from .site_signin import SiteSignin
diff --git a/app/sites/site_cookie.py b/app/sites/site_cookie.py
new file mode 100644
index 0000000..2b040eb
--- /dev/null
+++ b/app/sites/site_cookie.py
@@ -0,0 +1,302 @@
+import base64
+import time
+
+from lxml import etree
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as es
+from selenium.webdriver.support.wait import WebDriverWait
+
+import log
+from app.helper import ChromeHelper, ProgressHelper, DbHelper, OcrHelper, SiteHelper
+from app.sites.sites import Sites
+from app.conf import SiteConf
+from app.utils import StringUtils, RequestUtils, ExceptionUtils
+from app.utils.commons import singleton
+
+
+@singleton
+class SiteCookie(object):
+    progress = None  # ProgressHelper: pushes progress/captcha events to the frontend
+    sites = None  # Sites manager (site configuration)
+    ocrhelper = None  # OCR helper for automatic captcha recognition
+    dbhelpter = None  # DbHelper -- NOTE(review): attribute name is a typo of "dbhelper", kept for compatibility
+    captcha_code = {}  # captcha key -> captcha text entered by the user
+
+    def __init__(self):
+        self.init_config()
+
+    def init_config(self):
+        self.dbhelpter = DbHelper()
+        self.progress = ProgressHelper()
+        self.sites = Sites()
+        self.ocrhelper = OcrHelper()
+        self.captcha_code = {}
+
+    def set_code(self, code, value):
+        """
+        Store the captcha text entered by the user for the given captcha key.
+        """
+        self.captcha_code[code] = value
+
+    def get_code(self, code):
+        """
+        Return the captcha text stored for the given captcha key (None if absent).
+        """
+        return self.captcha_code.get(code)
+
+    def __get_site_cookie_ua(self,
+                             url,
+                             username,
+                             password,
+                             twostepcode=None,
+                             ocrflag=False):
+        """
+        Log a site in with an emulated browser and capture its cookie and User-Agent.
+        :param url: login page url
+        :param username: login username
+        :param password: login password
+        :param twostepcode: two-step verification code (optional)
+        :param ocrflag: OCR the captcha automatically when True
+        :return: (cookie, ua, message) - cookie/ua are None on failure
+        """
+        if not url or not username or not password:
+            return None, None, "参数错误"
+        # the shared Chrome instance also acts as the global lock
+        chrome = ChromeHelper()
+        if not chrome.get_status():
+            return None, None, "需要浏览器内核环境才能更新站点信息"
+        if not chrome.visit(url=url):
+            return None, None, "Chrome模拟访问失败"
+        # wait until the Cloudflare challenge (if any) is passed
+        cloudflare = chrome.pass_cloudflare()
+        if not cloudflare:
+            return None, None, "跳转站点失败,无法通过Cloudflare验证"
+        # page source of the login page
+        html_text = chrome.get_html()
+        if not html_text:
+            return None, None, "获取源码失败"
+        if SiteHelper.is_logged_in(html_text):
+            return chrome.get_cookies(), chrome.get_ua(), "已经登录过且Cookie未失效"
+        # locate the username input
+        html = etree.HTML(html_text)
+        username_xpath = None
+        for xpath in SiteConf.SITE_LOGIN_XPATH.get("username"):
+            if html.xpath(xpath):
+                username_xpath = xpath
+                break
+        if not username_xpath:
+            return None, None, "未找到用户名输入框"
+        # locate the password input
+        password_xpath = None
+        for xpath in SiteConf.SITE_LOGIN_XPATH.get("password"):
+            if html.xpath(xpath):
+                password_xpath = xpath
+                break
+        if not password_xpath:
+            return None, None, "未找到密码输入框"
+        # locate the optional two-step verification input
+        twostepcode_xpath = None
+        for xpath in SiteConf.SITE_LOGIN_XPATH.get("twostep"):
+            if html.xpath(xpath):
+                twostepcode_xpath = xpath
+                break
+        # locate the optional captcha input
+        captcha_xpath = None
+        for xpath in SiteConf.SITE_LOGIN_XPATH.get("captcha"):
+            if html.xpath(xpath):
+                captcha_xpath = xpath
+                break
+        # a captcha input requires a captcha image as well
+        captcha_img_url = None
+        if captcha_xpath:
+            for xpath in SiteConf.SITE_LOGIN_XPATH.get("captcha_img"):
+                if html.xpath(xpath):
+                    captcha_img_url = html.xpath(xpath)[0]
+                    break
+            if not captcha_img_url:
+                return None, None, "未找到验证码图片"
+        # locate the submit button
+        submit_xpath = None
+        for xpath in SiteConf.SITE_LOGIN_XPATH.get("submit"):
+            if html.xpath(xpath):
+                submit_xpath = xpath
+                break
+        if not submit_xpath:
+            return None, None, "未找到登录按钮"
+        # fill the form and click the submit button
+        try:
+            submit_obj = WebDriverWait(driver=chrome.browser,
+                                       timeout=6).until(es.element_to_be_clickable((By.XPATH,
+                                                                                    submit_xpath)))
+            if submit_obj:
+                # username
+                chrome.browser.find_element(By.XPATH, username_xpath).send_keys(username)
+                # password
+                chrome.browser.find_element(By.XPATH, password_xpath).send_keys(password)
+                # two-step verification code
+                if twostepcode and twostepcode_xpath:
+                    twostepcode_element = chrome.browser.find_element(By.XPATH, twostepcode_xpath)
+                    if twostepcode_element.is_displayed():
+                        twostepcode_element.send_keys(twostepcode)
+                # captcha handling
+                if captcha_xpath:
+                    captcha_element = chrome.browser.find_element(By.XPATH, captcha_xpath)
+                    if captcha_element.is_displayed():
+                        code_url = self.__get_captcha_url(url, captcha_img_url)
+                        if ocrflag:
+                            # recognise the captcha automatically via OCR
+                            captcha = self.get_captcha_text(chrome, code_url)
+                            if captcha:
+                                log.info("【Sites】验证码地址为:%s,识别结果:%s" % (code_url, captcha))
+                            else:
+                                return None, None, "验证码识别失败"
+                        else:
+                            # wait up to 30s for the user to answer the captcha in the UI
+                            captcha = None
+                            code_key = StringUtils.generate_random_str(5)
+                            for sec in range(30, 0, -1):
+                                if self.get_code(code_key):
+                                    # user answered
+                                    captcha = self.get_code(code_key)
+                                    log.info("【Sites】接收到验证码:%s" % captcha)
+                                    self.progress.update(ptype='sitecookie',
+                                                         text="接收到验证码:%s" % captcha)
+                                    break
+                                else:
+                                    # push the captcha image (base64) to the frontend
+                                    code_bin = self.get_captcha_base64(chrome, code_url)
+                                    if not code_bin:
+                                        return None, None, "获取验证码图片数据失败"
+                                    else:
+                                        code_bin = f"data:image/png;base64,{code_bin}"
+                                    # progress text carries the image and the reply key
+                                    self.progress.update(ptype='sitecookie',
+                                                         text=f"{code_bin}|{code_key}")
+                                time.sleep(1)
+                        if not captcha:
+                            return None, None, "验证码输入超时"
+                        # type the captcha into the form
+                        captcha_element.send_keys(captcha)
+                    else:
+                        # ignore hidden captcha inputs
+                        pass
+                # submit the login form
+                submit_obj.click()
+            else:
+                return None, None, "未找到登录按钮"
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            return None, None, "仿真登录失败:%s" % str(e)
+        # page source after the login attempt
+        html_text = chrome.get_html()
+        if not html_text:
+            return None, None, "获取源码失败"
+        if SiteHelper.is_logged_in(html_text):
+            return chrome.get_cookies(), chrome.get_ua(), ""
+        else:
+            html = etree.HTML(html_text)  # FIX: re-parse; "html" still held the pre-login page
+            error_xpath = None
+            for xpath in SiteConf.SITE_LOGIN_XPATH.get("error"):
+                if html.xpath(xpath):
+                    error_xpath = xpath
+                    break
+            if not error_xpath:
+                return None, None, "登录失败"
+            else:
+                error_msg = html.xpath(error_xpath)[0]
+                return None, None, error_msg
+
+    def get_captcha_text(self, chrome, code_url):
+        """
+        OCR the captcha image at code_url and return the recognised text ("" on failure).
+        """
+        code_b64 = self.get_captcha_base64(chrome=chrome,
+                                           image_url=code_url)
+        if not code_b64:
+            return ""
+        return self.ocrhelper.get_captcha_text(image_b64=code_b64)
+
+    @staticmethod
+    def __get_captcha_url(siteurl, imageurl):
+        """
+        Build the absolute captcha image URL from the site url and a (possibly relative) image url.
+        """
+        if not siteurl or not imageurl:
+            return ""
+        if imageurl.startswith("/"):
+            imageurl = imageurl[1:]  # drop one leading slash before joining
+        return "%s/%s" % (StringUtils.get_base_url(siteurl), imageurl)
+
+    def update_sites_cookie_ua(self,
+                               username,
+                               password,
+                               twostepcode=None,
+                               siteid=None,
+                               ocrflag=False):
+        """
+        Refresh cookie and User-Agent for all (or one) configured sites.
+        :param username: login username
+        :param password: login password
+        :param twostepcode: optional two-step verification code
+        :param siteid: restrict to a single site id when given
+        :param ocrflag: OCR the captcha automatically when True
+        :return: (retcode, messages) - retcode is 1 when any site failed
+        """
+        # resolve the site list; a single-id lookup yields one dict, wrap it
+        sites = self.sites.get_sites(siteid=siteid)
+        if siteid:
+            sites = [sites]
+        site_num = len(sites)
+        curr_num = 0
+        retcode = 0
+        messages = []
+        self.progress.start('sitecookie')
+        for site in sites:
+            name = site.get("name")
+            if not site.get("signurl") and not site.get("rssurl"):
+                log.info("【Sites】%s 未设置地址,跳过" % name)
+                continue
+            log.info("【Sites】开始更新 %s Cookie和User-Agent ..." % name)
+            self.progress.update(ptype='sitecookie',
+                                 text="开始更新 %s Cookie和User-Agent ..." % name)
+            # derive the login page from the site base url
+            base_url = StringUtils.get_base_url(site.get("signurl") or site.get("rssurl"))
+            grab_conf = self.sites.get_grapsite_conf(url=base_url)
+            login_path = grab_conf.get("LOGIN") or "login.php"
+            login_url = "%s/%s" % (base_url, login_path)
+            # emulate the login and harvest cookie / UA
+            cookie, ua, msg = self.__get_site_cookie_ua(url=login_url,
+                                                        username=username,
+                                                        password=password,
+                                                        twostepcode=twostepcode,
+                                                        ocrflag=ocrflag)
+            curr_num += 1
+            if not cookie:
+                log.error("【Sites】获取 %s 信息失败:%s" % (name, msg))
+                messages.append("%s %s" % (name, msg))
+                self.progress.update(ptype='sitecookie',
+                                     value=round(100 * (curr_num / site_num)),
+                                     text="%s %s" % (name, msg))
+                retcode = 1
+            else:
+                self.dbhelpter.update_site_cookie_ua(site.get("id"), cookie, ua)
+                log.info("【Sites】更新 %s 的Cookie和User-Agent成功" % name)
+                messages.append("%s %s" % (name, msg or "更新Cookie和User-Agent成功"))
+                self.progress.update(ptype='sitecookie',
+                                     value=round(100 * (curr_num / site_num)),
+                                     text="%s %s" % (name, msg or "更新Cookie和User-Agent成功"))
+        self.progress.end('sitecookie')
+        return retcode, messages
+
+    @staticmethod
+    def get_captcha_base64(chrome, image_url):
+        """
+        Download the captcha image with the browser's UA/cookies; return it base64-encoded ("" on failure).
+        """
+        if not image_url:
+            return ""
+        ret = RequestUtils(headers=chrome.get_ua(),
+                           cookies=chrome.get_cookies()).get_res(image_url)
+        if ret:
+            return base64.b64encode(ret.content).decode()
+        return ""
diff --git a/app/sites/site_signin.py b/app/sites/site_signin.py
new file mode 100644
index 0000000..127d88b
--- /dev/null
+++ b/app/sites/site_signin.py
@@ -0,0 +1,166 @@
+import re
+from multiprocessing.dummy import Pool as ThreadPool
+from threading import Lock
+
+from lxml import etree
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as es
+from selenium.webdriver.support.wait import WebDriverWait
+
+import log
+from app.conf import SiteConf
+from app.helper import ChromeHelper, SubmoduleHelper, DbHelper, SiteHelper
+from app.message import Message
+from app.sites.sites import Sites
+from app.utils import RequestUtils, ExceptionUtils, StringUtils
+from app.utils.commons import singleton
+from config import Config
+
+lock = Lock()
+
+
+@singleton
+class SiteSignin(object):
+    sites = None  # Sites manager (site configuration)
+    dbhelper = None  # database access helper
+    message = None  # notification sender
+
+    _MAX_CONCURRENCY = 10  # max sites signed in concurrently
+
+    def __init__(self):
+        # auto-discover site-specific signin handlers (classes exposing match())
+        self._site_schema = SubmoduleHelper.import_submodules('app.sites.sitesignin',
+                                                              filter_func=lambda _, obj: hasattr(obj, 'match'))
+        log.debug(f"【Sites】加载站点签到:{self._site_schema}")
+        self.init_config()
+
+    def init_config(self):
+        self.sites = Sites()
+        self.dbhelper = DbHelper()
+        self.message = Message()
+
+    def __build_class(self, url):
+        """Return the first signin handler class whose match() accepts url, else None."""
+        for handler in self._site_schema:
+            try:
+                if handler.match(url):
+                    return handler
+            except Exception as err:
+                ExceptionUtils.exception_traceback(err)
+
+    def signin(self):
+        """
+        Sign in to all signin-enabled sites concurrently.
+        """
+        sites = self.sites.get_sites(signin=True)
+        if not sites:
+            return
+        with ThreadPool(min(len(sites), self._MAX_CONCURRENCY)) as p:
+            status = p.map(self.__signin_site, sites)
+        if status:
+            self.message.send_site_signin_message(status)
+
+    def __signin_site(self, site_info):
+        """
+        Sign in to one site, preferring a site-specific handler when one matches.
+        """
+        site_module = self.__build_class(site_info.get("signurl"))
+        if site_module:
+            return site_module.signin(site_info)
+        else:
+            return self.__signin_base(site_info)
+
+    @staticmethod
+    def __signin_base(site_info):
+        """
+        Generic signin: click the checkin control in an emulated browser, or fall back to a plain cookie request.
+        :param site_info: site configuration dict
+        :return: human-readable signin result message
+        """
+        if not site_info:
+            return ""
+        site = site_info.get("name")
+        try:
+            site_url = site_info.get("signurl")
+            site_cookie = site_info.get("cookie")
+            ua = site_info.get("ua")
+            if not site_url or not site_cookie:
+                log.warn("【Sites】未配置 %s 的站点地址或Cookie,无法签到" % str(site))
+                return ""
+            chrome = ChromeHelper()
+            if site_info.get("chrome") and chrome.get_status():
+                # browser-emulation path: open the site home page
+                log.info("【Sites】开始站点仿真签到:%s" % site)
+                home_url = StringUtils.get_base_url(site_url)
+                if not chrome.visit(url=home_url, ua=ua, cookie=site_cookie):
+                    log.warn("【Sites】%s 无法打开网站" % site)
+                    return f"【{site}】无法打开网站!"
+                # wait for the Cloudflare challenge (if any) to pass
+                cloudflare = chrome.pass_cloudflare()
+                if not cloudflare:
+                    log.warn("【Sites】%s 跳转站点失败" % site)
+                    return f"【{site}】跳转站点失败!"
+                # judge the signin state from the page source
+                html_text = chrome.get_html()
+                if not html_text:
+                    log.warn("【Sites】%s 获取站点源码失败" % site)
+                    return f"【{site}】获取站点源码失败!"
+                # look for a checkin control
+                html = etree.HTML(html_text)
+                xpath_str = None
+                for xpath in SiteConf.SITE_CHECKIN_XPATH:
+                    if html.xpath(xpath):
+                        xpath_str = xpath
+                        break
+                if re.search(r'已签|签到已得', html_text, re.IGNORECASE) \
+                        and not xpath_str:
+                    log.info("【Sites】%s 今日已签到" % site)
+                    return f"【{site}】今日已签到"
+                if not xpath_str:
+                    if SiteHelper.is_logged_in(html_text):
+                        log.warn("【Sites】%s 未找到签到按钮,模拟登录成功" % site)
+                        return f"【{site}】模拟登录成功"
+                    else:
+                        log.info("【Sites】%s 未找到签到按钮,且模拟登录失败" % site)
+                        return f"【{site}】模拟登录失败!"
+                # click the checkin control
+                try:
+                    checkin_obj = WebDriverWait(driver=chrome.browser, timeout=6).until(
+                        es.element_to_be_clickable((By.XPATH, xpath_str)))
+                    if checkin_obj:
+                        checkin_obj.click()
+                        log.info("【Sites】%s 仿真签到成功" % site)
+                        return f"【{site}】仿真签到成功"
+                except Exception as e:
+                    ExceptionUtils.exception_traceback(e)
+                    log.warn("【Sites】%s 仿真签到失败:%s" % (site, str(e)))
+                    return f"【{site}】签到失败!"
+            # plain-request path: hit the signin/login url with the stored cookie
+            else:
+                if site_url.find("attendance.php") != -1:
+                    checkin_text = "签到"
+                else:
+                    checkin_text = "模拟登录"
+                log.info(f"【Sites】开始站点{checkin_text}:{site}")
+                # request the signin url
+                res = RequestUtils(cookies=site_cookie,
+                                   headers=ua,
+                                   proxies=Config().get_proxies() if site_info.get("proxy") else None
+                                   ).get_res(url=site_url)
+                if res and res.status_code == 200:
+                    if not SiteHelper.is_logged_in(res.text):
+                        log.warn(f"【Sites】{site} {checkin_text}失败,请检查Cookie")
+                        return f"【{site}】{checkin_text}失败,请检查Cookie!"
+                    else:
+                        log.info(f"【Sites】{site} {checkin_text}成功")
+                        return f"【{site}】{checkin_text}成功"
+                elif res is not None:
+                    log.warn(f"【Sites】{site} {checkin_text}失败,状态码:{res.status_code}")
+                    return f"【{site}】{checkin_text}失败,状态码:{res.status_code}!"
+                else:
+                    log.warn(f"【Sites】{site} {checkin_text}失败,无法打开网站")
+                    return f"【{site}】{checkin_text}失败,无法打开网站!"
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            log.warn("【Sites】%s 签到出错:%s" % (site, str(e)))
+            return f"{site} 签到出错:{str(e)}!"
diff --git a/app/sites/site_userinfo.py b/app/sites/site_userinfo.py
new file mode 100644
index 0000000..7411b10
--- /dev/null
+++ b/app/sites/site_userinfo.py
@@ -0,0 +1,366 @@
+import json
+from datetime import datetime
+from multiprocessing.dummy import Pool as ThreadPool
+from threading import Lock
+
+import requests
+
+import log
+from app.helper import ChromeHelper, SubmoduleHelper, DbHelper
+from app.message import Message
+from app.sites.sites import Sites
+from app.utils import RequestUtils, ExceptionUtils
+from app.utils.commons import singleton
+from config import Config
+
+lock = Lock()
+
+
+@singleton
+class SiteUserInfo(object):
+
+    sites = None  # Sites manager (site configuration)
+    dbhelper = None  # database access helper
+    message = None  # notification sender
+
+    _MAX_CONCURRENCY = 10  # max sites refreshed concurrently
+    _last_update_time = None  # time of the last statistics refresh
+    _sites_data = {}  # cached per-site statistics
+
+    def __init__(self):
+
+        # auto-discover site parser classes (classes exposing a schema attribute)
+        self._site_schema = SubmoduleHelper.import_submodules('app.sites.siteuserinfo',
+                                                              filter_func=lambda _, obj: hasattr(obj, 'schema'))
+        self._site_schema.sort(key=lambda x: x.order)
+        log.debug(f"【Sites】加载站点解析:{self._site_schema}")
+        self.init_config()
+
+    def init_config(self):
+        self.sites = Sites()
+        self.dbhelper = DbHelper()
+        self.message = Message()
+        # time of the last refresh
+        self._last_update_time = None
+        # cached site statistics
+        self._sites_data = {}
+
+    def __build_class(self, html_text):
+        for site_schema in self._site_schema:
+            try:
+                if site_schema.match(html_text):
+                    return site_schema
+            except Exception as e:
+                ExceptionUtils.exception_traceback(e)
+        return None
+
+ def build(self, url, site_name, site_cookie=None, ua=None, emulate=None, proxy=False):
+ if not site_cookie:
+ return None
+ session = requests.Session()
+ log.debug(f"【Sites】站点 {site_name} url={url} site_cookie={site_cookie} ua={ua}")
+ # 检测环境,有浏览器内核的优先使用仿真签到
+ chrome = ChromeHelper()
+ if emulate and chrome.get_status():
+ if not chrome.visit(url=url, ua=ua, cookie=site_cookie):
+ log.error("【Sites】%s 无法打开网站" % site_name)
+ return None
+ # 循环检测是否过cf
+ cloudflare = chrome.pass_cloudflare()
+ if not cloudflare:
+ log.error("【Sites】%s 跳转站点失败" % site_name)
+ return None
+ # 判断是否已签到
+ html_text = chrome.get_html()
+ else:
+ proxies = Config().get_proxies() if proxy else None
+ res = RequestUtils(cookies=site_cookie,
+ session=session,
+ headers=ua,
+ proxies=proxies
+ ).get_res(url=url)
+ if res and res.status_code == 200:
+ if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
+ res.encoding = "UTF-8"
+ else:
+ res.encoding = res.apparent_encoding
+ html_text = res.text
+ # 第一次登录反爬
+ if html_text.find("title") == -1:
+ i = html_text.find("window.location")
+ if i == -1:
+ return None
+ tmp_url = url + html_text[i:html_text.find(";")] \
+ .replace("\"", "").replace("+", "").replace(" ", "").replace("window.location=", "")
+ res = RequestUtils(cookies=site_cookie,
+ session=session,
+ headers=ua,
+ proxies=proxies
+ ).get_res(url=tmp_url)
+ if res and res.status_code == 200:
+ if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
+ res.encoding = "UTF-8"
+ else:
+ res.encoding = res.apparent_encoding
+ html_text = res.text
+ if not html_text:
+ return None
+ else:
+ log.error("【Sites】站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code))
+ return None
+
+ # 兼容假首页情况,假首页通常没有 0:
+ for head, date, content in site_user_info.message_unread_contents:
+ msg_title = f"【站点 {site_user_info.site_name} 消息】"
+ msg_text = f"时间:{date}\n标题:{head}\n内容:\n{content}"
+ self.message.send_site_message(title=msg_title, text=msg_text)
+ else:
+ self.message.send_site_message(
+ title=f"站点 {site_user_info.site_name} 收到 {site_user_info.message_unread} 条新消息,请登陆查看")
+
+    def refresh_pt_date_now(self):
+        """
+        Force an immediate refresh of all site statistics.
+        """
+        self.__refresh_all_site_data(force=True)
+
+    def get_pt_date(self, specify_sites=None, force=False):
+        """
+        Return per-site upload/download statistics, refreshing first when stale/forced.
+        """
+        self.__refresh_all_site_data(force=force, specify_sites=specify_sites)
+        return self._sites_data
+
+    def __refresh_all_site_data(self, force=False, specify_sites=None):
+        """
+        Refresh upload/download data for sites concurrently; throttled to every 6 hours unless forced.
+        """
+        if not self.sites.get_sites():
+            return
+
+        with lock:
+
+            if not force \
+                    and not specify_sites \
+                    and self._last_update_time \
+                    and (datetime.now() - self._last_update_time).seconds < 6 * 3600:
+                return
+
+            if specify_sites \
+                    and not isinstance(specify_sites, list):
+                specify_sites = [specify_sites]
+
+            # no sites specified: refresh every statistics-enabled site
+            if not specify_sites:
+                refresh_sites = self.sites.get_sites(statistic=True)
+            else:
+                refresh_sites = [site for site in self.sites.get_sites(statistic=True) if
+                                 site.get("name") in specify_sites]
+
+            if not refresh_sites:
+                return
+
+            # concurrent refresh
+            with ThreadPool(min(len(refresh_sites), self._MAX_CONCURRENCY)) as p:
+                site_user_infos = p.map(self.__refresh_site_data, refresh_sites)
+                site_user_infos = [info for info in site_user_infos if info]
+
+                # record history rows
+                self.dbhelper.insert_site_statistics_history(site_user_infos)
+                # live per-user statistics
+                self.dbhelper.update_site_user_statistics(site_user_infos)
+                # site favicons
+                self.dbhelper.update_site_favicon(site_user_infos)
+                # live seeding info
+                self.dbhelper.update_site_seed_info(site_user_infos)
+                # reload favicons into memory
+                self.sites.init_favicons()
+
+                # remember the refresh time for throttling
+                self._last_update_time = datetime.now()
+
+    def get_pt_site_statistics_history(self, days=7):
+        """
+        Return recent upload/download statistics for all statistics-enabled sites.
+        """
+        site_urls = []
+        for site in self.sites.get_sites(statistic=True):
+            site_url = site.get("strict_url")
+            if site_url:
+                site_urls.append(site_url)
+
+        return self.dbhelper.get_site_statistics_recent_sites(days=days, strict_urls=site_urls)
+
+    def get_site_user_statistics(self, sites=None, encoding="RAW"):
+        """
+        Return per-site user statistics.
+        :param sites: site names to include (all statistics-enabled sites when None)
+        :param encoding: RAW (db rows) or DICT (list of plain dicts)
+        :return: statistics in the requested encoding
+        """
+        statistic_sites = self.sites.get_sites(statistic=True)
+        if not sites:
+            site_urls = [site.get("strict_url") for site in statistic_sites]
+        else:
+            site_urls = [site.get("strict_url") for site in statistic_sites
+                         if site.get("name") in sites]
+
+        raw_statistics = self.dbhelper.get_site_user_statistics(strict_urls=site_urls)
+        if encoding == "RAW":
+            return raw_statistics
+
+        return self.__todict(raw_statistics)
+
+    def get_pt_site_activity_history(self, site, days=365 * 2):
+        """
+        Return upload/download/seeding history for one site.
+        :param site: site name
+        :param days: maximum number of days to fetch
+        :return: header row plus [timestamp_ms, upload, download, bonus, seeding, seeding_size] rows
+        """
+        site_activities = [["time", "upload", "download", "bonus", "seeding", "seeding_size"]]
+        sql_site_activities = self.dbhelper.get_site_statistics_history(site=site, days=days)
+        for sql_site_activity in sql_site_activities:
+            timestamp = datetime.strptime(sql_site_activity.DATE, '%Y-%m-%d').timestamp() * 1000
+            site_activities.append(
+                [timestamp,
+                 sql_site_activity.UPLOAD,
+                 sql_site_activity.DOWNLOAD,
+                 sql_site_activity.BONUS,
+                 sql_site_activity.SEEDING,
+                 sql_site_activity.SEEDING_SIZE])
+
+        return site_activities
+
+    def get_pt_site_seeding_info(self, site):
+        """
+        Return the seeding distribution for one site.
+        :param site: site name
+        :return: {"seeding_info": [[uploader_num, seeding_size], ...]}
+        """
+        site_seeding_info = {"seeding_info": []}
+        seeding_info = self.dbhelper.get_site_seeding_info(site=site)
+        if not seeding_info:
+            return site_seeding_info
+
+        site_seeding_info["seeding_info"] = json.loads(seeding_info[0])
+        return site_seeding_info
+
+    @staticmethod
+    def __todict(raw_statistics):
+        statistics = []  # db rows converted to plain dicts for JSON serialisation
+        for site in raw_statistics:
+            statistics.append({"site": site.SITE,
+                               "username": site.USERNAME,
+                               "user_level": site.USER_LEVEL,
+                               "join_at": site.JOIN_AT,
+                               "update_at": site.UPDATE_AT,
+                               "upload": site.UPLOAD,
+                               "download": site.DOWNLOAD,
+                               "ratio": site.RATIO,
+                               "seeding": site.SEEDING,
+                               "leeching": site.LEECHING,
+                               "seeding_size": site.SEEDING_SIZE,
+                               "bonus": site.BONUS,
+                               "url": site.URL,
+                               "msg_unread": site.MSG_UNREAD
+                               })
+        return statistics
diff --git a/app/sites/sites.py b/app/sites/sites.py
new file mode 100644
index 0000000..676d134
--- /dev/null
+++ b/app/sites/sites.py
@@ -0,0 +1,422 @@
+import json
+import random
+import time
+from datetime import datetime
+from functools import lru_cache
+
+from lxml import etree
+
+from app.conf import SiteConf
+from app.helper import ChromeHelper, SiteHelper, DbHelper
+from app.message import Message
+from app.utils import RequestUtils, StringUtils, ExceptionUtils
+from app.utils.commons import singleton
+from config import Config
+
+
+@singleton
+class Sites:
+    message = None
+    dbhelper = None
+
+    _sites = []
+    _siteByIds = {}
+    _siteByUrls = {}
+    _site_favicons = {}
+    _rss_sites = []
+    _brush_sites = []
+    _statistic_sites = []
+    _signin_sites = []
+
+    _MAX_CONCURRENCY = 10
+
+    def __init__(self):
+        self.init_config()
+
+    def init_config(self):
+        self.dbhelper = DbHelper()
+        self.message = Message()
+        # raw site rows
+        self._sites = []
+        # sites keyed by id
+        self._siteByIds = {}
+        # sites keyed by url domain
+        self._siteByUrls = {}
+        # sites with RSS subscription enabled
+        self._rss_sites = []
+        # sites with brushing enabled
+        self._brush_sites = []
+        # sites with statistics enabled
+        self._statistic_sites = []
+        # sites with signin enabled
+        self._signin_sites = []
+        # site favicons
+        self.init_favicons()
+        # site rows from the database
+        self._sites = self.dbhelper.get_config_site()
+        for site in self._sites:
+            # extra attributes stored as JSON in NOTE
+            site_note = self.__get_site_note_items(site.NOTE)
+            # site uses: Q=signin, D=subscription, S=brush, T=statistics
+            site_rssurl = site.RSSURL
+            site_signurl = site.SIGNURL
+            site_cookie = site.COOKIE
+            site_uses = site.INCLUDE or ''
+            uses = []
+            if site_uses:
+                signin_enable = True if "Q" in site_uses and site_signurl and site_cookie else False
+                rss_enable = True if "D" in site_uses and site_rssurl else False
+                brush_enable = True if "S" in site_uses and site_rssurl and site_cookie else False
+                statistic_enable = True if "T" in site_uses and (site_rssurl or site_signurl) and site_cookie else False
+                uses.append("Q") if signin_enable else None
+                uses.append("D") if rss_enable else None
+                uses.append("S") if brush_enable else None
+                uses.append("T") if statistic_enable else None
+            else:
+                signin_enable = False
+                rss_enable = False
+                brush_enable = False
+                statistic_enable = False
+            site_info = {
+                "id": site.ID,
+                "name": site.NAME,
+                "pri": site.PRI or 0,
+                "rssurl": site_rssurl,
+                "signurl": site_signurl,
+                "cookie": site_cookie,
+                "rule": site_note.get("rule"),
+                "download_setting": site_note.get("download_setting"),
+                "signin_enable": signin_enable,
+                "rss_enable": rss_enable,
+                "brush_enable": brush_enable,
+                "statistic_enable": statistic_enable,
+                "uses": uses,
+                "ua": site_note.get("ua"),
+                "parse": True if site_note.get("parse") == "Y" else False,
+                "unread_msg_notify": True if site_note.get("message") == "Y" else False,
+                "chrome": True if site_note.get("chrome") == "Y" else False,
+                "proxy": True if site_note.get("proxy") == "Y" else False,
+                "subtitle": True if site_note.get("subtitle") == "Y" else False,
+                "strict_url": StringUtils.get_base_url(site_signurl or site_rssurl)
+            }
+            # index by id
+            self._siteByIds[site.ID] = site_info
+            # index by url domain
+            site_strict_url = StringUtils.get_url_domain(site.SIGNURL or site.RSSURL)
+            if site_strict_url:
+                self._siteByUrls[site_strict_url] = site_info
+
+    def init_favicons(self):
+        """
+        Load site favicons from the database into memory.
+        """
+        self._site_favicons = {site.SITE: site.FAVICON for site in self.dbhelper.get_site_favicons()}
+
+    def get_sites(self,
+                  siteid=None,
+                  siteurl=None,
+                  rss=False,
+                  brush=False,
+                  signin=False,
+                  statistic=False):
+        """
+        Look up site configuration: with siteid/siteurl a single site dict
+        ({} if unknown); otherwise a list filtered by the feature flags.
+        """
+        if siteid:
+            return self._siteByIds.get(int(siteid)) or {}
+        if siteurl:
+            return self._siteByUrls.get(StringUtils.get_url_domain(siteurl)) or {}
+
+        ret_sites = []
+        for site in self._siteByIds.values():
+            # each flag narrows the result to sites with that feature enabled
+            if rss and not site.get('rss_enable'):
+                continue
+            if brush and not site.get('brush_enable'):
+                continue
+            if signin and not site.get('signin_enable'):
+                continue
+            if statistic and not site.get('statistic_enable'):
+                continue
+            ret_sites.append(site)
+        return ret_sites
+
+    def get_site_dict(self,
+                      rss=False,
+                      brush=False,
+                      signin=False,
+                      statistic=False):
+        """
+        Return [{id, name}, ...] for sites matching the feature flags.
+        """
+        return [
+            {
+                "id": site.get("id"),
+                "name": site.get("name")
+            } for site in self.get_sites(
+                rss=rss,
+                brush=brush,
+                signin=signin,
+                statistic=statistic
+            )
+        ]
+
+    def get_site_names(self,
+                       rss=False,
+                       brush=False,
+                       signin=False,
+                       statistic=False):
+        """
+        Return the names of sites matching the feature flags.
+        """
+        return [
+            site.get("name") for site in self.get_sites(
+                rss=rss,
+                brush=brush,
+                signin=signin,
+                statistic=statistic
+            )
+        ]
+
+    def get_site_favicon(self, site_name=None):
+        """
+        Return one site's favicon, or the whole name->favicon dict when no name is given.
+        """
+        if site_name:
+            return self._site_favicons.get(site_name)
+        else:
+            return self._site_favicons
+
+    def get_site_download_setting(self, site_name=None):
+        """
+        Return the download setting of the named site (None when unknown).
+        """
+        if site_name:
+            for site in self._siteByIds.values():
+                if site.get("name") == site_name:
+                    return site.get("download_setting")
+        return None
+
+    def test_connection(self, site_id):
+        """
+        Test connectivity of a site.
+        :param site_id: site id
+        :return: (ok, message, elapsed milliseconds)
+        """
+        site_info = self.get_sites(siteid=site_id)
+        if not site_info:
+            return False, "站点不存在", 0
+        site_cookie = site_info.get("cookie")
+        if not site_cookie:
+            return False, "未配置站点Cookie", 0
+        ua = site_info.get("ua")
+        site_url = StringUtils.get_base_url(site_info.get("signurl") or site_info.get("rssurl"))
+        if not site_url:
+            return False, "未配置站点地址", 0
+        chrome = ChromeHelper()
+        if site_info.get("chrome") and chrome.get_status():
+            # timed browser-emulation check
+            start_time = datetime.now()
+            if not chrome.visit(url=site_url, ua=ua, cookie=site_cookie):
+                return False, "Chrome模拟访问失败", 0
+            # wait for the Cloudflare challenge (if any) to pass
+            cloudflare = chrome.pass_cloudflare()
+            seconds = int((datetime.now() - start_time).total_seconds() * 1000)  # FIX: .microseconds dropped whole seconds
+            if not cloudflare:
+                return False, "跳转站点失败", seconds
+            # judge the signed-in state from the page source
+            html_text = chrome.get_html()
+            if not html_text:
+                return False, "获取站点源码失败", 0
+            if SiteHelper.is_logged_in(html_text):
+                return True, "连接成功", seconds
+            else:
+                return False, "Cookie失效", seconds
+        else:
+            # timed plain-request check
+            start_time = datetime.now()
+            res = RequestUtils(cookies=site_cookie,
+                               headers=ua,
+                               proxies=Config().get_proxies() if site_info.get("proxy") else None
+                               ).get_res(url=site_url)
+            seconds = int((datetime.now() - start_time).total_seconds() * 1000)  # FIX: same elapsed-time bug
+            if res and res.status_code == 200:
+                if not SiteHelper.is_logged_in(res.text):
+                    return False, "Cookie失效", seconds
+                else:
+                    return True, "连接成功", seconds
+            elif res is not None:
+                return False, f"连接失败,状态码:{res.status_code}", seconds
+            else:
+                return False, "无法打开网站", seconds
+
+    def get_site_attr(self, url):
+        """
+        Merge private site configuration with public-site attributes for the given url.
+        """
+        site_info = self.get_sites(siteurl=url)
+        public_site = self.get_public_sites(url=url)
+        if public_site:
+            site_info.update(public_site)
+        return site_info
+
+    def parse_site_download_url(self, page_url, xpath):
+        """
+        Extract a torrent download link from a site detail page.
+        :param page_url: detail page url
+        :param xpath: xpath expression, optionally "xpath|cookie|ua|referer"
+        """
+        if not page_url or not xpath:
+            return ""
+        cookie, ua, referer, page_source = None, None, None, None
+        xpaths = xpath.split("|")
+        xpath = xpaths[0]
+        if len(xpaths) > 1:
+            cookie = xpaths[1]
+        if len(xpaths) > 2:
+            ua = xpaths[2]
+        if len(xpaths) > 3:
+            referer = xpaths[3]
+        try:
+            site_info = self.get_public_sites(url=page_url)
+            if not site_info.get("referer"):  # NOTE(review): non-public sites yield {} here, dropping any referer - confirm intended
+                referer = None
+            req = RequestUtils(
+                headers=ua,
+                cookies=cookie,
+                referer=referer,
+                proxies=Config().get_proxies() if site_info.get("proxy") else None
+            ).get_res(url=page_url)
+            if req and req.status_code == 200:
+                if req.text:
+                    page_source = req.text
+            # resolve the link via xpath
+            if page_source:
+                html = etree.HTML(page_source)
+                urls = html.xpath(xpath)
+                if urls:
+                    return str(urls[0])
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+        return None
+
+    @staticmethod
+    @lru_cache(maxsize=128)
+    def __get_site_page_html(url, cookie, ua, render=False, proxy=False):
+        chrome = ChromeHelper(headless=True)
+        if render and chrome.get_status():
+            # rendered fetch via headless Chrome
+            if chrome.visit(url=url, cookie=cookie, ua=ua):
+                # allow the page scripts time to finish
+                time.sleep(10)
+                return chrome.get_html()
+        else:
+            res = RequestUtils(
+                cookies=cookie,
+                headers=ua,
+                proxies=Config().get_proxies() if proxy else None
+            ).get_res(url=url)
+            if res and res.status_code == 200:
+                res.encoding = res.apparent_encoding
+                return res.text
+        return ""
+
+    @staticmethod
+    def get_grapsite_conf(url):
+        """
+        Return the RSS_SITE_GRAP_CONF entry matching the url ({} when none matches).
+        """
+        for k, v in SiteConf.RSS_SITE_GRAP_CONF.items():
+            if StringUtils.url_equal(k, url):
+                return v
+        return {}
+
+    def check_torrent_attr(self, torrent_url, cookie, ua=None, proxy=False):
+        """
+        Inspect a torrent detail page for promotion state and seeder count.
+        :param torrent_url: torrent detail page url
+        :param cookie: site cookie
+        :param ua: site user-agent
+        :param proxy: whether to use the configured proxy
+        :return: dict with free / 2xfree / hr / peer_count
+        """
+        ret_attr = {
+            "free": False,
+            "2xfree": False,
+            "hr": False,
+            "peer_count": 0
+        }
+        if not torrent_url:
+            return ret_attr
+        xpath_strs = self.get_grapsite_conf(torrent_url)
+        if not xpath_strs:
+            return ret_attr
+        html_text = self.__get_site_page_html(url=torrent_url,
+                                              cookie=cookie,
+                                              ua=ua,
+                                              render=xpath_strs.get('RENDER'),
+                                              proxy=proxy)
+        if not html_text:
+            return ret_attr
+        try:
+            html = etree.HTML(html_text)
+            # 2XFREE implies FREE as well
+            for xpath_str in xpath_strs.get("2XFREE"):
+                if html.xpath(xpath_str):
+                    ret_attr["free"] = True
+                    ret_attr["2xfree"] = True
+            # FREE
+            for xpath_str in xpath_strs.get("FREE"):
+                if html.xpath(xpath_str):
+                    ret_attr["free"] = True
+            # hit-and-run flag
+            for xpath_str in xpath_strs.get("HR"):
+                if html.xpath(xpath_str):
+                    ret_attr["hr"] = True
+            # current seeder count: keep only the digits of the matched text
+            for xpath_str in xpath_strs.get("PEER_COUNT"):
+                peer_count_dom = html.xpath(xpath_str)
+                if peer_count_dom:
+                    peer_count_str = ''.join(peer_count_dom[0].itertext())
+                    peer_count_digit_str = ""
+                    for m in peer_count_str:
+                        if m.isdigit():
+                            peer_count_digit_str = peer_count_digit_str + m
+                    ret_attr["peer_count"] = int(peer_count_digit_str) if len(peer_count_digit_str) > 0 else 0
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+        # random sleep before returning to avoid hammering the site
+        time.sleep(round(random.uniform(1, 5), 1))
+        return ret_attr
+
+    @staticmethod
+    def is_public_site(url):
+        """
+        Return True when the url belongs to a known public BT site.
+        """
+        _, netloc = StringUtils.get_url_netloc(url)
+        if netloc in SiteConf.PUBLIC_TORRENT_SITES.keys():
+            return True
+        return False
+
+    @staticmethod
+    def get_public_sites(url=None):
+        """
+        Return one public site's config dict for url ({} if unknown), or all (name, conf) items when url is None.
+        """
+        if url:
+            _, netloc = StringUtils.get_url_netloc(url)
+            return SiteConf.PUBLIC_TORRENT_SITES.get(netloc) or {}
+        else:
+            return SiteConf.PUBLIC_TORRENT_SITES.items()
+
+    @staticmethod
+    def __get_site_note_items(note):
+        """Parse the site NOTE JSON blob into a dict; {} on empty or invalid JSON."""
+        if not note:
+            return {}
+        try:
+            return json.loads(note)
+        except (ValueError, TypeError):
+            return {}
diff --git a/app/sites/sitesignin/_base.py b/app/sites/sitesignin/_base.py
new file mode 100644
index 0000000..30c4eaf
--- /dev/null
+++ b/app/sites/sitesignin/_base.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+from abc import ABCMeta, abstractmethod
+
+from app.utils import StringUtils
+
+
+class _ISiteSigninHandler(metaclass=ABCMeta):
+    """
+    Base class for site sign-in implementations.  Every site sign-in class
+    must inherit from this class and implement the match and signin methods.
+    Implementations placed under the sitesignin directory are loaded
+    automatically.
+    """
+    # The site URL this handler matches; every implementation must set this
+    # to its own site's URL
+    site_url = ""
+
+    @abstractmethod
+    def match(self, url):
+        """
+        Decide from the site URL whether this sign-in class handles the site.
+        The default implementation is sufficient in most cases.
+        :param url: site URL
+        :return: whether it matches; on a match this class's signin method is called
+        """
+        return True if StringUtils.url_equal(url, self.site_url) else False
+
+    @abstractmethod
+    def signin(self, site_info: dict):
+        """
+        Perform the sign-in operation.
+        :param site_info: site info dict containing the site URL, cookie, UA, etc.
+        :return: sign-in result message
+        """
+        pass
diff --git a/app/sites/siteuserinfo/__init__.py b/app/sites/siteuserinfo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/sites/siteuserinfo/_base.py b/app/sites/siteuserinfo/_base.py
new file mode 100644
index 0000000..fc5695a
--- /dev/null
+++ b/app/sites/siteuserinfo/_base.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+import base64
+import json
+import re
+from abc import ABCMeta, abstractmethod
+from urllib.parse import urljoin, urlsplit
+
+import requests
+from lxml import etree
+
+import log
+from app.helper import SiteHelper
+from app.utils import RequestUtils
+from app.utils.types import SiteSchema
+
+SITE_BASE_ORDER = 1000
+
+
+class _ISiteUserInfo(metaclass=ABCMeta):
+ # 站点模版
+ schema = SiteSchema.NexusPhp
+ # 站点解析时判断顺序,值越小越先解析
+ order = SITE_BASE_ORDER
+
+ def __init__(self, site_name, url, site_cookie, index_html, session=None, ua=None):
+ super().__init__()
+ # 站点信息
+ self.site_name = None
+ self.site_url = None
+ self.site_favicon = None
+ # 用户信息
+ self.username = None
+ self.userid = None
+ # 未读消息
+ self.message_unread = 0
+ self.message_unread_contents = []
+
+ # 流量信息
+ self.upload = 0
+ self.download = 0
+ self.ratio = 0
+
+ # 种子信息
+ self.seeding = 0
+ self.leeching = 0
+ self.uploaded = 0
+ self.completed = 0
+ self.incomplete = 0
+ self.seeding_size = 0
+ self.leeching_size = 0
+ self.uploaded_size = 0
+ self.completed_size = 0
+ self.incomplete_size = 0
+ # 做种人数, 种子大小
+ self.seeding_info = []
+
+ # 用户详细信息
+ self.user_level = None
+ self.join_at = None
+ self.bonus = 0.0
+
+ # 错误信息
+ self.err_msg = None
+ # 内部数据
+ self._base_url = None
+ self._site_cookie = None
+ self._index_html = None
+ self._addition_headers = None
+
+ # 站点页面
+ self._brief_page = "index.php"
+ self._user_detail_page = "userdetails.php?id="
+ self._user_traffic_page = "index.php"
+ self._torrent_seeding_page = "getusertorrentlistajax.php?userid="
+ self._user_mail_unread_page = "messages.php?action=viewmailbox&box=1&unread=yes"
+ self._sys_mail_unread_page = "messages.php?action=viewmailbox&box=-2&unread=yes"
+ self._torrent_seeding_params = None
+ self._torrent_seeding_headers = None
+
+ split_url = urlsplit(url)
+ self.site_name = site_name
+ self.site_url = url
+ self._base_url = f"{split_url.scheme}://{split_url.netloc}"
+ self._favicon_url = urljoin(self._base_url, "favicon.ico")
+ self.site_favicon = ""
+ self._site_cookie = site_cookie
+ self._index_html = index_html
+ self._session = session if session else requests.Session()
+ self._ua = ua
+
+ def site_schema(self):
+ """
+ 站点解析模型
+ :return: 站点解析模型
+ """
+ return self.schema
+
+ @classmethod
+ def match(cls, html_text):
+ """
+ 是否匹配当前解析模型
+ :param html_text: 站点首页html
+ :return: 是否匹配
+ """
+ return False
+
+ def parse(self):
+ """
+ 解析站点信息
+ :return:
+ """
+ self._parse_favicon(self._index_html)
+ if not self._parse_logged_in(self._index_html):
+ return
+
+ self._parse_site_page(self._index_html)
+ self._parse_user_base_info(self._index_html)
+ self._pase_unread_msgs()
+ if self._user_traffic_page:
+ self._parse_user_traffic_info(self._get_page_content(urljoin(self._base_url, self._user_traffic_page)))
+ if self._user_detail_page:
+ self._parse_user_detail_info(self._get_page_content(urljoin(self._base_url, self._user_detail_page)))
+
+ self._parse_seeding_pages()
+ self.seeding_info = json.dumps(self.seeding_info)
+
+ def _pase_unread_msgs(self):
+ """
+ 解析所有未读消息标题和内容
+ :return:
+ """
+ unread_msg_links = []
+ if self.message_unread > 0:
+ links = {self._user_mail_unread_page, self._sys_mail_unread_page}
+ for link in links:
+ if not link:
+ continue
+
+ msg_links = []
+ next_page = self._parse_message_unread_links(
+ self._get_page_content(urljoin(self._base_url, link)), msg_links)
+ while next_page:
+ next_page = self._parse_message_unread_links(
+ self._get_page_content(urljoin(self._base_url, next_page)), msg_links)
+
+ unread_msg_links.extend(msg_links)
+
+ for msg_link in unread_msg_links:
+ print(msg_link)
+ log.debug(f"【Sites】{self.site_name} 信息链接 {msg_link}")
+ head, date, content = self._parse_message_content(self._get_page_content(urljoin(self._base_url, msg_link)))
+ log.debug(f"【Sites】{self.site_name} 标题 {head} 时间 {date} 内容 {content}")
+ self.message_unread_contents.append((head, date, content))
+
+ def _parse_seeding_pages(self):
+ seeding_pages = []
+ if self._torrent_seeding_page:
+ if isinstance(self._torrent_seeding_page, list):
+ seeding_pages.extend(self._torrent_seeding_page)
+ else:
+ seeding_pages.append(self._torrent_seeding_page)
+
+ for seeding_page in seeding_pages:
+ # 第一页
+ next_page = self._parse_user_torrent_seeding_info(
+ self._get_page_content(urljoin(self._base_url, seeding_page),
+ self._torrent_seeding_params,
+ self._torrent_seeding_headers))
+
+ # 其他页处理
+ while next_page:
+ next_page = self._parse_user_torrent_seeding_info(
+ self._get_page_content(urljoin(urljoin(self._base_url, seeding_page), next_page),
+ self._torrent_seeding_params,
+ self._torrent_seeding_headers),
+ multi_page=True)
+
+ @staticmethod
+ def _prepare_html_text(html_text):
+ """
+ 处理掉HTML中的干扰部分
+ """
+ return re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_text))
+
+ @abstractmethod
+ def _parse_message_unread_links(self, html_text, msg_links):
+ """
+ 获取未阅读消息链接
+ :param html_text:
+ :return:
+ """
+ pass
+
+ def _parse_favicon(self, html_text):
+ """
+ 解析站点favicon,返回base64 fav图标
+ :param html_text:
+ :return:
+ """
+ html = etree.HTML(html_text)
+ if html:
+ fav_link = html.xpath('//head/link[contains(@rel, "icon")]/@href')
+ if fav_link:
+ self._favicon_url = urljoin(self._base_url, fav_link[0])
+
+ res = RequestUtils(cookies=self._site_cookie, session=self._session, timeout=60, headers=self._ua).get_res(
+ url=self._favicon_url)
+ if res:
+ self.site_favicon = base64.b64encode(res.content).decode()
+
+ def _get_page_content(self, url, params=None, headers=None):
+ """
+ :param url: 网页地址
+ :param params: post参数
+ :param headers: 额外的请求头
+ :return:
+ """
+ req_headers = None
+ if self._ua or headers or self._addition_headers:
+ req_headers = {}
+ if headers:
+ req_headers.update(headers)
+
+ if isinstance(self._ua, str):
+ req_headers.update({
+ "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
+ "User-Agent": f"{self._ua}"
+ })
+ else:
+ req_headers.update(self._ua)
+
+ if self._addition_headers:
+ req_headers.update(self._addition_headers)
+
+ if params:
+ res = RequestUtils(cookies=self._site_cookie, session=self._session, timeout=60,
+ headers=req_headers).post_res(
+ url=url, params=params)
+ else:
+ res = RequestUtils(cookies=self._site_cookie, session=self._session, timeout=60,
+ headers=req_headers).get_res(
+ url=url)
+ if res is not None and res.status_code in (200, 500):
+ if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
+ res.encoding = "UTF-8"
+ else:
+ res.encoding = res.apparent_encoding
+ return res.text
+
+ return ""
+
+ @abstractmethod
+ def _parse_site_page(self, html_text):
+ """
+ 解析站点相关信息页面
+ :param html_text:
+ :return:
+ """
+ pass
+
+ @abstractmethod
+ def _parse_user_base_info(self, html_text):
+ """
+ 解析用户基础信息
+ :param html_text:
+ :return:
+ """
+ pass
+
+ def _parse_logged_in(self, html_text):
+ """
+ 解析用户是否已经登陆
+ :param html_text:
+ :return: True/False
+ """
+ logged_in = SiteHelper.is_logged_in(html_text)
+ if not logged_in:
+ self.err_msg = "未检测到已登陆,请检查cookies是否过期"
+ log.warn(f"【Sites】{self.site_name} 未登录,跳过后续操作")
+
+ return logged_in
+
+ @abstractmethod
+ def _parse_user_traffic_info(self, html_text):
+ """
+ 解析用户的上传,下载,分享率等信息
+ :param html_text:
+ :return:
+ """
+ pass
+
+ @abstractmethod
+ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+ """
+ 解析用户的做种相关信息
+ :param html_text:
+ :param multi_page: 是否多页数据
+ :return: 下页地址
+ """
+ pass
+
+ @abstractmethod
+ def _parse_user_detail_info(self, html_text):
+ """
+ 解析用户的详细信息
+ 加入时间/等级/魔力值等
+ :param html_text:
+ :return:
+ """
+ pass
+
+ @abstractmethod
+ def _parse_message_content(self, html_text):
+ """
+ 解析短消息内容
+ :param html_text:
+ :return: head: message, date: time, content: message content
+ """
+ pass
diff --git a/app/sites/siteuserinfo/discuz.py b/app/sites/siteuserinfo/discuz.py
new file mode 100644
index 0000000..41717a1
--- /dev/null
+++ b/app/sites/siteuserinfo/discuz.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
+class DiscuzUserInfo(_ISiteUserInfo):
+ schema = SiteSchema.DiscuzX
+ order = SITE_BASE_ORDER + 10
+
+ @classmethod
+ def match(cls, html_text):
+ html = etree.HTML(html_text)
+ if not html:
+ return False
+
+ printable_text = html.xpath("string(.)") if html else ""
+ return 'Powered by Discuz!' in printable_text
+
+ def _parse_user_base_info(self, html_text):
+ html_text = self._prepare_html_text(html_text)
+ html = etree.HTML(html_text)
+
+ user_info = html.xpath('//a[contains(@href, "&uid=")]')
+ if user_info:
+ user_id_match = re.search(r"&uid=(\d+)", user_info[0].attrib['href'])
+ if user_id_match and user_id_match.group().strip():
+ self.userid = user_id_match.group(1)
+ self._torrent_seeding_page = f"forum.php?&mod=torrents&cat_5up=on"
+ self._user_detail_page = user_info[0].attrib['href']
+ self.username = user_info[0].text.strip()
+
+ def _parse_site_page(self, html_text):
+ # TODO
+ pass
+
+ def _parse_user_detail_info(self, html_text):
+ """
+ 解析用户额外信息,加入时间,等级
+ :param html_text:
+ :return:
+ """
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ # 用户等级
+ user_levels_text = html.xpath('//a[contains(@href, "usergroup")]/text()')
+ if user_levels_text:
+ self.user_level = user_levels_text[-1].strip()
+
+ # 加入日期
+ join_at_text = html.xpath('//li[em[text()="注册时间"]]/text()')
+ if join_at_text:
+ self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
+
+ # 分享率
+ ratio_text = html.xpath('//li[contains(.//text(), "分享率")]//text()')
+ if ratio_text:
+ ratio_match = re.search(r"\(([\d,.]+)\)", ratio_text[0])
+ if ratio_match and ratio_match.group(1).strip():
+ self.bonus = StringUtils.str_float(ratio_match.group(1))
+
+ # 积分
+ bouns_text = html.xpath('//li[em[text()="积分"]]/text()')
+ if bouns_text:
+ self.bonus = StringUtils.str_float(bouns_text[0].strip())
+
+ # 上传
+ upload_text = html.xpath('//li[em[contains(text(),"上传量")]]/text()')
+ if upload_text:
+ self.upload = StringUtils.num_filesize(upload_text[0].strip().split('/')[-1])
+
+ # 下载
+ download_text = html.xpath('//li[em[contains(text(),"下载量")]]/text()')
+ if download_text:
+ self.download = StringUtils.num_filesize(download_text[0].strip().split('/')[-1])
+
+ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+ """
+ 做种相关信息
+ :param html_text:
+ :param multi_page: 是否多页数据
+ :return: 下页地址
+ """
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ size_col = 3
+ seeders_col = 4
+ # 搜索size列
+ if html.xpath('//tr[position()=1]/td[.//img[@class="size"] and .//img[@alt="size"]]'):
+ size_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="size"] '
+ 'and .//img[@alt="size"]]/preceding-sibling::td')) + 1
+ # 搜索seeders列
+ if html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] and .//img[@alt="seeders"]]'):
+ seeders_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] '
+ 'and .//img[@alt="seeders"]]/preceding-sibling::td')) + 1
+
+ page_seeding = 0
+ page_seeding_size = 0
+ page_seeding_info = []
+ seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]')
+ seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()')
+ if seeding_sizes and seeding_seeders:
+ page_seeding = len(seeding_sizes)
+
+ for i in range(0, len(seeding_sizes)):
+ size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
+ seeders = StringUtils.str_int(seeding_seeders[i])
+
+ page_seeding_size += size
+ page_seeding_info.append([seeders, size])
+
+ self.seeding += page_seeding
+ self.seeding_size += page_seeding_size
+ self.seeding_info.extend(page_seeding_info)
+
+ # 是否存在下页数据
+ next_page = None
+ next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
+ if next_page_text:
+ next_page = next_page_text[-1].strip()
+
+ return next_page
+
+ def _parse_user_traffic_info(self, html_text):
+ pass
+
+ def _parse_message_unread_links(self, html_text, msg_links):
+ return None
+
+ def _parse_message_content(self, html_text):
+ return None, None, None
diff --git a/app/sites/siteuserinfo/file_list.py b/app/sites/siteuserinfo/file_list.py
new file mode 100644
index 0000000..59d823b
--- /dev/null
+++ b/app/sites/siteuserinfo/file_list.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
+class FileListSiteUserInfo(_ISiteUserInfo):
+ schema = SiteSchema.FileList
+ order = SITE_BASE_ORDER + 50
+
+ @classmethod
+ def match(cls, html_text):
+ html = etree.HTML(html_text)
+ if not html:
+ return False
+
+ printable_text = html.xpath("string(.)") if html else ""
+ return 'Powered by FileList' in printable_text
+
+ def _parse_site_page(self, html_text):
+ html_text = self._prepare_html_text(html_text)
+
+ user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
+ if user_detail and user_detail.group().strip():
+ self._user_detail_page = user_detail.group().strip().lstrip('/')
+ self.userid = user_detail.group(1)
+
+ self._torrent_seeding_page = f"snatchlist.php?id={self.userid}&action=torrents&type=seeding"
+
+ def _parse_user_base_info(self, html_text):
+ html_text = self._prepare_html_text(html_text)
+ html = etree.HTML(html_text)
+
+ ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
+ if ret:
+ self.username = str(ret[0])
+
+ def _parse_user_traffic_info(self, html_text):
+ """
+ 上传/下载/分享率 [做种数/魔力值]
+ :param html_text:
+ :return:
+ """
+ return
+
+ def _parse_user_detail_info(self, html_text):
+ html_text = self._prepare_html_text(html_text)
+ html = etree.HTML(html_text)
+
+ upload_html = html.xpath('//table//tr/td[text()="Uploaded"]/following-sibling::td//text()')
+ if upload_html:
+ self.upload = StringUtils.num_filesize(upload_html[0])
+ download_html = html.xpath('//table//tr/td[text()="Downloaded"]/following-sibling::td//text()')
+ if download_html:
+ self.download = StringUtils.num_filesize(download_html[0])
+
+ self.ratio = 0 if self.download == 0 else self.upload / self.download
+
+ user_level_html = html.xpath('//table//tr/td[text()="Class"]/following-sibling::td//text()')
+ if user_level_html:
+ self.user_level = user_level_html[0].strip()
+
+ join_at_html = html.xpath('//table//tr/td[contains(text(), "Join")]/following-sibling::td//text()')
+ if join_at_html:
+ self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip())
+
+ bonus_html = html.xpath('//a[contains(@href, "shop.php")]')
+ if bonus_html:
+ self.bonus = StringUtils.str_float(bonus_html[0].xpath("string(.)").strip())
+ pass
+
+ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+ """
+ 做种相关信息
+ :param html_text:
+ :param multi_page: 是否多页数据
+ :return: 下页地址
+ """
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ size_col = 6
+ seeders_col = 7
+
+ page_seeding = 0
+ page_seeding_size = 0
+ page_seeding_info = []
+ seeding_sizes = html.xpath(f'//table/tr[position()>1]/td[{size_col}]')
+ seeding_seeders = html.xpath(f'//table/tr[position()>1]/td[{seeders_col}]')
+ if seeding_sizes and seeding_seeders:
+ page_seeding = len(seeding_sizes)
+
+ for i in range(0, len(seeding_sizes)):
+ size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
+ seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())
+
+ page_seeding_size += size
+ page_seeding_info.append([seeders, size])
+
+ self.seeding += page_seeding
+ self.seeding_size += page_seeding_size
+ self.seeding_info.extend(page_seeding_info)
+
+ # 是否存在下页数据
+ next_page = None
+
+ return next_page
+
+ def _parse_message_unread_links(self, html_text, msg_links):
+ return None
+
+ def _parse_message_content(self, html_text):
+ return None, None, None
diff --git a/app/sites/siteuserinfo/gazelle.py b/app/sites/siteuserinfo/gazelle.py
new file mode 100644
index 0000000..ec4dfe4
--- /dev/null
+++ b/app/sites/siteuserinfo/gazelle.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
+class GazelleSiteUserInfo(_ISiteUserInfo):
+ schema = SiteSchema.Gazelle
+ order = SITE_BASE_ORDER
+
+ @classmethod
+ def match(cls, html_text):
+ html = etree.HTML(html_text)
+ if not html:
+ return False
+
+ printable_text = html.xpath("string(.)") if html else ""
+
+ return "Powered by Gazelle" in printable_text or "DIC Music" in printable_text
+
+ def _parse_user_base_info(self, html_text):
+ html_text = self._prepare_html_text(html_text)
+ html = etree.HTML(html_text)
+
+ tmps = html.xpath('//a[contains(@href, "user.php?id=")]')
+ if tmps:
+ user_id_match = re.search(r"user.php\?id=(\d+)", tmps[0].attrib['href'])
+ if user_id_match and user_id_match.group().strip():
+ self.userid = user_id_match.group(1)
+ self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
+ self._user_detail_page = f"user.php?id={self.userid}"
+ self.username = tmps[0].text.strip()
+
+ tmps = html.xpath('//*[@id="header-uploaded-value"]/@data-value')
+ if tmps:
+ self.upload = StringUtils.num_filesize(tmps[0])
+ else:
+ tmps = html.xpath('//li[@id="stats_seeding"]/span/text()')
+ if tmps:
+ self.upload = StringUtils.num_filesize(tmps[0])
+
+ tmps = html.xpath('//*[@id="header-downloaded-value"]/@data-value')
+ if tmps:
+ self.download = StringUtils.num_filesize(tmps[0])
+ else:
+ tmps = html.xpath('//li[@id="stats_leeching"]/span/text()')
+ if tmps:
+ self.download = StringUtils.num_filesize(tmps[0])
+
+ self.ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
+
+ tmps = html.xpath('//a[contains(@href, "bonus.php")]/@data-tooltip')
+ if tmps:
+ bonus_match = re.search(r"([\d,.]+)", tmps[0])
+ if bonus_match and bonus_match.group(1).strip():
+ self.bonus = StringUtils.str_float(bonus_match.group(1))
+ else:
+ tmps = html.xpath('//a[contains(@href, "bonus.php")]')
+ if tmps:
+ bonus_text = tmps[0].xpath("string(.)")
+ bonus_match = re.search(r"([\d,.]+)", bonus_text)
+ if bonus_match and bonus_match.group(1).strip():
+ self.bonus = StringUtils.str_float(bonus_match.group(1))
+
+ def _parse_site_page(self, html_text):
+ # TODO
+ pass
+
+ def _parse_user_detail_info(self, html_text):
+ """
+ 解析用户额外信息,加入时间,等级
+ :param html_text:
+ :return:
+ """
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ # 用户等级
+ user_levels_text = html.xpath('//*[@id="class-value"]/@data-value')
+ if user_levels_text:
+ self.user_level = user_levels_text[0].strip()
+ else:
+ user_levels_text = html.xpath('//li[contains(text(), "用户等级")]/text()')
+ if user_levels_text:
+ self.user_level = user_levels_text[0].split(':')[1].strip()
+
+ # 加入日期
+ join_at_text = html.xpath('//*[@id="join-date-value"]/@data-value')
+ if join_at_text:
+ self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
+ else:
+ join_at_text = html.xpath(
+ '//div[contains(@class, "box_userinfo_stats")]//li[contains(text(), "加入时间")]/span/text()')
+ if join_at_text:
+ self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
+
+ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+ """
+ 做种相关信息
+ :param html_text:
+ :param multi_page: 是否多页数据
+ :return: 下页地址
+ """
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ size_col = 3
+ # 搜索size列
+ if html.xpath('//table[contains(@id, "torrent")]//tr[1]/td'):
+ size_col = len(html.xpath('//table[contains(@id, "torrent")]//tr[1]/td')) - 3
+ # 搜索seeders列
+ seeders_col = size_col + 2
+
+ page_seeding = 0
+ page_seeding_size = 0
+ page_seeding_info = []
+ seeding_sizes = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{size_col}]')
+ seeding_seeders = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{seeders_col}]/text()')
+ if seeding_sizes and seeding_seeders:
+ page_seeding = len(seeding_sizes)
+
+ for i in range(0, len(seeding_sizes)):
+ size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
+ seeders = int(seeding_seeders[i])
+
+ page_seeding_size += size
+ page_seeding_info.append([seeders, size])
+
+ if multi_page:
+ self.seeding += page_seeding
+ self.seeding_size += page_seeding_size
+ self.seeding_info.extend(page_seeding_info)
+ else:
+ if not self.seeding:
+ self.seeding = page_seeding
+ if not self.seeding_size:
+ self.seeding_size = page_seeding_size
+ if not self.seeding_info:
+ self.seeding_info = page_seeding_info
+
+ # 是否存在下页数据
+ next_page = None
+ next_page_text = html.xpath('//a[contains(.//text(), "Next") or contains(.//text(), "下一页")]/@href')
+ if next_page_text:
+ next_page = next_page_text[-1].strip()
+
+ return next_page
+
+ def _parse_user_traffic_info(self, html_text):
+ # TODO
+ pass
+
+ def _parse_message_unread_links(self, html_text, msg_links):
+ return None
+
+ def _parse_message_content(self, html_text):
+ return None, None, None
diff --git a/app/sites/siteuserinfo/ipt_project.py b/app/sites/siteuserinfo/ipt_project.py
new file mode 100644
index 0000000..cb57e10
--- /dev/null
+++ b/app/sites/siteuserinfo/ipt_project.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
+class IptSiteUserInfo(_ISiteUserInfo):
+    """
+    User-info parser for IPTorrents.
+    """
+    schema = SiteSchema.Ipt
+    order = SITE_BASE_ORDER + 35
+
+    @classmethod
+    def match(cls, html_text):
+        # Plain substring check against the raw page text
+        return 'IPTorrents' in html_text
+
+    def _parse_user_base_info(self, html_text):
+        """
+        Parse user id/name and the stats block (upload/download/seeding/
+        leeching/ratio/bonus) from the index page.
+        """
+        html_text = self._prepare_html_text(html_text)
+        html = etree.HTML(html_text)
+        tmps = html.xpath('//a[contains(@href, "/u/")]//text()')
+        tmps_id = html.xpath('//a[contains(@href, "/u/")]/@href')
+        if tmps:
+            self.username = str(tmps[-1])
+        if tmps_id:
+            user_id_match = re.search(r"/u/(\d+)", tmps_id[0])
+            if user_id_match and user_id_match.group().strip():
+                self.userid = user_id_match.group(1)
+                self._user_detail_page = f"user.php?u={self.userid}"
+                self._torrent_seeding_page = f"peers?u={self.userid}"
+
+        tmps = html.xpath('//div[@class = "stats"]/div/div')
+        if tmps:
+            # NOTE(review): the fixed span/anchor indices below assume the
+            # exact layout of the stats widget — verify against a live page
+            self.upload = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[1]).strip())
+            self.download = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[2]).strip())
+            self.seeding = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[0])
+            self.leeching = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[1])
+            self.ratio = StringUtils.str_float(str(tmps[0].xpath('span/text()')[0]).strip().replace('-', '0'))
+            self.bonus = StringUtils.str_float(tmps[0].xpath('a')[3].xpath('text()')[0])
+
+    def _parse_site_page(self, html_text):
+        # TODO
+        pass
+
+    def _parse_user_detail_info(self, html_text):
+        """
+        Parse user level and join date from the detail page table.
+        """
+        html = etree.HTML(html_text)
+        if not html:
+            return
+
+        # User level ("Class" row)
+        user_levels_text = html.xpath('//tr/th[text()="Class"]/following-sibling::td[1]/text()')
+        if user_levels_text:
+            self.user_level = user_levels_text[0].strip()
+
+        # Join date; strip the trailing " (...)" ago-suffix
+        join_at_text = html.xpath('//tr/th[text()="Join date"]/following-sibling::td[1]/text()')
+        if join_at_text:
+            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0])
+
+    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+        """
+        Parse seeding count and total seeding size from the peers page.
+        :param html_text: peers page HTML
+        :param multi_page: whether this is a continuation page (unused here)
+        :return: None (no pagination handled for this site)
+        """
+        html = etree.HTML(html_text)
+        if not html:
+            return
+        # Rows between the "Seeders" and "Leechers" header rows give the
+        # count of currently seeded torrents.
+        # NOTE(review): the +1 / -3 offsets assume a fixed number of header
+        # rows in the peers table — confirm against a live page
+        seeding_end_pos = 3
+        if html.xpath('//tr/td[text() = "Leechers"]'):
+            seeding_end_pos = len(html.xpath('//tr/td[text() = "Leechers"]/../preceding-sibling::tr')) + 1
+            seeding_end_pos = seeding_end_pos - 3
+
+        page_seeding = 0
+        page_seeding_size = 0
+        seeding_torrents = html.xpath('//tr/td[text() = "Seeders"]/../following-sibling::tr/td[position()=6]/text()')
+        if seeding_torrents:
+            page_seeding = seeding_end_pos
+            for per_size in seeding_torrents[:seeding_end_pos]:
+                # Sizes may appear as "xx (yy GB)"; keep the parenthesised part
+                if '(' in per_size and ')' in per_size:
+                    per_size = per_size.split('(')[-1]
+                    per_size = per_size.split(')')[0]
+
+                page_seeding_size += StringUtils.num_filesize(per_size)
+
+        self.seeding = page_seeding
+        self.seeding_size = page_seeding_size
+
+    def _parse_user_traffic_info(self, html_text):
+        # TODO
+        pass
+
+    def _parse_message_unread_links(self, html_text, msg_links):
+        return None
+
+    def _parse_message_content(self, html_text):
+        return None, None, None
diff --git a/app/sites/siteuserinfo/nexus_php.py b/app/sites/siteuserinfo/nexus_php.py
new file mode 100644
index 0000000..5c54015
--- /dev/null
+++ b/app/sites/siteuserinfo/nexus_php.py
@@ -0,0 +1,343 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+import log
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.exception_utils import ExceptionUtils
+from app.utils.types import SiteSchema
+
+
+class NexusPhpSiteUserInfo(_ISiteUserInfo):
+ schema = SiteSchema.NexusPhp
+ order = SITE_BASE_ORDER * 2
+
+    @classmethod
+    def match(cls, html_text):
+        """
+        Catch-all matcher: NexusPhp is the default schema, so every page
+        matches (this parser has the largest order value, SITE_BASE_ORDER * 2,
+        and is therefore tried last).
+        :param html_text: site index page HTML (unused)
+        :return: always True
+        """
+        return True
+
+    def _parse_site_page(self, html_text):
+        """
+        Locate the user-detail page link on the index page and derive the
+        seeding list URL from the numeric user id.
+        """
+        html_text = self._prepare_html_text(html_text)
+
+        user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
+        if user_detail and user_detail.group().strip():
+            self._user_detail_page = user_detail.group().strip().lstrip('/')
+            self.userid = user_detail.group(1)
+            self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
+        else:
+            # A detail link without a numeric id: keep the page but disable
+            # seeding parsing (there is no user id to query with)
+            user_detail = re.search(r"(userdetails)", html_text)
+            if user_detail and user_detail.group().strip():
+                self._user_detail_page = user_detail.group().strip().lstrip('/')
+                self.userid = None
+                self._torrent_seeding_page = None
+
+    def _parse_message_unread(self, html_text):
+        """
+        Parse the number of unread private messages from the index page.
+        :param html_text: index page HTML
+        :return:
+        """
+        html = etree.HTML(html_text)
+        if not html:
+            return
+
+        message_labels = html.xpath('//a[contains(@href, "messages.php")]/..')
+        if message_labels:
+            message_text = message_labels[0].xpath("string(.)")
+
+            log.debug(f"【Sites】{self.site_name} 消息原始信息 {message_text}")
+            # The count appears as "信息箱 N", "(N" or "你有 N" depending on
+            # the skin; the leading [^Date] guard skips date-like strings
+            message_unread_match = re.findall(r"[^Date](信息箱\s*|\(|你有\xa0)(\d+)", message_text)
+
+            if message_unread_match and len(message_unread_match[-1]) == 2:
+                self.message_unread = StringUtils.str_int(message_unread_match[-1][1])
+
+    def _parse_user_base_info(self, html_text):
+        """
+        Parse the username from the index page.  Traffic figures and the
+        unread-message count are parsed here as well to avoid extra requests.
+        """
+        # Combined parsing to reduce extra page requests
+        self.__parse_user_traffic_info(html_text)
+        self._user_traffic_page = None
+
+        self._parse_message_unread(html_text)
+
+        html = etree.HTML(html_text)
+        if not html:
+            return
+
+        # Username in bold inside the userdetails link for this user id
+        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//b//text()')
+        if ret:
+            self.username = str(ret[0])
+            return
+        # Plain text inside the same link.
+        # NOTE(review): there is no early return here, so the <strong>-based
+        # lookup below can override this value — confirm the ordering is intended
+        ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
+        if ret:
+            self.username = str(ret[0])
+
+        ret = html.xpath('//a[contains(@href, "userdetails")]//strong//text()')
+        if ret:
+            self.username = str(ret[0])
+            return
+
+    def __parse_user_traffic_info(self, html_text):
+        """
+        Parse upload/download/ratio/leeching/bonus from the index page using
+        tolerant regexes (labels vary between NexusPHP skins).
+        """
+        html_text = self._prepare_html_text(html_text)
+        # Upload total: "上传量/上傳量" label followed by a file size
+        upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
+                                 re.IGNORECASE)
+        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
+        # Download total; the leading class excludes compound labels like 总下载
+        download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
+                                   re.IGNORECASE)
+        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
+        # Share ratio
+        ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
+        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
+                ratio_match and ratio_match.group(1).strip()) else 0.0
+        # Currently-leeching count
+        leeching_match = re.search(r"(Torrents leeching|下载中)[\u4E00-\u9FA5\D\s]+(\d+)[\s\S]+<", html_text)
+        self.leeching = StringUtils.str_int(leeching_match.group(2)) if leeching_match and leeching_match.group(
+            2).strip() else 0
+        html = etree.HTML(html_text)
+        # Bonus: try the ucoin widget first
+        tmps = html.xpath('//span[@class = "ucoin-symbol ucoin-gold"]//text()') if html else None
+        if tmps:
+            self.bonus = StringUtils.str_float(str(tmps[-1]))
+            return
+        # Then the "mybonus" link text
+        tmps = html.xpath('//a[contains(@href,"mybonus")]/text()') if html else None
+        if tmps:
+            bonus_text = str(tmps[0]).strip()
+            bonus_match = re.search(r"([\d,.]+)", bonus_text)
+            if bonus_match and bonus_match.group(1).strip():
+                self.bonus = StringUtils.str_float(bonus_match.group(1))
+                return
+        # Finally fall back to raw-text regexes over the whole page
+        bonus_match = re.search(r"mybonus.[\[\]::<>/a-zA-Z_\-=\"'\s#;.(使用魔力值豆]+\s*([\d,.]+)[<()&\s]", html_text)
+        try:
+            if bonus_match and bonus_match.group(1).strip():
+                self.bonus = StringUtils.str_float(bonus_match.group(1))
+                return
+            bonus_match = re.search(r"[魔力值|\]][\[\]::<>/a-zA-Z_\-=\"'\s#;]+\s*([\d,.]+)[<()&\s]", html_text,
+                                    flags=re.S)
+            if bonus_match and bonus_match.group(1).strip():
+                self.bonus = StringUtils.str_float(bonus_match.group(1))
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+
+    def _parse_user_traffic_info(self, html_text):
+        """
+        Upload / download / share ratio [seeding count / bonus].
+        Intentionally empty: all traffic figures are parsed from the index
+        page in _parse_user_base_info to save a request.
+        :param html_text: traffic page HTML (unused)
+        :return:
+        """
+        pass
+
+ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
+ """
+ 做种相关信息
+ :param html_text:
+ :param multi_page: 是否多页数据
+ :return: 下页地址
+ """
+ html = etree.HTML(str(html_text).replace(r'\/', '/'))
+ if not html:
+ return None
+
+ size_col = 3
+ seeders_col = 4
+ # 搜索size列
+ size_col_xpath = '//tr[position()=1]/td[(img[@class="size"] and img[@alt="size"]) or (text() = "大小")]'
+ if html.xpath(size_col_xpath):
+ size_col = len(html.xpath(f'{size_col_xpath}/preceding-sibling::td')) + 1
+ # 搜索seeders列
+ seeders_col_xpath = '//tr[position()=1]/td[(img[@class="seeders"] and img[@alt="seeders"]) or (text() = "在做种")]'
+ if html.xpath(seeders_col_xpath):
+ seeders_col = len(html.xpath(f'{seeders_col_xpath}/preceding-sibling::td')) + 1
+
+ page_seeding = 0
+ page_seeding_size = 0
+ page_seeding_info = []
+ seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]')
+ seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()')
+ if seeding_sizes and seeding_seeders:
+ page_seeding = len(seeding_sizes)
+
+ for i in range(0, len(seeding_sizes)):
+ size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
+ seeders = StringUtils.str_int(seeding_seeders[i])
+
+ page_seeding_size += size
+ page_seeding_info.append([seeders, size])
+
+ self.seeding += page_seeding
+ self.seeding_size += page_seeding_size
+ self.seeding_info.extend(page_seeding_info)
+
+ # 是否存在下页数据
+ next_page = None
+ next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
+ if next_page_text:
+ next_page = next_page_text[-1].strip()
+ # fix up page url
+ if self.userid not in next_page:
+ next_page = f'{next_page}&userid={self.userid}&type=seeding'
+
+ return next_page
+
    def _parse_user_detail_info(self, html_text):
        """
        Parse extra user details: user level, join date and (as a fallback)
        seeding statistics embedded in the details page.

        :param html_text: raw HTML of the user-details page
        :return: None
        """
        html = etree.HTML(html_text)
        if not html:
            return

        self.__get_user_level(html)

        # Join date: matches both table layouts ("加入日期"/"注册日期")
        # and the <div><b>加入日期</b></div> layout.
        join_at_text = html.xpath(
            '//tr/td[text()="加入日期" or text()="注册日期" or *[text()="加入日期"]]/following-sibling::td[1]//text()'
            '|//div/b[text()="加入日期"]/../text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip())

        # Seeding size & count from the "当前上传" (currently seeding) table.
        # Only used as a fallback when the dedicated seeding page yielded
        # nothing (see the "if not self.seeding*" guards below).
        seeding_sizes = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                   'table[tr[1][td[4 and text()="尺寸"]]]//tr[position()>1]/td[4]')
        seeding_seeders = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
                                     'table[tr[1][td[5 and text()="做种者"]]]//tr[position()>1]/td[5]//text()')
        tmp_seeding = len(seeding_sizes)
        tmp_seeding_size = 0
        tmp_seeding_info = []
        # NOTE(review): assumes seeding_seeders has at least as many entries
        # as seeding_sizes; an uneven table would raise IndexError — confirm.
        for i in range(0, len(seeding_sizes)):
            size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
            seeders = StringUtils.str_int(seeding_seeders[i])

            tmp_seeding_size += size
            tmp_seeding_info.append([seeders, size])

        # Keep values already collected from the seeding page, if any.
        if not self.seeding_size:
            self.seeding_size = tmp_seeding_size
        if not self.seeding:
            self.seeding = tmp_seeding
        if not self.seeding_info:
            self.seeding_info = tmp_seeding_info

        # Alternative layout: aggregate totals in a "做种统计" cell.
        seeding_sizes = html.xpath('//tr/td[text()="做种统计"]/following-sibling::td[1]//text()')
        if seeding_sizes:
            seeding_match = re.search(r"总做种数:\s+(\d+)", seeding_sizes[0], re.IGNORECASE)
            seeding_size_match = re.search(r"总做种体积:\s+([\d,.\s]+[KMGTPI]*B)", seeding_sizes[0], re.IGNORECASE)
            tmp_seeding = StringUtils.str_int(seeding_match.group(1)) if (
                seeding_match and seeding_match.group(1)) else 0
            tmp_seeding_size = StringUtils.num_filesize(
                seeding_size_match.group(1).strip()) if seeding_size_match else 0
            if not self.seeding_size:
                self.seeding_size = tmp_seeding_size
            if not self.seeding:
                self.seeding = tmp_seeding

        self.__fixup_torrent_seeding_page(html)
+
    def __fixup_torrent_seeding_page(self, html):
        """
        Fix up the torrent-seeding page link for the different NexusPHP
        site variants.

        :param html: parsed lxml document of the user-details page
        :return: None
        """
        # Variant 1: a dedicated getusertorrentlist.php seeding page.
        seeding_url_text = html.xpath('//a[contains(@href,"getusertorrentlist.php") '
                                      'and contains(@href,"seeding")]/@href')
        if seeding_url_text:
            self._torrent_seeding_page = seeding_url_text[0].strip()
        # Variant 2: the user id is only available inside a JS ajax call.
        seeding_url_text = html.xpath('//a[contains(@href, "javascript: getusertorrentlistajax") '
                                      'and contains(@href,"seeding")]/@href')
        csrf_text = html.xpath('//meta[@name="x-csrf"]/@content')
        if not self._torrent_seeding_page and seeding_url_text:
            user_js = re.search(r"javascript: getusertorrentlistajax\(\s*'(\d+)", seeding_url_text[0])
            if user_js and user_js.group(1).strip():
                self.userid = user_js.group(1).strip()
                self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
        elif seeding_url_text and csrf_text:
            # Variant 3: ajax endpoint protected by a CSRF meta token;
            # the token is passed as a request parameter.
            if csrf_text[0].strip():
                self._torrent_seeding_page \
                    = f"ajax_getusertorrentlist.php"
                self._torrent_seeding_params = {'userid': self.userid, 'type': 'seeding', 'csrf': csrf_text[0].strip()}

        # Category-based seeding mode — temporarily disabled.
        # seeding_url_text = html.xpath('//tr/td[text()="当前做种"]/following-sibling::td[1]'
        #                               '/table//td/a[contains(@href,"seeding")]/@href')
        # if seeding_url_text:
        #     self._torrent_seeding_page = seeding_url_text
+
    def __get_user_level(self, html):
        """
        Extract the user level from the details page.

        Tries, in order: the title attribute of a level image, the text of a
        level cell without an image (or with an untitled image), any text in
        the level cell, and finally a "[level]" tag next to a userdetails
        link.

        :param html: parsed lxml document of the user-details page
        :return: None
        """
        # Image-based level: prefer the image's title attribute.
        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级" or *[text()="等级"]]/'
                                      'following-sibling::td[1]/img[1]/@title')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()
            return

        # Text-based level (no image, or image without a title).
        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and not(img)]'
                                      '|//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1 and img[not(@title)]]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        # Fallback: whatever content the level cell holds.
        user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
                                      'following-sibling::td[1]')
        if user_levels_text:
            self.user_level = user_levels_text[0].xpath("string(.)").strip()
            return

        # Last resort: a "[level]" suffix on a userdetails link.
        user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
        if not self.user_level and user_levels_text:
            for user_level_text in user_levels_text:
                user_level_match = re.search(r"\[(.*)]", user_level_text)
                if user_level_match and user_level_match.group(1).strip():
                    self.user_level = user_level_match.group(1).strip()
                    break
+
+ def _parse_message_unread_links(self, html_text, msg_links):
+ html = etree.HTML(html_text)
+ if not html:
+ return None
+
+ message_links = html.xpath('//tr[not(./td/img[@alt="Read"])]/td/a[contains(@href, "viewmessage")]/@href')
+ msg_links.extend(message_links)
+ # 是否存在下页数据
+ next_page = None
+ next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
+ if next_page_text:
+ next_page = next_page_text[-1].strip()
+
+ return next_page
+
+ def _parse_message_content(self, html_text):
+ html = etree.HTML(html_text)
+ if not html:
+ return None, None, None
+ # 标题
+ message_head_text = None
+ message_head = html.xpath('//h1/text()'
+ '|//div[@class="layui-card-header"]/span[1]/text()')
+ if message_head:
+ message_head_text = message_head[-1].strip()
+
+ # 消息时间
+ message_date_text = None
+ message_date = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[2]/td[2]'
+ '|//div[@class="layui-card-header"]/span[2]/span[2]')
+ if message_date:
+ message_date_text = message_date[0].xpath("string(.)").strip()
+
+ # 消息内容
+ message_content_text = None
+ message_content = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[3]/td'
+ '|//div[contains(@class,"layui-card-body")]')
+ if message_content:
+ message_content_text = message_content[0].xpath("string(.)").strip()
+
+ return message_head_text, message_date_text, message_content_text
diff --git a/app/sites/siteuserinfo/nexus_project.py b/app/sites/siteuserinfo/nexus_project.py
new file mode 100644
index 0000000..0880998
--- /dev/null
+++ b/app/sites/siteuserinfo/nexus_project.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+import re
+
+from app.sites.siteuserinfo._base import SITE_BASE_ORDER
+from app.sites.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
+from app.utils.types import SiteSchema
+
+
class NexusProjectSiteUserInfo(NexusPhpSiteUserInfo):
    """
    Site-info parser for "Nexus Project" flavoured NexusPHP sites.
    """
    schema = SiteSchema.NexusProject
    order = SITE_BASE_ORDER + 25

    @classmethod
    def match(cls, html_text):
        # Identified by the literal site signature in the page.
        return 'Nexus Project' in html_text

    def _parse_site_page(self, html_text):
        """
        Locate the user id and derive the detail/seeding page URLs.
        """
        html_text = self._prepare_html_text(html_text)

        detail_match = re.search(r"userdetails.php\?id=(\d+)", html_text)
        if detail_match and detail_match.group().strip():
            self._user_detail_page = detail_match.group().strip().lstrip('/')
            self.userid = detail_match.group(1)

        self._torrent_seeding_page = f"viewusertorrents.php?id={self.userid}&show=seeding"
diff --git a/app/sites/siteuserinfo/nexus_rabbit.py b/app/sites/siteuserinfo/nexus_rabbit.py
new file mode 100644
index 0000000..6f76430
--- /dev/null
+++ b/app/sites/siteuserinfo/nexus_rabbit.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+import json
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import SITE_BASE_ORDER
+from app.sites.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
+from app.utils.exception_utils import ExceptionUtils
+from app.utils.types import SiteSchema
+
+
class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
    """
    Site-info parser for "Style by Rabbit" flavoured NexusPHP sites, whose
    seeding list is served as JSON by an ajax endpoint.
    """
    schema = SiteSchema.NexusRabbit
    order = SITE_BASE_ORDER + 5

    @classmethod
    def match(cls, html_text):
        # Identified by the "Style by Rabbit" signature in the rendered text.
        html = etree.HTML(html_text)
        if not html:
            return False

        printable_text = html.xpath("string(.)") if html else ""
        return 'Style by Rabbit' in printable_text

    def _parse_site_page(self, html_text):
        """
        Derive the JSON seeding endpoint after the base-class page parse.
        """
        super()._parse_site_page(html_text)
        # Request all rows at once; the endpoint answers JSON only when the
        # Accept header asks for it.
        self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}"
        self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"}

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        """
        Parse the seeding JSON and accumulate count/size/per-torrent info.

        :param html_text: JSON payload of the seeding endpoint
        :param multi_page: whether the data spans multiple pages (unused —
                           a single request fetches everything)
        :return: None
        """
        try:
            # BUGFIX: valid JSON without a "data" key yielded None, and the
            # subsequent len(None) raised OUTSIDE this try block; default to
            # an empty list instead.
            torrents = json.loads(html_text).get('data') or []
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return

        page_seeding_size = 0
        page_seeding_info = []

        page_seeding = len(torrents)
        for torrent in torrents:
            seeders = int(torrent.get('seeders', 0))
            size = int(torrent.get('size', 0))
            # Reuse the converted value instead of converting twice.
            page_seeding_size += size
            page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)
diff --git a/app/sites/siteuserinfo/small_horse.py b/app/sites/siteuserinfo/small_horse.py
new file mode 100644
index 0000000..875c282
--- /dev/null
+++ b/app/sites/siteuserinfo/small_horse.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
class SmallHorseSiteUserInfo(_ISiteUserInfo):
    """
    User-info parser for "Small Horse" (Gazelle-style) sites.
    """
    schema = SiteSchema.SmallHorse
    order = SITE_BASE_ORDER + 30

    @classmethod
    def match(cls, html_text):
        # Identified by the literal site signature in the page.
        return 'Small Horse' in html_text

    def _parse_site_page(self, html_text):
        """
        Locate the user id and derive the detail/traffic page URLs.
        """
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"user.php\?id=(\d+)", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)
            self._user_traffic_page = f"user.php?id={self.userid}"

    def _parse_user_base_info(self, html_text):
        # Username is the text of the first link pointing at user.php.
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        ret = html.xpath('//a[contains(@href, "user.php")]//text()')
        if ret:
            self.username = str(ret[0])

    def _parse_user_traffic_info(self, html_text):
        """
        Parse upload/download/share ratio (and seeding count / bonus).

        NOTE(review): relies on fixed positions inside the "stats nobullet"
        lists (tmps[1], tmps[3], tmps[4] and specific <li> indexes); a layout
        change would raise IndexError — confirm against the live page.

        :param html_text: raw HTML of the user profile page
        :return: None
        """
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        tmps = html.xpath('//ul[@class = "stats nobullet"]')
        if tmps:
            if tmps[1].xpath("li") and tmps[1].xpath("li")[0].xpath("span//text()"):
                self.join_at = StringUtils.unify_datetime_str(tmps[1].xpath("li")[0].xpath("span//text()")[0])
            self.upload = StringUtils.num_filesize(str(tmps[1].xpath("li")[2].xpath("text()")[0]).split(":")[1].strip())
            self.download = StringUtils.num_filesize(
                str(tmps[1].xpath("li")[3].xpath("text()")[0]).split(":")[1].strip())
            # "∞" (infinite ratio) is normalised to 0.
            if tmps[1].xpath("li")[4].xpath("span//text()"):
                self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[4].xpath("span//text()")[0]).replace('∞', '0'))
            else:
                self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
            self.bonus = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
            self.user_level = str(tmps[3].xpath("li")[0].xpath("text()")[0]).split(":")[1].strip()
            self.seeding = StringUtils.str_int(
                (tmps[4].xpath("li")[5].xpath("text()")[0]).split(":")[1].replace("[", ""))
            self.leeching = StringUtils.str_int(
                (tmps[4].xpath("li")[6].xpath("text()")[0]).split(":")[1].replace("[", ""))

    def _parse_user_detail_info(self, html_text):
        # All details are parsed from the traffic page; nothing to do here.
        pass

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        # Seeding details are not parsed for this schema.
        pass

    def _parse_message_unread_links(self, html_text, msg_links):
        # Site messages are not handled for this schema.
        return None

    def _parse_message_content(self, html_text):
        # Site messages are not handled for this schema.
        return None, None, None
diff --git a/app/sites/siteuserinfo/tnode.py b/app/sites/siteuserinfo/tnode.py
new file mode 100644
index 0000000..d6846f3
--- /dev/null
+++ b/app/sites/siteuserinfo/tnode.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+import json
+import re
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
class TNodeSiteUserInfo(_ISiteUserInfo):
    """
    User-info parser for TNode sites, which expose their data through a
    JSON API protected by a CSRF token.
    """
    schema = SiteSchema.TNode
    order = SITE_BASE_ORDER + 60

    @classmethod
    def match(cls, html_text):
        # Identified by the literal site signature in the page.
        return 'Powered By TNode' in html_text

    def _parse_site_page(self, html_text):
        """
        Extract the CSRF token from the page and set up the API endpoints.
        """
        html_text = self._prepare_html_text(html_text)

        # BUGFIX: the original pattern was empty — re.search(r'', ...) always
        # matches and .group(1) then raises "no such group", so the token was
        # never captured.
        # NOTE(review): the pattern was evidently lost (stripped HTML in a
        # comment); reconstructed as the standard CSRF meta tag — confirm the
        # exact tag name against a live TNode page.
        csrf_token = re.search(r'<meta\s+name="x-csrf-token"\s+content="(.+?)"', html_text)
        if csrf_token:
            self._addition_headers = {'X-CSRF-TOKEN': csrf_token.group(1)}
            self._user_detail_page = "api/user/getMainInfo"
            self._torrent_seeding_page = "api/user/listTorrentActivity?id=&type=seeding&page=1&size=20000"

    def _parse_logged_in(self, html_text):
        """
        Login check: currently always succeeds (detection skipped, to be
        refined later).

        :param html_text: raw page HTML (unused)
        :return: True
        """
        return True

    def _parse_user_base_info(self, html_text):
        # The detail API fills in the real username; mirror the id for now.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text):
        # Traffic figures come from the detail API; nothing to do here.
        pass

    def _parse_user_detail_info(self, html_text):
        """
        Parse api/user/getMainInfo JSON: identity, level, join date,
        traffic, bonus and unread-message counters.

        :param html_text: JSON payload of the detail endpoint
        :return: None
        """
        detail = json.loads(html_text)
        if detail.get("status") != 200:
            return

        user_info = detail.get("data", {})
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = user_info.get("class", {}).get("name")
        self.join_at = user_info.get("regTime", 0)
        self.join_at = StringUtils.unify_datetime_str(str(self.join_at))

        self.upload = user_info.get("upload")
        self.download = user_info.get("download")
        # BUGFIX: a missing/null "download" made "<= 0" raise TypeError;
        # treat missing values as zero traffic.
        self.ratio = 0 if not self.download or self.download <= 0 else round((self.upload or 0) / self.download, 3)
        self.bonus = user_info.get("bonus")

        # Counters may be missing or null in the payload; default each to 0.
        self.message_unread = (user_info.get("unreadAdmin") or 0) \
            + (user_info.get("unreadInbox") or 0) \
            + (user_info.get("unreadSystem") or 0)

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        """
        Parse the seeding-activity JSON and accumulate count/size and
        per-torrent [seeders, size] pairs.

        :param html_text: JSON payload of the seeding endpoint
        :param multi_page: whether the data spans multiple pages
        :return: next page URL (always None — the query requests all rows)
        """
        seeding_info = json.loads(html_text)
        if seeding_info.get("status") != 200:
            return

        # Guard against a null "data"/"torrents" field in the payload.
        torrents = (seeding_info.get("data") or {}).get("torrents") or []

        page_seeding_size = 0
        page_seeding_info = []
        for torrent in torrents:
            size = torrent.get("size", 0)
            seeders = torrent.get("seeding", 0)

            page_seeding_size += size
            page_seeding_info.append([seeders, size])

        self.seeding += len(torrents)
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Everything is fetched in a single request; no next page.
        return None

    def _parse_message_unread_links(self, html_text, msg_links):
        # Message links are not scraped; counters come from the detail API.
        return None

    def _parse_message_content(self, html_text):
        """
        Message endpoints (not yet implemented):
          system  api/message/listSystem?page=1&size=20
          inbox   api/message/listInbox?page=1&size=20
          admin   api/message/listAdmin?page=1&size=20

        :param html_text: JSON payload (unused)
        :return: (title, date, content) — always None
        """
        return None, None, None
diff --git a/app/sites/siteuserinfo/torrent_leech.py b/app/sites/siteuserinfo/torrent_leech.py
new file mode 100644
index 0000000..b2a1aef
--- /dev/null
+++ b/app/sites/siteuserinfo/torrent_leech.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
class TorrentLeechSiteUserInfo(_ISiteUserInfo):
    """
    User-info parser for TorrentLeech.
    """
    schema = SiteSchema.TorrentLeech
    order = SITE_BASE_ORDER + 40

    @classmethod
    def match(cls, html_text):
        # Identified by the site name appearing in the page.
        return 'TorrentLeech' in html_text

    def _parse_site_page(self, html_text):
        """
        Extract the profile name and derive the traffic/seeding page URLs.
        """
        html_text = self._prepare_html_text(html_text)

        user_detail = re.search(r"/profile/([^/]+)/", html_text)
        if user_detail and user_detail.group().strip():
            self._user_detail_page = user_detail.group().strip().lstrip('/')
            self.userid = user_detail.group(1)
            self._user_traffic_page = f"profile/{self.userid}/view"
            self._torrent_seeding_page = f"profile/{self.userid}/seeding"

    def _parse_user_base_info(self, html_text):
        # TorrentLeech uses the profile name as both id and username.
        self.username = self.userid

    def _parse_user_traffic_info(self, html_text):
        """
        Parse upload/download/ratio plus level, join date and bonus points
        from the profile view page.

        :param html_text: raw HTML of the profile page
        :return: None
        """
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)
        upload_html = html.xpath('//div[contains(@class,"profile-uploaded")]//span/text()')
        if upload_html:
            self.upload = StringUtils.num_filesize(upload_html[0])
        download_html = html.xpath('//div[contains(@class,"profile-downloaded")]//span/text()')
        if download_html:
            self.download = StringUtils.num_filesize(download_html[0])
        ratio_html = html.xpath('//div[contains(@class,"profile-ratio")]//span/text()')
        if ratio_html:
            # "∞" (infinite ratio) is normalised to 0.
            self.ratio = StringUtils.str_float(ratio_html[0].replace('∞', '0'))

        user_level_html = html.xpath('//table[contains(@class, "profileViewTable")]'
                                     '//tr/td[text()="Class"]/following-sibling::td/text()')
        if user_level_html:
            self.user_level = user_level_html[0].strip()

        join_at_html = html.xpath('//table[contains(@class, "profileViewTable")]'
                                  '//tr/td[text()="Registration date"]/following-sibling::td/text()')
        if join_at_html:
            self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip())

        bonus_html = html.xpath('//span[contains(@class, "total-TL-points")]/text()')
        if bonus_html:
            self.bonus = StringUtils.str_float(bonus_html[0].strip())

    def _parse_user_detail_info(self, html_text):
        # All details are parsed from the traffic page; nothing to do here.
        pass

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        """
        Parse the seeding table and accumulate count/size and per-torrent
        [seeders, size] pairs.

        :param html_text: raw HTML of the seeding page
        :param multi_page: whether the data spans multiple pages
        :return: next page URL (always None — single-page listing)
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Fixed column positions of the seeding table.
        size_col = 2
        seeders_col = 7

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        seeding_sizes = html.xpath(f'//tbody/tr/td[{size_col}]')
        seeding_seeders = html.xpath(f'//tbody/tr/td[{seeders_col}]/text()')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i])

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Single-page listing; there is never a next page.
        next_page = None

        return next_page

    def _parse_message_unread_links(self, html_text, msg_links):
        # Site messages are not handled for TorrentLeech.
        return None

    def _parse_message_content(self, html_text):
        # Site messages are not handled for TorrentLeech.
        return None, None, None
diff --git a/app/sites/siteuserinfo/unit3d.py b/app/sites/siteuserinfo/unit3d.py
new file mode 100644
index 0000000..d33b454
--- /dev/null
+++ b/app/sites/siteuserinfo/unit3d.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+import re
+
+from lxml import etree
+
+from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
+from app.utils import StringUtils
+from app.utils.types import SiteSchema
+
+
class Unit3dSiteUserInfo(_ISiteUserInfo):
    """
    User-info parser for UNIT3D-framework sites.
    """
    schema = SiteSchema.Unit3d
    order = SITE_BASE_ORDER + 15

    @classmethod
    def match(cls, html_text):
        # UNIT3D pages load a characteristic unit3d.js asset.
        return "unit3d.js" in html_text

    def _parse_user_base_info(self, html_text):
        """
        Extract the username (from the settings link) and bonus points,
        and derive the seeding/detail page URLs.
        """
        html_text = self._prepare_html_text(html_text)
        html = etree.HTML(html_text)

        tmps = html.xpath('//a[contains(@href, "/users/") and contains(@href, "settings")]/@href')
        if tmps:
            user_name_match = re.search(r"/users/(.+)/settings", tmps[0])
            if user_name_match and user_name_match.group().strip():
                self.username = user_name_match.group(1)
                self._torrent_seeding_page = f"/users/{self.username}/active?perPage=100&client=&seeding=include"
                self._user_detail_page = f"/users/{self.username}"

        # Bonus points are shown inside the "bonus/earnings" link text.
        tmps = html.xpath('//a[contains(@href, "bonus/earnings")]')
        if tmps:
            bonus_text = tmps[0].xpath("string(.)")
            bonus_match = re.search(r"([\d,.]+)", bonus_text)
            if bonus_match and bonus_match.group(1).strip():
                self.bonus = StringUtils.str_float(bonus_match.group(1))

    def _parse_site_page(self, html_text):
        # TODO: page URLs are currently derived in _parse_user_base_info.
        pass

    def _parse_user_detail_info(self, html_text):
        """
        Parse extra user info: level badge and join date.

        :param html_text: raw HTML of the user-details page
        :return: None
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # User level badge.
        user_levels_text = html.xpath('//div[contains(@class, "content")]//span[contains(@class, "badge-user")]/text()')
        if user_levels_text:
            self.user_level = user_levels_text[0].strip()

        # Join date (simplified/traditional Chinese or English label).
        join_at_text = html.xpath('//div[contains(@class, "content")]//h4[contains(text(), "注册日期") '
                                  'or contains(text(), "註冊日期") '
                                  'or contains(text(), "Registration date")]/text()')
        if join_at_text:
            self.join_at = StringUtils.unify_datetime_str(
                join_at_text[0].replace('注册日期', '').replace('註冊日期', '').replace('Registration date', ''))

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        """
        Parse the seeding table and accumulate count/size and per-torrent
        [seeders, size] pairs.

        :param html_text: raw HTML of the "active" torrents page
        :param multi_page: whether the data spans multiple pages
        :return: next page URL or None
        """
        html = etree.HTML(html_text)
        if not html:
            return None

        # Default column positions; overridden via header classes below.
        size_col = 9
        seeders_col = 2
        # Locate the size column by its header class.
        if html.xpath('//tr[position()=1]/th[contains(@class,"size")]'):
            size_col = len(html.xpath('//tr[position()=1]/th[contains(@class,"size")]/preceding-sibling::th')) + 1
        # Locate the seeders column by its header class.
        if html.xpath('//tr[position()=1]/th[contains(@class,"seeders")]'):
            seeders_col = len(html.xpath('//tr[position()=1]/th[contains(@class,"seeders")]/preceding-sibling::th')) + 1

        page_seeding = 0
        page_seeding_size = 0
        page_seeding_info = []
        # NOTE(review): "tr[position()]" is always true, so every row is
        # selected — presumably header rows use <th> and thus contribute no
        # <td>; confirm against a live page.
        seeding_sizes = html.xpath(f'//tr[position()]/td[{size_col}]')
        seeding_seeders = html.xpath(f'//tr[position()]/td[{seeders_col}]')
        if seeding_sizes and seeding_seeders:
            page_seeding = len(seeding_sizes)

            for i in range(0, len(seeding_sizes)):
                size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
                seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())

                page_seeding_size += size
                page_seeding_info.append([seeders, size])

        self.seeding += page_seeding
        self.seeding_size += page_seeding_size
        self.seeding_info.extend(page_seeding_info)

        # Next page: the element right after the active pagination item,
        # when it carries a page number.
        next_page = None
        next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
        if next_pages and len(next_pages) > 1:
            page_num = next_pages[0].xpath("string(.)").strip()
            if page_num.isdigit():
                next_page = f"{self._torrent_seeding_page}&page={page_num}"

        return next_page

    def _parse_user_traffic_info(self, html_text):
        """
        Parse upload/download/ratio via regex over the page text.
        """
        html_text = self._prepare_html_text(html_text)
        # "[^总]" excludes the aggregate "总上传" (total upload) figure.
        upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                 re.IGNORECASE)
        self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
        # Exclusion class avoids e.g. "子下载" / "影下载" false positives.
        download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
                                   re.IGNORECASE)
        self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
        ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
            ratio_match and ratio_match.group(1).strip()) else 0.0

    def _parse_message_unread_links(self, html_text, msg_links):
        # Site messages are not handled for UNIT3D sites.
        return None

    def _parse_message_content(self, html_text):
        # Site messages are not handled for UNIT3D sites.
        return None, None, None
diff --git a/app/speedlimiter.py b/app/speedlimiter.py
new file mode 100644
index 0000000..792e4f2
--- /dev/null
+++ b/app/speedlimiter.py
@@ -0,0 +1,212 @@
+from app.conf import SystemConfig
+from app.downloader import Downloader
+from app.mediaserver import MediaServer
+from app.utils import ExceptionUtils
+from app.utils.commons import singleton
+from app.utils.types import DownloaderType, MediaServerType
+from app.helper.security_helper import SecurityHelper
+from apscheduler.schedulers.background import BackgroundScheduler
+from config import Config
+
+import log
+
+
@singleton
class SpeedLimiter:
    """
    Throttles the download clients (qBittorrent / Transmission) while the
    media server is streaming video, optionally auto-allocating the
    residual bandwidth between the two clients.
    """
    downloader = None
    mediaserver = None
    limit_enabled = False          # any limiting configured at all
    limit_flag = False             # limits currently applied to the clients
    qb_limit = False
    qb_download_limit = 0
    qb_upload_limit = 0
    qb_upload_ratio = 0            # share of residual bandwidth for qBittorrent
    tr_limit = False
    tr_download_limit = 0
    tr_upload_limit = 0
    tr_upload_ratio = 0            # share of residual bandwidth for Transmission
    unlimited_ips = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
    auto_limit = False
    bandwidth = 0                  # total bandwidth in bit/s (config is Mbit/s)

    _scheduler = None

    def __init__(self):
        self.init_config()

    def init_config(self):
        """
        (Re)load the "SpeedLimit" system configuration and restart the
        periodic playback-check job.
        """
        self.downloader = Downloader()
        self.mediaserver = MediaServer()

        config = SystemConfig().get_system_config("SpeedLimit")
        if config:
            # Auto-allocation: total bandwidth, residual ratio and the
            # qb:tr split (e.g. "1:1").
            try:
                self.bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000
                residual_ratio = float(config.get("residual_ratio") or 1)
                if residual_ratio > 1:
                    residual_ratio = 1
                allocation = (config.get("allocation") or "1:1").split(":")
                if len(allocation) != 2 or not str(allocation[0]).isdigit() or not str(allocation[-1]).isdigit():
                    allocation = ["1", "1"]
                self.qb_upload_ratio = round(int(allocation[0]) / (int(allocation[-1]) + int(allocation[0])) * residual_ratio, 2)
                self.tr_upload_ratio = round(int(allocation[-1]) / (int(allocation[-1]) + int(allocation[0])) * residual_ratio, 2)
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self.bandwidth = 0
                self.qb_upload_ratio = 0
                self.tr_upload_ratio = 0
            self.auto_limit = True if self.bandwidth and (self.qb_upload_ratio or self.tr_upload_ratio) else False
            # Fixed qBittorrent limits (config is KB/s, client API takes B/s).
            try:
                self.qb_download_limit = int(float(config.get("qb_download") or 0)) * 1024
                self.qb_upload_limit = int(float(config.get("qb_upload") or 0)) * 1024
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self.qb_download_limit = 0
                self.qb_upload_limit = 0
            self.qb_limit = True if self.qb_download_limit or self.qb_upload_limit or self.auto_limit else False
            # Fixed Transmission limits (KB/s, passed through unscaled).
            try:
                self.tr_download_limit = int(float(config.get("tr_download") or 0))
                self.tr_upload_limit = int(float(config.get("tr_upload") or 0))
            except Exception as e:
                self.tr_download_limit = 0
                self.tr_upload_limit = 0
                ExceptionUtils.exception_traceback(e)
            self.tr_limit = True if self.tr_download_limit or self.tr_upload_limit or self.auto_limit else False
            self.limit_enabled = True if self.qb_limit or self.tr_limit else False
            # Sessions from these networks never trigger limiting.
            self.unlimited_ips["ipv4"] = config.get("ipv4") or "0.0.0.0/0"
            self.unlimited_ips["ipv6"] = config.get("ipv6") or "::/0"
        else:
            self.limit_enabled = False
        # Remove any existing scheduled job.
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
        # Start the periodic (5 min) playback check when limiting is enabled.
        if self.limit_enabled:
            self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
            self._scheduler.add_job(func=self.__check_playing_sessions,
                                    args=[self.mediaserver.get_type(), True],
                                    trigger='interval',
                                    seconds=300)
            self._scheduler.print_jobs()
            self._scheduler.start()
            log.info("播放限速服务启动")

    def __start(self):
        """
        Apply the configured speed limits to the download clients.
        """
        if self.qb_limit:
            self.downloader.set_speed_limit(
                downloader=DownloaderType.QB,
                download_limit=self.qb_download_limit,
                upload_limit=self.qb_upload_limit
            )
            if not self.limit_flag:
                log.info(f"【SpeedLimiter】Qbittorrent下载器开始限速")
        if self.tr_limit:
            self.downloader.set_speed_limit(
                downloader=DownloaderType.TR,
                download_limit=self.tr_download_limit,
                upload_limit=self.tr_upload_limit
            )
            if not self.limit_flag:
                log.info(f"【SpeedLimiter】Transmission下载器开始限速")
        self.limit_flag = True

    def __stop(self):
        """
        Lift all speed limits from the download clients (0 = unlimited).
        """
        if self.qb_limit:
            self.downloader.set_speed_limit(
                downloader=DownloaderType.QB,
                download_limit=0,
                upload_limit=0
            )
            if self.limit_flag:
                log.info(f"【SpeedLimiter】Qbittorrent下载器停止限速")
        if self.tr_limit:
            self.downloader.set_speed_limit(
                downloader=DownloaderType.TR,
                download_limit=0,
                upload_limit=0
            )
            if self.limit_flag:
                log.info(f"【SpeedLimiter】Transmission下载器停止限速")
        self.limit_flag = False

    def emby_action(self, message):
        """
        React to an Emby webhook: re-check sessions on playback start/stop.
        """
        if self.limit_enabled and message.get("Event") in ["playback.start", "playback.stop"]:
            self.__check_playing_sessions(mediaserver_type=MediaServerType.EMBY, time_check=False)

    def jellyfin_action(self, message):
        """
        React to a Jellyfin webhook (not yet implemented).
        """
        pass

    def plex_action(self, message):
        """
        React to a Plex webhook (not yet implemented).
        """
        pass

    def __check_playing_sessions(self, mediaserver_type, time_check=False):
        """
        Inspect the media server's playing sessions and start/stop limiting.

        :param mediaserver_type: media server the check was triggered for
        :param time_check: True when called from the periodic scheduler
        """
        if mediaserver_type != self.mediaserver.get_type():
            return
        playing_sessions = self.mediaserver.get_playing_sessions()
        limit_flag = False
        if mediaserver_type == MediaServerType.EMBY:
            # Sum the bitrate of video sessions from non-exempt addresses.
            total_bit_rate = 0
            for session in playing_sessions:
                # Guard against a session without a NowPlayingItem.
                playing_item = session.get("NowPlayingItem") or {}
                if not SecurityHelper.allow_access(self.unlimited_ips, session.get("RemoteEndPoint")) \
                        and playing_item.get("MediaType") == "Video":
                    # BUGFIX: "int(x) or 0" raised TypeError when Bitrate was
                    # missing — the default must be applied BEFORE int().
                    total_bit_rate += int(playing_item.get("Bitrate") or 0)
            if total_bit_rate:
                limit_flag = True
                if self.auto_limit:
                    # Split the residual bandwidth between the clients,
                    # with a floor of 10 KB/s each.
                    residual_bandwidth = (self.bandwidth - total_bit_rate)
                    if residual_bandwidth < 0:
                        self.qb_upload_limit = 10*1024
                        self.tr_upload_limit = 10
                    else:
                        qb_upload_limit = residual_bandwidth / 8 / 1024 * self.qb_upload_ratio
                        tr_upload_limit = residual_bandwidth / 8 / 1024 * self.tr_upload_ratio
                        self.qb_upload_limit = qb_upload_limit * 1024 if qb_upload_limit > 10 else 10*1024
                        self.tr_upload_limit = tr_upload_limit if tr_upload_limit > 10 else 10
        elif mediaserver_type == MediaServerType.JELLYFIN:
            pass
        elif mediaserver_type == MediaServerType.PLEX:
            pass
        else:
            return
        # Periodic/auto checks always (re)apply; webhook checks only act on
        # a state transition.
        if time_check or self.auto_limit:
            if limit_flag:
                self.__start()
            else:
                self.__stop()
        else:
            if not self.limit_flag and limit_flag:
                self.__start()
            elif self.limit_flag and not limit_flag:
                self.__stop()
            else:
                pass
+
+
+
+
+
+
diff --git a/app/subscribe.py b/app/subscribe.py
new file mode 100644
index 0000000..dc51c93
--- /dev/null
+++ b/app/subscribe.py
@@ -0,0 +1,852 @@
+import json
+from threading import Lock
+
+import log
+from app.downloader import Downloader
+from app.filter import Filter
+from app.helper import DbHelper, MetaHelper
+from app.media import Media, DouBan
+from app.media.meta import MetaInfo
+from app.message import Message
+from app.searcher import Searcher
+from app.sites import Sites
+from app.indexer import Indexer
+from app.utils import Torrent
+from app.utils.types import MediaType, SearchType
+from web.backend.web_utils import WebUtils
+
+lock = Lock()
+
+
class Subscribe:
    # Collaborating service singletons, populated in __init__
    dbhelper = None
    metahelper = None
    searcher = None
    message = None
    media = None
    downloader = None
    sites = None
    douban = None
    filter = None

    def __init__(self):
        # Wire up all services used by the subscription workflow.
        self.dbhelper = DbHelper()
        self.metahelper = MetaHelper()
        self.searcher = Searcher()
        self.message = Message()
        self.media = Media()
        self.downloader = Downloader()
        self.sites = Sites()
        self.douban = DouBan()
        # NOTE(review): self.indexer has no matching class-level attribute
        # above, unlike the other services — confirm whether intentional.
        self.indexer = Indexer()
        self.filter = Filter()
+
def add_rss_subscribe(self, mtype, name, year,
                      keyword=None,
                      season=None,
                      fuzzy_match=False,
                      mediaid=None,
                      rss_sites=None,
                      search_sites=None,
                      over_edition=False,
                      filter_restype=None,
                      filter_pix=None,
                      filter_team=None,
                      filter_rule=None,
                      save_path=None,
                      download_setting=None,
                      total_ep=None,
                      current_ep=None,
                      state="D",
                      rssid=None):
    """
    Add a movie / TV / anime subscription.

    :param mtype: media type (movie, TV, anime)
    :param name: title
    :param year: year (first-air year for series)
    :param keyword: custom search keyword
    :param season: season number
    :param fuzzy_match: whether to fuzzy-match instead of resolving TMDB info
    :param mediaid: media id (DB:/BG:/TMDBID)
    :param rss_sites: RSS site list; empty means all sites
    :param search_sites: search site list; empty means all sites
    :param over_edition: re-download for better quality ("over-edition")
    :param filter_restype: quality filter
    :param filter_pix: resolution filter
    :param filter_team: release-group filter
    :param filter_rule: keyword filter-rule group id
    :param save_path: download save path
    :param download_setting: download setting id
    :param state: initial subscription state
    :param rssid: existing subscription id when editing
    :param total_ep: total episode count override
    :param current_ep: first episode to subscribe from
    :return: (code, message, media_info); code 0 = success, 9 = already exists
    """
    if not name:
        return -1, "标题或类型有误", None
    # Normalise loosely-typed inputs coming from the web layer.
    year = int(year) if str(year).isdigit() else ""
    rss_sites = rss_sites or []
    search_sites = search_sites or []
    over_edition = 1 if over_edition else 0
    filter_rule = int(filter_rule) if str(filter_rule).isdigit() else None
    total_ep = int(total_ep) if str(total_ep).isdigit() else None
    current_ep = int(current_ep) if str(current_ep).isdigit() else None
    download_setting = int(download_setting) if str(download_setting).replace("-", "").isdigit() else ""
    fuzzy_match = True if fuzzy_match else False
    # Resolve media info for exact-match subscriptions
    if not fuzzy_match:
        if mediaid:
            # Added from a recommendation: resolve directly by id.
            media_info = WebUtils.get_mediainfo_from_id(mtype=mtype, mediaid=mediaid)
        else:
            # Resolve by name and year.
            # BUGFIX: strip after interpolation — the original called
            # .strip() on the format-string literal (a no-op), leaving a
            # trailing space in the title when year is empty.
            if season:
                title = ("%s %s 第%s季" % (name, year, season)).strip()
            else:
                title = ("%s %s" % (name, year)).strip()
            media_info = self.media.get_media_info(title=title,
                                                   mtype=mtype,
                                                   strict=True if year else False,
                                                   cache=False)
        # TMDB info must resolve for an exact-match subscription.
        if not media_info or not media_info.tmdb_info:
            return 1, "无法TMDB查询到媒体信息", None
        # Insert the subscription
    if media_info.type != MediaType.MOVIE:
            # TV series: determine season and episode counts.
            if season:
                total_episode = self.media.get_tmdb_season_episodes_num(tv_info=media_info.tmdb_info,
                                                                        season=int(season))
            else:
                # Query season/episode information.
                total_seasoninfo = self.media.get_tmdb_tv_seasons(tv_info=media_info.tmdb_info)
                if not total_seasoninfo:
                    return 2, "获取剧集信息失败", media_info
                # Latest season first.
                total_seasoninfo = sorted(total_seasoninfo,
                                          key=lambda x: x.get("season_number"),
                                          reverse=True)
                season = total_seasoninfo[0].get("season_number")
                total_episode = total_seasoninfo[0].get("episode_count")
            if not total_episode:
                return 3, "第%s季获取剧集数失败,请确认该季是否存在" % season, media_info
            media_info.begin_season = int(season)
            media_info.total_episodes = total_episode
            if total_ep:
                total = total_ep
            else:
                total = media_info.total_episodes
            if current_ep:
                # BUGFIX: episodes current_ep..total inclusive are still
                # missing, i.e. total - current_ep + 1. The original
                # subtracted 1 (undercounting by 2); cf. the
                # range(current_ep, total_ep + 1) used by subscribe_search_tv.
                lack = total - current_ep + 1
            else:
                lack = total
            if rssid:
                self.dbhelper.delete_rss_tv(rssid=rssid)
            code = self.dbhelper.insert_rss_tv(media_info=media_info,
                                               total=total,
                                               lack=lack,
                                               state=state,
                                               rss_sites=rss_sites,
                                               search_sites=search_sites,
                                               over_edition=over_edition,
                                               filter_restype=filter_restype,
                                               filter_pix=filter_pix,
                                               filter_team=filter_team,
                                               filter_rule=filter_rule,
                                               save_path=save_path,
                                               download_setting=download_setting,
                                               total_ep=total_ep,
                                               current_ep=current_ep,
                                               fuzzy_match=0,
                                               desc=media_info.overview,
                                               note=self.gen_rss_note(media_info),
                                               keyword=keyword)
    else:
            # Movie
            if rssid:
                self.dbhelper.delete_rss_movie(rssid=rssid)
            code = self.dbhelper.insert_rss_movie(media_info=media_info,
                                                  state=state,
                                                  rss_sites=rss_sites,
                                                  search_sites=search_sites,
                                                  over_edition=over_edition,
                                                  filter_restype=filter_restype,
                                                  filter_pix=filter_pix,
                                                  filter_team=filter_team,
                                                  filter_rule=filter_rule,
                                                  save_path=save_path,
                                                  download_setting=download_setting,
                                                  fuzzy_match=0,
                                                  desc=media_info.overview,
                                                  note=self.gen_rss_note(media_info),
                                                  keyword=keyword)
    if fuzzy_match:
        # Fuzzy match: store the bare title, state "R" (RSS-watch only).
        media_info = MetaInfo(title=name, mtype=mtype)
        media_info.title = name
        media_info.type = mtype
        if season:
            media_info.begin_season = int(season)
        if mtype == MediaType.MOVIE:
            if rssid:
                self.dbhelper.delete_rss_movie(rssid=rssid)
            code = self.dbhelper.insert_rss_movie(media_info=media_info,
                                                  state="R",
                                                  rss_sites=rss_sites,
                                                  search_sites=search_sites,
                                                  over_edition=over_edition,
                                                  filter_restype=filter_restype,
                                                  filter_pix=filter_pix,
                                                  filter_team=filter_team,
                                                  filter_rule=filter_rule,
                                                  save_path=save_path,
                                                  download_setting=download_setting,
                                                  fuzzy_match=1,
                                                  keyword=keyword)
        else:
            if rssid:
                self.dbhelper.delete_rss_tv(rssid=rssid)
            code = self.dbhelper.insert_rss_tv(media_info=media_info,
                                               total=0,
                                               lack=0,
                                               state="R",
                                               rss_sites=rss_sites,
                                               search_sites=search_sites,
                                               over_edition=over_edition,
                                               filter_restype=filter_restype,
                                               filter_pix=filter_pix,
                                               filter_team=filter_team,
                                               filter_rule=filter_rule,
                                               save_path=save_path,
                                               download_setting=download_setting,
                                               fuzzy_match=1,
                                               keyword=keyword)

    if code == 0:
        return code, "添加订阅成功", media_info
    elif code == 9:
        return code, "订阅已存在", media_info
    else:
        return code, "添加订阅失败", media_info
+
def finish_rss_subscribe(self, rssid, media):
    """
    Complete a subscription: record it in the history table, remove the
    subscription row and send a "finished" notification.

    :param rssid: subscription id
    :param media: identified media info, used for the notification
    """
    if not rssid or not media:
        return
    # History record type
    rtype = "MOV" if media.type == MediaType.MOVIE else "TV"
    if media.type == MediaType.MOVIE:
        # Load the movie subscription row
        rss = self.dbhelper.get_rss_movies(rssid=rssid)
        if not rss:
            return
        # Log to subscription history
        self.dbhelper.insert_rss_history(rssid=rssid,
                                         rtype=rtype,
                                         name=rss[0].NAME,
                                         year=rss[0].YEAR,
                                         tmdbid=rss[0].TMDBID,
                                         image=media.get_poster_image(),
                                         desc=media.overview)

        # Remove the subscription
        self.dbhelper.delete_rss_movie(rssid=rssid)

    # TV subscription
    else:
        # Load the TV subscription row
        rss = self.dbhelper.get_rss_tvs(rssid=rssid)
        if not rss:
            return
        total = rss[0].TOTAL_EP
        # Log to subscription history; prefer the explicit episode-count
        # override, falling back to the computed total
        self.dbhelper.insert_rss_history(rssid=rssid,
                                         rtype=rtype,
                                         name=rss[0].NAME,
                                         year=rss[0].YEAR,
                                         season=rss[0].SEASON,
                                         tmdbid=rss[0].TMDBID,
                                         image=media.get_poster_image(),
                                         desc=media.overview,
                                         total=total if total else rss[0].TOTAL,
                                         start=rss[0].CURRENT_EP)
        # Remove the subscription
        self.dbhelper.delete_rss_tv(rssid=rssid)

    # Notify that the subscription has completed
    log.info("【Rss】%s %s %s 订阅完成,删除订阅..." % (
        media.type.value,
        media.get_title_string(),
        media.get_season_string()
    ))
    self.message.send_rss_finished_message(media_info=media)
+
def get_subscribe_movies(self, rid=None, state=None):
    """
    Load movie subscriptions from the DB and normalise them into a dict
    keyed by subscription id (stringified).

    :param rid: restrict to a single subscription id
    :param state: restrict to a single state (R/D/S)
    :return: {str(id): {subscription fields}}
    """
    ret_dict = {}
    rss_movies = self.dbhelper.get_rss_movies(rssid=rid, state=state)
    rss_sites_valid = self.sites.get_site_names(rss=True)
    search_sites_valid = self.indexer.get_indexer_names()
    for rss_movie in rss_movies:
        desc = rss_movie.DESC
        note = rss_movie.NOTE
        tmdbid = rss_movie.TMDBID
        rss_sites = json.loads(rss_movie.RSS_SITES) if rss_movie.RSS_SITES else []
        search_sites = json.loads(rss_movie.SEARCH_SITES) if rss_movie.SEARCH_SITES else []
        over_edition = True if rss_movie.OVER_EDITION == 1 else False
        filter_restype = rss_movie.FILTER_RESTYPE
        filter_pix = rss_movie.FILTER_PIX
        filter_team = rss_movie.FILTER_TEAM
        filter_rule = rss_movie.FILTER_RULE
        download_setting = rss_movie.DOWNLOAD_SETTING
        save_path = rss_movie.SAVE_PATH
        fuzzy_match = True if rss_movie.FUZZY_MATCH == 1 else False
        keyword = rss_movie.KEYWORD
        # Legacy rows stored all settings JSON-encoded inside DESC;
        # detect them by the presence of '{' and unpack.
        if desc and desc.find('{') != -1:
            desc = self.__parse_rss_desc(desc)
            rss_sites = desc.get("rss_sites")
            search_sites = desc.get("search_sites")
            over_edition = True if desc.get("over_edition") == 'Y' else False
            filter_restype = desc.get("restype")
            filter_pix = desc.get("pix")
            filter_team = desc.get("team")
            filter_rule = desc.get("rule")
            download_setting = ""
            save_path = ""
            fuzzy_match = False if tmdbid else True
        if note:
            note_info = self.__parse_rss_desc(note)
        else:
            note_info = {}
        # Drop sites that are no longer configured
        rss_sites = [site for site in rss_sites if site in rss_sites_valid]
        search_sites = [site for site in search_sites if site in search_sites_valid]
        ret_dict[str(rss_movie.ID)] = {
            "id": rss_movie.ID,
            "name": rss_movie.NAME,
            "year": rss_movie.YEAR,
            "tmdbid": rss_movie.TMDBID,
            "image": rss_movie.IMAGE,
            "overview": rss_movie.DESC,
            "rss_sites": rss_sites,
            "search_sites": search_sites,
            "over_edition": over_edition,
            "filter_restype": filter_restype,
            "filter_pix": filter_pix,
            "filter_team": filter_team,
            "filter_rule": filter_rule,
            "save_path": save_path,
            "download_setting": download_setting,
            "fuzzy_match": fuzzy_match,
            "state": rss_movie.STATE,
            "poster": note_info.get("poster"),
            "release_date": note_info.get("release_date"),
            "vote": note_info.get("vote"),
            "keyword": keyword

        }
    return ret_dict
+
def get_subscribe_tvs(self, rid=None, state=None):
    """
    Load TV subscriptions from the DB and normalise them into a dict
    keyed by subscription id (stringified); mirrors get_subscribe_movies
    with the extra season/episode bookkeeping fields.

    :param rid: restrict to a single subscription id
    :param state: restrict to a single state (R/D/S)
    :return: {str(id): {subscription fields}}
    """
    ret_dict = {}
    rss_tvs = self.dbhelper.get_rss_tvs(rssid=rid, state=state)
    rss_sites_valid = self.sites.get_site_names(rss=True)
    search_sites_valid = self.indexer.get_indexer_names()
    for rss_tv in rss_tvs:
        desc = rss_tv.DESC
        note = rss_tv.NOTE
        tmdbid = rss_tv.TMDBID
        rss_sites = json.loads(rss_tv.RSS_SITES) if rss_tv.RSS_SITES else []
        search_sites = json.loads(rss_tv.SEARCH_SITES) if rss_tv.SEARCH_SITES else []
        over_edition = True if rss_tv.OVER_EDITION == 1 else False
        filter_restype = rss_tv.FILTER_RESTYPE
        filter_pix = rss_tv.FILTER_PIX
        filter_team = rss_tv.FILTER_TEAM
        filter_rule = rss_tv.FILTER_RULE
        download_setting = rss_tv.DOWNLOAD_SETTING
        save_path = rss_tv.SAVE_PATH
        total_ep = rss_tv.TOTAL_EP
        current_ep = rss_tv.CURRENT_EP
        fuzzy_match = True if rss_tv.FUZZY_MATCH == 1 else False
        keyword = rss_tv.KEYWORD
        # Legacy rows stored all settings JSON-encoded inside DESC;
        # detect them by the presence of '{' and unpack.
        if desc and desc.find('{') != -1:
            desc = self.__parse_rss_desc(desc)
            rss_sites = desc.get("rss_sites")
            search_sites = desc.get("search_sites")
            over_edition = True if desc.get("over_edition") == 'Y' else False
            filter_restype = desc.get("restype")
            filter_pix = desc.get("pix")
            filter_team = desc.get("team")
            filter_rule = desc.get("rule")
            save_path = ""
            download_setting = ""
            total_ep = desc.get("total")
            current_ep = desc.get("current")
            fuzzy_match = False if tmdbid else True
        if note:
            note_info = self.__parse_rss_desc(note)
        else:
            note_info = {}
        # Drop sites that are no longer configured
        rss_sites = [site for site in rss_sites if site in rss_sites_valid]
        search_sites = [site for site in search_sites if site in search_sites_valid]
        ret_dict[str(rss_tv.ID)] = {
            "id": rss_tv.ID,
            "name": rss_tv.NAME,
            "year": rss_tv.YEAR,
            "season": rss_tv.SEASON,
            "tmdbid": rss_tv.TMDBID,
            "image": rss_tv.IMAGE,
            "overview": rss_tv.DESC,
            "rss_sites": rss_sites,
            "search_sites": search_sites,
            "over_edition": over_edition,
            "filter_restype": filter_restype,
            "filter_pix": filter_pix,
            "filter_team": filter_team,
            "filter_rule": filter_rule,
            "save_path": save_path,
            "download_setting": download_setting,
            "total": rss_tv.TOTAL,
            "lack": rss_tv.LACK,
            "total_ep": total_ep,
            "current_ep": current_ep,
            "fuzzy_match": fuzzy_match,
            "state": rss_tv.STATE,
            "poster": note_info.get("poster"),
            "release_date": note_info.get("release_date"),
            "vote": note_info.get("vote"),
            "keyword": keyword
        }
    return ret_dict
+
@staticmethod
def __parse_rss_desc(desc):
    """
    Parse a JSON-encoded subscription field (legacy DESC / NOTE columns).

    :param desc: JSON text; may be empty or, for very old rows, text that
                 merely contains a '{' without being valid JSON
    :return: decoded dict, or {} when empty or not valid JSON
    """
    if not desc:
        return {}
    try:
        return json.loads(desc) or {}
    except (ValueError, TypeError):
        # Callers only pre-check with desc.find('{'); malformed legacy
        # content must not crash the whole subscription listing.
        return {}
+
@staticmethod
def gen_rss_note(media):
    """
    Build the JSON note payload stored alongside a subscription.

    :param media: media info object; poster, release date and vote are read
    :return: JSON string, or an empty dict when no media is given
    """
    if not media:
        return {}
    return json.dumps({
        "poster": media.get_poster_image(),
        "release_date": media.release_date,
        "vote": media.vote_average
    })
+
def refresh_rss_metainfo(self):
    """
    Periodic task: convert douban subscriptions to TMDB ones and refresh
    the stored TMDB metadata (title/year/episode counts) of active
    ('R'-state) subscriptions.
    """
    # Refresh movies
    log.info("【Subscribe】开始刷新订阅TMDB信息...")
    rss_movies = self.get_subscribe_movies(state='R')
    for rid, rss_info in rss_movies.items():
        # Skip fuzzy-match subscriptions (no TMDB binding)
        if rss_info.get("fuzzy_match"):
            continue
        rssid = rss_info.get("id")
        name = rss_info.get("name")
        year = rss_info.get("year") or ""
        tmdbid = rss_info.get("tmdbid")
        # Re-resolve TMDB info, bypassing the cache
        media_info = self.__get_media_info(tmdbid=tmdbid,
                                           name=name,
                                           year=year,
                                           mtype=MediaType.MOVIE,
                                           cache=False)
        # Only update when TMDB reports a changed title
        if media_info and media_info.tmdb_id and media_info.title != name:
            log.info(f"【Subscribe】检测到TMDB信息变化,更新电影订阅 {name} 为 {media_info.title}")
            # Update the subscription row
            self.dbhelper.update_rss_movie_tmdb(rid=rssid,
                                                tmdbid=media_info.tmdb_id,
                                                title=media_info.title,
                                                year=media_info.year,
                                                image=media_info.get_message_image(),
                                                desc=media_info.overview,
                                                note=self.gen_rss_note(media_info))
            # Invalidate the TMDB metadata cache
            self.metahelper.delete_meta_data_by_tmdbid(media_info.tmdb_id)

    # Refresh TV series
    rss_tvs = self.get_subscribe_tvs(state='R')
    for rid, rss_info in rss_tvs.items():
        # Skip fuzzy-match subscriptions (no TMDB binding)
        if rss_info.get("fuzzy_match"):
            continue
        rssid = rss_info.get("id")
        name = rss_info.get("name")
        year = rss_info.get("year") or ""
        tmdbid = rss_info.get("tmdbid")
        season = rss_info.get("season") or 1
        total = rss_info.get("total")
        total_ep = rss_info.get("total_ep")
        lack = rss_info.get("lack")
        # Re-resolve TMDB info, bypassing the cache
        media_info = self.__get_media_info(tmdbid=tmdbid,
                                           name=name,
                                           year=year,
                                           mtype=MediaType.TV,
                                           cache=False)
        if media_info and media_info.tmdb_id:
            # Current episode count for the subscribed season
            total_episode = self.media.get_tmdb_season_episodes_num(tv_info=media_info.tmdb_info,
                                                                    season=int(str(season).replace("S", "")))
            # A user-set total episode count overrides TMDB's
            if total_ep:
                total_episode = total_ep
            if total_episode and (name != media_info.title or total != total_episode):
                # Recompute the missing count, preserving what was downloaded
                lack_episode = total_episode - (total - lack)
                log.info(
                    f"【Subscribe】检测到TMDB信息变化,更新电视剧订阅 {name} 为 {media_info.title},总集数为:{total_episode}")
                # Update the subscription row
                self.dbhelper.update_rss_tv_tmdb(rid=rssid,
                                                 tmdbid=media_info.tmdb_id,
                                                 title=media_info.title,
                                                 year=media_info.year,
                                                 total=total_episode,
                                                 lack=lack_episode,
                                                 image=media_info.get_message_image(),
                                                 desc=media_info.overview,
                                                 note=self.gen_rss_note(media_info))
                # Re-register the missing episodes
                # NOTE(review): range(total - lack + 1, total + 1) uses the
                # OLD total, not total_episode — confirm this is intended.
                self.dbhelper.update_rss_tv_episodes(rid=rssid, episodes=range(total - lack + 1, total + 1))
                # Invalidate the TMDB metadata cache
                self.metahelper.delete_meta_data_by_tmdbid(media_info.tmdb_id)
    log.info("【Subscribe】订阅TMDB信息刷新完成")
+
def __get_media_info(self, tmdbid, name, year, mtype, cache=True):
    """
    Resolve full media info either directly by TMDB id or, for douban
    ("DB:"-prefixed) ids and missing ids, by a strict name/year search.

    :param tmdbid: TMDB id, possibly "DB:"-prefixed or None
    :param name: title used for the fallback search
    :param year: year used for the fallback search (may be "")
    :param mtype: MediaType to search for
    :param cache: whether the name search may use cached results
    :return: media info object, or None when nothing matched
    """
    # BUGFIX: strip after interpolation — the original stripped the
    # format-string literal (a no-op), so an empty year left a trailing
    # space in the title passed to MetaInfo / the search.
    title = ("%s %s" % (name, year)).strip()
    if tmdbid and not str(tmdbid).startswith("DB:"):
        media_info = MetaInfo(title=title)
        tmdb_info = self.media.get_tmdb_info(mtype=mtype, tmdbid=tmdbid)
        media_info.set_tmdb_info(tmdb_info)
    else:
        media_info = self.media.get_media_info(title=title, mtype=mtype, strict=True, cache=cache)
    return media_info
+
def subscribe_search_all(self):
    """
    Search every subscription currently in the 'R' state; invoked by the
    timed scheduler.
    """
    self.subscribe_search(state="R")
+
def subscribe_search(self, state="D"):
    """
    Process subscriptions in the given state: first search existing
    sources; anything still missing stays flagged for RSS. Invoked by the
    timed scheduler.

    Serialized with a module-level lock so overlapping scheduler runs
    cannot search concurrently.

    :param state: subscription state to process (default 'D' = queued)
    """
    # Idiom: `with` replaces the original try/acquire/finally-release.
    with lock:
        # Movies first
        self.subscribe_search_movie(state=state)
        # Then TV series
        self.subscribe_search_tv(state=state)
+
def subscribe_search_movie(self, rssid=None, state='D'):
    """
    Search sources for movie subscriptions.

    :param rssid: subscription id; when given, that subscription is
                  searched regardless of its state
    :param state: when no rssid is given, only subscriptions in this
                  state are searched (default 'D' = queued)
    """
    if rssid:
        rss_movies = self.get_subscribe_movies(rid=rssid)
    else:
        rss_movies = self.get_subscribe_movies(state=state)
    if rss_movies:
        log.info("【Subscribe】共有 %s 个电影订阅需要检索" % len(rss_movies))
    for rid, rss_info in rss_movies.items():
        # Skip fuzzy-match subscriptions (handled by the RSS pipeline)
        if rss_info.get("fuzzy_match"):
            continue
        # Search scope
        rssid = rss_info.get("id")
        name = rss_info.get("name")
        year = rss_info.get("year") or ""
        tmdbid = rss_info.get("tmdbid")
        over_edition = rss_info.get("over_edition")
        keyword = rss_info.get("keyword")

        # Mark as searching
        self.dbhelper.update_rss_movie_state(rssid=rssid, state='S')
        # Identify the media
        media_info = self.__get_media_info(tmdbid, name, year, MediaType.MOVIE)
        # Not identified -> back to 'R' and move on
        if not media_info or not media_info.tmdb_info:
            self.dbhelper.update_rss_movie_state(rssid=rssid, state='R')
            continue
        media_info.set_download_info(download_setting=rss_info.get("download_setting"),
                                     save_path=rss_info.get("save_path"))
        # Custom search keyword
        media_info.keyword = keyword
        # Unless over-edition (quality re-download), check existence first
        if not over_edition:
            # Already in the library?
            exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info)
            # Present -> complete the subscription
            if exist_flag:
                log.info("【Subscribe】电影 %s 已存在" % media_info.get_title_string())
                self.finish_rss_subscribe(rssid=rssid, media=media_info)
                continue
        else:
            # Over-edition: treat as fully missing
            no_exists = {}
            # Flag the search as over-edition
            media_info.over_edition = over_edition
            # Pass the current best priority into the search
            media_info.res_order = self.dbhelper.get_rss_overedition_order(rtype=media_info.type,
                                                                           rssid=rssid)
        # Start searching
        filter_dict = {
            "restype": rss_info.get('filter_restype'),
            "pix": rss_info.get('filter_pix'),
            "team": rss_info.get('filter_team'),
            "rule": rss_info.get('filter_rule'),
            "site": rss_info.get("search_sites")
        }
        search_result, _, _, _ = self.searcher.search_one_media(
            media_info=media_info,
            in_from=SearchType.RSS,
            no_exists=no_exists,
            sites=rss_info.get("search_sites"),
            filters=filter_dict)
        if search_result:
            # Over-edition bookkeeping
            if over_edition:
                self.update_subscribe_over_edition(rtype=search_result.type,
                                                   rssid=rssid,
                                                   media=search_result)
            else:
                self.finish_rss_subscribe(rssid=rssid, media=media_info)
        else:
            self.dbhelper.update_rss_movie_state(rssid=rssid, state='R')
+
def subscribe_search_tv(self, rssid=None, state="D"):
    """
    Search sources for TV subscriptions.

    :param rssid: subscription id; when given, that subscription is
                  searched regardless of its state
    :param state: when no rssid is given, only subscriptions in this
                  state are searched (default 'D' = queued)
    """
    if rssid:
        rss_tvs = self.get_subscribe_tvs(rid=rssid)
    else:
        rss_tvs = self.get_subscribe_tvs(state=state)
    if rss_tvs:
        log.info("【Subscribe】共有 %s 个电视剧订阅需要检索" % len(rss_tvs))
    # Accumulated missing-episode map keyed by tmdb_id
    rss_no_exists = {}
    for rid, rss_info in rss_tvs.items():
        # Skip fuzzy-match subscriptions (handled by the RSS pipeline)
        if rss_info.get("fuzzy_match"):
            continue
        rssid = rss_info.get("id")
        name = rss_info.get("name")
        year = rss_info.get("year") or ""
        tmdbid = rss_info.get("tmdbid")
        over_edition = rss_info.get("over_edition")
        keyword = rss_info.get("keyword")
        # Mark as searching
        self.dbhelper.update_rss_tv_state(rssid=rssid, state='S')
        # Identify the media
        media_info = self.__get_media_info(tmdbid, name, year, MediaType.TV)
        # Not identified -> back to 'R' and move on
        if not media_info or not media_info.tmdb_info:
            self.dbhelper.update_rss_tv_state(rssid=rssid, state='R')
            continue
        # Download settings for this subscription
        media_info.set_download_info(download_setting=rss_info.get("download_setting"),
                                     save_path=rss_info.get("save_path"))
        # Subscribed season (default 1, "S" prefix tolerated)
        season = 1
        if rss_info.get("season"):
            season = int(str(rss_info.get("season")).replace("S", ""))
        media_info.begin_season = season
        # Subscription id travels with the media info
        media_info.rssid = rssid
        # Episode-count overrides
        # NOTE(review): total_ep is read from "total", not "total_ep" —
        # looks intentional (actual total used for the existence check),
        # but confirm against get_subscribe_tvs' field meanings.
        total_ep = rss_info.get("total")
        current_ep = rss_info.get("current_ep")
        # Custom search keyword
        media_info.keyword = keyword
        # Missing episodes recorded for this subscription
        episodes = self.get_subscribe_tv_episodes(rss_info.get("id"))
        if episodes is None:
            episodes = []
        # NOTE(review): both branches build the same dict; only the
        # episodes override differs — could be collapsed.
        if current_ep:
            episodes = list(range(current_ep, total_ep + 1))
            rss_no_exists[media_info.tmdb_id] = [
                {
                    "season": season,
                    "episodes": episodes,
                    "total_episodes": total_ep
                }
            ]
        else:
            rss_no_exists[media_info.tmdb_id] = [
                {
                    "season": season,
                    "episodes": episodes,
                    "total_episodes": total_ep
                }
            ]
        # Unless over-edition, reconcile against the local library
        if not over_edition:
            exist_flag, library_no_exists, _ = self.downloader.check_exists_medias(
                meta_info=media_info,
                total_ep={season: total_ep})
            # Episodes (partially) present
            if exist_flag:
                # Everything present -> complete the subscription
                if not library_no_exists \
                        or not library_no_exists.get(media_info.tmdb_id):
                    log.info("【Subscribe】电视剧 %s 订阅剧集已全部存在" % (
                        media_info.get_title_string()))
                    # Finish the subscription
                    self.finish_rss_subscribe(rssid=rss_info.get("id"),
                                              media=media_info)
                    continue
                # Intersect subscription-missing with library-missing
                rss_no_exists = Torrent.get_intersection_episodes(target=rss_no_exists,
                                                                  source=library_no_exists,
                                                                  title=media_info.tmdb_id)
                if rss_no_exists.get(media_info.tmdb_id):
                    log.info("【Subscribe】%s 订阅缺失季集:%s" % (
                        media_info.get_title_string(),
                        rss_no_exists.get(media_info.tmdb_id)
                    ))
        else:
            # Flag the search as over-edition
            media_info.over_edition = over_edition
            # Pass the current best priority into the search
            media_info.res_order = self.dbhelper.get_rss_overedition_order(rtype=MediaType.TV,
                                                                           rssid=rssid)

        # Start searching
        filter_dict = {
            "restype": rss_info.get('filter_restype'),
            "pix": rss_info.get('filter_pix'),
            "team": rss_info.get('filter_team'),
            "rule": rss_info.get('filter_rule'),
            "site": rss_info.get("search_sites")
        }
        search_result, no_exists, _, _ = self.searcher.search_one_media(
            media_info=media_info,
            in_from=SearchType.RSS,
            no_exists=rss_no_exists,
            sites=rss_info.get("search_sites"),
            filters=filter_dict)
        if search_result \
                or not no_exists \
                or not no_exists.get(media_info.tmdb_id):
            # Over-edition bookkeeping
            if over_edition:
                self.update_subscribe_over_edition(rtype=media_info.type,
                                                   rssid=rssid,
                                                   media=search_result)
            else:
                # Finish the subscription
                self.finish_rss_subscribe(rssid=rssid, media=media_info)
        elif no_exists:
            # Still missing episodes: record them and reset the state
            self.update_subscribe_tv_lack(rssid=rssid,
                                          media_info=media_info,
                                          seasoninfo=no_exists.get(media_info.tmdb_id))
+
def update_rss_state(self, rtype, rssid, state):
    """
    Update a subscription's state column, dispatching on media type.

    :param rtype: MediaType of the subscription
    :param rssid: subscription id
    :param state: new state, one of R/D/S
    """
    updater = self.dbhelper.update_rss_movie_state \
        if rtype == MediaType.MOVIE \
        else self.dbhelper.update_rss_tv_state
    updater(rssid=rssid, state=state)
+
def update_subscribe_over_edition(self, rtype, rssid, media):
    """
    Update an over-edition ("wash") subscription after a hit.

    Records the priority of the resource just grabbed; if it already
    matches the highest-priority filter rule, the subscription is
    completed, otherwise it is reset to 'R' to keep watching.

    :param rtype: subscription media type
    :param rssid: subscription id
    :param media: media info carrying res_order / filter_rule
    :return: True when the subscription was completed, else False
    """
    # BUGFIX: the original guard tested `not media.res_order` twice;
    # the duplicate has been removed.
    if not rssid \
            or not media.res_order \
            or not media.filter_rule:
        return False
    # Record the priority of the grabbed resource
    self.dbhelper.update_rss_filter_order(rtype=media.type,
                                          rssid=rssid,
                                          res_order=media.res_order)
    # Does it already match the top-priority rule?
    over_edition_order = self.filter.get_rule_first_order(rulegroup=media.filter_rule)
    if int(media.res_order) >= int(over_edition_order):
        # Best quality reached: finish the over-edition subscription
        self.finish_rss_subscribe(rssid=rssid, media=media)
        return True
    else:
        # Keep watching for a better release
        self.update_rss_state(rtype=rtype, rssid=rssid, state='R')
        return False
+
def check_subscribe_over_edition(self, rtype, rssid, res_order):
    """
    Compare a candidate resource's priority against the best priority
    already recorded for an over-edition subscription.

    :param rtype: subscription media type
    :param rssid: subscription id
    :param res_order: candidate resource priority
    :return: True when the candidate is strictly better (or nothing has
             been recorded yet), otherwise False
    """
    pre_res_order = self.dbhelper.get_rss_overedition_order(rtype=rtype, rssid=rssid)
    if not pre_res_order:
        return True
    return int(pre_res_order) < int(res_order)
+
def update_subscribe_tv_lack(self, rssid, media_info, seasoninfo):
    """
    Persist the remaining missing episodes for a TV subscription and put
    it back into the 'R' (watching) state.

    :param rssid: subscription id
    :param media_info: identified media info (season used for matching)
    :param seasoninfo: list of {"season": n, "episodes": [...]} entries
    """
    if not seasoninfo:
        return
    self.dbhelper.update_rss_tv_state(rssid=rssid, state='R')
    for info in seasoninfo:
        # Only the entry for the subscribed season is relevant
        if str(info.get("season")) == media_info.get_season_seq():
            if info.get("episodes"):
                log.info("【Subscribe】更新电视剧 %s %s 缺失集数为 %s" % (
                    media_info.get_title_string(),
                    media_info.get_season_string(),
                    len(info.get("episodes"))))
                self.dbhelper.update_rss_tv_lack(rssid=rssid, lack_episodes=info.get("episodes"))
            break
+
def get_subscribe_tv_episodes(self, rssid):
    """
    Fetch the missing-episode list recorded for a TV subscription.

    :param rssid: subscription id
    :return: episode list as stored by DbHelper (may be None)
    """
    return self.dbhelper.get_rss_tv_episodes(rssid)
diff --git a/app/subtitle.py b/app/subtitle.py
new file mode 100644
index 0000000..d256116
--- /dev/null
+++ b/app/subtitle.py
@@ -0,0 +1,363 @@
+import datetime
+import os.path
+import re
+import shutil
+
+from lxml import etree
+
+import log
+from app.conf import SiteConf
+from app.helper import OpenSubtitles
+from app.utils import RequestUtils, PathUtils, SystemUtils, StringUtils, ExceptionUtils
+from app.utils.commons import singleton
+from app.utils.types import MediaType
+from config import Config, RMT_SUBEXT
+
+
@singleton
class Subtitle:
    # OpenSubtitles helper instance
    opensubtitles = None
    # Temp dir for downloaded subtitle archives
    _save_tmp_path = None
    # Configured downloader: "opensubtitles" or "chinesesubfinder"
    _server = None
    _host = None
    _api_key = None
    # ChineseSubFinder path mapping (local -> remote)
    _remote_path = None
    _local_path = None
    _opensubtitles_enable = False

    def __init__(self):
        self.init_config()

    def init_config(self):
        """
        (Re)load subtitle downloader settings from the config file and
        make sure the temp directory exists.
        """
        self.opensubtitles = OpenSubtitles()
        self._save_tmp_path = Config().get_temp_path()
        if not os.path.exists(self._save_tmp_path):
            os.makedirs(self._save_tmp_path)
        subtitle = Config().get_config('subtitle')
        if subtitle:
            self._server = subtitle.get("server")
            if self._server == "chinesesubfinder":
                self._api_key = subtitle.get("chinesesubfinder", {}).get("api_key")
                self._host = subtitle.get("chinesesubfinder", {}).get('host')
                if self._host:
                    # Normalise host to the "http://host/" form
                    if not self._host.startswith('http'):
                        self._host = "http://" + self._host
                    if not self._host.endswith('/'):
                        self._host = self._host + "/"
                self._local_path = subtitle.get("chinesesubfinder", {}).get("local_path")
                self._remote_path = subtitle.get("chinesesubfinder", {}).get("remote_path")
            else:
                self._opensubtitles_enable = subtitle.get("opensubtitles", {}).get("enable")
+
def download_subtitle(self, items, server=None):
    """
    Entry point for subtitle downloading.

    :param items: list of dicts shaped
        {"type":, "file", "file_ext":, "name":, "title", "year":,
         "season":, "episode":, "bluray":}
    :param server: explicit downloader name; falls back to the
        configured one when omitted
    :return: (success flag, message)
    """
    if not items:
        return False, "参数有误"
    _server = server or self._server
    if _server == "opensubtitles":
        # Honour the enable switch only when the caller did not
        # explicitly request this server.
        if server or self._opensubtitles_enable:
            return self.__download_opensubtitles(items)
    elif _server == "chinesesubfinder":
        return self.__download_chinesesubfinder(items)
    return False, "未配置字幕下载器"
+
def __search_opensubtitles(self, item):
    """
    Query OpenSubtitles.org for subtitles matching one item.

    :param item: item dict (see download_subtitle)
    :return: list of subtitle records; empty when the helper is unset
    """
    if not self.opensubtitles:
        return []
    return self.opensubtitles.search_subtitles(item)
+
def __download_opensubtitles(self, items):
    """
    Download subtitles via the OpenSubtitles API.

    :param items: list of item dicts (see download_subtitle)
    :return: (success flag, last status message)
    """
    if not self.opensubtitles:
        return False, "未配置OpenSubtitles"
    # Cache search results per title so repeated items don't re-query
    subtitles_cache = {}
    success = False
    ret_msg = ""
    for item in items:
        if not item:
            continue
        if not item.get("name") or not item.get("file"):
            continue
        # TV episodes can only be matched reliably via imdbid
        if item.get("type") == MediaType.TV and not item.get("imdbid"):
            log.warn("【Subtitle】电视剧类型需要imdbid检索字幕,跳过...")
            ret_msg = "电视剧需要imdbid检索字幕"
            continue
        subtitles = subtitles_cache.get(item.get("name"))
        if subtitles is None:
            log.info(
                "【Subtitle】开始从Opensubtitle.org检索字幕: %s,imdbid=%s" % (item.get("name"), item.get("imdbid")))
            subtitles = self.__search_opensubtitles(item)
            if not subtitles:
                subtitles_cache[item.get("name")] = []
                log.info("【Subtitle】%s 未检索到字幕" % item.get("name"))
                ret_msg = "%s 未检索到字幕" % item.get("name")
            else:
                subtitles_cache[item.get("name")] = subtitles
                log.info("【Subtitle】opensubtitles.org返回数据:%s" % len(subtitles))
        if not subtitles:
            continue
        # Number of subtitles saved for this item
        subtitle_count = 0
        for subtitle in subtitles:
            # Without imdbid, match by "Title (Year)" string
            if not item.get("imdbid"):
                if str(subtitle.get('title')) != "%s (%s)" % (item.get("name"), item.get("year")):
                    continue
            # Season match
            # NOTE(review): subtitle.get('season') is assumed non-None
            # here — a record without a season would raise; confirm the
            # API always returns it for series.
            if item.get('season') \
                    and str(subtitle.get('season').replace("Season", "").strip()) != str(item.get('season')):
                continue
            # Episode match
            if item.get('episode') \
                    and str(subtitle.get('episode')) != str(item.get('episode')):
                continue
            # Subtitle file name as reported by the site
            SubFileName = subtitle.get('description')
            # Download URL
            Download_Link = subtitle.get('link')
            # Target path beside the media file (zh-cn tagged)
            Media_File = "%s.chi.zh-cn%s" % (item.get("file"), item.get("file_ext"))
            log.info("【Subtitle】正在从opensubtitles.org下载字幕 %s 到 %s " % (SubFileName, Media_File))
            # Download, reusing the helper's cookie/UA
            ret = RequestUtils(cookies=self.opensubtitles.get_cookie(),
                               headers=self.opensubtitles.get_ua()).get_res(Download_Link)
            if ret and ret.status_code == 200:
                # Save the ZIP to the temp dir
                file_name = self.__get_url_subtitle_name(ret.headers.get('content-disposition'), Download_Link)
                if not file_name:
                    continue
                zip_file = os.path.join(self._save_tmp_path, file_name)
                zip_path = os.path.splitext(zip_file)[0]
                with open(zip_file, 'wb') as f:
                    f.write(ret.content)
                # Extract the archive
                shutil.unpack_archive(zip_file, zip_path, format='zip')
                # Move every subtitle file next to the media file
                for sub_file in PathUtils.get_dir_files(in_path=zip_path, exts=RMT_SUBEXT):
                    self.__transfer_subtitle(sub_file, Media_File)
                # Clean up temp files (best-effort)
                try:
                    shutil.rmtree(zip_path)
                    os.remove(zip_file)
                except Exception as err:
                    ExceptionUtils.exception_traceback(err)
            else:
                log.error("【Subtitle】下载字幕文件失败:%s" % Download_Link)
                continue
            # Keep at most 3 subtitles per item
            subtitle_count += 1
            if subtitle_count > 2:
                break
        if not subtitle_count:
            if item.get('episode'):
                log.info("【Subtitle】%s 第%s季 第%s集 未找到符合条件的字幕" % (
                    item.get("name"), item.get("season"), item.get("episode")))
                ret_msg = "%s 第%s季 第%s集 未找到符合条件的字幕" % (
                    item.get("name"), item.get("season"), item.get("episode"))
            else:
                log.info("【Subtitle】%s 未找到符合条件的字幕" % item.get("name"))
                ret_msg = "%s 未找到符合条件的字幕" % item.get("name")
        else:
            log.info("【Subtitle】%s 共下载了 %s 个字幕" % (item.get("name"), subtitle_count))
            ret_msg = "%s 共下载了 %s 个字幕" % (item.get("name"), subtitle_count)
            success = True
    if success:
        return True, ret_msg
    else:
        return False, ret_msg
+
def __download_chinesesubfinder(self, items):
    """
    Queue subtitle download jobs on a ChineseSubFinder instance via its
    add-job API.

    :param items: list of item dicts (see download_subtitle)
    :return: (success flag, last status message)
    """
    if not self._host or not self._api_key:
        return False, "未配置ChineseSubFinder"
    req_url = "%sapi/v1/add-job" % self._host
    # Paths already submitted in this batch (one job per file)
    notify_items = []
    success = False
    ret_msg = ""
    for item in items:
        if not item:
            continue
        if not item.get("name") or not item.get("file"):
            continue
        if item.get("bluray"):
            # BluRay folders are submitted as a pseudo .mp4 path
            file_path = "%s.mp4" % item.get("file")
        else:
            # Ensure the path carries the file extension exactly once
            if os.path.splitext(item.get("file"))[-1] != item.get("file_ext"):
                file_path = "%s%s" % (item.get("file"), item.get("file_ext"))
            else:
                file_path = item.get("file")

        # Map the local path to the path ChineseSubFinder sees
        if self._local_path and self._remote_path and file_path.startswith(self._local_path):
            file_path = file_path.replace(self._local_path, self._remote_path).replace('\\', '/')

        # Only one job per file path
        if file_path not in notify_items:
            notify_items.append(file_path)
            log.info("【Subtitle】通知ChineseSubFinder下载字幕: %s" % file_path)
            params = {
                "video_type": 0 if item.get("type") == MediaType.MOVIE else 1,
                "physical_video_file_full_path": file_path,
                "task_priority_level": 3,
                "media_server_inside_video_id": "",
                "is_bluray": item.get("bluray")
            }
            try:
                res = RequestUtils(headers={
                    "Authorization": "Bearer %s" % self._api_key
                }).post(req_url, json=params)
                if not res or res.status_code != 200:
                    log.error("【Subtitle】调用ChineseSubFinder API失败!")
                    ret_msg = "调用ChineseSubFinder API失败"
                else:
                    # If the directory lacks recognised nfo metadata this
                    # endpoint returns control characters (presumably a
                    # ChineseSubFinder quirk); Emby metadata refresh is async
                    if res.text:
                        job_id = res.json().get("job_id")
                        message = res.json().get("message")
                        if not job_id:
                            log.warn("【Subtitle】ChineseSubFinder下载字幕出错:%s" % message)
                            ret_msg = "ChineseSubFinder下载字幕出错:%s" % message
                        else:
                            log.info("【Subtitle】ChineseSubFinder任务添加成功:%s" % job_id)
                            ret_msg = "ChineseSubFinder任务添加成功:%s" % job_id
                            success = True
                    else:
                        log.error("【Subtitle】%s 目录缺失nfo元数据" % file_path)
                        ret_msg = "%s 目录下缺失nfo元数据:" % file_path
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                log.error("【Subtitle】连接ChineseSubFinder出错:" + str(e))
                ret_msg = "连接ChineseSubFinder出错:%s" % str(e)
    if success:
        return True, ret_msg
    else:
        return False, ret_msg
+
@staticmethod
def __transfer_subtitle(sub_file, media_file):
    """
    Place a downloaded subtitle next to the media file, renaming it to
    the media basename while keeping the subtitle's own extension.

    :param sub_file: path of the downloaded subtitle
    :param media_file: path of the target media file
    :return: 1 when the target already exists, otherwise the result of
             SystemUtils.copy
    """
    target = "%s%s" % (os.path.splitext(media_file)[0],
                       os.path.splitext(sub_file)[-1])
    if os.path.exists(target):
        return 1
    return SystemUtils.copy(sub_file, target)
+
def download_subtitle_from_site(self, media_info, cookie, ua, download_dir):
    """
    Scrape a torrent detail page for subtitle links and save the
    subtitles into the given directory.

    :param media_info: media info carrying page_url of the detail page
    :param cookie: site cookie string for the requests
    :param ua: site user-agent headers for the requests
    :param download_dir: directory to save subtitles into
    """
    if not media_info.page_url:
        return
    # Subtitle download directory must be configured
    log.info("【Subtitle】开始从站点下载字幕:%s" % media_info.page_url)
    if not download_dir:
        log.warn("【Subtitle】未找到字幕下载目录")
        return
    # Fetch the detail page
    request = RequestUtils(cookies=cookie, headers=ua)
    res = request.get_res(media_info.page_url)
    if res and res.status_code == 200:
        if not res.text:
            log.warn(f"【Subtitle】读取页面代码失败:{media_info.page_url}")
            return
        html = etree.HTML(res.text)
        sublink_list = []
        # Try every known site-specific subtitle-link XPath
        for xpath in SiteConf.SITE_SUBTITLE_XPATH:
            sublinks = html.xpath(xpath)
            if sublinks:
                for sublink in sublinks:
                    if not sublink:
                        continue
                    # Make relative links absolute against the page's base URL
                    if not sublink.startswith("http"):
                        base_url = StringUtils.get_base_url(media_info.page_url)
                        if sublink.startswith("/"):
                            sublink = "%s%s" % (base_url, sublink)
                        else:
                            sublink = "%s/%s" % (base_url, sublink)
                    sublink_list.append(sublink)
        # Download every discovered subtitle link
        for sublink in sublink_list:
            log.info(f"【Subtitle】找到字幕下载链接:{sublink},开始下载...")
            # Download
            ret = request.get_res(sublink)
            if ret and ret.status_code == 200:
                # Ensure the target directory exists
                if not os.path.exists(download_dir):
                    os.makedirs(download_dir)
                # Work out the file name from headers/URL
                file_name = self.__get_url_subtitle_name(ret.headers.get('content-disposition'), sublink)
                if not file_name:
                    log.warn(f"【Subtitle】链接不是字幕文件:{sublink}")
                    continue
                if file_name.lower().endswith(".zip"):
                    # ZIP archive: save to temp dir
                    zip_file = os.path.join(self._save_tmp_path, file_name)
                    # Extraction path
                    zip_path = os.path.splitext(zip_file)[0]
                    with open(zip_file, 'wb') as f:
                        f.write(ret.content)
                    # Extract
                    shutil.unpack_archive(zip_file, zip_path, format='zip')
                    # Move every contained subtitle into download_dir
                    for sub_file in PathUtils.get_dir_files(in_path=zip_path, exts=RMT_SUBEXT):
                        target_sub_file = os.path.join(download_dir,
                                                       os.path.splitext(os.path.basename(sub_file))[0])
                        log.info(f"【Subtitle】转移字幕 {sub_file} 到 {target_sub_file}")
                        self.__transfer_subtitle(sub_file, target_sub_file)
                    # Clean up temp files (best-effort)
                    try:
                        shutil.rmtree(zip_path)
                        os.remove(zip_file)
                    except Exception as err:
                        ExceptionUtils.exception_traceback(err)
                else:
                    # Plain subtitle file
                    sub_file = os.path.join(self._save_tmp_path, file_name)
                    # Save to temp dir
                    with open(sub_file, 'wb') as f:
                        f.write(ret.content)
                    target_sub_file = os.path.join(download_dir,
                                                   os.path.splitext(os.path.basename(sub_file))[0])
                    log.info(f"【Subtitle】转移字幕 {sub_file} 到 {target_sub_file}")
                    self.__transfer_subtitle(sub_file, target_sub_file)
            else:
                log.error(f"【Subtitle】下载字幕文件失败:{sublink}")
                continue
        if sublink_list:
            log.info(f"【Subtitle】{media_info.page_url} 页面字幕下载完成")
    elif res is not None:
        log.warn(f"【Subtitle】连接 {media_info.page_url} 失败,状态码:{res.status_code}")
    else:
        log.warn(f"【Subtitle】无法打开链接:{media_info.page_url}")
+
+ @staticmethod
+ def __get_url_subtitle_name(disposition, url):
+ """
+ 从下载请求中获取字幕文件名
+ """
+ file_name = re.findall(r"filename=\"?(.+)\"?", disposition or "")
+ if file_name:
+ file_name = str(file_name[0].encode('ISO-8859-1').decode()).split(";")[0].strip()
+ if file_name.endswith('"'):
+ file_name = file_name[:-1]
+ elif url and os.path.splitext(url)[-1] in (RMT_SUBEXT + ['.zip']):
+ file_name = url.split("/")[-1]
+ else:
+ file_name = str(datetime.datetime.now())
+ return file_name
diff --git a/app/sync.py b/app/sync.py
new file mode 100644
index 0000000..e193705
--- /dev/null
+++ b/app/sync.py
@@ -0,0 +1,394 @@
+import os
+import threading
+import traceback
+
+from watchdog.events import FileSystemEventHandler
+from watchdog.observers import Observer
+from watchdog.observers.polling import PollingObserver
+
+import log
+from app.conf import ModuleConf
+from app.helper import DbHelper
+from config import RMT_MEDIAEXT, Config
+from app.filetransfer import FileTransfer
+from app.utils.commons import singleton
+from app.utils import PathUtils, ExceptionUtils
+from app.utils.types import SyncType, OsType
+
+lock = threading.Lock()
+
+
class FileMonitorHandler(FileSystemEventHandler):
    """
    Watchdog event handler that forwards filesystem events from a monitored
    directory to the Sync instance that owns it.
    """

    def __init__(self, monpath, sync, **kwargs):
        super().__init__(**kwargs)
        # Directory being watched (kept for reference)
        self._watch_path = monpath
        # Owning Sync object that performs the actual processing
        self.sync = sync

    def on_created(self, event):
        # A new file or directory appeared under the watched path
        self.sync.file_change_handler(event, "创建", event.src_path)

    def on_moved(self, event):
        # Something was moved/renamed; the destination path is what matters
        self.sync.file_change_handler(event, "移动", event.dest_path)

    # on_modified is intentionally disabled (was commented out upstream):
    # def on_modified(self, event):
    #     self.sync.file_change_handler(event, "修改", event.src_path)
+
+
@singleton
class Sync(object):
    """
    Directory monitoring / synchronization service.

    Watches configured source directories with watchdog and hands changed
    files to FileTransfer, either as link-only sync or as a full
    identify-and-rename transfer.
    """
    # FileTransfer instance used to move/link files
    filetransfer = None
    # Database access helper
    dbhelper = None

    # Mapping: monitored source dir -> {id, target, unknown, onlylink, syncmod}
    sync_dir_config = {}
    # Active watchdog observers (one per monitored directory)
    _observer = []
    # Sync path rows loaded from the database
    _sync_paths = []
    # Host OS type; WINDOWS forces the polling observer
    _sync_sys = OsType.LINUX
    # Paths already handled, to de-duplicate repeated filesystem events
    _synced_files = []
    # Directories queued for batch transfer: dir -> {target, unknown, syncmod, files}
    _need_sync_paths = {}

    def __init__(self):
        self.init_config()

    def init_config(self):
        """
        Reload sync settings from configuration and database, then rebuild
        the monitored-directory table.
        """
        self.dbhelper = DbHelper()
        self.filetransfer = FileTransfer()
        sync = Config().get_config('sync')
        sync_paths = self.dbhelper.get_config_sync_paths()
        if sync and sync_paths:
            # "windows" forces the polling observer in run_service()
            if sync.get('nas_sys') == "windows":
                self._sync_sys = OsType.WINDOWS
            self._sync_paths = sync_paths
            self.init_sync_dirs()

    def init_sync_dirs(self):
        """
        Initialize the monitored-directory configuration from the loaded
        sync path rows, creating target/unknown directories as needed.
        """
        self.sync_dir_config = {}
        if self._sync_paths:
            for sync_item in self._sync_paths:
                if not sync_item:
                    continue
                # Row ID
                sync_id = sync_item.ID
                # Enabled flag
                enabled = True if sync_item.ENABLED else False
                # Link-only flag (no identify/rename) — inverse of RENAME
                only_link = False if sync_item.RENAME else True
                # Transfer mode
                path_syncmode = ModuleConf.RMT_MODES.get(sync_item.MODE)
                # Source dir | target dir | unknown dir
                monpath = sync_item.SOURCE
                target_path = sync_item.DEST
                unknown_path = sync_item.UNKNOWN
                if target_path and unknown_path:
                    log.info("【Sync】读取到监控目录:%s,目的目录:%s,未识别目录:%s,转移方式:%s" % (
                        monpath, target_path, unknown_path, path_syncmode.value))
                elif target_path:
                    log.info(
                        "【Sync】读取到监控目录:%s,目的目录:%s,转移方式:%s" % (monpath, target_path, path_syncmode.value))
                else:
                    log.info("【Sync】读取到监控目录:%s,转移方式:%s" % (monpath, path_syncmode.value))
                if not enabled:
                    log.info("【Sync】%s 不进行监控和同步:手动关闭" % monpath)
                    continue
                if only_link:
                    log.info("【Sync】%s 不进行识别和重命名" % monpath)
                # Create target/unknown dirs if they do not exist yet
                if target_path and not os.path.exists(target_path):
                    log.info("【Sync】目的目录不存在,正在创建:%s" % target_path)
                    os.makedirs(target_path)
                if unknown_path and not os.path.exists(unknown_path):
                    log.info("【Sync】未识别目录不存在,正在创建:%s" % unknown_path)
                    os.makedirs(unknown_path)
                # Register the mapping (only for an existing source dir)
                if os.path.exists(monpath):
                    self.sync_dir_config[monpath] = {
                        'id': sync_id,
                        'target': target_path,
                        'unknown': unknown_path,
                        'onlylink': only_link,
                        'syncmod': path_syncmode
                    }
                else:
                    log.error("【Sync】%s 目录不存在!" % monpath)

    def get_sync_dirs(self):
        """
        Return all monitored source directories (normalized paths).
        """
        if not self.sync_dir_config:
            return []
        return [os.path.normpath(key) for key in self.sync_dir_config.keys()]

    def file_change_handler(self, event, text, event_path):
        """
        Handle a single filesystem change event.
        :param event: the watchdog event
        :param text: event description (used in logs)
        :param event_path: affected file path
        """
        if not event.is_directory:
            # A file changed
            try:
                if not os.path.exists(event_path):
                    return
                log.debug("【Sync】文件%s:%s" % (text, event_path))
                # Skip if this path was already handled (events can repeat)
                need_handler_flag = False
                try:
                    lock.acquire()
                    if event_path not in self._synced_files:
                        self._synced_files.append(event_path)
                        need_handler_flag = True
                finally:
                    lock.release()
                if not need_handler_flag:
                    log.debug("【Sync】文件已处理过:%s" % event_path)
                    return
                # Ignore files outside every monitored directory
                is_monitor_file = False
                for tpath in self.sync_dir_config.keys():
                    if PathUtils.is_path_in_path(tpath, event_path):
                        is_monitor_file = True
                        break
                if not is_monitor_file:
                    return
                # Ignore files inside any target/unknown directory
                for tpath in self.sync_dir_config.values():
                    if not tpath:
                        continue
                    if PathUtils.is_path_in_path(tpath.get('target'), event_path):
                        return
                    if PathUtils.is_path_in_path(tpath.get('unknown'), event_path):
                        return
                # Ignore media-library directories and their children
                if self.filetransfer.is_target_dir_path(event_path):
                    return
                # Ignore recycle-bin and hidden paths
                if PathUtils.is_invalid_path(event_path):
                    return
                # Parent directory of the changed file
                from_dir = os.path.dirname(event_path)
                # Find which monitored directory this file belongs to
                monitor_dir = event_path
                is_root_path = False
                for m_path in self.sync_dir_config.keys():
                    if PathUtils.is_path_in_path(m_path, event_path):
                        monitor_dir = m_path
                    if os.path.normpath(m_path) == os.path.normpath(from_dir):
                        is_root_path = True

                # Look up the destination configuration
                target_dirs = self.sync_dir_config.get(monitor_dir)
                target_path = target_dirs.get('target')
                unknown_path = target_dirs.get('unknown')
                onlylink = target_dirs.get('onlylink')
                sync_mode = target_dirs.get('syncmod')

                # Link-only mode: no identify/rename, just sync the file
                if onlylink:
                    if self.dbhelper.is_sync_in_history(event_path, target_path):
                        return
                    log.info("【Sync】开始同步 %s" % event_path)
                    ret, msg = self.filetransfer.link_sync_file(src_path=monitor_dir,
                                                                in_file=event_path,
                                                                target_dir=target_path,
                                                                sync_transfer_mode=sync_mode)
                    if ret != 0:
                        log.warn("【Sync】%s 同步失败,错误码:%s" % (event_path, ret))
                    elif not msg:
                        self.dbhelper.insert_sync_history(event_path, monitor_dir, target_path)
                        log.info("【Sync】%s 同步完成" % event_path)
                # Identify-and-transfer mode
                else:
                    # Ignore non-media files (index.bdmv of a Blu-ray is allowed)
                    name = os.path.basename(event_path)
                    if not name:
                        return
                    if name.lower() != "index.bdmv":
                        ext = os.path.splitext(name)[-1]
                        if ext.lower() not in RMT_MEDIAEXT:
                            return
                    # Files directly under the monitored root are sent immediately
                    if is_root_path:
                        ret, ret_msg = self.filetransfer.transfer_media(in_from=SyncType.MON,
                                                                        in_path=event_path,
                                                                        target_dir=target_path,
                                                                        unknown_dir=unknown_path,
                                                                        rmt_mode=sync_mode)
                        if not ret:
                            log.warn("【Sync】%s 转移失败:%s" % (event_path, ret_msg))
                    else:
                        # Deeper files are queued per parent directory and
                        # processed in batch by transfer_mon_files()
                        try:
                            lock.acquire()
                            if self._need_sync_paths.get(from_dir):
                                files = self._need_sync_paths[from_dir].get('files')
                                if not files:
                                    files = [event_path]
                                else:
                                    if event_path not in files:
                                        files.append(event_path)
                                    else:
                                        return
                                self._need_sync_paths[from_dir].update({'files': files})
                            else:
                                self._need_sync_paths[from_dir] = {'target': target_path,
                                                                   'unknown': unknown_path,
                                                                   'syncmod': sync_mode,
                                                                   'files': [event_path]}
                        finally:
                            lock.release()
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                log.error("【Sync】发生错误:%s - %s" % (str(e), traceback.format_exc()))

    def transfer_mon_files(self):
        """
        Batch-transfer queued files; invoked periodically by the scheduler.
        """
        try:
            lock.acquire()
            finished_paths = []
            for path in list(self._need_sync_paths):
                if not PathUtils.is_invalid_path(path) and os.path.exists(path):
                    log.info("【Sync】开始转移监控目录文件...")
                    target_info = self._need_sync_paths.get(path)
                    # For Blu-ray discs, transfer the disc root instead of
                    # individual stream files
                    bluray_dir = PathUtils.get_bluray_dir(path)
                    if not bluray_dir:
                        src_path = path
                        files = target_info.get('files')
                    else:
                        src_path = bluray_dir
                        files = []
                    # Each source is transferred at most once per batch run
                    if src_path not in finished_paths:
                        finished_paths.append(src_path)
                    else:
                        continue
                    target_path = target_info.get('target')
                    unknown_path = target_info.get('unknown')
                    sync_mode = target_info.get('syncmod')
                    # Check whether src_path is itself a monitored root
                    is_root_path = False
                    for m_path in self.sync_dir_config.keys():
                        if os.path.normpath(m_path) == os.path.normpath(src_path):
                            is_root_path = True
                    ret, ret_msg = self.filetransfer.transfer_media(in_from=SyncType.MON,
                                                                    in_path=src_path,
                                                                    files=files,
                                                                    target_dir=target_path,
                                                                    unknown_dir=unknown_path,
                                                                    rmt_mode=sync_mode,
                                                                    root_path=is_root_path)
                    if not ret:
                        log.warn("【Sync】%s转移失败:%s" % (path, ret_msg))
                self._need_sync_paths.pop(path)
        finally:
            lock.release()

    def run_service(self):
        """
        Start one watchdog observer per monitored directory.
        """
        self._observer = []
        for monpath in self.sync_dir_config.keys():
            if monpath and os.path.exists(monpath):
                try:
                    if self._sync_sys == OsType.WINDOWS:
                        # Windows (incl. docker on windows shares) needs the
                        # polling observer to receive events reliably
                        observer = PollingObserver(timeout=10)
                    else:
                        # Observer picks the best native backend internally
                        observer = Observer(timeout=10)
                    self._observer.append(observer)
                    observer.schedule(FileMonitorHandler(monpath, self), path=monpath, recursive=True)
                    observer.daemon = True
                    observer.start()
                    log.info("%s 的监控服务启动" % monpath)
                except Exception as e:
                    ExceptionUtils.exception_traceback(e)
                    log.error("%s 启动目录监控失败:%s" % (monpath, str(e)))

    def stop_service(self):
        """
        Stop all running observers.
        """
        if self._observer:
            for observer in self._observer:
                observer.stop()
        self._observer = []

    def transfer_all_sync(self, sid=None):
        """
        Full transfer of everything under the sync directories; triggered
        from the WEB UI "directory sync" action.
        :param sid: optional sync-config ID to restrict the run to one entry
        """
        for monpath, target_dirs in self.sync_dir_config.items():
            if not monpath:
                continue
            if sid and sid != target_dirs.get('id'):
                continue
            target_path = target_dirs.get('target')
            unknown_path = target_dirs.get('unknown')
            onlylink = target_dirs.get('onlylink')
            sync_mode = target_dirs.get('syncmod')
            # Link-only mode: sync every file not already in history
            if onlylink:
                for link_file in PathUtils.get_dir_files(monpath):
                    if self.dbhelper.is_sync_in_history(link_file, target_path):
                        continue
                    log.info("【Sync】开始同步 %s" % link_file)
                    ret, msg = self.filetransfer.link_sync_file(src_path=monpath,
                                                                in_file=link_file,
                                                                target_dir=target_path,
                                                                sync_transfer_mode=sync_mode)
                    if ret != 0:
                        log.warn("【Sync】%s 同步失败,错误码:%s" % (link_file, ret))
                    elif not msg:
                        self.dbhelper.insert_sync_history(link_file, monpath, target_path)
                        log.info("【Sync】%s 同步完成" % link_file)
            else:
                # Identify-and-transfer each first-level media entry
                for path in PathUtils.get_dir_level1_medias(monpath, RMT_MEDIAEXT):
                    if PathUtils.is_invalid_path(path):
                        continue
                    ret, ret_msg = self.filetransfer.transfer_media(in_from=SyncType.MON,
                                                                    in_path=path,
                                                                    target_dir=target_path,
                                                                    unknown_dir=unknown_path,
                                                                    rmt_mode=sync_mode)
                    if not ret:
                        log.error("【Sync】%s 处理失败:%s" % (monpath, ret_msg))
+
+
def run_monitor():
    """
    Start the directory-monitoring service, logging any startup failure.
    """
    try:
        Sync().run_service()
    except Exception as exc:
        ExceptionUtils.exception_traceback(exc)
        log.error("启动目录同步服务失败:%s" % str(exc))
+
+
def stop_monitor():
    """
    Stop the directory-monitoring service, logging any shutdown failure.
    """
    try:
        Sync().stop_service()
    except Exception as exc:
        ExceptionUtils.exception_traceback(exc)
        log.error("停止目录同步服务失败:%s" % str(exc))
+
+
def restart_monitor():
    """
    Restart monitoring: stop all observers, then start them again.
    """
    stop_monitor()
    run_monitor()
diff --git a/app/torrentremover.py b/app/torrentremover.py
new file mode 100644
index 0000000..d33686a
--- /dev/null
+++ b/app/torrentremover.py
@@ -0,0 +1,305 @@
+import json
+from threading import Lock
+
+from apscheduler.schedulers.background import BackgroundScheduler
+
+import log
+from app.conf import ModuleConf
+from app.downloader import Downloader
+from app.helper import DbHelper
+from app.message import Message
+from app.utils import ExceptionUtils
+from app.utils.commons import singleton
+from config import Config
+
+lock = Lock()
+
+
@singleton
class TorrentRemover(object):
    """
    Scheduled automatic torrent removal.

    Loads removal tasks from the database and runs them on an interval
    scheduler; each task can pause torrents (action 1), delete torrents
    (action 2) or delete torrents with their files (action 3).
    """
    # Notification sender
    message = None
    # Downloader facade (qBittorrent / Transmission)
    downloader = None
    # Database access helper
    dbhelper = None

    # Background scheduler driving the periodic removal jobs
    _scheduler = None
    # Task table: str(task id) -> task dict
    _remove_tasks = {}

    def __init__(self):
        self.init_config()

    def init_config(self):
        """
        Reload removal tasks from the database and (re)start the scheduler.
        """
        self.message = Message()
        self.downloader = Downloader()
        self.dbhelper = DbHelper()
        # Tear down any scheduler left over from a previous init
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
        # Load the task list from the database
        removetasks = self.dbhelper.get_torrent_remove_tasks()
        self._remove_tasks = {}
        for task in removetasks:
            config = task.CONFIG
            self._remove_tasks[str(task.ID)] = {
                "id": task.ID,
                "name": task.NAME,
                "downloader": task.DOWNLOADER,
                "onlynastool": task.ONLYNASTOOL,
                "samedata": task.SAMEDATA,
                "action": task.ACTION,
                "config": json.loads(config) if config else {},
                "interval": task.INTERVAL,
                "enabled": task.ENABLED,
            }
        if not self._remove_tasks:
            return
        # Schedule one interval job per enabled, configured task
        self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
        remove_flag = False
        for task in self._remove_tasks.values():
            if task.get("enabled") and task.get("interval") and task.get("config"):
                remove_flag = True
                self._scheduler.add_job(func=self.auto_remove_torrents,
                                        args=[task.get("id")],
                                        trigger='interval',
                                        seconds=int(task.get("interval")) * 60)
        if remove_flag:
            self._scheduler.print_jobs()
            self._scheduler.start()
            log.info("自动删种服务启动")

    def get_torrent_remove_tasks(self, taskid=None):
        """
        Return removal task details.
        :param taskid: specific task ID, or None for all tasks
        :return: a single task dict ({} if not found) or the full task table
        """
        if taskid:
            task = self._remove_tasks.get(str(taskid))
            return task if task else {}
        return self._remove_tasks

    @staticmethod
    def __torrent_text(torrent):
        """
        Format a one-line description of a torrent: name, site, size in GB.
        NOTE: original code divided the byte size by 1021 once instead of
        1024 — fixed here to a proper 1024**3 conversion.
        """
        name = torrent.get("name")
        site = torrent.get("site")
        size = round(torrent.get("size") / 1024 / 1024 / 1024, 3)
        return f"{name} 来自站点:{site} 大小:{size} GB"

    def auto_remove_torrents(self, taskids=None):
        """
        Execute removal task(s); invoked by the scheduler.
        :param taskids: task ID, list of IDs, or None for all enabled tasks
        """
        # Collect the tasks to run
        tasks = []
        # No ID given: run every enabled, configured task
        if not taskids:
            for task in self._remove_tasks.values():
                if task.get("enabled") and task.get("interval") and task.get("config"):
                    tasks.append(task)
        # Explicit IDs: run them whether enabled or not
        elif isinstance(taskids, list):
            for taskid in taskids:
                task = self._remove_tasks.get(str(taskid))
                if task:
                    tasks.append(task)
        else:
            task = self._remove_tasks.get(str(taskids))
            tasks = [task] if task else []
        if not tasks:
            return
        for task in tasks:
            try:
                lock.acquire()
                # Resolve the downloader type and query matching torrents
                downloader_type = ModuleConf.TORRENTREMOVER_DICT.get(task.get("downloader")).get("downloader_type")
                task.get("config")["samedata"] = task.get("samedata")
                task.get("config")["onlynastool"] = task.get("onlynastool")
                torrents = self.downloader.get_remove_torrents(
                    downloader=downloader_type,
                    config=task.get("config")
                )
                log.info(f"【TorrentRemover】自动删种任务:{task.get('name')} 获取符合处理条件种子数 {len(torrents)}")
                title = f"自动删种任务:{task.get('name')}"
                text = ""
                if task.get("action") == 1:
                    # Action 1: pause the matched torrents
                    text = f"共暂停{len(torrents)}个种子"
                    for torrent in torrents:
                        text_item = self.__torrent_text(torrent)
                        log.info(f"【TorrentRemover】暂停种子:{text_item}")
                        text = f"{text}\n{text_item}"
                        self.downloader.stop_torrents(downloader=downloader_type,
                                                      ids=[torrent.get("id")])
                elif task.get("action") == 2:
                    # Action 2: delete torrents, keep files on disk
                    text = f"共删除{len(torrents)}个种子"
                    for torrent in torrents:
                        text_item = self.__torrent_text(torrent)
                        log.info(f"【TorrentRemover】删除种子:{text_item}")
                        text = f"{text}\n{text_item}"
                        self.downloader.delete_torrents(downloader=downloader_type,
                                                        delete_file=False,
                                                        ids=[torrent.get("id")])
                elif task.get("action") == 3:
                    # Action 3: delete torrents together with their files
                    text = f"共删除{len(torrents)}个种子(及文件)"
                    for torrent in torrents:
                        text_item = self.__torrent_text(torrent)
                        log.info(f"【TorrentRemover】删除种子及文件:{text_item}")
                        text = f"{text}\n{text_item}"
                        self.downloader.delete_torrents(downloader=downloader_type,
                                                        delete_file=True,
                                                        ids=[torrent.get("id")])
                # Notify only when something was actually processed
                if torrents and title and text:
                    self.message.send_brushtask_remove_message(title=title, text=text)
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                log.error(f"【TorrentRemover】自动删种任务:{task.get('name')}异常:{str(e)}")
            finally:
                lock.release()

    def update_torrent_remove_task(self, data):
        """
        Validate and persist a removal task (delete + re-insert when tid given).
        :param data: dict of form fields from the WEB UI
        :return: (success flag, message)
        """
        tid = data.get("tid")
        name = data.get("name")
        if not name:
            return False, "名称参数不合法"
        action = data.get("action")
        if not str(action).isdigit() or int(action) not in [1, 2, 3]:
            return False, "动作参数不合法"
        else:
            action = int(action)
        interval = data.get("interval")
        if not str(interval).isdigit():
            return False, "运行间隔参数不合法"
        else:
            interval = int(interval)
        enabled = data.get("enabled")
        if not str(enabled).isdigit() or int(enabled) not in [0, 1]:
            return False, "状态参数不合法"
        else:
            enabled = int(enabled)
        # FIX: original validated str(enabled) here instead of str(samedata),
        # letting non-numeric samedata values raise TypeError on int()
        samedata = data.get("samedata")
        if not str(samedata).isdigit() or int(samedata) not in [0, 1]:
            return False, "处理辅种参数不合法"
        else:
            samedata = int(samedata)
        # FIX: same issue — validate onlynastool itself, not enabled
        onlynastool = data.get("onlynastool")
        if not str(onlynastool).isdigit() or int(onlynastool) not in [0, 1]:
            return False, "仅处理NASTOOL添加种子参数不合法"
        else:
            onlynastool = int(onlynastool)
        ratio = data.get("ratio") or 0
        if not str(ratio).replace(".", "").isdigit():
            return False, "分享率参数不合法"
        else:
            ratio = round(float(ratio), 2)
        seeding_time = data.get("seeding_time") or 0
        if not str(seeding_time).isdigit():
            return False, "做种时间参数不合法"
        else:
            seeding_time = int(seeding_time)
        upload_avs = data.get("upload_avs") or 0
        if not str(upload_avs).isdigit():
            return False, "平均上传速度参数不合法"
        else:
            upload_avs = int(upload_avs)
        # Size filter: "min-max" in GB, both bounds numeric
        size = data.get("size")
        size = str(size).split("-") if size else []
        if size and (len(size) != 2 or not str(size[0]).isdigit() or not str(size[-1]).isdigit()):
            return False, "种子大小参数不合法"
        else:
            size = [int(size[0]), int(size[-1])] if size else []
        # Tag filter: semicolon-separated, empties dropped
        tags = data.get("tags")
        tags = tags.split(";") if tags else []
        tags = [tag for tag in tags if tag]
        savepath_key = data.get("savepath_key")
        tracker_key = data.get("tracker_key")
        downloader = data.get("downloader")
        if downloader not in ModuleConf.TORRENTREMOVER_DICT.keys():
            return False, "下载器参数不合法"
        if downloader == "Qb":
            # qBittorrent-specific state/category filters
            qb_state = data.get("qb_state")
            qb_state = qb_state.split(";") if qb_state else []
            qb_state = [state for state in qb_state if state]
            if qb_state:
                for qb_state_item in qb_state:
                    if qb_state_item not in ModuleConf.TORRENTREMOVER_DICT.get("Qb").get("torrent_state").keys():
                        return False, "种子状态参数不合法"
            qb_category = data.get("qb_category")
            qb_category = qb_category.split(";") if qb_category else []
            qb_category = [category for category in qb_category if category]
            tr_state = []
            tr_error_key = ""
        else:
            # Transmission-specific state/error filters
            qb_state = []
            qb_category = []
            tr_state = data.get("tr_state")
            tr_state = tr_state.split(";") if tr_state else []
            tr_state = [state for state in tr_state if state]
            if tr_state:
                for tr_state_item in tr_state:
                    if tr_state_item not in ModuleConf.TORRENTREMOVER_DICT.get("Tr").get("torrent_state").keys():
                        return False, "种子状态参数不合法"
            tr_error_key = data.get("tr_error_key")
        config = {
            "ratio": ratio,
            "seeding_time": seeding_time,
            "upload_avs": upload_avs,
            "size": size,
            "tags": tags,
            "savepath_key": savepath_key,
            "tracker_key": tracker_key,
            "qb_state": qb_state,
            "qb_category": qb_category,
            "tr_state": tr_state,
            "tr_error_key": tr_error_key,
        }
        # Update = delete old row + insert new one
        if tid:
            self.dbhelper.delete_torrent_remove_task(tid=tid)
        self.dbhelper.insert_torrent_remove_task(
            name=name,
            action=action,
            interval=interval,
            enabled=enabled,
            samedata=samedata,
            onlynastool=onlynastool,
            downloader=downloader,
            config=config,
        )
        return True, "更新成功"

    def delete_torrent_remove_task(self, taskid=None):
        """
        Delete a removal task by ID.
        :return: True when a delete was issued, False for a missing ID
        """
        if not taskid:
            return False
        else:
            self.dbhelper.delete_torrent_remove_task(tid=taskid)
            return True

    def get_remove_torrents(self, taskid):
        """
        Return the torrents currently matching a removal task.
        :return: (found flag, list of matching torrents)
        """
        task = self._remove_tasks.get(str(taskid))
        if not task:
            return False, []
        else:
            task.get("config")["samedata"] = task.get("samedata")
            task.get("config")["onlynastool"] = task.get("onlynastool")
            torrents = self.downloader.get_remove_torrents(
                downloader=ModuleConf.TORRENTREMOVER_DICT.get(task.get("downloader")).get("downloader_type"),
                config=task.get("config")
            )
            return True, torrents
diff --git a/app/utils/__init__.py b/app/utils/__init__.py
new file mode 100644
index 0000000..addef48
--- /dev/null
+++ b/app/utils/__init__.py
@@ -0,0 +1,13 @@
+from .dom_utils import DomUtils
+from .episode_format import EpisodeFormat
+from .http_utils import RequestUtils
+from .json_utils import JsonUtils
+from .number_utils import NumberUtils
+from .path_utils import PathUtils
+from .string_utils import StringUtils
+from .system_utils import SystemUtils
+from .tokens import Tokens
+from .torrent import Torrent
+from .cache_manager import cacheman, TokenCache, ConfigLoadCache
+from .exception_utils import ExceptionUtils
+from .rsstitle_utils import RssTitleUtils
diff --git a/app/utils/cache_manager.py b/app/utils/cache_manager.py
new file mode 100644
index 0000000..fdf140b
--- /dev/null
+++ b/app/utils/cache_manager.py
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
import time

from cacheout import CacheManager, LRUCache, Cache

# Named-cache configuration for the shared CacheManager below.
# "tmdb_supply" holds up to 200 entries (LRU eviction).
CACHES = {
    "tmdb_supply": {'maxsize': 200}
}

# Shared cache manager; every cache declared in CACHES uses LRU eviction
cacheman = CacheManager(CACHES, cache_class=LRUCache)

# Token cache: up to 256 entries, each expiring after 4 hours
TokenCache = Cache(maxsize=256, ttl=4*3600, timer=time.time, default=None)

# Single-slot cache with a 10-second TTL — presumably used to debounce
# config reloads; verify against callers
ConfigLoadCache = Cache(maxsize=1, ttl=10, timer=time.time, default=None)
diff --git a/app/utils/commons.py b/app/utils/commons.py
new file mode 100644
index 0000000..fc00c6b
--- /dev/null
+++ b/app/utils/commons.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+import threading
+
+# 线程锁
+lock = threading.RLock()
+
+# 全局实例
+INSTANCES = {}
+
+
+# 单例模式注解
+def singleton(cls):
+ # 创建字典用来保存类的实例对象
+ global INSTANCES
+
+ def _singleton(*args, **kwargs):
+ # 先判断这个类有没有对象
+ if cls not in INSTANCES:
+ with lock:
+ if cls not in INSTANCES:
+ INSTANCES[cls] = cls(*args, **kwargs)
+ pass
+ # 将实例对象返回
+ return INSTANCES[cls]
+
+ return _singleton
diff --git a/app/utils/dom_utils.py b/app/utils/dom_utils.py
new file mode 100644
index 0000000..2e9070a
--- /dev/null
+++ b/app/utils/dom_utils.py
@@ -0,0 +1,30 @@
class DomUtils:

    @staticmethod
    def tag_value(tag_item, tag_name, attname="", default=None):
        """
        Read a value from the first child element named tag_name.

        When attname is given, return that attribute's value if non-empty;
        otherwise return the element's first text node. Falls back to
        `default` when nothing usable is found.
        """
        elements = tag_item.getElementsByTagName(tag_name)
        if not elements:
            return default
        first = elements[0]
        if attname:
            value = first.getAttribute(attname)
            if value:
                return value
            # Attribute missing/empty: do NOT fall through to the text node
            return default
        child = first.firstChild
        if child:
            return child.data
        return default

    @staticmethod
    def add_node(doc, parent, name, value=None):
        """
        Create an element `name` under `parent` and return it; when `value`
        is not None, append its string form as a text node.
        """
        node = doc.createElement(name)
        parent.appendChild(node)
        if value is not None:
            node.appendChild(doc.createTextNode(str(value)))
        return node
diff --git a/app/utils/episode_format.py b/app/utils/episode_format.py
new file mode 100644
index 0000000..031f194
--- /dev/null
+++ b/app/utils/episode_format.py
@@ -0,0 +1,85 @@
+import re
+import parse
+from config import SPLIT_CHARS
+
+
class EpisodeFormat(object):
    """
    Parses episode numbers out of file names using a user-supplied
    `parse`-style format string (e.g. "... {ep} ...").

    `details` restricts the accepted episode range and may be either a
    "start-end" string (stored verbatim) or "a,b" / "a" integers.
    """
    # Name of the capture field holding the episode token in the format
    _key = ""

    def __init__(self, eformat, details: str = None, offset=None, key="ep"):
        """
        :param eformat: parse-module format string, or falsy to disable
        :param details: episode restriction — "start-end" string or "a,b"/"a"
        :param offset: integer added to every parsed episode number
        :param key: capture-field name inside the format (default "ep")
        """
        self._format = eformat
        self._start_ep = None
        self._end_ep = None
        if details:
            if re.compile("\\d{1,4}-\\d{1,4}").match(details):
                # Range form: note start/end are kept as the RAW STRING here,
                # split_episode() later converts it to ints
                self._start_ep = details
                self._end_ep = details
            else:
                tmp = details.split(",")
                if len(tmp) > 1:
                    # "a,b": start is a, end is max(a, b)
                    self._start_ep = int(tmp[0])
                    self._end_ep = int(tmp[0]) if int(tmp[0]) > int(tmp[1]) else int(tmp[1])
                else:
                    self._start_ep = self._end_ep = int(tmp[0])
        self.__offset = int(offset) if offset else 0
        self._key = key

    @property
    def format(self):
        # The raw format string
        return self._format

    @property
    def start_ep(self):
        # First allowed episode (int, or raw "a-b" string for range form)
        return self._start_ep

    @property
    def end_ep(self):
        # Last allowed episode (int, or raw "a-b" string for range form)
        return self._end_ep

    @property
    def offset(self):
        # Offset added to parsed episode numbers
        return self.__offset

    def match(self, file: str):
        """
        Return True when `file` matches the format and (if set) the episode
        restriction. NOTE(review): when details was a "a-b" range string,
        _start_ep is a str and the `<=` comparison below would raise —
        presumably that combination is not used with match(); verify callers.
        """
        if not self._format:
            return True
        s, e = self.__handle_single(file)
        if not s:
            return False
        if self._start_ep is None:
            return True
        if self._start_ep <= s <= self._end_ep:
            return True
        return False

    def split_episode(self, file_name):
        """
        Return (start_episode, end_episode) for `file_name`, both shifted by
        the offset; end is None for a single episode. Returns (None, None)
        when nothing can be determined.
        """
        # An explicitly pinned episode/range takes priority over parsing
        if self._start_ep is not None and self._start_ep == self._end_ep:
            if isinstance(self._start_ep, str):
                # Range form was stored as the raw "a-b" string
                s, e = self._start_ep.split("-")
                if int(s) == int(e):
                    return int(s) + self.__offset, None
                return int(s) + self.__offset, int(e) + self.__offset
            return self._start_ep + self.__offset, None
        if not self._format:
            return None, None
        s, e = self.__handle_single(file_name)
        return s + self.__offset if s is not None else None, e + self.__offset if e is not None else None

    def __handle_single(self, file: str):
        """
        Parse `file` with the format and extract (start, end) episode ints
        from the captured token; end is None for a single episode.
        """
        if not self._format:
            return None, None
        ret = parse.parse(self._format, file)
        if not ret or not ret.__contains__(self._key):
            return None, None
        episodes = ret.__getitem__(self._key)
        # Accept "12", "EP12", "12-14", "EP12-EP14" (1-4 digit numbers)
        if not re.compile(r"^(EP)?(\d{1,4})(-(EP)?(\d{1,4}))?$", re.IGNORECASE).match(episodes):
            return None, None
        # Split the token on the configured separator chars and keep only
        # pieces that look like (letters+)digits
        episode_splits = list(filter(lambda x: re.compile(r'[a-zA-Z]*\d{1,4}', re.IGNORECASE).match(x),
                                     re.split(r'%s' % SPLIT_CHARS, episodes)))
        if len(episode_splits) == 1:
            return int(re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[0])), None
        else:
            return int(re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[0])), int(
                re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[1]))
+
diff --git a/app/utils/exception_utils.py b/app/utils/exception_utils.py
new file mode 100644
index 0000000..fa5e2f3
--- /dev/null
+++ b/app/utils/exception_utils.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+import traceback
+
+
class ExceptionUtils:
    @classmethod
    def exception_traceback(cls, e):
        """
        Print an exception together with the current formatted call stack.
        Intended to be called from inside an `except` block so that
        traceback.format_exc() has a traceback to report.
        """
        message = f"\nException: {str(e)}\nCallstack:\n{traceback.format_exc()}\n"
        print(message)
diff --git a/app/utils/http_utils.py b/app/utils/http_utils.py
new file mode 100644
index 0000000..f5d5b64
--- /dev/null
+++ b/app/utils/http_utils.py
@@ -0,0 +1,164 @@
+import requests
+import urllib3
+from urllib3.exceptions import InsecureRequestWarning
+from config import Config
+
+urllib3.disable_warnings(InsecureRequestWarning)
+
+
class RequestUtils:
    """
    Thin wrapper around `requests` with project defaults: TLS verification
    disabled, configurable headers/cookies/proxies/timeout, and optional
    reuse of a caller-supplied Session.
    """
    # Default headers dict (built in __init__)
    _headers = None
    # Cookies dict (only sent by get_res/post_res, not by get/post)
    _cookies = None
    # Proxies mapping passed straight to requests
    _proxies = None
    # Request timeout in seconds
    _timeout = 20
    # Optional requests.Session to reuse connections/cookies
    _session = None

    def __init__(self,
                 headers=None,
                 cookies=None,
                 proxies=False,
                 session=None,
                 timeout=None,
                 referer=None,
                 content_type=None):
        """
        :param headers: full headers dict, or a bare User-Agent string
        :param cookies: cookies dict, or a "k=v; k2=v2" cookie string
        :param proxies: proxies mapping for requests (falsy disables)
        :param session: requests.Session to use instead of module-level calls
        :param timeout: per-request timeout in seconds (default 20)
        :param referer: optional Referer header value
        :param content_type: Content-Type header (form-urlencoded by default)
        """
        if not content_type:
            content_type = "application/x-www-form-urlencoded; charset=UTF-8"
        if headers:
            # A plain string is treated as the User-Agent only
            if isinstance(headers, str):
                self._headers = {
                    "Content-Type": content_type,
                    "User-Agent": f"{headers}"
                }
            else:
                self._headers = headers
        else:
            # Fall back to the globally configured User-Agent
            self._headers = {
                "Content-Type": content_type,
                "User-Agent": Config().get_ua()
            }
        if referer:
            self._headers.update({
                "referer": referer
            })
        if cookies:
            # Cookie strings are parsed into a dict
            if isinstance(cookies, str):
                self._cookies = self.cookie_parse(cookies)
            else:
                self._cookies = cookies
        if proxies:
            self._proxies = proxies
        if session:
            self._session = session
        if timeout:
            self._timeout = timeout

    def post(self, url, params=None, json=None):
        """
        POST form data (and optional json body); returns the Response or
        None on any requests exception. Note: cookies are NOT sent here.
        """
        if json is None:
            json = {}
        try:
            if self._session:
                return self._session.post(url,
                                          data=params,
                                          verify=False,
                                          headers=self._headers,
                                          proxies=self._proxies,
                                          timeout=self._timeout,
                                          json=json)
            else:
                return requests.post(url,
                                     data=params,
                                     verify=False,
                                     headers=self._headers,
                                     proxies=self._proxies,
                                     timeout=self._timeout,
                                     json=json)
        except requests.exceptions.RequestException:
            return None

    def get(self, url, params=None):
        """
        GET and return the response body decoded as UTF-8 text, or None on
        any requests exception. Note: cookies are NOT sent here.
        """
        try:
            if self._session:
                r = self._session.get(url,
                                      verify=False,
                                      headers=self._headers,
                                      proxies=self._proxies,
                                      timeout=self._timeout,
                                      params=params)
            else:
                r = requests.get(url,
                                 verify=False,
                                 headers=self._headers,
                                 proxies=self._proxies,
                                 timeout=self._timeout,
                                 params=params)
            return str(r.content, 'utf-8')
        except requests.exceptions.RequestException:
            return None

    def get_res(self, url, params=None, allow_redirects=True):
        """
        GET and return the raw Response (cookies included), or None on any
        requests exception.
        """
        try:
            if self._session:
                return self._session.get(url,
                                         params=params,
                                         verify=False,
                                         headers=self._headers,
                                         proxies=self._proxies,
                                         cookies=self._cookies,
                                         timeout=self._timeout,
                                         allow_redirects=allow_redirects)
            else:
                return requests.get(url,
                                    params=params,
                                    verify=False,
                                    headers=self._headers,
                                    proxies=self._proxies,
                                    cookies=self._cookies,
                                    timeout=self._timeout,
                                    allow_redirects=allow_redirects)
        except requests.exceptions.RequestException:
            return None

    def post_res(self, url, params=None, allow_redirects=True, files=None, json=None):
        """
        POST and return the raw Response (cookies included), or None on any
        requests exception. Supports multipart files and a json body.
        """
        try:
            if self._session:
                return self._session.post(url,
                                          data=params,
                                          verify=False,
                                          headers=self._headers,
                                          proxies=self._proxies,
                                          cookies=self._cookies,
                                          timeout=self._timeout,
                                          allow_redirects=allow_redirects,
                                          files=files,
                                          json=json)
            else:
                return requests.post(url,
                                     data=params,
                                     verify=False,
                                     headers=self._headers,
                                     proxies=self._proxies,
                                     cookies=self._cookies,
                                     timeout=self._timeout,
                                     allow_redirects=allow_redirects,
                                     files=files,
                                     json=json)
        except requests.exceptions.RequestException:
            return None

    @staticmethod
    def cookie_parse(cookies_str, array=False):
        """
        Parse a "k=v; k2=v2" cookie string.
        :param cookies_str: raw cookie header string
        :param array: when True, return a list of {'name', 'value'} dicts
                      instead of a plain mapping
        :return: dict (default) or list of dicts; {} for empty input
        """
        if not cookies_str:
            return {}
        cookie_dict = {}
        cookies = cookies_str.split(';')
        for cookie in cookies:
            cstr = cookie.split('=')
            # Entries without '=' are silently skipped; extra '='s beyond the
            # first are dropped from the value
            if len(cstr) > 1:
                cookie_dict[cstr[0].strip()] = cstr[1].strip()
        if array:
            cookiesList = []
            for cookieName, cookieValue in cookie_dict.items():
                cookies = {'name': cookieName, 'value': cookieValue}
                cookiesList.append(cookies)
            return cookiesList
        return cookie_dict
diff --git a/app/utils/json_utils.py b/app/utils/json_utils.py
new file mode 100644
index 0000000..ccfe2e5
--- /dev/null
+++ b/app/utils/json_utils.py
@@ -0,0 +1,24 @@
+import json
+from enum import Enum
+
+
class JsonUtils:

    @staticmethod
    def json_serializable(obj):
        """
        Convert an arbitrary object into plain JSON-compatible data.

        Enum members become their values, objects exposing __dict__ become
        mappings, and anything else falls back to its string form.
        @param obj: object to convert
        @return: structure composed only of JSON-serializable types
        """

        def _coerce(value):
            # Enums serialize as their underlying value
            if isinstance(value, Enum):
                return value.value
            try:
                return value.__dict__
            except Exception as err:
                print(str(err))
            # Last resort: stringify
            return str(value)

        return json.loads(json.dumps(obj, default=_coerce))
diff --git a/app/utils/number_utils.py b/app/utils/number_utils.py
new file mode 100644
index 0000000..693fc14
--- /dev/null
+++ b/app/utils/number_utils.py
@@ -0,0 +1,12 @@
class NumberUtils:

    @staticmethod
    def max_ele(a, b):
        """
        Return the larger of two values as an int, treating falsy values
        (None, 0, "", ...) as absent: when one side is falsy, the other is
        returned unchanged (not converted).
        """
        if not a:
            return b
        return a if not b else max(int(a), int(b))
diff --git a/app/utils/path_utils.py b/app/utils/path_utils.py
new file mode 100644
index 0000000..ce85c90
--- /dev/null
+++ b/app/utils/path_utils.py
@@ -0,0 +1,155 @@
+import os
+
+
+class PathUtils:
+
+ @staticmethod
+ def get_dir_files(in_path, exts="", filesize=0, episode_format=None):
+ """
+ 获得目录下的媒体文件列表List ,按后缀、大小、格式过滤
+ """
+ if not in_path:
+ return []
+ if not os.path.exists(in_path):
+ return []
+ ret_list = []
+ if os.path.isdir(in_path):
+ for root, dirs, files in os.walk(in_path):
+ for file in files:
+ cur_path = os.path.join(root, file)
+ # 检查路径是否合法
+ if PathUtils.is_invalid_path(cur_path):
+ continue
+ # 检查格式匹配
+ if episode_format and not episode_format.match(file):
+ continue
+ # 检查后缀
+ if exts and os.path.splitext(file)[-1].lower() not in exts:
+ continue
+ # 检查文件大小
+ if filesize and os.path.getsize(cur_path) < filesize:
+ continue
+ # 命中
+ if cur_path not in ret_list:
+ ret_list.append(cur_path)
+ else:
+ # 检查路径是否合法
+ if PathUtils.is_invalid_path(in_path):
+ return []
+ # 检查后缀
+ if exts and os.path.splitext(in_path)[-1].lower() not in exts:
+ return []
+ # 检查格式
+ if episode_format and not episode_format.match(os.path.basename(in_path)):
+ return []
+ # 检查文件大小
+ if filesize and os.path.getsize(in_path) < filesize:
+ return []
+ ret_list.append(in_path)
+ return ret_list
+
+ @staticmethod
+ def get_dir_level1_files(in_path, exts=""):
+ """
+ 查询目录下的文件(只查询一级)
+ """
+ ret_list = []
+ if not os.path.exists(in_path):
+ return []
+ for file in os.listdir(in_path):
+ path = os.path.join(in_path, file)
+ if os.path.isfile(path):
+ if not exts or os.path.splitext(file)[-1].lower() in exts:
+ ret_list.append(path)
+ return ret_list
+
+ @staticmethod
+ def get_dir_level1_medias(in_path, exts=""):
+ """
+ 根据后缀,返回目录下所有的文件及文件夹列表(只查询一级)
+ """
+ ret_list = []
+ if not os.path.exists(in_path):
+ return []
+ if os.path.isdir(in_path):
+ for file in os.listdir(in_path):
+ path = os.path.join(in_path, file)
+ if os.path.isfile(path):
+ if not exts or os.path.splitext(file)[-1].lower() in exts:
+ ret_list.append(path)
+ else:
+ ret_list.append(path)
+ else:
+ ret_list.append(in_path)
+ return ret_list
+
+ @staticmethod
+ def is_invalid_path(path):
+ """
+ 判断是否不能处理的路径
+ """
+ if not path:
+ return True
+ if path.find('/@Recycle/') != -1 or path.find('/#recycle/') != -1 or path.find('/.') != -1 or path.find(
+ '/@eaDir') != -1:
+ return True
+ return False
+
+ @staticmethod
+ def is_path_in_path(path1, path2):
+ """
+ 判断两个路径是否包含关系 path1 in path2
+ """
+ if not path1 or not path2:
+ return False
+ path1 = os.path.normpath(path1)
+ path2 = os.path.normpath(path2)
+ if path1 == path2:
+ return True
+ path = os.path.dirname(path2)
+ while True:
+ if path == path1:
+ return True
+ path = os.path.dirname(path)
+ if path == os.path.dirname(path):
+ break
+ return False
+
+ @staticmethod
+ def get_bluray_dir(path):
+ """
+ 判断是否蓝光原盘目录,是则返回原盘的根目录,否则返回空
+ """
+ if not path or not os.path.exists(path):
+ return None
+ if os.path.isdir(path):
+ if os.path.exists(os.path.join(path, "BDMV", "index.bdmv")):
+ return path
+ elif os.path.normpath(path).endswith("BDMV") \
+ and os.path.exists(os.path.join(path, "index.bdmv")):
+ return os.path.dirname(path)
+ elif os.path.normpath(path).endswith("STREAM") \
+ and os.path.exists(os.path.join(os.path.dirname(path), "index.bdmv")):
+ return PathUtils.get_parent_paths(path, 2)
+ else:
+ # 电视剧原盘下会存在多个目录形如:Spider Man 2021/DIsc1, Spider Man 2021/Disc2
+ for level1 in PathUtils.get_dir_level1_medias(path):
+ if os.path.exists(os.path.join(level1, "BDMV", "index.bdmv")):
+ return path
+ return None
+ else:
+ if str(os.path.splitext(path)[-1]).lower() in [".m2ts", ".ts"] \
+ and os.path.normpath(os.path.dirname(path)).endswith("STREAM") \
+ and os.path.exists(os.path.join(PathUtils.get_parent_paths(path, 2), "index.bdmv")):
+ return PathUtils.get_parent_paths(path, 3)
+ else:
+ return None
+
+ @staticmethod
+ def get_parent_paths(path, level: int = 1):
+ """
+ 获取父目录路径,level为向上查找的层数
+ """
+ for lv in range(0, level):
+ path = os.path.dirname(path)
+ return path
diff --git a/app/utils/rsstitle_utils.py b/app/utils/rsstitle_utils.py
new file mode 100644
index 0000000..dc2ad1f
--- /dev/null
+++ b/app/utils/rsstitle_utils.py
@@ -0,0 +1,30 @@
+import re
+
+from app.utils.exception_utils import ExceptionUtils
+
+
class RssTitleUtils:

    @staticmethod
    def keepfriends_title(title):
        """
        Normalize an RSS entry title from pt.keepfrds.com.

        The site wraps the torrent name in square brackets; this moves the
        name in front of the description. Unrecognized layouts (and parse
        errors) return the title unchanged; falsy input returns "".
        """
        if not title:
            return ""
        try:
            leading = re.search(r"\[(.*)]", title, re.IGNORECASE)
            if not leading:
                return title
            if leading.span()[0] == 0:
                # Title starts with bracketed segments: the LAST segment is
                # the torrent name, everything else is the description.
                segments = re.findall(r"\[(.*?)]", title, re.IGNORECASE)
                if segments and len(segments) > 1:
                    name = segments[-1]
                    description = title.replace(f"[{name}]", "").strip()
                    return "%s %s" % (name, description)
                return title
            # Bracketed name appears mid-string: move it to the front.
            name = leading.group(1)
            description = title.replace(leading.group(), "").strip()
            return "%s %s" % (name, description)
        except Exception as err:
            ExceptionUtils.exception_traceback(err)
            return title
diff --git a/app/utils/string_utils.py b/app/utils/string_utils.py
new file mode 100644
index 0000000..ad27d8d
--- /dev/null
+++ b/app/utils/string_utils.py
@@ -0,0 +1,440 @@
+import bisect
+import datetime
+import hashlib
+import random
+import re
+from urllib import parse
+
+import dateparser
+import dateutil.parser
+
+import cn2an
+from app.utils.exception_utils import ExceptionUtils
+from app.utils.types import MediaType
+
+
+class StringUtils:
+
+ @staticmethod
+ def num_filesize(text):
+ """
+ 将文件大小文本转化为字节
+ """
+ if not text:
+ return 0
+ if not isinstance(text, str):
+ text = str(text)
+ if text.isdigit():
+ return int(text)
+ text = text.replace(",", "").replace(" ", "").upper()
+ size = re.sub(r"[KMGTPI]*B?", "", text, flags=re.IGNORECASE)
+ try:
+ size = float(size)
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return 0
+ if text.find("PB") != -1 or text.find("PIB") != -1:
+ size *= 1024 ** 5
+ elif text.find("TB") != -1 or text.find("TIB") != -1:
+ size *= 1024 ** 4
+ elif text.find("GB") != -1 or text.find("GIB") != -1:
+ size *= 1024 ** 3
+ elif text.find("MB") != -1 or text.find("MIB") != -1:
+ size *= 1024 ** 2
+ elif text.find("KB") != -1 or text.find("KIB") != -1:
+ size *= 1024
+ return round(size)
+
+ @staticmethod
+ def str_timelong(time_sec):
+ """
+ 将数字转换为时间描述
+ """
+ if not isinstance(time_sec, int) or not isinstance(time_sec, float):
+ try:
+ time_sec = float(time_sec)
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return ""
+ d = [(0, '秒'), (60 - 1, '分'), (3600 - 1, '小时'), (86400 - 1, '天')]
+ s = [x[0] for x in d]
+ index = bisect.bisect_left(s, time_sec) - 1
+ if index == -1:
+ return str(time_sec)
+ else:
+ b, u = d[index]
+ return str(round(time_sec / (b + 1))) + u
+
+ @staticmethod
+ def is_chinese(word):
+ """
+ 判断是否含有中文
+ """
+ if isinstance(word, list):
+ word = " ".join(word)
+ chn = re.compile(r'[\u4e00-\u9fff]')
+ if chn.search(word):
+ return True
+ else:
+ return False
+
+    @staticmethod
+    def is_japanese(word):
+        # True if `word` contains any hiragana or katakana character
+        jap = re.compile(r'[\u3040-\u309F\u30A0-\u30FF]')
+        if jap.search(word):
+            return True
+        else:
+            return False
+
+    @staticmethod
+    def is_korean(word):
+        # True if `word` contains any hangul syllable
+        kor = re.compile(r'[\uAC00-\uD7FF]')
+        if kor.search(word):
+            return True
+        else:
+            return False
+
+ @staticmethod
+ def is_all_chinese(word):
+ """
+ 判断是否全是中文
+ """
+ for ch in word:
+ if ch == ' ':
+ continue
+ if '\u4e00' <= ch <= '\u9fff':
+ continue
+ else:
+ return False
+ return True
+
+    @staticmethod
+    def xstr(s):
+        """
+        Render falsy values (None, "", 0) as the empty string; pass others through.
+        """
+        return s if s else ''
+
+    @staticmethod
+    def str_sql(in_str):
+        """
+        Coerce a value for SQL usage: falsy -> "", anything else -> str().
+        """
+        return "" if not in_str else str(in_str)
+
+    @staticmethod
+    def str_int(text):
+        """
+        Parse a numeric web-page string (thousands separators allowed) to int.
+        Returns 0 when the value cannot be parsed.
+        :param text:
+        :return:
+        """
+        int_val = 0
+        try:
+            int_val = int(text.strip().replace(',', ''))
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+
+        return int_val
+
+    @staticmethod
+    def str_float(text):
+        """
+        Parse a numeric web-page string (thousands separators allowed) to float.
+        Returns 0.0 when the value cannot be parsed.
+        :param text:
+        :return:
+        """
+        float_val = 0.0
+        try:
+            float_val = float(text.strip().replace(',', ''))
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+        return float_val
+
+ @staticmethod
+ def handler_special_chars(text, replace_word="", allow_space=False):
+ """
+ 忽略特殊字符
+ """
+ # 需要忽略的特殊字符
+ CONVERT_EMPTY_CHARS = r"[、.。,,·::;;!!'’\"“”()()\[\]【】「」\-——\+\|\\_/~~]"
+ if not text:
+ return text
+ if not isinstance(text, list):
+ text = re.sub(r"[\u200B-\u200D\uFEFF]",
+ "",
+ re.sub(r"%s" % CONVERT_EMPTY_CHARS, replace_word, text),
+ flags=re.IGNORECASE)
+ if not allow_space:
+ return re.sub(r"\s+", "", text)
+ else:
+ return re.sub(r"\s+", " ", text).strip()
+ else:
+ return [StringUtils.handler_special_chars(x) for x in text]
+
+ @staticmethod
+ def str_filesize(size, pre=2):
+ """
+ 将字节计算为文件大小描述(带单位的格式化后返回)
+ """
+ if not size:
+ return size
+ size = re.sub(r"\s|B|iB", "", str(size), re.I)
+ if size.replace(".", "").isdigit():
+ try:
+ size = float(size)
+ d = [(1024 - 1, 'K'), (1024 ** 2 - 1, 'M'), (1024 ** 3 - 1, 'G'), (1024 ** 4 - 1, 'T')]
+ s = [x[0] for x in d]
+ index = bisect.bisect_left(s, size) - 1
+ if index == -1:
+ return str(size) + "B"
+ else:
+ b, u = d[index]
+ return str(round(size / (b + 1), pre)) + u
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return ""
+ if re.findall(r"[KMGTP]", size, re.I):
+ return size
+ else:
+ return size + "B"
+
+ @staticmethod
+ def url_equal(url1, url2):
+ """
+ 比较两个地址是否为同一个网站
+ """
+ if not url1 or not url2:
+ return False
+ if url1.startswith("http"):
+ url1 = parse.urlparse(url1).netloc
+ if url2.startswith("http"):
+ url2 = parse.urlparse(url2).netloc
+ if url1.replace("www.", "") == url2.replace("www.", ""):
+ return True
+ return False
+
+ @staticmethod
+ def get_url_netloc(url):
+ """
+ 获取URL的协议和域名部分
+ """
+ if not url:
+ return "", ""
+ if not url.startswith("http"):
+ return "http", url
+ addr = parse.urlparse(url)
+ return addr.scheme, addr.netloc
+
+    @staticmethod
+    def get_url_domain(url):
+        """
+        Return the bare domain of `url`: lower-cased, without scheme and
+        without a leading "www.". Empty string for falsy input.
+        """
+        if not url:
+            return ""
+        _, netloc = StringUtils.get_url_netloc(url)
+        if netloc:
+            return netloc.lower().replace("www.", "")
+        return ""
+
+    @staticmethod
+    def get_base_url(url):
+        """
+        Return the root address of `url` as "scheme://netloc" (bare hosts
+        default to http, see get_url_netloc). Empty string for falsy input.
+        """
+        if not url:
+            return ""
+        scheme, netloc = StringUtils.get_url_netloc(url)
+        return f"{scheme}://{netloc}"
+
+ @staticmethod
+ def clear_file_name(name):
+ if not name:
+ return None
+ return re.sub(r"[*?\\/\"<>~]", "", name, flags=re.IGNORECASE).replace(":", ":")
+
@staticmethod
def get_keyword_from_string(content):
    """
    Split a search string into its parts.
    Returns a 6-tuple: (mtype, key_word, season_num, episode_num, year, content).
    """
    if not content:
        # BUG FIX: this early return used to yield only 5 values while the
        # normal path returns 6, crashing callers that unpack the result.
        return None, None, None, None, None, None
    # Strip leading "movie/TV/anime" qualifiers; TV/anime pins the type
    if re.search(r'^电视剧|\s+电视剧|^动漫|\s+动漫', content):
        mtype = MediaType.TV
    else:
        mtype = None
    content = re.sub(r'^电影|^电视剧|^动漫|\s+电影|\s+电视剧|\s+动漫', '', content).strip()
    # Extract season/episode markers (Chinese numerals handled by cn2an)
    season_num = None
    episode_num = None
    year = None
    season_re = re.search(r"第\s*([0-9一二三四五六七八九十]+)\s*季", content, re.IGNORECASE)
    if season_re:
        mtype = MediaType.TV
        season_num = int(cn2an.cn2an(season_re.group(1), mode='smart'))
    episode_re = re.search(r"第\s*([0-9一二三四五六七八九十]+)\s*集", content, re.IGNORECASE)
    if episode_re:
        mtype = MediaType.TV
        episode_num = int(cn2an.cn2an(episode_re.group(1), mode='smart'))
        if episode_num and not season_num:
            # An episode without a season implies season 1
            season_num = 1
    # A standalone 4-digit number (whitespace/parentheses delimited) is the year
    year_re = re.search(r"[\s(]+(\d{4})[\s)]*", content)
    if year_re:
        year = year_re.group(1)
    # Remove season/episode/year markers to obtain the bare keyword
    key_word = re.sub(
        r'第\s*[0-9一二三四五六七八九十]+\s*季|第\s*[0-9一二三四五六七八九十]+\s*集|[\s(]+(\d{4})[\s)]*', '',
        content,
        flags=re.IGNORECASE).strip()
    if key_word:
        key_word = re.sub(r'\s+', ' ', key_word)
    if not key_word:
        # Nothing left but the year: use it as the keyword
        key_word = year

    return mtype, key_word, season_num, episode_num, year, content
+
+ @staticmethod
+ def generate_random_str(randomlength=16):
+ """
+ 生成一个指定长度的随机字符串
+ """
+ random_str = ''
+ base_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz0123456789'
+ length = len(base_str) - 1
+ for i in range(randomlength):
+ random_str += base_str[random.randint(0, length)]
+ return random_str
+
+    @staticmethod
+    def get_time_stamp(date):
+        """
+        Parse an arbitrary date string into a datetime via dateutil.
+        Returns None when parsing fails.
+        """
+        tempsTime = None
+        try:
+            tempsTime = dateutil.parser.parse(date)
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+        return tempsTime
+
+    @staticmethod
+    def unify_datetime_str(datetime_str):
+        """
+        Normalize assorted date/time strings to "YYYY-MM-DD HH:MM:SS".
+        Handled inputs include:
+          - timezone offsets:   Sat, 15 Oct 2022 14:02:54 +0800
+          - ISO "T" separator:  2020-10-14T07:48:04 / 2020-10-14T07:48:04.208
+          - GMT/UTC/Z suffixes: Fri, 14 Oct 2022 07:48:04 GMT
+          - relative phrases:   1 month, 2 days ago
+        On any failure the input is returned unchanged. NOTE(review):
+        dateparser.parse may return None, raising AttributeError on
+        .strftime — that case is swallowed by the except below.
+        :param datetime_str:
+        :return:
+        """
+        # Falsy input (None / "") is passed straight back
+        if not datetime_str:
+            return datetime_str
+
+        try:
+            return dateparser.parse(datetime_str).strftime('%Y-%m-%d %H:%M:%S')
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            return datetime_str
+
+ @staticmethod
+ def timestamp_to_date(timestamp, date_format='%Y-%m-%d %H:%M:%S'):
+ """
+ 时间戳转日期
+ :param timestamp:
+ :param date_format:
+ :return:
+ """
+ try:
+ return datetime.datetime.fromtimestamp(timestamp).strftime(date_format)
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return timestamp
+
+ @staticmethod
+ def to_bool(text, default_val: bool = False) -> bool:
+ """
+ 字符串转bool
+ :param text: 要转换的值
+ :param default_val: 默认值
+ :return:
+ """
+ if isinstance(text, str) and not text:
+ return default_val
+ if isinstance(text, bool):
+ return text
+ if isinstance(text, int) or isinstance(text, float):
+ return True if text > 0 else False
+ if isinstance(text, str) and text.lower() in ['y', 'true', '1']:
+ return True
+ return False
+
+ @staticmethod
+ def str_from_cookiejar(cj):
+ """
+ 将cookiejar转换为字符串
+ :param cj:
+ :return:
+ """
+ return '; '.join(['='.join(item) for item in cj.items()])
+
+ @staticmethod
+ def get_idlist_from_string(content, dicts):
+ """
+ 从字符串中提取id列表
+ :param content: 字符串
+ :param dicts: 字典
+ :return:
+ """
+ if not content:
+ return []
+ id_list = []
+ content_list = content.split()
+ for dic in dicts:
+ if dic.get('name') in content_list and dic.get('id') not in id_list:
+ id_list.append(dic.get('id'))
+ content = content.replace(dic.get('name'), '')
+ return id_list, re.sub(r'\s+', ' ', content).strip()
+
+    @staticmethod
+    def str_title(s):
+        """
+        Title-case an English string; falsy values are returned unchanged.
+        :param s: en_name string
+        :return: string title
+        """
+        return s.title() if s else s
+
+    @staticmethod
+    def md5_hash(data):
+        """
+        Hex MD5 digest of str(data); "" for falsy input.
+        """
+        if not data:
+            return ""
+        return hashlib.md5(str(data).encode()).hexdigest()
+
+    @staticmethod
+    def str_timehours(minutes):
+        """
+        Render a minute count as "<h>小时<m>分".
+        Falsy input (including 0) yields "".
+        :param minutes:
+        :return:
+        """
+        if not minutes:
+            return ""
+        hours = minutes // 60
+        minutes = minutes % 60
+        return "%s小时%s分" % (hours, minutes)
+
+    @staticmethod
+    def str_amount(amount, curr="$"):
+        """
+        Format an amount with thousands separators and a currency prefix.
+        Falsy amounts (including 0) render as "0" without the symbol.
+        """
+        if not amount:
+            return "0"
+        return curr + format(amount, ",")
diff --git a/app/utils/system_utils.py b/app/utils/system_utils.py
new file mode 100644
index 0000000..ed7da0a
--- /dev/null
+++ b/app/utils/system_utils.py
@@ -0,0 +1,324 @@
+import datetime
+import os
+import platform
+import shutil
+import subprocess
+
+from app.utils.path_utils import PathUtils
+from app.utils.exception_utils import ExceptionUtils
+from app.utils.types import OsType
+from config import WEBDRIVER_PATH
+
+
+class SystemUtils:
+
+    @staticmethod
+    def __get_hidden_shell():
+        # On Windows, build a STARTUPINFO that hides the console window of
+        # spawned child processes; other platforms need no startupinfo.
+        if os.name == "nt":
+            st = subprocess.STARTUPINFO()
+            st.dwFlags = subprocess.STARTF_USESHOWWINDOW
+            st.wShowWindow = subprocess.SW_HIDE
+            return st
+        else:
+            return None
+
+ @staticmethod
+ def get_used_of_partition(path):
+ """
+ 获取系统存储空间占用信息
+ """
+ if not path:
+ return 0, 0
+ if not os.path.exists(path):
+ return 0, 0
+ try:
+ total_b, used_b, free_b = shutil.disk_usage(path)
+ return used_b, total_b
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return 0, 0
+
+    @staticmethod
+    def get_system():
+        """
+        Classify the host environment as an OsType member. Order matters:
+        Synology and Docker are Linux variants and must be detected before
+        falling back to generic LINUX.
+        """
+        if SystemUtils.is_windows():
+            return OsType.WINDOWS
+        elif SystemUtils.is_synology():
+            return OsType.SYNOLOGY
+        elif SystemUtils.is_docker():
+            return OsType.DOCKER
+        elif SystemUtils.is_macos():
+            return OsType.MACOS
+        else:
+            return OsType.LINUX
+
+ @staticmethod
+ def get_free_space_gb(folder):
+ """
+ 计算目录剩余空间大小
+ """
+ total_b, used_b, free_b = shutil.disk_usage(folder)
+ return free_b / 1024 / 1024 / 1024
+
+    @staticmethod
+    def get_local_time(utc_time_str):
+        """
+        Convert a UTC timestamp string to a local-time string
+        ("%Y-%m-%d %H:%M:%S"); on parse failure the input is returned as-is.
+        NOTE(review): the replace('0000', '') looks like a hack to trim long
+        (7-digit) fractional seconds so %f can parse — TODO confirm.
+        NOTE(review): the offset is hard-coded to UTC+8 rather than derived
+        from the host timezone.
+        """
+        try:
+            utc_date = datetime.datetime.strptime(utc_time_str.replace('0000', ''), '%Y-%m-%dT%H:%M:%S.%fZ')
+            local_date = utc_date + datetime.timedelta(hours=8)
+            local_date_str = datetime.datetime.strftime(local_date, '%Y-%m-%d %H:%M:%S')
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            return utc_time_str
+        return local_date_str
+
+ @staticmethod
+ def check_process(pname):
+ """
+ 检查进程序是否存在
+ """
+ if not pname:
+ return False
+ text = subprocess.Popen('ps -ef | grep -v grep | grep %s' % pname, shell=True).communicate()
+ return True if text else False
+
+    @staticmethod
+    def execute(cmd):
+        """
+        Run a shell command and return the FIRST line of its stdout, stripped.
+        Returns "" on error; failures are printed rather than logged.
+        """
+        try:
+            with os.popen(cmd) as p:
+                # readline(): only the first output line is consumed
+                return p.readline().strip()
+        except Exception as err:
+            print(str(err))
+            return ""
+
+    @staticmethod
+    def is_docker():
+        # Docker containers expose the /.dockerenv sentinel file
+        return os.path.exists('/.dockerenv')
+
+    @staticmethod
+    def is_synology():
+        # Synology DSM kernels report "synology" in `uname -a`
+        if SystemUtils.is_windows():
+            return False
+        return True if "synology" in SystemUtils.execute('uname -a') else False
+
+    @staticmethod
+    def is_windows():
+        return True if os.name == "nt" else False
+
+    @staticmethod
+    def is_macos():
+        return True if platform.system() == 'Darwin' else False
+
+    @staticmethod
+    def is_lite_version():
+        # "lite" docker images are flagged via the NASTOOL_VERSION env variable
+        return True if SystemUtils.is_docker() \
+            and os.environ.get("NASTOOL_VERSION") == "lite" else False
+
+    @staticmethod
+    def get_webdriver_path():
+        # Lite images ship without a browser, hence no webdriver path
+        if SystemUtils.is_lite_version():
+            return None
+        else:
+            return WEBDRIVER_PATH.get(SystemUtils.get_system().value)
+
+    @staticmethod
+    def copy(src, dest):
+        """
+        Copy a file preserving metadata.
+        Returns (0, "") on success, (-1, error message) on failure.
+        """
+        try:
+            shutil.copy2(os.path.normpath(src), os.path.normpath(dest))
+            return 0, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def move(src, dest):
+        """
+        Move a file. Returns (0, "") on success, (-1, error message) on failure.
+        NOTE(review): this moves in two steps — first a rename to the target
+        basename inside the source directory, then the cross-directory move —
+        presumably so case-only renames work on case-insensitive filesystems;
+        TODO confirm.
+        """
+        try:
+            tmp_file = os.path.normpath(os.path.join(os.path.dirname(src),
+                                                     os.path.basename(dest)))
+            shutil.move(os.path.normpath(src), tmp_file)
+            shutil.move(tmp_file, os.path.normpath(dest))
+            return 0, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def link(src, dest):
+        """
+        Hard-link src to dest. Returns (0, "") or (-1, error message).
+        """
+        try:
+            if platform.release().find("-z4-") >= 0:
+                # Workaround for ZSpace Z4 NAS kernels: create the link two
+                # levels up first, then move it into place
+                tmp = os.path.normpath(os.path.join(PathUtils.get_parent_paths(dest, 2),
+                                                    os.path.basename(dest)))
+                os.link(os.path.normpath(src), tmp)
+                shutil.move(tmp, os.path.normpath(dest))
+            else:
+                os.link(os.path.normpath(src), os.path.normpath(dest))
+            return 0, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def softlink(src, dest):
+        """
+        Symlink src to dest. Returns (0, "") or (-1, error message).
+        """
+        try:
+            os.symlink(os.path.normpath(src), os.path.normpath(dest))
+            return 0, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def rclone_move(src, dest):
+        """
+        Move via the external `rclone` CLI to the remote named "NASTOOL".
+        Returns (rclone exit code, "") or (-1, error message).
+        """
+        try:
+            src = os.path.normpath(src)
+            # rclone remote paths always use forward slashes
+            dest = dest.replace("\\", "/")
+            retcode = subprocess.run(['rclone', 'moveto',
+                                      src,
+                                      f'NASTOOL:{dest}'],
+                                     startupinfo=SystemUtils.__get_hidden_shell()).returncode
+            return retcode, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def rclone_copy(src, dest):
+        """
+        Copy via the external `rclone` CLI to the remote named "NASTOOL".
+        Returns (rclone exit code, "") or (-1, error message).
+        """
+        try:
+            src = os.path.normpath(src)
+            dest = dest.replace("\\", "/")
+            retcode = subprocess.run(['rclone', 'copyto',
+                                      src,
+                                      f'NASTOOL:{dest}'],
+                                     startupinfo=SystemUtils.__get_hidden_shell()).returncode
+            return retcode, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def minio_move(src, dest):
+        """
+        Move via the MinIO `mc` CLI to the alias "NASTOOL".
+        Returns (mc exit code, "") or (-1, error message).
+        """
+        try:
+            src = os.path.normpath(src)
+            dest = dest.replace("\\", "/")
+            # mc paths are relative to the alias: strip any leading slash
+            if dest.startswith("/"):
+                dest = dest[1:]
+            retcode = subprocess.run(['mc', 'mv',
+                                      '--recursive',
+                                      src,
+                                      f'NASTOOL/{dest}'],
+                                     startupinfo=SystemUtils.__get_hidden_shell()).returncode
+            return retcode, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+    @staticmethod
+    def minio_copy(src, dest):
+        """
+        Copy via the MinIO `mc` CLI to the alias "NASTOOL".
+        Returns (mc exit code, "") or (-1, error message).
+        """
+        try:
+            src = os.path.normpath(src)
+            dest = dest.replace("\\", "/")
+            if dest.startswith("/"):
+                dest = dest[1:]
+            retcode = subprocess.run(['mc', 'cp',
+                                      '--recursive',
+                                      src,
+                                      f'NASTOOL/{dest}'],
+                                     startupinfo=SystemUtils.__get_hidden_shell()).returncode
+            return retcode, ""
+        except Exception as err:
+            ExceptionUtils.exception_traceback(err)
+            return -1, str(err)
+
+ @staticmethod
+ def get_windows_drives():
+ """
+ 获取Windows所有盘符
+ """
+ vols = []
+ for i in range(65, 91):
+ vol = chr(i) + ':'
+ if os.path.isdir(vol):
+ vols.append(vol)
+ return vols
+
+    def find_hardlinks(self, file, fdir=None):
+        """
+        Find all hard links of `file`, excluding the file itself.
+        Windows: shells out to `fsutil hardlink list` (output decoded as GBK
+        — NOTE(review): assumes a Chinese-locale console; confirm) and skips
+        $RECYCLE.BIN entries.
+        POSIX: uses `find <fdir> -inum <inode>`; `fdir` defaults to the
+        file's own directory, so links elsewhere are not discovered.
+        Returns a list of {"file", "filename", "filepath"} dicts.
+        """
+        ret_files = []
+        if os.name == "nt":
+            ret = subprocess.run(
+                ['fsutil', 'hardlink', 'list', file],
+                startupinfo=self.__get_hidden_shell(),
+                stdout=subprocess.PIPE
+            )
+            if ret.returncode != 0:
+                return []
+            if ret.stdout:
+                # fsutil prints drive-less paths; re-attach the drive letter
+                drive = os.path.splitdrive(file)[0]
+                link_files = ret.stdout.decode('GBK').replace('\\', '/').split('\r\n')
+                for link_file in link_files:
+                    if link_file \
+                            and "$RECYCLE.BIN" not in link_file \
+                            and os.path.normpath(file) != os.path.normpath(f'{drive}{link_file}'):
+                        link_file = f'{drive.upper()}{link_file}'
+                        file_name = os.path.basename(link_file)
+                        file_path = os.path.dirname(link_file)
+                        ret_files.append({
+                            "file": link_file,
+                            "filename": file_name,
+                            "filepath": file_path
+                        })
+        else:
+            # POSIX: hard links share the same inode number
+            inode = os.stat(file).st_ino
+            if not fdir:
+                fdir = os.path.dirname(file)
+            stdout = subprocess.run(
+                ['find', fdir, '-inum', str(inode)],
+                stdout=subprocess.PIPE
+            ).stdout
+            if stdout:
+                link_files = stdout.decode('utf-8').split('\n')
+                for link_file in link_files:
+                    if link_file \
+                            and os.path.normpath(file) != os.path.normpath(link_file):
+                        file_name = os.path.basename(link_file)
+                        file_path = os.path.dirname(link_file)
+                        ret_files.append({
+                            "file": link_file,
+                            "filename": file_name,
+                            "filepath": file_path
+                        })
+
+        return ret_files
diff --git a/app/utils/tokens.py b/app/utils/tokens.py
new file mode 100644
index 0000000..e454a2d
--- /dev/null
+++ b/app/utils/tokens.py
@@ -0,0 +1,40 @@
+import re
+
+from config import SPLIT_CHARS
+
+
+class Tokens:
+    """
+    Simple forward-only cursor over a string split on SPLIT_CHARS
+    (empty fragments are dropped).
+    """
+    _text = ""
+    _index = 0
+    _tokens = []
+
+    def __init__(self, text):
+        self._text = text
+        self._tokens = []
+        self.load_text(text)
+
+    def load_text(self, text):
+        # Split on the configured separators, keeping only non-empty pieces
+        splited_text = re.split(r'%s' % SPLIT_CHARS, text)
+        for sub_text in splited_text:
+            if sub_text:
+                self._tokens.append(sub_text)
+
+    def cur(self):
+        # Token at the cursor, or None once exhausted
+        if self._index >= len(self._tokens):
+            return None
+        else:
+            token = self._tokens[self._index]
+            return token
+
+    def get_next(self):
+        # Return the current token and advance the cursor
+        token = self.cur()
+        if token:
+            self._index = self._index + 1
+        return token
+
+    def peek(self):
+        # Look one position PAST the cursor (the token after cur()), or None
+        index = self._index + 1
+        if index >= len(self._tokens):
+            return None
+        else:
+            return self._tokens[index]
diff --git a/app/utils/torrent.py b/app/utils/torrent.py
new file mode 100644
index 0000000..a128886
--- /dev/null
+++ b/app/utils/torrent.py
@@ -0,0 +1,259 @@
+import os.path
+import re
+import datetime
+from urllib.parse import quote, unquote
+
+from bencode import bdecode
+
+from app.utils.http_utils import RequestUtils
+from config import Config
+
+# Built-in public tracker list appended to generated magnet links
+trackers = [
+ "udp://tracker.opentrackr.org:1337/announce",
+ "udp://9.rarbg.com:2810/announce",
+ "udp://opentracker.i2p.rocks:6969/announce",
+ "https://opentracker.i2p.rocks:443/announce",
+ "udp://tracker.torrent.eu.org:451/announce",
+ "udp://tracker1.bt.moack.co.kr:80/announce",
+ "udp://tracker.pomf.se:80/announce",
+ "udp://tracker.moeking.me:6969/announce",
+ "udp://tracker.dler.org:6969/announce",
+ "udp://p4p.arenabg.com:1337/announce",
+ "udp://open.stealth.si:80/announce",
+ "udp://movies.zsw.ca:6969/announce",
+ "udp://ipv4.tracker.harry.lu:80/announce",
+ "udp://explodie.org:6969/announce",
+ "udp://exodus.desync.com:6969/announce",
+ "https://tracker.nanoha.org:443/announce",
+ "https://tracker.lilithraws.org:443/announce",
+ "https://tr.burnabyhighstar.com:443/announce",
+ "http://tracker.mywaifu.best:6969/announce",
+ "http://bt.okmp3.ru:2710/announce"
+]
+
+
+class Torrent:
+    # Directory where downloaded .torrent files are cached
+    _torrent_temp_path = None
+
+    def __init__(self):
+        # Ensure the configured temp directory exists before any download
+        self._torrent_temp_path = Config().get_temp_path()
+        if not os.path.exists(self._torrent_temp_path):
+            os.makedirs(self._torrent_temp_path)
+
+    def get_torrent_info(self, url, cookie=None, ua=None, referer=None, proxy=False):
+        """
+        Download a torrent locally and return its details.
+        :param url: torrent link (magnet links are returned without download)
+        :param cookie: site cookie
+        :param ua: site User-Agent
+        :param referer: referer header, required by some sites
+        :param proxy: whether to use the configured proxy
+        :return: (saved path, torrent content, top folder of the file list,
+                  file list, error message)
+        """
+        if not url:
+            return None, None, "", [], "URL为空"
+        if url.startswith("magnet:"):
+            return None, url, "", [], f"{url} 为磁力链接"
+        try:
+            # Download and persist the torrent file
+            file_path, content, errmsg = self.save_torrent_file(url=url,
+                                                                cookie=cookie,
+                                                                ua=ua,
+                                                                referer=referer,
+                                                                proxy=proxy)
+            if not file_path:
+                return None, content, "", [], errmsg
+            # Parse the torrent's file listing
+            files_folder, files, retmsg = self.get_torrent_files(file_path)
+            # (path, content, top folder, file list, error message)
+            return file_path, content, files_folder, files, retmsg
+
+        except Exception as err:
+            return None, None, "", [], "下载种子文件出现异常:%s" % str(err)
+
+    def save_torrent_file(self, url, cookie=None, ua=None, referer=None, proxy=False):
+        """
+        Download a torrent file into the temp directory.
+        :return: (saved path or None, torrent content or magnet link, error message)
+        NOTE(review): 301/302 redirects are followed manually so a magnet link
+        in the Location header can be detected; a redirect loop here would
+        never terminate.
+        """
+        req = RequestUtils(
+            headers=ua,
+            cookies=cookie,
+            referer=referer,
+            proxies=Config().get_proxies() if proxy else None
+        ).get_res(url=url, allow_redirects=False)
+        while req and req.status_code in [301, 302]:
+            url = req.headers['Location']
+            if url and url.startswith("magnet:"):
+                return None, url, f"获取到磁力链接:{url}"
+            req = RequestUtils(
+                headers=ua,
+                cookies=cookie,
+                referer=referer,
+                proxies=Config().get_proxies() if proxy else None
+            ).get_res(url=url, allow_redirects=False)
+        if req and req.status_code == 200:
+            if not req.content:
+                return None, None, "未下载到种子数据"
+            # Distinguish magnet text from bencoded torrent data
+            if req.text and str(req.text).startswith("magnet:"):
+                return None, req.text, "磁力链接"
+            else:
+                # Validate that the payload really is a bencoded torrent
+                try:
+                    bdecode(req.content)
+                except Exception as err:
+                    print(str(err))
+                    return None, None, "种子数据有误,请确认链接是否正确,如为PT站点则需手工在站点下载一次种子"
+                # Torrent file name from the response headers / URL
+                file_name = self.__get_url_torrent_filename(req, url)
+                # Target path inside the temp directory
+                file_path = os.path.join(self._torrent_temp_path, file_name)
+                # Raw torrent bytes
+                file_content = req.content
+                # Persist to disk
+                with open(file_path, 'wb') as f:
+                    f.write(file_content)
+        elif req is None:
+            return None, None, "无法打开链接:%s" % url
+        else:
+            return None, None, "下载种子出错,状态码:%s" % req.status_code
+
+        return file_path, file_content, ""
+
+    @staticmethod
+    def convert_hash_to_magnet(hash_text, title):
+        """
+        Build a magnet link from an info-hash and append the built-in trackers.
+        :param hash_text: torrent info-hash (hex or base32)
+        :param title: torrent title used as the display name
+        NOTE(review): the regex only takes the first alphanumeric run and does
+        not validate hash length (40 hex / 32 base32 chars) — confirm callers
+        always pass a clean hash.
+        """
+        if not hash_text or not title:
+            return None
+        hash_text = re.search(r'[0-9a-z]+', hash_text, re.IGNORECASE)
+        if not hash_text:
+            return None
+        hash_text = hash_text.group(0)
+        ret_magnet = f'magnet:?xt=urn:btih:{hash_text}&dn={quote(title)}'
+        for tracker in trackers:
+            ret_magnet = f'{ret_magnet}&tr={quote(tracker)}'
+        return ret_magnet
+
@staticmethod
def add_trackers_to_magnet(url, title=None):
    """
    Append the built-in tracker list — and, when given, a display name —
    to an existing magnet link. Returns None for a falsy url.
    """
    # BUG FIX: the original guard was `if not url or not title`, which
    # returned None whenever the OPTIONAL title was omitted, contradicting
    # the title=None default and the `if title` check below. Only a missing
    # url aborts now.
    if not url:
        return None
    ret_magnet = url
    # Add a display name only when one was supplied and none is present yet
    if title and url.find("&dn=") == -1:
        ret_magnet = f'{ret_magnet}&dn={quote(title)}'
    for tracker in trackers:
        ret_magnet = f'{ret_magnet}&tr={quote(tracker)}'
    return ret_magnet
+
+    @staticmethod
+    def get_torrent_files(path):
+        """
+        Parse a .torrent file and return its file listing.
+        :return: (top-level folder name, file name list, error message)
+        NOTE(review): for multi-file torrents only item["path"][0] — the first
+        path component — is recorded, so nested paths lose their sub-folders
+        and entries may repeat; confirm whether full paths are needed.
+        """
+        if not path or not os.path.exists(path):
+            return "", [], f"种子文件不存在:{path}"
+        file_names = []
+        file_folder = ""
+        try:
+            torrent = bdecode(open(path, 'rb').read())
+            if torrent.get("info"):
+                files = torrent.get("info", {}).get("files") or []
+                if files:
+                    # multi-file torrent: "files" list plus a shared root "name"
+                    for item in files:
+                        if item.get("path"):
+                            file_names.append(item["path"][0])
+                    file_folder = torrent.get("info", {}).get("name")
+                else:
+                    # single-file torrent: "name" is the file itself
+                    file_names.append(torrent.get("info", {}).get("name"))
+        except Exception as err:
+            return file_folder, file_names, "解析种子文件异常:%s" % str(err)
+        return file_folder, file_names, ""
+
+    def read_torrent_content(self, path):
+        """
+        Read a local torrent file.
+        :return: (content bytes, top-level folder, file list, error message)
+        """
+        if not path or not os.path.exists(path):
+            return None, "", [], "种子文件不存在:%s" % path
+        content, retmsg, file_folder, files = None, "", "", []
+        try:
+            # Raw torrent bytes
+            with open(path, 'rb') as f:
+                content = f.read()
+            # Parse the embedded file listing
+            file_folder, files, retmsg = self.get_torrent_files(path)
+        except Exception as e:
+            retmsg = "读取种子文件出错:%s" % str(e)
+        return content, file_folder, files, retmsg
+
+    @staticmethod
+    def __get_url_torrent_filename(req, url):
+        """
+        Derive a torrent file name from the HTTP response, falling back to the
+        URL basename for *.torrent links, and finally to a timestamp.
+        NOTE(review): the greedy regex can swallow a closing quote — the
+        endswith('"') trim below compensates; the ISO-8859-1 encode/decode
+        round-trip re-interprets bytes of a mis-decoded header value.
+        """
+        if not req:
+            return ""
+        disposition = req.headers.get('content-disposition') or ""
+        file_name = re.findall(r"filename=\"?(.+)\"?", disposition)
+        if file_name:
+            file_name = unquote(str(file_name[0].encode('ISO-8859-1').decode()).split(";")[0].strip())
+            if file_name.endswith('"'):
+                file_name = file_name[:-1]
+        elif url and url.endswith(".torrent"):
+            file_name = unquote(url.split("/")[-1])
+        else:
+            file_name = str(datetime.datetime.now())
+        return file_name
+
+ @staticmethod
+ def get_magnet_title(url):
+ """
+ 从磁力链接中获取标题
+ """
+ if not url:
+ return ""
+ title = re.findall(r"dn=(.+)&?", url)
+ return unquote(title[0]) if title else ""
+
+ @staticmethod
+ def get_intersection_episodes(target, source, title):
+ """
+ 对两个季集字典进行判重,有相同项目的取集的交集
+ """
+ if not source or not title:
+ return target
+ if not source.get(title):
+ return target
+ if not target.get(title):
+ target[title] = source.get(title)
+ return target
+ index = -1
+ for target_info in target.get(title):
+ index += 1
+ source_info = None
+ for info in source.get(title):
+ if info.get("season") == target_info.get("season"):
+ source_info = info
+ break
+ if not source_info:
+ continue
+ if not source_info.get("episodes"):
+ continue
+ if not target_info.get("episodes"):
+ target_episodes = source_info.get("episodes")
+ target[title][index]["episodes"] = target_episodes
+ continue
+ target_episodes = list(set(target_info.get("episodes")).intersection(set(source_info.get("episodes"))))
+ target[title][index]["episodes"] = target_episodes
+ return target
+
diff --git a/app/utils/types.py b/app/utils/types.py
new file mode 100644
index 0000000..1f7956a
--- /dev/null
+++ b/app/utils/types.py
@@ -0,0 +1,96 @@
+from enum import Enum
+
+
+class MediaType(Enum):
+ TV = '电视剧'
+ MOVIE = '电影'
+ ANIME = '动漫'
+ UNKNOWN = '未知'
+
+
+class DownloaderType(Enum):
+ QB = 'Qbittorrent'
+ TR = 'Transmission'
+ Client115 = '115网盘'
+ PikPak = 'PikPak'
+
+
+class SyncType(Enum):
+ MAN = "手动整理"
+ MON = "目录同步"
+
+
+class SearchType(Enum):
+ WX = "微信"
+ WEB = "WEB"
+ DB = "豆瓣"
+ RSS = "电影/电视剧订阅"
+ USERRSS = "自定义订阅"
+ OT = "手动下载"
+ TG = "Telegram"
+ API = "第三方API请求"
+ SLACK = "Slack"
+ SYNOLOGY = "Synology Chat"
+
+
+class RmtMode(Enum):
+ LINK = "硬链接"
+ SOFTLINK = "软链接"
+ COPY = "复制"
+ MOVE = "移动"
+ RCLONECOPY = "Rclone复制"
+ RCLONE = "Rclone移动"
+ MINIOCOPY = "Minio复制"
+ MINIO = "Minio移动"
+
+
+class MatchMode(Enum):
+ NORMAL = "正常模式"
+ STRICT = "严格模式"
+
+
+class OsType(Enum):
+ WINDOWS = "Windows"
+ LINUX = "Linux"
+ SYNOLOGY = "Synology"
+ MACOS = "MacOS"
+ DOCKER = "Docker"
+
+
+class IndexerType(Enum):
+ BUILTIN = "Indexer"
+
+
+class MediaServerType(Enum):
+ JELLYFIN = "Jellyfin"
+ EMBY = "Emby"
+ PLEX = "Plex"
+
+
+class BrushDeleteType(Enum):
+ NOTDELETE = "不删除"
+ SEEDTIME = "做种时间"
+ RATIO = "分享率"
+ UPLOADSIZE = "上传量"
+ DLTIME = "下载耗时"
+ AVGUPSPEED = "平均上传速度"
+ IATIME = "未活动时间"
+
+
+# 站点框架
+class SiteSchema(Enum):
+ DiscuzX = "Discuz!"
+ Gazelle = "Gazelle"
+ Ipt = "IPTorrents"
+ NexusPhp = "NexusPhp"
+ NexusProject = "NexusProject"
+ NexusRabbit = "NexusRabbit"
+ SmallHorse = "Small Horse"
+ Unit3d = "Unit3d"
+ TorrentLeech = "TorrentLeech"
+ FileList = "FileList"
+ TNode = "TNode"
+
+
+MovieTypes = ['MOV', '电影']
+TvTypes = ['TV', '电视剧']
diff --git a/build_sites.py b/build_sites.py
new file mode 100644
index 0000000..68521fd
--- /dev/null
+++ b/build_sites.py
@@ -0,0 +1,17 @@
+import os.path
+import pickle
+import ruamel.yaml
+from app.utils.path_utils import PathUtils
+from config import Config
+
+
+if __name__ == "__main__":
+ _indexers = []
+ _site_path = os.path.join(Config().get_config_path(), "sites")
+ cfg_files = PathUtils.get_dir_files(in_path=_site_path, exts=[".yml"])
+ for cfg_file in cfg_files:
+ with open(cfg_file, mode='r', encoding='utf-8') as f:
+ print(cfg_file)
+ _indexers.append(ruamel.yaml.YAML().load(f))
+ with open(os.path.join(Config().get_inner_config_path(), "sites.dat"), 'wb') as f:
+ pickle.dump(_indexers, f, pickle.HIGHEST_PROTOCOL)
diff --git a/check_config.py b/check_config.py
new file mode 100644
index 0000000..678475e
--- /dev/null
+++ b/check_config.py
@@ -0,0 +1,753 @@
+import json
+import os
+from werkzeug.security import generate_password_hash
+from app.helper import DbHelper
+from app.utils import StringUtils, ExceptionUtils
+from config import Config
+
+
+def check_config():
+ """
+ 检查配置文件,如有错误进行日志输出
+ """
+ # 检查日志输出
+ if Config().get_config('app'):
+ logtype = Config().get_config('app').get('logtype')
+ if logtype:
+ print("日志输出类型为:%s" % logtype)
+ if logtype == "server":
+ logserver = Config().get_config('app').get('logserver')
+ if not logserver:
+ print("【Config】日志中心地址未配置,无法正常输出日志")
+ else:
+ print("日志将上送到服务器:%s" % logserver)
+ elif logtype == "file":
+ logpath = Config().get_config('app').get('logpath')
+ if not logpath:
+ print("【Config】日志文件路径未配置,无法正常输出日志")
+ else:
+ print("日志将写入文件:%s" % logpath)
+
+ # 检查WEB端口
+ web_port = Config().get_config('app').get('web_port')
+ if not web_port:
+ print("WEB服务端口未设置,将使用默认3000端口")
+
+ # 检查登录用户和密码
+ login_user = Config().get_config('app').get('login_user')
+ login_password = Config().get_config('app').get('login_password')
+ if not login_user or not login_password:
+ print("WEB管理用户或密码未设置,将使用默认用户:admin,密码:password")
+ else:
+ print("WEB管理页面用户:%s" % str(login_user))
+
+ # 检查HTTPS
+ ssl_cert = Config().get_config('app').get('ssl_cert')
+ ssl_key = Config().get_config('app').get('ssl_key')
+ if not ssl_cert or not ssl_key:
+ print("未启用https,请使用 http://IP:%s 访问管理页面" % str(web_port))
+ else:
+ if not os.path.exists(ssl_cert):
+ print("ssl_cert文件不存在:%s" % ssl_cert)
+ if not os.path.exists(ssl_key):
+ print("ssl_key文件不存在:%s" % ssl_key)
+ print("已启用https,请使用 https://IP:%s 访问管理页面" % str(web_port))
+
+ rmt_tmdbkey = Config().get_config('app').get('rmt_tmdbkey')
+ if not rmt_tmdbkey:
+ print("TMDB API Key未配置,媒体整理、搜索下载等功能将无法正常运行!")
+ rmt_match_mode = Config().get_config('app').get('rmt_match_mode')
+ if rmt_match_mode:
+ rmt_match_mode = rmt_match_mode.upper()
+ else:
+ rmt_match_mode = "NORMAL"
+ if rmt_match_mode == "STRICT":
+ print("TMDB匹配模式:严格模式")
+ else:
+ print("TMDB匹配模式:正常模式")
+ else:
+ print("配置文件格式错误,找不到app配置项!")
+
+ # 检查媒体库目录路径
+ if Config().get_config('media'):
+ media_server = Config().get_config('media').get('media_server')
+ if media_server:
+ print("媒体管理软件设置为:%s" % media_server)
+ if media_server == "jellyfin":
+ if not Config().get_config('jellyfin'):
+ print("jellyfin未配置")
+ else:
+ if not Config().get_config('jellyfin').get('host') \
+ or not Config().get_config('jellyfin').get('api_key'):
+ print("jellyfin配置不完整")
+ elif media_server == "plex":
+ if not Config().get_config('plex'):
+ print("plex未配置")
+ else:
+ if not Config().get_config('plex').get('token') \
+ and not Config().get_config('plex').get('username'):
+ print("plex配置不完整")
+ else:
+ if not Config().get_config('emby'):
+ print("emby未配置")
+ else:
+ if not Config().get_config('emby').get('host') \
+ or not Config().get_config('emby').get('api_key'):
+ print("emby配置不完整")
+
+ movie_paths = Config().get_config('media').get('movie_path')
+ if not movie_paths:
+ print("未配置电影媒体库目录")
+ else:
+ if not isinstance(movie_paths, list):
+ movie_paths = [movie_paths]
+ for movie_path in movie_paths:
+ if not os.path.exists(movie_path):
+ print("电影媒体库目录不存在:%s" % movie_path)
+
+ tv_paths = Config().get_config('media').get('tv_path')
+ if not tv_paths:
+ print("未配置电视剧媒体库目录")
+ else:
+ if not isinstance(tv_paths, list):
+ tv_paths = [tv_paths]
+ for tv_path in tv_paths:
+ if not os.path.exists(tv_path):
+ print("电视剧媒体库目录不存在:%s" % tv_path)
+
+ anime_paths = Config().get_config('media').get('anime_path')
+ if anime_paths:
+ if not isinstance(anime_paths, list):
+ anime_paths = [anime_paths]
+ for anime_path in anime_paths:
+ if not os.path.exists(anime_path):
+ print("动漫媒体库目录不存在:%s" % anime_path)
+
+ category = Config().get_config('media').get('category')
+ if not category:
+ print("未配置分类策略")
+ else:
+ print("配置文件格式错误,找不到media配置项!")
+
+ # 检查站点配置
+ if Config().get_config('pt'):
+ pt_client = Config().get_config('pt').get('pt_client')
+ print("下载软件设置为:%s" % pt_client)
+
+ rmt_mode = Config().get_config('pt').get('rmt_mode', 'copy')
+ if rmt_mode == "link":
+ print("默认文件转移模式为:硬链接")
+ elif rmt_mode == "softlink":
+ print("默认文件转移模式为:软链接")
+ elif rmt_mode == "move":
+ print("默认文件转移模式为:移动")
+ elif rmt_mode == "rclone":
+ print("默认文件转移模式为:rclone移动")
+ elif rmt_mode == "rclonecopy":
+ print("默认文件转移模式为:rclone复制")
+ else:
+ print("默认文件转移模式为:复制")
+
+ search_indexer = Config().get_config('pt').get('search_indexer')
+ if search_indexer:
+ print("索引器设置为:%s" % search_indexer)
+
+ search_auto = Config().get_config('pt').get('search_auto')
+ if search_auto:
+ print("微信等移动端渠道搜索已开启自动择优下载")
+
+ ptsignin_cron = Config().get_config('pt').get('ptsignin_cron')
+ if not ptsignin_cron:
+ print("站点自动签到时间未配置,站点签到功能已关闭")
+
+ pt_check_interval = Config().get_config('pt').get('pt_check_interval')
+ if not pt_check_interval:
+ print("RSS订阅周期未配置,RSS订阅功能已关闭")
+
+ pt_monitor = Config().get_config('pt').get('pt_monitor')
+ if not pt_monitor:
+ print("下载软件监控未开启,下载器监控功能已关闭")
+ else:
+ print("配置文件格式错误,找不到pt配置项!")
+
+ # 检查Douban配置
+ if not Config().get_config('douban'):
+ print("豆瓣未配置")
+ else:
+ if not Config().get_config('douban').get('users') \
+ or not Config().get_config('douban').get('types') \
+ or not Config().get_config('douban').get('days'):
+ print("豆瓣配置不完整")
+
+
+def update_config():
+ """
+ 升级配置文件
+ """
+ _config = Config().get_config()
+ _dbhelper = DbHelper()
+ overwrite_cofig = False
+
+ # 密码初始化
+ login_password = _config.get("app", {}).get("login_password") or "password"
+ if login_password and not login_password.startswith("[hash]"):
+ _config['app']['login_password'] = "[hash]%s" % generate_password_hash(
+ login_password)
+ overwrite_cofig = True
+
+ # 实验室配置初始化
+ if not _config.get("laboratory"):
+ _config['laboratory'] = {
+ 'search_keyword': False,
+ 'tmdb_cache_expire': True,
+ 'use_douban_titles': False,
+ 'search_en_title': True,
+ 'chrome_browser': False
+ }
+ overwrite_cofig = True
+
+ # 安全配置初始化
+ if not _config.get("security"):
+ _config['security'] = {
+ 'media_server_webhook_allow_ip': {
+ 'ipv4': '0.0.0.0/0',
+ 'ipv6': '::/0'
+ },
+ 'telegram_webhook_allow_ip': {
+ 'ipv4': '127.0.0.1',
+ 'ipv6': '::/0'
+ }
+ }
+ overwrite_cofig = True
+
+ # Synology Chat安全配置初始化
+ if not _config.get("security", {}).get("synology_webhook_allow_ip"):
+ _config['security']['synology_webhook_allow_ip'] = {
+ 'ipv4': '127.0.0.1',
+ 'ipv6': '::/0'
+ }
+ overwrite_cofig = True
+
+ # API密钥初始化
+ if not _config.get("security", {}).get("api_key"):
+ _config['security']['api_key'] = _config.get("security",
+ {}).get("subscribe_token") \
+ or StringUtils.generate_random_str()
+ if _config.get('security', {}).get('subscribe_token'):
+ _config['security'].pop('subscribe_token')
+ overwrite_cofig = True
+
+ # 刮削NFO配置初始化
+ if not _config.get("scraper_nfo"):
+ _config['scraper_nfo'] = {
+ "movie": {
+ "basic": True,
+ "credits": True,
+ "credits_chinese": False},
+ "tv": {
+ "basic": True,
+ "credits": True,
+ "credits_chinese": False,
+ "season_basic": True,
+ "episode_basic": True,
+ "episode_credits": True}
+ }
+ overwrite_cofig = True
+
+ # 刮削图片配置初始化
+ if not _config.get("scraper_pic"):
+ _config['scraper_pic'] = {
+ "movie": {
+ "poster": True,
+ "backdrop": True,
+ "background": True,
+ "logo": True,
+ "disc": True,
+ "banner": True,
+ "thumb": True},
+ "tv": {
+ "poster": True,
+ "backdrop": True,
+ "background": True,
+ "logo": True,
+ "clearart": True,
+ "banner": True,
+ "thumb": True,
+ "season_poster": True,
+ "season_banner": True,
+ "season_thumb": True,
+ "episode_thumb": False}
+ }
+ overwrite_cofig = True
+
+ # 下载目录配置初始化
+ if not _config.get('downloaddir'):
+ dl_client = _config.get('pt', {}).get('pt_client')
+ if dl_client and _config.get(dl_client):
+ save_path = _config.get(dl_client).get('save_path')
+ if not isinstance(save_path, dict):
+ save_path = {"movie": save_path,
+ "tv": save_path, "anime": save_path}
+ container_path = _config.get(dl_client).get('save_containerpath')
+ if not isinstance(container_path, dict):
+ container_path = {"movie": container_path,
+ "tv": container_path, "anime": container_path}
+ downloaddir = []
+ type_dict = {"movie": "电影", "tv": "电视剧", "anime": "动漫"}
+ for mtype, path in save_path.items():
+ if not path:
+ continue
+ save_dir = path.split('|')[0]
+ save_label = None
+ if len(path.split('|')) > 1:
+ save_label = path.split('|')[1]
+ container_dir = container_path.get(mtype)
+ if save_dir:
+ downloaddir.append({"save_path": save_dir,
+ "type": type_dict.get(mtype),
+ "category": "",
+ "container_path": container_dir,
+ "label": save_label})
+ _config['downloaddir'] = downloaddir
+ if _config.get('qbittorrent', {}).get('save_path'):
+ _config['qbittorrent'].pop('save_path')
+ if _config.get('qbittorrent', {}).get('save_containerpath'):
+ _config['qbittorrent'].pop('save_containerpath')
+ if _config.get('transmission', {}).get('save_path'):
+ _config['transmission'].pop('save_path')
+ if _config.get('transmission', {}).get('save_containerpath'):
+ _config['transmission'].pop('save_containerpath')
+ if _config.get('client115', {}).get('save_path'):
+ _config['client115'].pop('save_path')
+ if _config.get('client115', {}).get('save_containerpath'):
+ _config['client115'].pop('save_containerpath')
+ if _config.get('pikpak', {}).get('save_path'):
+ _config['pikpak'].pop('save_path')
+ if _config.get('pikpak', {}).get('save_containerpath'):
+ _config['pikpak'].pop('save_containerpath')
+ overwrite_cofig = True
+ elif isinstance(_config.get('downloaddir'), dict):
+ downloaddir_list = []
+ for path, attr in _config.get('downloaddir').items():
+ downloaddir_list.append({"save_path": path,
+ "type": attr.get("type"),
+ "category": attr.get("category"),
+ "container_path": attr.get("path"),
+ "label": attr.get("label")})
+ _config['downloaddir'] = downloaddir_list
+ overwrite_cofig = True
+
+ # 自定义识别词兼容旧配置
+ try:
+ ignored_words = Config().get_config('laboratory').get("ignored_words")
+ if ignored_words:
+ ignored_words = ignored_words.split("||")
+ for ignored_word in ignored_words:
+ if not _dbhelper.is_custom_words_existed(replaced=ignored_word):
+ _dbhelper.insert_custom_word(replaced=ignored_word,
+ replace="",
+ front="",
+ back="",
+ offset=0,
+ wtype=1,
+ gid=-1,
+ season=-2,
+ enabled=1,
+ regex=1,
+ whelp="")
+ _config['laboratory'].pop('ignored_words')
+ overwrite_cofig = True
+ replaced_words = Config().get_config('laboratory').get("replaced_words")
+ if replaced_words:
+ replaced_words = replaced_words.split("||")
+ for replaced_word in replaced_words:
+ replaced_word = replaced_word.split("@")
+ if not _dbhelper.is_custom_words_existed(replaced=replaced_word[0]):
+ _dbhelper.insert_custom_word(replaced=replaced_word[0],
+ replace=replaced_word[1],
+ front="",
+ back="",
+ offset=0,
+ wtype=2,
+ gid=-1,
+ season=-2,
+ enabled=1,
+ regex=1,
+ whelp="")
+ _config['laboratory'].pop('replaced_words')
+ overwrite_cofig = True
+ offset_words = Config().get_config('laboratory').get("offset_words")
+ if offset_words:
+ offset_words = offset_words.split("||")
+ for offset_word in offset_words:
+ offset_word = offset_word.split("@")
+ if not _dbhelper.is_custom_words_existed(front=offset_word[0], back=offset_word[1]):
+ _dbhelper.insert_custom_word(replaced="",
+ replace="",
+ front=offset_word[0],
+ back=offset_word[1],
+ offset=offset_word[2],
+ wtype=4,
+ gid=-1,
+ season=-2,
+ enabled=1,
+ regex=1,
+ whelp="")
+ _config['laboratory'].pop('offset_words')
+ overwrite_cofig = True
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+
+ # 目录同步兼容旧配置
+ try:
+ sync_paths = Config().get_config('sync').get('sync_path')
+ rmt_mode = Config().get_config('pt').get('sync_mod')
+ if sync_paths:
+ if isinstance(sync_paths, list):
+ for sync_items in sync_paths:
+ SyncPath = {'from': "",
+ 'to': "",
+ 'unknown': "",
+ 'syncmod': rmt_mode,
+ 'rename': 1,
+ 'enabled': 1}
+ # 是否启用
+ if sync_items.startswith("#"):
+ SyncPath['enabled'] = 0
+ sync_items = sync_items[1:-1]
+ # 是否重命名
+ if sync_items.startswith("["):
+ SyncPath['rename'] = 0
+ sync_items = sync_items[1:-1]
+ # 转移方式
+ config_items = sync_items.split("@")
+ if not config_items:
+ continue
+ if len(config_items) > 1:
+ SyncPath['syncmod'] = config_items[-1]
+ else:
+ SyncPath['syncmod'] = rmt_mode
+ if not SyncPath['syncmod']:
+ continue
+ # 源目录|目的目录|未知目录
+ paths = config_items[0].split("|")
+ if not paths:
+ continue
+ if len(paths) > 0:
+ if not paths[0]:
+ continue
+ SyncPath['from'] = os.path.normpath(paths[0])
+ if len(paths) > 1:
+ SyncPath['to'] = os.path.normpath(paths[1])
+ if len(paths) > 2:
+ SyncPath['unknown'] = os.path.normpath(paths[2])
+ # 相同from的同步目录不能同时开启
+ if SyncPath['enabled'] == 1:
+ _dbhelper.check_config_sync_paths(source=SyncPath['from'],
+ enabled=0)
+ _dbhelper.insert_config_sync_path(source=SyncPath['from'],
+ dest=SyncPath['to'],
+ unknown=SyncPath['unknown'],
+ mode=SyncPath['syncmod'],
+ rename=SyncPath['rename'],
+ enabled=SyncPath['enabled'])
+ else:
+ _dbhelper.insert_config_sync_path(source=sync_paths,
+ dest="",
+ unknown="",
+ mode=rmt_mode,
+ rename=1,
+ enabled=0)
+ _config['sync'].pop('sync_path')
+ overwrite_cofig = True
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+
+ # 消息服务兼容旧配置
+ try:
+ message = Config().get_config('message') or {}
+ msg_channel = message.get('msg_channel')
+ if msg_channel:
+ switchs = []
+ switch = message.get('switch')
+ if switch:
+ if switch.get("download_start"):
+ switchs.append("download_start")
+ if switch.get("download_fail"):
+ switchs.append("download_fail")
+ if switch.get("transfer_finished"):
+ switchs.append("transfer_finished")
+ if switch.get("transfer_fail"):
+ switchs.append("transfer_fail")
+ if switch.get("rss_added"):
+ switchs.append("rss_added")
+ if switch.get("rss_finished"):
+ switchs.append("rss_finished")
+ if switch.get("site_signin"):
+ switchs.append("site_signin")
+ switchs.append('site_message')
+ switchs.append('brushtask_added')
+ switchs.append('brushtask_remove')
+ switchs.append('mediaserver_message')
+ if message.get('telegram'):
+ token = message.get('telegram', {}).get('telegram_token')
+ chat_id = message.get('telegram', {}).get('telegram_chat_id')
+ user_ids = message.get('telegram', {}).get('telegram_user_ids')
+ webhook = message.get('telegram', {}).get('webhook')
+ if token and chat_id:
+ name = "Telegram"
+ ctype = 'telegram'
+ enabled = 1 if msg_channel == ctype else 0
+ interactive = 1 if enabled else 0
+ client_config = json.dumps({
+ 'token': token,
+ 'chat_id': chat_id,
+ 'user_ids': user_ids,
+ 'webhook': webhook
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ if message.get('wechat'):
+ corpid = message.get('wechat', {}).get('corpid')
+ corpsecret = message.get('wechat', {}).get('corpsecret')
+ agent_id = message.get('wechat', {}).get('agentid')
+ default_proxy = message.get('wechat', {}).get('default_proxy')
+ token = message.get('wechat', {}).get('Token')
+ encodingAESkey = message.get(
+ 'wechat', {}).get('EncodingAESKey')
+ if corpid and corpsecret and agent_id:
+ name = "WeChat"
+ ctype = 'wechat'
+ enabled = 1 if msg_channel == ctype else 0
+ interactive = 1 if enabled else 0
+ client_config = json.dumps({
+ 'corpid': corpid,
+ 'corpsecret': corpsecret,
+ 'agentid': agent_id,
+ 'default_proxy': default_proxy,
+ 'token': token,
+ 'encodingAESKey': encodingAESkey
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ if message.get('serverchan'):
+ sckey = message.get('serverchan', {}).get('sckey')
+ if sckey:
+ name = "ServerChan"
+ ctype = 'serverchan'
+ interactive = 0
+ enabled = 1 if msg_channel == ctype else 0
+ client_config = json.dumps({
+ 'sckey': sckey
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ if message.get('bark'):
+ server = message.get('bark', {}).get('server')
+ apikey = message.get('bark', {}).get('apikey')
+ if server and apikey:
+ name = "Bark"
+ ctype = 'bark'
+ interactive = 0
+ enabled = 1 if msg_channel == ctype else 0
+ client_config = json.dumps({
+ 'server': server,
+ 'apikey': apikey
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ if message.get('pushplus'):
+ token = message.get('pushplus', {}).get('push_token')
+ topic = message.get('pushplus', {}).get('push_topic')
+ channel = message.get('pushplus', {}).get('push_channel')
+ webhook = message.get('pushplus', {}).get('push_webhook')
+ if token and channel:
+ name = "PushPlus"
+ ctype = 'pushplus'
+ interactive = 0
+ enabled = 1 if msg_channel == ctype else 0
+ client_config = json.dumps({
+ 'token': token,
+ 'topic': topic,
+ 'channel': channel,
+ 'webhook': webhook
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ if message.get('iyuu'):
+ token = message.get('iyuu', {}).get('iyuu_token')
+ if token:
+ name = "IyuuMsg"
+ ctype = 'iyuu'
+ interactive = 0
+ enabled = 1 if msg_channel == ctype else 0
+ client_config = json.dumps({
+ 'token': token
+ })
+ _dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=client_config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ # 删除旧配置
+ if _config.get('message', {}).get('msg_channel'):
+ _config['message'].pop('msg_channel')
+ if _config.get('message', {}).get('switch'):
+ _config['message'].pop('switch')
+ if _config.get('message', {}).get('wechat'):
+ _config['message'].pop('wechat')
+ if _config.get('message', {}).get('telegram'):
+ _config['message'].pop('telegram')
+ if _config.get('message', {}).get('serverchan'):
+ _config['message'].pop('serverchan')
+ if _config.get('message', {}).get('bark'):
+ _config['message'].pop('bark')
+ if _config.get('message', {}).get('pushplus'):
+ _config['message'].pop('pushplus')
+ if _config.get('message', {}).get('iyuu'):
+ _config['message'].pop('iyuu')
+ overwrite_cofig = True
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+
+ # 站点兼容旧配置
+ try:
+ sites = _dbhelper.get_config_site()
+ for site in sites:
+ if not site.NOTE or str(site.NOTE).find('{') != -1:
+ continue
+ # 是否解析种子详情为|分隔的第1位
+ site_parse = str(site.NOTE).split("|")[0] or "Y"
+ # 站点过滤规则为|分隔的第2位
+ rule_groupid = str(site.NOTE).split("|")[1] if site.NOTE and len(
+ str(site.NOTE).split("|")) > 1 else ""
+ # 站点未读消息为|分隔的第3位
+ site_unread_msg_notify = str(site.NOTE).split("|")[2] if site.NOTE and len(
+ str(site.NOTE).split("|")) > 2 else "Y"
+ # 自定义UA为|分隔的第4位
+ ua = str(site.NOTE).split("|")[3] if site.NOTE and len(
+ str(site.NOTE).split("|")) > 3 else ""
+ # 是否开启浏览器仿真为|分隔的第5位
+ chrome = str(site.NOTE).split("|")[4] if site.NOTE and len(
+ str(site.NOTE).split("|")) > 4 else "N"
+ # 是否使用代理为|分隔的第6位
+ proxy = str(site.NOTE).split("|")[5] if site.NOTE and len(
+ str(site.NOTE).split("|")) > 5 else "N"
+ _dbhelper.update_config_site_note(tid=site.ID, note=json.dumps({
+ "parse": site_parse,
+ "rule": rule_groupid,
+ "message": site_unread_msg_notify,
+ "ua": ua,
+ "chrome": chrome,
+ "proxy": proxy
+ }))
+
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+
+ # 订阅兼容旧配置
+ try:
+ def __parse_rss_desc(desc):
+ rss_sites = []
+ search_sites = []
+ over_edition = False
+ restype = None
+ pix = None
+ team = None
+ rule = None
+ total = None
+ current = None
+ notes = str(desc).split('#')
+ # 订阅站点
+ if len(notes) > 0:
+ if notes[0]:
+ rss_sites = [s for s in str(notes[0]).split(
+ '|') if s and len(s) < 20]
+ # 搜索站点
+ if len(notes) > 1:
+ if notes[1]:
+ search_sites = [s for s in str(notes[1]).split('|') if s]
+ # 洗版
+ if len(notes) > 2:
+ over_edition = notes[2]
+ # 过滤条件
+ if len(notes) > 3:
+ if notes[3]:
+ filters = notes[3].split('@')
+ if len(filters) > 0:
+ restype = filters[0]
+ if len(filters) > 1:
+ pix = filters[1]
+ if len(filters) > 2:
+ rule = int(
+ filters[2]) if filters[2].isdigit() else None
+ if len(filters) > 3:
+ team = filters[3]
+ # 总集数及当前集数
+ if len(notes) > 4:
+ if notes[4]:
+ ep_info = notes[4].split('@')
+ if len(ep_info) > 0:
+ total = int(ep_info[0]) if ep_info[0] else None
+ if len(ep_info) > 1:
+ current = int(ep_info[1]) if ep_info[1] else None
+ return {
+ "rss_sites": rss_sites,
+ "search_sites": search_sites,
+ "over_edition": over_edition,
+ "restype": restype,
+ "pix": pix,
+ "team": team,
+ "rule": rule,
+ "total": total,
+ "current": current
+ }
+
+ # 电影订阅
+ rss_movies = _dbhelper.get_rss_movies()
+ for movie in rss_movies:
+ if not movie.DESC or str(movie.DESC).find('#') == -1:
+ continue
+ # 更新到具体字段
+ _dbhelper.update_rss_movie_desc(
+ rid=movie.ID,
+ desc=json.dumps(__parse_rss_desc(movie.DESC))
+ )
+ # 电视剧订阅
+ rss_tvs = _dbhelper.get_rss_tvs()
+ for tv in rss_tvs:
+ if not tv.DESC or str(tv.DESC).find('#') == -1:
+ continue
+ # 更新到具体字段
+ _dbhelper.update_rss_tv_desc(
+ rid=tv.ID,
+ desc=json.dumps(__parse_rss_desc(tv.DESC))
+ )
+
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+
+ # 重写配置文件
+ if overwrite_cofig:
+ Config().save_config(_config)
diff --git a/config.py b/config.py
new file mode 100644
index 0000000..9d3a7f2
--- /dev/null
+++ b/config.py
@@ -0,0 +1,193 @@
+import os
+import shutil
+import sys
+from threading import Lock
+import ruamel.yaml
+
+# 种子名/文件名要素分隔字符
+SPLIT_CHARS = r"\.|\s+|\(|\)|\[|]|-|\+|【|】|/|~|;|&|\||#|_|「|」|(|)|~"
+# 默认User-Agent
+DEFAULT_UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"
+# 收藏了的媒体的目录名,名字可以改,在Emby中点击红星则会自动将电影转移到此分类下,需要在Emby Webhook中配置用户行为通知
+RMT_FAVTYPE = '精选'
+# 支持的媒体文件后缀格式
+RMT_MEDIAEXT = ['.mp4', '.mkv', '.ts', '.iso',
+ '.rmvb', '.avi', '.mov', '.mpeg',
+ '.mpg', '.wmv', '.3gp', '.asf',
+ '.m4v', '.flv', '.m2ts', '.strm']
+# 支持的字幕文件后缀格式
+RMT_SUBEXT = ['.srt', '.ass', '.ssa']
+# 电视剧动漫的分类genre_ids
+ANIME_GENREIDS = ['16']
+# 默认过滤的文件大小,150M
+RMT_MIN_FILESIZE = 150 * 1024 * 1024
+# 删种检查时间间隔
+AUTO_REMOVE_TORRENTS_INTERVAL = 1800
+# 下载文件转移检查时间间隔
+PT_TRANSFER_INTERVAL = 300
+# TMDB信息缓存定时保存时间
+METAINFO_SAVE_INTERVAL = 600
+# SYNC目录同步聚合转移时间
+SYNC_TRANSFER_INTERVAL = 60
+# RSS队列中处理时间间隔
+RSS_CHECK_INTERVAL = 300
+# 站点流量数据刷新时间间隔(小时)
+REFRESH_PT_DATA_INTERVAL = 6
+# 刷新订阅TMDB数据的时间间隔(小时)
+RSS_REFRESH_TMDB_INTERVAL = 6
+# 刷流删除的检查时间间隔
+BRUSH_REMOVE_TORRENTS_INTERVAL = 300
+# 定时清除未识别的缓存时间间隔(小时)
+META_DELETE_UNKNOWN_INTERVAL = 12
+# 定时刷新壁纸的间隔(小时)
+REFRESH_WALLPAPER_INTERVAL = 1
+# fanart的api,用于拉取封面图片
+FANART_MOVIE_API_URL = 'https://webservice.fanart.tv/v3/movies/%s?api_key=d2d31f9ecabea050fc7d68aa3146015f'
+FANART_TV_API_URL = 'https://webservice.fanart.tv/v3/tv/%s?api_key=d2d31f9ecabea050fc7d68aa3146015f'
+# 默认背景图地址
+DEFAULT_TMDB_IMAGE = 'https://s3.bmp.ovh/imgs/2022/07/10/77ef9500c851935b.webp'
+# 默认微信消息代理服务器地址
+DEFAULT_WECHAT_PROXY = 'https://wechat.nastool.cn'
+# 默认OCR识别服务地址
+DEFAULT_OCR_SERVER = 'https://nastool.cn'
+# 默认TMDB代理服务地址
+DEFAULT_TMDB_PROXY = 'https://tmdb.nastool.cn'
+# 默认CookieCloud服务地址
+DEFAULT_COOKIECLOUD_SERVER = 'http://nastool.cn:8088'
+# TMDB图片地址
+TMDB_IMAGE_W500_URL = 'https://image.tmdb.org/t/p/w500%s'
+TMDB_IMAGE_ORIGINAL_URL = 'https://image.tmdb.org/t/p/original%s'
+TMDB_IMAGE_FACE_URL = 'https://image.tmdb.org/t/p/h632%s'
+TMDB_PEOPLE_PROFILE_URL = 'https://www.themoviedb.org/person/%s'
+# 添加下载时增加的标签,开启只监控NASTool添加的下载时有效
+PT_TAG = "NASTOOL"
+# 电影默认命名格式
+DEFAULT_MOVIE_FORMAT = '{title} ({year})/{title} ({year})-{part} - {videoFormat}'
+# 电视剧默认命名格式
+DEFAULT_TV_FORMAT = '{title} ({year})/Season {season}/{title} - {season_episode}-{part} - 第 {episode} 集'
+# 辅助识别参数
+KEYWORD_SEARCH_WEIGHT_1 = [10, 3, 2, 0.5, 0.5]
+KEYWORD_SEARCH_WEIGHT_2 = [10, 2, 1]
+KEYWORD_SEARCH_WEIGHT_3 = [10, 2]
+KEYWORD_STR_SIMILARITY_THRESHOLD = 0.2
+KEYWORD_DIFF_SCORE_THRESHOLD = 30
+KEYWORD_BLACKLIST = ['中字', '韩语', '双字', '中英', '日语', '双语', '国粤', 'HD', 'BD', '中日', '粤语', '完全版',
+ '法语', '西班牙语', 'HRHDTVAC3264', '未删减版', '未删减', '国语', '字幕组', '人人影视', 'www66ystv',
+ '人人影视制作', '英语', 'www6vhaotv', '无删减版', '完成版', '德意']
+
+# WebDriver路径
+WEBDRIVER_PATH = {
+ "Docker": "/usr/lib/chromium/chromedriver",
+ "Synology": "/var/packages/NASTool/target/bin/chromedriver"
+}
+
+# Xvfb虚拟显示路径
+XVFB_PATH = [
+ "/usr/bin/Xvfb",
+ "/usr/local/bin/Xvfb"
+]
+
+# 线程锁
+lock = Lock()
+
+# 全局实例
+_CONFIG = None
+
+
+def singleconfig(cls):
+ def _singleconfig(*args, **kwargs):
+ global _CONFIG
+ if not _CONFIG:
+ with lock:
+ _CONFIG = cls(*args, **kwargs)
+ return _CONFIG
+
+ return _singleconfig
+
+
+@singleconfig
+class Config(object):
+ _config = {}
+ _config_path = None
+
+ def __init__(self):
+ self._config_path = os.environ.get('NASTOOL_CONFIG')
+ if not os.environ.get('TZ'):
+ os.environ['TZ'] = 'Asia/Shanghai'
+ self.init_syspath()
+ self.init_config()
+
+ def init_config(self):
+ try:
+ if not self._config_path:
+ print("【Config】NASTOOL_CONFIG 环境变量未设置,程序无法工作,正在退出...")
+ quit()
+ if not os.path.exists(self._config_path):
+ cfg_tp_path = os.path.join(self.get_inner_config_path(), "config.yaml")
+ cfg_tp_path = cfg_tp_path.replace("\\", "/")
+ shutil.copy(cfg_tp_path, self._config_path)
+ print("【Config】config.yaml 配置文件不存在,已将配置文件模板复制到配置目录...")
+ with open(self._config_path, mode='r', encoding='utf-8') as cf:
+ try:
+ # 读取配置
+ print("正在加载配置:%s" % self._config_path)
+ self._config = ruamel.yaml.YAML().load(cf)
+ except Exception as e:
+ print("【Config】配置文件 config.yaml 格式出现严重错误!请检查:%s" % str(e))
+ self._config = {}
+ except Exception as err:
+ print("【Config】加载 config.yaml 配置出错:%s" % str(err))
+ return False
+
+ def init_syspath(self):
+ with open(os.path.join(self.get_root_path(),
+ "third_party.txt"), "r") as f:
+ for third_party_lib in f.readlines():
+ module_path = os.path.join(self.get_root_path(),
+ "third_party",
+ third_party_lib.strip()).replace("\\", "/")
+ if module_path not in sys.path:
+ sys.path.append(module_path)
+
+ def get_proxies(self):
+ return self.get_config('app').get("proxies")
+
+ def get_ua(self):
+ return self.get_config('app').get("user_agent") or DEFAULT_UA
+
+ def get_config(self, node=None):
+ if not node:
+ return self._config
+ return self._config.get(node, {})
+
+ def save_config(self, new_cfg):
+ self._config = new_cfg
+ with open(self._config_path, mode='w', encoding='utf-8') as sf:
+ yaml = ruamel.yaml.YAML()
+ return yaml.dump(new_cfg, sf)
+
+ def get_config_path(self):
+ return os.path.dirname(self._config_path)
+
+ def get_temp_path(self):
+ return os.path.join(self.get_config_path(), "temp")
+
+ @staticmethod
+ def get_root_path():
+ return os.path.dirname(os.path.realpath(__file__))
+
+ def get_inner_config_path(self):
+ return os.path.join(self.get_root_path(), "config")
+
+ def get_script_path(self):
+ return os.path.join(self.get_inner_config_path(), "scripts")
+
+ def get_domain(self):
+ domain = (self.get_config('app') or {}).get('domain')
+ if domain and not domain.startswith('http'):
+ domain = "http://" + domain
+ return domain
+
+ @staticmethod
+ def get_timezone():
+ return os.environ.get('TZ')
diff --git a/config/config.yaml b/config/config.yaml
new file mode 100644
index 0000000..0624add
--- /dev/null
+++ b/config/config.yaml
@@ -0,0 +1,320 @@
+# 【配置注意要符合yaml语法,:号后有1个空格,不能使用全角标点符号】
+# 【最新版本已经可以通过WEB页面对所有配置项进行配置,推荐使用WEB页面进行配置】
+# 【文件转移方式的说明】
+# 目前支持的文件转移方式:link、copy、softlink、move、rclone、rclonecopy、minio、miniocopy,link即硬链接、softlink为软链接、copy为复制、move为移动、rclone针对rclone网盘挂载(rclone为移动、rclonecopy为复制)、minio针对MinIO存储(minio为移动、miniocopy为复制)
+# link要求源目录和目的目录或媒体库目录在一个磁盘分区或者存储空间,Docker运行时link模式需要直接映射源目录和目的目录或媒体库目录的上级目录,否则docker可能仍然会认为是跨盘
+# softlink模式注意宿主机的源目录映射到docker容器中后路径要一致,否则可能软链接成功但无法在宿主机使用
+# copy模式会直接复制一份文件数据
+# move会直接移动原文件,会影响做种,请谨慎使用
+# rclone需要自行映射rclone配置目录到容器中,或在容器内完成rclone配置
+app:
+ # 【日志记录类型】:server、file、console
+ # 如果是使用Docker安装建议设置为console,通过Docker管理器查看日志
+ # 如果是使用群晖套件建议配置为 server,可将日志输出到群晖的日志中心便于查看
+ # 其它情况可以设置为file,将日志写入文件
+ logtype: console
+ # 【日志文件的路径】:logtype为file时生效
+ logpath:
+ # 【群晖日志中心IP和端口】:logtype为SERVER时生效。端口一般是514,只需要改动IP为群晖的IP,示例:127.0.0.1:514
+ logserver: 127.0.0.1:514
+ # 【日志级别】:info、debug、error
+ loglevel: info
+ # 【WEB管理界面监听地址】:如需支持ipv6需设置为::,如::无法访问可改为0.0.0.0
+ web_host: "::"
+ # 【WEB管理界面端口】:默认3000
+ web_port: 3000
+ # 【WEB管理页面登录用户】,默认admin
+ login_user: admin
+ # 【WEB管理页面登录密码】:默认password,如果是全数字密码,要用''括起来
+ login_password: password
+ # 【WEB管理界面使用的HTTPS的证书和KEY的路径】,留空则不启用HTTPS
+ ssl_cert:
+ ssl_key:
+ # 【TMDB API KEY】:需要在https://www.themoviedb.org/申请,必须配置,否则无法识别媒体资源和重命名
+ # 以下地址需要网络能够正常访问:api.themoviedb.org、webservice.fanart.tv
+ rmt_tmdbkey:
+ # 【使用TMDB服务器域名】:api.themoviedb.org、api.tmdb.org,如api.themoviedb.org无法访问可尝试使用api.tmdb.org
+ tmdb_domain: api.tmdb.org
+ # 【TMDB匹配模式】:normal、strict,normal模式下如使用文件名/种子名中的年份无法匹配到媒体信息,会去掉年份再匹配一次;strict模式则严格按文件中年份匹配
+ # normal模式下会提升识别成功率,但也可能会导致误识别率增加;strict模式可以降低误识别率,但可能导致很多文件名/种子名中年份不正确的无法被识别(特别是剧集,需要是首播年份)
+ rmt_match_mode: normal
+ # 【设置代理】,themoviedb、fanart、telegram等将使用代理访问,http和https均需配置,可以是http也可以是socks5、socks5h(remote DNS) ,但需要带http或socks5前缀,两项可以配置为一样,留空则不启用
+ # 示例:'http://127.0.0.1:7890' 'socks5://127.0.0.1:8018' 'socks5h://127.0.0.1:8018'
+ proxies:
+ http:
+ https:
+ # 【本系统的WEB的外网地址】:需要是外网IP或者域名,需要包含端口,用于微信/Telegram信息点击跳转,如不需要可配空
+ # 示例:http://IP:3000
+ domain: ""
+ # 【UserAgent】:可适当修改,用于站点签到、豆瓣数据抓取等
+ user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"
+ # 【登录界面壁纸】:themoviedb、bing,设置为themoviedb时需要配置TMDB API Key时才生效
+ wallpaper: bing
+ # Debug mode
+ debug: true
+
+# 【配置媒体库信息】
+media:
+ # 【媒体库管理软件】:emby、jellyfin、plex,需要在emby或jellyfin或plex区配置详细信息,用于下载检查控重、媒体库展示等,建议配置
+ media_server: emby
+ # 【媒体库数据同步周期】:定时同步媒体服务器数据到本地,单位小时
+ mediasync_interval: 12
+ # 【媒体库电影文件存放目录】:支持配置多个目录,不同的硬盘需映射为不同的根目录,以便于程序区分
+ movie_path:
+ # 【媒体库电视剧文件存放目录】:支持配置多个目录,不同的硬盘需映射为不同的根目录,以便于程序区分
+ tv_path:
+ # 【媒体库动漫文件单独存放目录】:支持配置多个目录,不同的硬盘需映射为不同的根目录,以便于程序区分
+ # 如果设置了该目录,则所有动漫电视剧都会识别为动漫并存放在该目录下,否则动漫电视剧会识别为电视剧并存放在电视剧目录分类下;动漫电影仍然在电影目录分类下
+ anime_path:
+ # 【无法识别时转移存放的目录】:如有多个磁盘,需要对应配置多个目录,否则跨盘无法硬链接
+ # 注意:如果你在sync区域配置了未识别目录,则会优先转移到对应未识别目录下,只有下载文件转移及sync未配置未识别目录时才会使用该目录
+ # 未识别的记录同时会在媒体整理->手动识别下面出现,unknown_path只是硬链接一份用于备份,同时手工识别处理后程序也不会主动删除,如果不想要多硬链接一份,可以不配置该目录
+ unknown_path:
+ # 【二级分类开关】:电影/电视剧/动漫是否需要二级分类,启用二级分类后会在电影/电视剧/动漫目录下按二级分类名建立子目录
+ # 此处配置分类的策略名,配置文件目录中需要有与策略名同名的.yaml配置文件
+ # 默认策略default-category分类设置可参考"default-category.yaml",分类参见README.MD说明
+ # 如不需要启用分类,则该项配置为空
+ category: "default-category"
+ # 【转移到媒体库的最小文件大小】:避免预告片/MV等影响识别,单位M
+ min_filesize: 150
+ # 【文件名转移忽略词】:文件名包含忽略词,忽略转移
+ ignored_files:
+ # 【文件路径转移忽略词】:文件路径包含忽略词,忽略转移
+ ignored_paths:
+ # 【洗版开关】:如开启则新下载了更大的文件会覆盖媒体库目录中已有的文件
+ filesize_cover: true
+ # 【电影命名定义】:程序会按定义的命名格式对电影进行重命名;/代表上下级目录,{}内为占位符;占位符会使用文件识别出来的实际值替换;占位符外的字符会当成普通字符,直接体现在名称上
+ # 电影占位符有:{title}:标题,{en_title}:英文标题,{original_title}:原语种标题,{original_name}:原文件名,{year}:年份,{edition}:版本(Bluray/WEB-DL等),{videoFormat}:分辨率(1080p/4k等),{videoCodec}:视频编码,{audioCodec}:音频编码及声道,{effect}: 视频特效(DV,HDR等), {tmdbid}:TMDB的ID,{part}:part1/disc1/dvd1,{releaseGroup}:制作组/字幕组等
+ movie_name_format: "{title} ({year})/{title}-{part} ({year}) - {videoFormat}"
+ # 【电视剧命名定义】:程序会按定义的命名格式对电视剧进行重命名;/代表上下级目录,{}内为占位符;占位符会使用文件识别出来的实际值替换,占位符外的字符会当成普通字符,直接体现在名称上
+ # 电视剧占位符有:{title}:标题,{en_title}:英文标题,{original_title}:原语种标题,{original_name}:原文件名,{year}:年份,{edition}:版本(Bluray/WEB-DL等),{videoFormat}:分辨率(1080p/4k等),{videoCodec}:视频编码,{audioCodec}:音频编码及声道,{effect}: 视频特效(DV,HDR等), {tmdbid}:TMDB的ID,{season}:季数,{episode}:集数,{season_episode}:剧集SxxExx,{part}:part1/disc1/dvd1,{releaseGroup}:制作组/字幕组等
+ tv_name_format: "{title} ({year})/Season {season}/{title}-{part} - {season_episode} - 第{episode}集"
+ # 【刮削元数据及图片】:开启后文件转移完成时会自动生成nfo描述文件及poster海报,协助媒体服务器识别和搜刮
+ nfo_poster: false
+ # 【实时刷新媒体库】:开启后文件转移完成时会实时刷新媒体服务器(Emby/Jellyfin/Plex)的媒体库
+ refresh_mediaserver: true
+
+# 配置Emby服务器信息
+emby:
+ # 【Emby服务器IP地址和端口】:注意区分http和https,http时可以不加http://,https时必须加https://
+ host: http://127.0.0.1:8096
+ # 【Emby ApiKey】:在Emby设置->高级->API密钥处生成,注意不要复制到了应用名称
+ api_key:
+
+# 配置Jellyfin服务器信息
+jellyfin:
+ # 【Jellyfin服务器IP地址和端口】:注意区分http和https,http时可以不加http://,https时必须加https://
+ host: http://127.0.0.1:8096
+ # 【Jellyfin ApiKey】:在Jellyfin设置->高级->API密钥处生成
+ api_key:
+
+# 配置Plex服务器信息
+plex:
+ # 【Plex服务器IP地址和端口】:注意区分http和https,http时可以不加http://,https时必须加https://
+ host: http://127.0.0.1:32400
+ # 【X-Plex-Token】:Plex页面Cookie中的X-Plex-Token,如填写token则无需填写servername、username、password
+ token:
+ # 【Plex服务器的名称】
+ servername:
+ # 【Plex用户名】
+ username:
+ # 【Plex用户密码】
+ password:
+
+# 【配置nfo刮削信息】
+scraper_nfo:
+ # 电影
+ movie:
+ basic: true
+ credits: true
+ credits_chinese: true
+ # 电视剧
+ tv:
+ basic: true
+ credits: true
+ credits_chinese: true
+ # 季
+ season_basic: true
+ # 集
+ episode_basic: true
+ episode_credits: true
+
+# 【配置图片刮削信息】
+scraper_pic:
+ # 电影
+ movie:
+ poster: true
+ backdrop: true
+ background: true
+ logo: true
+ disc: true
+ banner: true
+ thumb: true
+ # 电视剧
+ tv:
+ poster: true
+ backdrop: true
+ background: true
+ logo: true
+ clearart: true
+ banner: true
+ thumb: true
+ # 季
+ season_poster: true
+ season_banner: true
+ season_thumb: true
+ # 集
+ episode_thumb: false
+
+# 【配置消息通知服务】
+message:
+ # 【Emby播放状态通知白名单】:配置了Emby webhooks插件回调时,用户播放媒体库中的媒体时会发送消息通知,本处配置哪些用户的设备不通知,避免打扰,配置格式:用户:设备名称,可用 - 增加多项
+ webhook_ignore:
+
+# 【配置文件夹监控】:文件夹内容发生变化时自动识别转移
+sync:
+ # 监控目录配置已转移至数据库
+ # 【监控目录操作系统类型】:windows、linux。如果是windows,目录同步功能性能会比较差,会导致NAS不能休眠,除非是挂载的windows的远程共享目录或者是windows的docker,否则建议设置为linux
+ nas_sys: linux
+
+# 【配置站点检索信息】
+pt:
+ # 【下载使用的客户端软件】:qbittorrent、transmission、client115、pikpak等
+ pt_client: qbittorrent
+ # 【下载软件监控开关】:是否监控下载软件:true、false,如为true则下载完成会自动转移和重命名,如为false则不会处理
+ # 下载软件监控与Sync下载目录同步不要同时开启,否则功能存在重复
+ pt_monitor: false
+ # 【只监控NASTool添加的下载】:启用后只有NASTool添加的下载才会被自动转移和显示,关闭则下载软件中所有的任务都会转移和显示
+ pt_monitor_only: true
+ # 【下载完成后转移到媒体库的转移模式】:link、copy、softlink、move、rclone、rclonecopy、minio、miniocopy,详情参考顶部说明
+ rmt_mode: link
+ #【聚合检索使用的检索器】:builtin
+ search_indexer: builtin
+ # 【内建索引器使用的站点】:只有在该站点列表中内建索引器搜索时才会使用
+ indexer_sites:
+ # 【远程搜索自动择优下载开关】:如开启则微信等渠道搜索后会自动择优选择一项下载,如不开启则需要手工点击进入WEB页面选择下载
+ # 如没有配置app.domain或无公网环境建议开启,否则无法跳转WEB页面手工选择
+ search_auto: true
+ # 【远程下载不完整自动订阅】:如开启,远程搜索下载不完整时,会自动添加RSS订阅
+ search_no_result_rss: false
+ # 【站点每日签到时间】
+ # 三种配置方法,1、配置间隔,单位小时,建议不要设置为24小时的整数倍,避免每天的签到时间一样。2、配置固定时间,如'08:00',注意要加引号和冒号。3、配置时间范围,如08:00-09:00,表示在该时间范围内随机执行一次
+ ptsignin_cron: "08:01"
+ # 【RSS订阅开关】:此处配置RSS订阅检查时间间隔,即每隔多长时间检查一下各站点是否有资源更新,建议不要少于30分钟,单位时间为秒
+ # 配置为空或者0则不启用RSS订阅功能
+ pt_check_interval: 1800
+ # 【定量搜索RSS开关】:打开后,每隔设置时间会通过站点资源检索的方式查询和下载订阅,单位:小时,配置小于6小时时强制为6小时,不配置则为关
+ search_rss_interval: 6
+ # 【下载优先规则】:订阅及远程搜索下载将按此优先规则选择下载资源,字典:site 站点优先、seeder做种数优先
+ download_order: site
+ # 【搜索结果数量限制】:每个站点返回搜索结果的最大数量
+ site_search_result_num: 100
+
+# 【配置qBittorrent下载软件】:pt区的pt_client如配置为qbittorrent则需要同步配置该项
+qbittorrent:
+ # 【qBittorrent IP地址和端口】:注意如果qb启动了HTTPS证书,则需要配置为https://IP
+ qbhost:
+ qbport:
+ # qBittorrent 登录用户名和密码
+ qbusername:
+ qbpassword:
+ # 转移完成后是否自动强制作种,按需要设置
+ force_upload: true
+ # 是否开启自动管理模式
+ auto_management: false
+
+# 【配置transmission下载软件】:pt区的pt_client如配置为transmission则需要同步配置该项,需要3.0以上版本,否则可能会报错
+transmission:
+ # 【transmission IP地址和端口】:注意如果tr启用了HTTPS证书,则需要配置为https://IP
+ trhost:
+ trport:
+ # transmission 登录用户名和密码
+ trusername:
+ trpassword:
+
+# 配置 115 网盘下载器
+client115:
+ # 115 Cookie 抓包获取
+ cookie:
+
+# 配置 pikpak 网盘下载器
+pikpak:
+ # 用户名
+ username:
+ # 密码
+ password:
+ # 代理
+ proxy:
+
+# 【下载目录】:配置下载目录,自动按分类下载到指定目录
+downloaddir:
+
+# 【配置豆瓣账号信息】:配置后会自动同步豆瓣收藏,豆瓣标记想看内容后,后台自动下载
+douban:
+ # 【用户ID列表】:豆瓣电影个人主页URL中people后面的那一串数字,或者使用豆瓣App个人信息中查看。可以配置多个,注意要加引号
+ # 这里可以是自己的,也可以是别人的,比如填写几个大V的账号ID,实现热门影视自动下载
+ users:
+ - ""
+ # 【豆瓣Cookie】:选配,嫌麻烦的可以不用配置,可能影响个别电影的同步
+ cookie:
+ # 【同步天数】:同步多少天内加入的数据
+ days: 30
+ # 【同步间隔】:多久同步一次数据,单位小时,建议不要太频繁,避免被检测到后封号
+ interval:
+ # 【同步数据类型】:同步哪些类型的收藏数据:do 在看,wish 想看,collect 看过,用逗号分隔配置
+ types: "wish"
+ # 【自动下载开关】:同步到豆瓣的数据后是否自动检索站点并下载
+ auto_search: true
+ # 【自动添加RSS开关】:站点检索找不到的记录是否自动添加RSS订阅(可实现未搜索到的自动追更)
+ auto_rss: true
+
+# 【配置字幕自动下载】
+subtitle:
+ # 【下载渠道】:opensubtitles、chinesesubfinder
+ server: opensubtitles
+ # opensubtitles.org
+ opensubtitles:
+ # 是否启用
+ enable: true
+ # 配置ChineseSubFinder的服务器地址和API Key,API Key在ChineseSubFinder->配置中心->实验室->API Key处生成
+ chinesesubfinder:
+ # IP地址和端口
+ host:
+ # API KEY
+ api_key:
+ # NASTOOL媒体的映射路径
+ local_path:
+ # ChineseSubFinder媒体的映射路径
+ remote_path:
+
+# 【配置安全】
+security:
+ # 【媒体服务器webhook允许ip范围】:即只有如下范围的IP才允许调用webhook
+ media_server_webhook_allow_ip:
+ ipv4: 0.0.0.0/0
+ ipv6: ::/0
+ # 【Telegram webhook允许ip范围】:即只有如下范围的IP才允许调用webhook
+ telegram_webhook_allow_ip:
+ ipv4: 127.0.0.1
+ ipv6: ::/0
+ # 【Synology Chat webhook允许ip范围】:即只有如下范围的IP才允许调用webhook
+ synology_webhook_allow_ip:
+ ipv4: 127.0.0.1
+ ipv6: ::/0
+ # 【API认证密钥】:用于Jellyseerr、Overseerr中Authorization认证以及非客户端类的API调用
+ api_key:
+
+# 【实验室】
+laboratory:
+ # 【识别增强】关键字猜想
+ search_keyword: false
+ # 【识别增强】通过TMDB WEB检索
+ search_tmdbweb: false
+ # 【TMDB缓存过期策略】:是否开启TMDB缓存过期策略,默认7天过期,过期缓存将被删除, 7天内访问过期时间可以被刷新
+ tmdb_cache_expire: true
+ # 【使用豆瓣名称联想】:开启将使用豆瓣进行电影电视剧的名称联想,否则使用TMDB的数据
+ use_douban_titles: false
+ # 【精确搜索使用英文名称】:开启后对于精确搜索场景(远程搜索、订阅搜索等)将会使用英文名检索站点资源以提升匹配度,但对有些站点资源标题全是中文的则需要关闭,否则匹配不到
+ search_en_title: true
+ # 【使用TMDB代理】
+ tmdb_proxy: false
diff --git a/config/default-category.yaml b/config/default-category.yaml
new file mode 100644
index 0000000..c5d857d
--- /dev/null
+++ b/config/default-category.yaml
@@ -0,0 +1,219 @@
+# 配置电影的分类策略, 配置为空或者不配置该项则不启用电影分类
+movie:
+ # 分类名同时也是目录名,会按先后顺序匹配,匹配后程序会按这个名称建立二级目录
+ 华语电影:
+ # 分类依据,可以是:original_language 语种、production_countries(电影)/origin_country(电视剧) 国家或地区、genre_ids 内容类型等,只要TMDB API返回的字段中有就行
+ # 配置多项条件时,需要同时满足;不需要的匹配项可以删掉或者配置为空
+ # 匹配值有多个时用,号分隔,这里是匹配语种
+ original_language: 'zh,cn,bo,za'
+ 动画电影:
+ # 匹配 genre_ids 内容类型,16是动漫
+ genre_ids: '16'
+ # 未配置任何过滤条件时,则按先后顺序不符合上面分类的都会在这个分类下,建议配置在最末尾
+ 外语电影:
+
+# 配置电视剧的分类策略, 配置为空或者不配置该项则不启用电视剧分类
+tv:
+ # 分类名同时也是目录名,会按先后顺序匹配,匹配后程序会按这个名称建立二级目录
+ # 如果有配置动漫独立目录,则实际上不会使用到tv下的动漫二级分类
+ 动漫:
+ # 匹配 genre_ids 内容类型,16是动漫
+ genre_ids: '16'
+ 纪录片:
+ # 匹配 genre_ids 内容类型,99是纪录片
+ genre_ids: '99'
+ 儿童:
+ # 匹配 genre_ids 内容类型,10762是儿童
+ genre_ids: '10762'
+ 综艺:
+ # 匹配 genre_ids 内容类型,10764 10767都是综艺
+ genre_ids: '10764,10767'
+ 国产剧:
+ # 匹配 origin_country 国家,CN是中国大陆,TW是中国台湾,HK是中国香港
+ origin_country: 'CN,TW,HK'
+ 欧美剧:
+ # 匹配 origin_country 国家,主要欧美国家列表
+ origin_country: 'US,FR,GB,DE,ES,IT,NL,PT,RU,UK'
+ 日韩剧:
+ # 匹配 origin_country 国家,主要亚洲国家列表
+ origin_country: 'JP,KP,KR,TH,IN,SG'
+ # 未匹配以上分类,则命名为未分类
+ 未分类:
+
+# 配置动漫的分类策略, 配置为空或者不配置该项则不启用动漫分类
+anime:
+ # 如果你的anime_path动漫目录已经直接设置到了动漫子目录,则这个分类可以取消
+ 动漫:
+ # 匹配 genre_ids 内容类型,16是动漫
+ genre_ids: '16'
+
+## genre_ids 内容类型 字典,注意部分中英文是不一样的
+# 28 Action
+# 12 Adventure
+# 16 Animation
+# 35 Comedy
+# 80 Crime
+# 99 Documentary
+# 18 Drama
+# 10751 Family
+# 14 Fantasy
+# 36 History
+# 27 Horror
+# 10402 Music
+# 9648 Mystery
+# 10749 Romance
+# 878 Science Fiction
+# 10770 TV Movie
+# 53 Thriller
+# 10752 War
+# 37 Western
+# 28 动作
+# 12 冒险
+# 16 动画
+# 35 喜剧
+# 80 犯罪
+# 99 纪录
+# 18 剧情
+# 10751 家庭
+# 14 奇幻
+# 36 历史
+# 27 恐怖
+# 10402 音乐
+# 9648 悬疑
+# 10749 爱情
+# 878 科幻
+# 10770 电视电影
+# 53 惊悚
+# 10752 战争
+# 37 西部
+
+## original_language 语种 字典
+# af 南非语
+# ar 阿拉伯语
+# az 阿塞拜疆语
+# be 比利时语
+# bg 保加利亚语
+# ca 加泰隆语
+# cs 捷克语
+# cy 威尔士语
+# da 丹麦语
+# de 德语
+# dv 第维埃语
+# el 希腊语
+# en 英语
+# eo 世界语
+# es 西班牙语
+# et 爱沙尼亚语
+# eu 巴士克语
+# fa 法斯语
+# fi 芬兰语
+# fo 法罗语
+# fr 法语
+# gl 加里西亚语
+# gu 古吉拉特语
+# he 希伯来语
+# hi 印地语
+# hr 克罗地亚语
+# hu 匈牙利语
+# hy 亚美尼亚语
+# id 印度尼西亚语
+# is 冰岛语
+# it 意大利语
+# ja 日语
+# ka 格鲁吉亚语
+# kk 哈萨克语
+# kn 卡纳拉语
+# ko 朝鲜语
+# kok 孔卡尼语
+# ky 吉尔吉斯语
+# lt 立陶宛语
+# lv 拉脱维亚语
+# mi 毛利语
+# mk 马其顿语
+# mn 蒙古语
+# mr 马拉地语
+# ms 马来语
+# mt 马耳他语
+# nb 挪威语(伯克梅尔)
+# nl 荷兰语
+# ns 北梭托语
+# pa 旁遮普语
+# pl 波兰语
+# pt 葡萄牙语
+# qu 克丘亚语
+# ro 罗马尼亚语
+# ru 俄语
+# sa 梵文
+# se 北萨摩斯语
+# sk 斯洛伐克语
+# sl 斯洛文尼亚语
+# sq 阿尔巴尼亚语
+# sv 瑞典语
+# sw 斯瓦希里语
+# syr 叙利亚语
+# ta 泰米尔语
+# te 泰卢固语
+# th 泰语
+# tl 塔加路语
+# tn 茨瓦纳语
+# tr 土耳其语
+# ts 宗加语
+# tt 鞑靼语
+# uk 乌克兰语
+# ur 乌都语
+# uz 乌兹别克语
+# vi 越南语
+# xh 班图语
+# zh 中文
+# cn 中文
+# zu 祖鲁语
+
+## origin_country 国家地区 字典
+# AR 阿根廷
+# AU 澳大利亚
+# BE 比利时
+# BR 巴西
+# CA 加拿大
+# CH 瑞士
+# CL 智利
+# CO 哥伦比亚
+# CZ 捷克
+# DE 德国
+# DK 丹麦
+# EG 埃及
+# ES 西班牙
+# FR 法国
+# GR 希腊
+# HK 香港
+# IL 以色列
+# IN 印度
+# IQ 伊拉克
+# IR 伊朗
+# IT 意大利
+# JP 日本
+# MM 缅甸
+# MO 澳门
+# MX 墨西哥
+# MY 马来西亚
+# NL 荷兰
+# NO 挪威
+# PH 菲律宾
+# PK 巴基斯坦
+# PL 波兰
+# RU 俄罗斯
+# SE 瑞典
+# SG 新加坡
+# TH 泰国
+# TR 土耳其
+# US 美国
+# VN 越南
+# CN 中国 内地
+# GB 英国
+# TW 中国台湾
+# NZ 新西兰
+# SA 沙特阿拉伯
+# LA 老挝
+# KP 朝鲜 北朝鲜
+# KR 韩国 南朝鲜
+# PT 葡萄牙
+# MN 蒙古国 蒙古
diff --git a/config/scripts/init_filter.sql b/config/scripts/init_filter.sql
new file mode 100644
index 0000000..044e6ba
--- /dev/null
+++ b/config/scripts/init_filter.sql
@@ -0,0 +1,100 @@
+INSERT OR IGNORE INTO "CONFIG_FILTER_GROUP" ("ID","GROUP_NAME","IS_DEFAULT","NOTE") VALUES
+ (1000,'日常观影','N',NULL);
+INSERT OR IGNORE INTO "CONFIG_FILTER_RULES" ("ID","GROUP_ID","ROLE_NAME","PRIORITY","INCLUDE","EXCLUDE","SIZE_LIMIT","NOTE") VALUES
+ (10000,'1000','1080p特效-bluray','1','特效
+1080[pi]
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10001,'1000','1080p中字-bluray','2','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+1080[pi]
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10002,'1000','4k特效-bluray','3','特效
+4k|2160p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10003,'1000','4k中字-bluray','4','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+4k|2160p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10004,'1000','高清特效-bluray','5','特效
+720p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10005,'1000','高清中字-bluray','6','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+720p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10006,'1000','1080p-bluray','7','1080[pi]
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10007,'1000','4k-bluray','8','4k|2160p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10008,'1000','高清-bluray','9','720p
+blu-?ray
+[Hx].?26[45]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10025,'1000','1080p特效-其他来源','1','特效
+1080[pi]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10026,'1000','1080p中字-其他来源','2','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+1080[pi]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10027,'1000','4k特效-其他来源','3','特效
+4k|2160p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10028,'1000','4k中字-其他来源','4','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+4k|2160p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','1,30',NULL),
+ (10029,'1000','高清特效-其他来源','5','特效
+720p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10030,'1000','高清中字-其他来源','6','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+720p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10031,'1000','1080p-其他来源','7','1080[pi]','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10032,'1000','4k-其他来源','8','4k|2160p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL),
+ (10033,'1000','高清-其他来源','9','720p','Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|\Wsdr\W|minibd|[\W_]diy[\W_]|[\W_]3d[\W_]|REMUX','30',NULL);
+INSERT OR IGNORE INTO "CONFIG_FILTER_GROUP" ("ID","GROUP_NAME","IS_DEFAULT","NOTE") VALUES
+ (1001,'洗版收藏','N',NULL);
+INSERT OR IGNORE INTO "CONFIG_FILTER_RULES" ("ID","GROUP_ID","ROLE_NAME","PRIORITY","INCLUDE","EXCLUDE","SIZE_LIMIT","NOTE") VALUES
+ (10009,'1001','DIY典藏-4K-原盘','1','Mbps@Audies|Oldboys
+4k|2160p
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10010,'1001','DIY典藏-1080p-原盘','2','Mbps@Audies|Oldboys
+1080[pi]
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10011,'1001','特效典藏-4K-原盘','3','特效
+4k|2160p
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10012,'1001','特效典藏-1080p-原盘','4','特效
+1080[pi]
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10013,'1001','中字典藏-4K-原盘','5','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+4k|2160p
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10014,'1001','中字典藏-1080p-原盘','6','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+1080[pi]
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10015,'1001','典藏-4K-原盘','7','4k|2160p
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10016,'1001','典藏-1080p-原盘','8','1080[pi]
+Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC','[Hx].?26[45]','20,99',NULL),
+ (10017,'1001','DIY典藏-4K-REMUX','1','Mbps@Audies|Oldboys
+4k|2160p
+remux','[Hx].?26[45]','20,99',NULL),
+ (10018,'1001','DIY典藏-1080p-REMUX','2','Mbps@Audies|Oldboys
+1080[pi]
+remux','[Hx].?26[45]','20,99',NULL),
+ (10019,'1001','特效典藏-4K-REMUX','3','特效
+4k|2160p
+remux','[Hx].?26[45]','20,99',NULL),
+ (10020,'1001','特效典藏-1080p-REMUX','4','特效
+1080[pi]
+remux','[Hx].?26[45]','20,99',NULL),
+ (10021,'1001','中字典藏-4K-REMUX','5','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+4k|2160p
+remux','[Hx].?26[45]','20,99',NULL),
+ (10022,'1001','中字典藏-1080p-REMUX','6','[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文
+1080[pi]
+remux','[Hx].?26[45]','20,99',NULL),
+ (10023,'1001','典藏-4K-REMUX','7','4k|2160p
+remux','[Hx].?26[45]','20,99',NULL),
+ (10024,'1001','典藏-1080p-REMUX','8','1080[pi]
+remux','[Hx].?26[45]','20,99',NULL);
+INSERT OR IGNORE INTO "CONFIG_FILTER_GROUP" ("ID","GROUP_NAME","IS_DEFAULT","NOTE") VALUES
+ (9999,'不过滤','Y',NULL);
\ No newline at end of file
diff --git a/config/scripts/init_userrss_v3.sql b/config/scripts/init_userrss_v3.sql
new file mode 100644
index 0000000..1235aaf
--- /dev/null
+++ b/config/scripts/init_userrss_v3.sql
@@ -0,0 +1,100 @@
+INSERT OR REPLACE INTO "CONFIG_RSS_PARSER" ("ID", "NAME", "TYPE", "FORMAT", "PARAMS", "NOTE", "SYSDEF") VALUES ('1', '通用', 'XML', '{
+ "list": "//channel/item",
+ "item": {
+ "title": {
+ "path": ".//title/text()"
+ },
+ "enclosure": {
+ "path": ".//enclosure[@type=''application/x-bittorrent'']/@url"
+ },
+ "link": {
+ "path": ".//link/text()"
+ },
+ "date": {
+ "path": ".//pubDate/text()"
+ },
+ "description": {
+ "path": ".//description/text()"
+ },
+ "size": {
+ "path": ".//link/@length"
+ }
+ }
+}', '', '', 'Y');
+INSERT OR REPLACE INTO "CONFIG_RSS_PARSER" ("ID", "NAME", "TYPE", "FORMAT", "PARAMS", "NOTE", "SYSDEF") VALUES ('2', '蜜柑计划', 'XML', '{
+ "list": "//channel/item",
+ "item": {
+ "title": {
+ "path": ".//title/text()"
+ },
+ "enclosure": {
+ "path": ".//enclosure[@type=''application/x-bittorrent'']/@url"
+ },
+ "link": {
+ "path": "link/text()",
+ "namespaces": "https://mikanani.me/0.1/"
+ },
+ "date": {
+ "path": "pubDate/text()",
+ "namespaces": "https://mikanani.me/0.1/"
+ },
+ "description": {
+ "path": ".//description/text()"
+ },
+ "size": {
+ "path": ".//enclosure[@type=''application/x-bittorrent'']/@length"
+ }
+ }
+}', '', '', 'Y');
+INSERT OR REPLACE INTO "CONFIG_RSS_PARSER" ("ID", "NAME", "TYPE", "FORMAT", "PARAMS", "NOTE", "SYSDEF") VALUES ('3', 'TMDB电影片单', 'JSON', '{
+ "list": "$.items",
+ "item": {
+ "title": {
+ "path": "title"
+ },
+ "year": {
+ "path": "release_date"
+ },
+ "type": {
+ "value": "movie"
+ }
+ }
+}', 'api_key={TMDBKEY}&language=zh-CN', '', 'Y');
+INSERT OR REPLACE INTO "CONFIG_RSS_PARSER" ("ID", "NAME", "TYPE", "FORMAT", "PARAMS", "NOTE", "SYSDEF") VALUES ('4', 'TMDB电视剧片单', 'JSON', '{
+ "list": "$.items",
+ "item": {
+ "title": {
+ "path": "name"
+ },
+ "year": {
+ "path": "first_air_date"
+ },
+ "type": {
+ "value": "tv"
+ }
+ }
+}', 'api_key={TMDBKEY}&language=zh-CN', '', 'Y');
+INSERT OR REPLACE INTO "CONFIG_RSS_PARSER" ("ID", "NAME", "TYPE", "FORMAT", "PARAMS", "NOTE", "SYSDEF") VALUES ('5', 'Nyaa', 'XML', '{
+ "list": "//channel/item",
+ "item": {
+ "title": {
+ "path": ".//title/text()"
+ },
+ "enclosure": {
+ "path": ".//link/text()"
+ },
+ "link": {
+ "path": ".//guid/text()"
+ },
+ "date": {
+ "path": ".//pubDate/text()"
+ },
+ "description": {
+ "path": ".//description/text()"
+ },
+ "size": {
+ "path": "size/text()",
+ "namespaces": "https://nyaa.si/xmlns/nyaa"
+ }
+ }
+}', '', '', 'Y');
\ No newline at end of file
diff --git a/config/scripts/reset_db_version.sql b/config/scripts/reset_db_version.sql
new file mode 100644
index 0000000..9083322
--- /dev/null
+++ b/config/scripts/reset_db_version.sql
@@ -0,0 +1 @@
+delete from alembic_version where 1
\ No newline at end of file
diff --git a/config/scripts/update_subscribe.sql b/config/scripts/update_subscribe.sql
new file mode 100644
index 0000000..7f97acb
--- /dev/null
+++ b/config/scripts/update_subscribe.sql
@@ -0,0 +1,2 @@
+UPDATE RSS_MOVIES SET DOWNLOAD_SETTING = null WHERE DOWNLOAD_SETTING = -1;
+UPDATE RSS_TVS SET DOWNLOAD_SETTING = null WHERE DOWNLOAD_SETTING = -1;
diff --git a/config/scripts/update_userpris.sql b/config/scripts/update_userpris.sql
new file mode 100644
index 0000000..5b1e5b7
--- /dev/null
+++ b/config/scripts/update_userpris.sql
@@ -0,0 +1 @@
+UPDATE main.CONFIG_USERS SET PRIS = replace(PRIS, '推荐', '探索') WHERE 1
\ No newline at end of file
diff --git a/config/scripts/update_userrss.sql b/config/scripts/update_userrss.sql
new file mode 100644
index 0000000..7fc540c
--- /dev/null
+++ b/config/scripts/update_userrss.sql
@@ -0,0 +1 @@
+UPDATE CONFIG_USER_RSS SET PROCESS_COUNT = '0' WHERE PROCESS_COUNT is null
\ No newline at end of file
diff --git a/config/sites.dat b/config/sites.dat
new file mode 100644
index 0000000..1301d43
Binary files /dev/null and b/config/sites.dat differ
diff --git a/db_scripts/README b/db_scripts/README
new file mode 100644
index 0000000..98e4f9c
--- /dev/null
+++ b/db_scripts/README
@@ -0,0 +1 @@
+Generic single-database configuration.
\ No newline at end of file
diff --git a/db_scripts/env.py b/db_scripts/env.py
new file mode 100644
index 0000000..0192775
--- /dev/null
+++ b/db_scripts/env.py
@@ -0,0 +1,80 @@
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+from app.db.models import Base
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
+ url = config.get_main_option("sqlalchemy.url")
+ context.configure(
+ url=url,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ render_as_batch=True
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online() -> None:
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+ connectable = engine_from_config(
+ config.get_section(config.config_ini_section),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ with connectable.connect() as connection:
+ context.configure(
+ connection=connection, target_metadata=target_metadata
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/db_scripts/script.py.mako b/db_scripts/script.py.mako
new file mode 100644
index 0000000..55df286
--- /dev/null
+++ b/db_scripts/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/db_scripts/versions/720a6289a697_1_1_0.py b/db_scripts/versions/720a6289a697_1_1_0.py
new file mode 100644
index 0000000..6eb4d57
--- /dev/null
+++ b/db_scripts/versions/720a6289a697_1_1_0.py
@@ -0,0 +1,150 @@
+"""1.1.0
+
+Revision ID: 720a6289a697
+Revises: None
+Create Date: 2023-01-22 08:18:00.723780
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '720a6289a697'
+down_revision = None
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ # 1.0.0
+ op.execute('DROP TABLE IF EXISTS IGNORED_WORDS')
+ op.execute('DROP TABLE IF EXISTS REPLACED_WORDS')
+ op.execute('DROP TABLE IF EXISTS OFFSET_WORDS')
+ try:
+ with op.batch_alter_table("CUSTOM_WORDS") as batch_op:
+ batch_op.alter_column('OFFSET', type_=sa.Text, existing_type=sa.Integer)
+ except Exception as e:
+ print(str(e))
+ # 1.0.1
+ try:
+ with op.batch_alter_table("CONFIG_USER_RSS") as batch_op:
+ batch_op.add_column(sa.Column('SAVE_PATH', sa.Text))
+ batch_op.add_column(sa.Column('DOWNLOAD_SETTING', sa.Integer))
+ except Exception as e:
+ print(str(e))
+ # 1.0.2
+ try:
+ with op.batch_alter_table("RSS_MOVIES") as batch_op:
+ batch_op.add_column(sa.Column('RSS_SITES', sa.Text))
+ batch_op.add_column(sa.Column('SEARCH_SITES', sa.Text))
+ batch_op.add_column(sa.Column('OVER_EDITION', sa.Integer))
+ batch_op.add_column(sa.Column('FILTER_RESTYPE', sa.Text))
+ batch_op.add_column(sa.Column('FILTER_PIX', sa.Text))
+ batch_op.add_column(sa.Column('FILTER_RULE', sa.Integer))
+ batch_op.add_column(sa.Column('FILTER_TEAM', sa.Text))
+ batch_op.add_column(sa.Column('SAVE_PATH', sa.Text))
+ batch_op.add_column(sa.Column('DOWNLOAD_SETTING', sa.Integer))
+ batch_op.add_column(sa.Column('FUZZY_MATCH', sa.Integer))
+ batch_op.add_column(sa.Column('NOTE', sa.Text))
+ except Exception as e:
+ print(str(e))
+ try:
+ with op.batch_alter_table("RSS_TVS") as batch_op:
+ batch_op.add_column(sa.Column('RSS_SITES', sa.Text))
+ batch_op.add_column(sa.Column('SEARCH_SITES', sa.Text))
+ batch_op.add_column(sa.Column('OVER_EDITION', sa.Integer))
+ batch_op.add_column(sa.Column('FILTER_RESTYPE', sa.Text))
+ batch_op.add_column(sa.Column('FILTER_PIX', sa.Text))
+ batch_op.add_column(sa.Column('FILTER_RULE', sa.Integer))
+ batch_op.add_column(sa.Column('FILTER_TEAM', sa.Text))
+ batch_op.add_column(sa.Column('SAVE_PATH', sa.Text))
+ batch_op.add_column(sa.Column('DOWNLOAD_SETTING', sa.Integer))
+ batch_op.add_column(sa.Column('FUZZY_MATCH', sa.Integer))
+ batch_op.add_column(sa.Column('TOTAL_EP', sa.Integer))
+ batch_op.add_column(sa.Column('CURRENT_EP', sa.Integer))
+ batch_op.add_column(sa.Column('NOTE', sa.Text))
+ except Exception as e:
+ print(str(e))
+ # 1.0.3
+ try:
+ with op.batch_alter_table("TRANSFER_HISTORY") as batch_op:
+ batch_op.alter_column('FILE_PATH', new_column_name="SOURCE_PATH", existing_type=sa.Text)
+ batch_op.alter_column('FILE_NAME', new_column_name="SOURCE_FILENAME", existing_type=sa.Text)
+ batch_op.alter_column('SE', new_column_name="SEASON_EPISODE", existing_type=sa.Text)
+ batch_op.add_column(sa.Column('TMDBID', sa.Integer))
+ batch_op.add_column(sa.Column('DEST_PATH', sa.Text))
+ batch_op.add_column(sa.Column('DEST_FILENAME', sa.Text))
+ except Exception as e:
+ print(str(e))
+ try:
+ with op.batch_alter_table("DOWNLOAD_SETTING") as batch_op:
+ batch_op.add_column(sa.Column('DOWNLOADER', sa.Text))
+ except Exception as e:
+ print(str(e))
+ # 1.0.7
+ try:
+ with op.batch_alter_table("TRANSFER_UNKNOWN") as batch_op:
+ batch_op.add_column(sa.Column('MODE', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # 1.0.8
+ try:
+ with op.batch_alter_table("CONFIG_USER_RSS") as batch_op:
+ batch_op.add_column(sa.Column('RECOGNIZATION', sa.Text, nullable=True))
+ batch_op.add_column(sa.Column('MEDIAINFOS', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # 1.0.9
+ try:
+ with op.batch_alter_table("SITE_USER_INFO_STATS") as batch_op:
+ batch_op.drop_column('FAVICON')
+ except Exception as e:
+ print(e)
+ try:
+ with op.batch_alter_table("DOUBAN_MEDIAS") as batch_op:
+ batch_op.add_column(sa.Column('ADD_TIME', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ try:
+ with op.batch_alter_table("SITE_BRUSH_TASK") as batch_op:
+ batch_op.add_column(sa.Column('SENDMESSAGE', sa.Text, nullable=True))
+ batch_op.add_column(sa.Column('FORCEUPLOAD', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # 1.0.10
+ try:
+ with op.batch_alter_table("RSS_MOVIES") as batch_op:
+ batch_op.add_column(sa.Column('FILTER_ORDER', sa.Integer, nullable=True))
+ except Exception as e:
+ print(str(e))
+ try:
+ with op.batch_alter_table("RSS_TVS") as batch_op:
+ batch_op.add_column(sa.Column('FILTER_ORDER', sa.Integer, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # 1.0.11
+ try:
+ with op.batch_alter_table("RSS_MOVIES") as batch_op:
+ batch_op.add_column(sa.Column('KEYWORD', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ try:
+ with op.batch_alter_table("RSS_TVS") as batch_op:
+ batch_op.add_column(sa.Column('KEYWORD', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # 1.0.12
+ try:
+ with op.batch_alter_table("CONFIG_USER_RSS") as batch_op:
+ batch_op.add_column(sa.Column('OVER_EDITION', sa.Integer, nullable=True))
+ batch_op.add_column(sa.Column('SITES', sa.Text, nullable=True))
+ batch_op.add_column(sa.Column('FILTER_ARGS', sa.Text, nullable=True))
+ except Exception as e:
+ print(str(e))
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ pass
diff --git a/dbscript_gen.py b/dbscript_gen.py
new file mode 100644
index 0000000..d5e4b52
--- /dev/null
+++ b/dbscript_gen.py
@@ -0,0 +1,12 @@
+import os
+from config import Config
+from alembic.config import Config as AlembicConfig
+from alembic.command import revision as alembic_revision
+
+db_version = input("请输入版本号:")
+db_location = os.path.join(Config().get_config_path(), 'user.db').replace('\\', '/')
+script_location = os.path.join(os.path.dirname(__file__), 'db_scripts').replace('\\', '/')
+alembic_cfg = AlembicConfig()
+alembic_cfg.set_main_option('script_location', script_location)
+alembic_cfg.set_main_option('sqlalchemy.url', f"sqlite:///{db_location}")
+alembic_revision(alembic_cfg, db_version, True)
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..293e513
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,41 @@
+FROM alpine
+RUN apk add --no-cache libffi-dev \
+ && apk add --no-cache $(echo $(wget --no-check-certificate -qO- https://raw.githubusercontent.com/NAStool/nas-tools/master/package_list.txt)) \
+ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \
+ && echo "${TZ}" > /etc/timezone \
+ && ln -sf /usr/bin/python3 /usr/bin/python \
+ && curl https://rclone.org/install.sh | bash \
+ && if [ "$(uname -m)" = "x86_64" ]; then ARCH=amd64; elif [ "$(uname -m)" = "aarch64" ]; then ARCH=arm64; fi \
+ && curl https://dl.min.io/client/mc/release/linux-${ARCH}/mc --create-dirs -o /usr/bin/mc \
+ && chmod +x /usr/bin/mc \
+ && pip install --upgrade pip setuptools wheel \
+ && pip install cython \
+ && pip install -r https://raw.githubusercontent.com/NAStool/nas-tools/master/requirements.txt \
+ && apk del libffi-dev \
+ && npm install pm2 -g \
+ && rm -rf /tmp/* /root/.cache /var/cache/apk/*
+ENV LANG="C.UTF-8" \
+ TZ="Asia/Shanghai" \
+ NASTOOL_CONFIG="/config/config.yaml" \
+ NASTOOL_AUTO_UPDATE=true \
+ NASTOOL_CN_UPDATE=true \
+ NASTOOL_VERSION=master \
+ PS1="\u@\h:\w \$ " \
+ REPO_URL="https://github.com/NAStool/nas-tools.git" \
+ PYPI_MIRROR="https://pypi.tuna.tsinghua.edu.cn/simple" \
+ ALPINE_MIRROR="mirrors.ustc.edu.cn" \
+ PUID=0 \
+ PGID=0 \
+ UMASK=000 \
+ WORKDIR="/nas-tools"
+WORKDIR ${WORKDIR}
+RUN python_ver=$(python3 -V | awk '{print $2}') \
+ && echo "${WORKDIR}/" > /usr/lib/python${python_ver%.*}/site-packages/nas-tools.pth \
+ && echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf \
+ && echo 'fs.inotify.max_user_instances=524288' >> /etc/sysctl.conf \
+ && git config --global pull.ff only \
+ && git clone -b master ${REPO_URL} ${WORKDIR} --depth=1 --recurse-submodule \
+ && git config --global --add safe.directory ${WORKDIR}
+EXPOSE 3000
+VOLUME ["/config"]
+ENTRYPOINT ["/nas-tools/docker/entrypoint.sh"]
\ No newline at end of file
diff --git a/docker/Dockerfile.beta b/docker/Dockerfile.beta
new file mode 100644
index 0000000..fafe141
--- /dev/null
+++ b/docker/Dockerfile.beta
@@ -0,0 +1,41 @@
+FROM alpine
+RUN apk add --no-cache libffi-dev \
+ && apk add --no-cache $(echo $(wget --no-check-certificate -qO- https://raw.githubusercontent.com/NAStool/nas-tools/dev/package_list.txt)) \
+ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \
+ && echo "${TZ}" > /etc/timezone \
+ && ln -sf /usr/bin/python3 /usr/bin/python \
+ && curl https://rclone.org/install.sh | bash \
+ && if [ "$(uname -m)" = "x86_64" ]; then ARCH=amd64; elif [ "$(uname -m)" = "aarch64" ]; then ARCH=arm64; fi \
+ && curl https://dl.min.io/client/mc/release/linux-${ARCH}/mc --create-dirs -o /usr/bin/mc \
+ && chmod +x /usr/bin/mc \
+ && pip install --upgrade pip setuptools wheel \
+ && pip install cython \
+ && pip install -r https://raw.githubusercontent.com/NAStool/nas-tools/dev/requirements.txt \
+ && apk del libffi-dev \
+ && npm install pm2 -g \
+ && rm -rf /tmp/* /root/.cache /var/cache/apk/*
+ENV LANG="C.UTF-8" \
+ TZ="Asia/Shanghai" \
+ NASTOOL_CONFIG="/config/config.yaml" \
+ NASTOOL_AUTO_UPDATE=true \
+ NASTOOL_CN_UPDATE=true \
+ NASTOOL_VERSION=dev \
+ PS1="\u@\h:\w \$ " \
+ REPO_URL="https://github.com/NAStool/nas-tools.git" \
+ PYPI_MIRROR="https://pypi.tuna.tsinghua.edu.cn/simple" \
+ ALPINE_MIRROR="mirrors.ustc.edu.cn" \
+ PUID=0 \
+ PGID=0 \
+ UMASK=000 \
+ WORKDIR="/nas-tools"
+WORKDIR ${WORKDIR}
+RUN python_ver=$(python3 -V | awk '{print $2}') \
+ && echo "${WORKDIR}/" > /usr/lib/python${python_ver%.*}/site-packages/nas-tools.pth \
+ && echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf \
+ && echo 'fs.inotify.max_user_instances=524288' >> /etc/sysctl.conf \
+ && git config --global pull.ff only \
+ && git clone -b dev ${REPO_URL} ${WORKDIR} --depth=1 --recurse-submodule \
+ && git config --global --add safe.directory ${WORKDIR}
+EXPOSE 3000
+VOLUME ["/config"]
+ENTRYPOINT ["/nas-tools/docker/entrypoint.sh"]
diff --git a/docker/Dockerfile.lite b/docker/Dockerfile.lite
new file mode 100644
index 0000000..023172a
--- /dev/null
+++ b/docker/Dockerfile.lite
@@ -0,0 +1,48 @@
+FROM alpine
+RUN apk add --no-cache libffi-dev \
+ git \
+ gcc \
+ musl-dev \
+ python3-dev \
+ py3-pip \
+ libxml2-dev \
+ libxslt-dev \
+ tzdata \
+ su-exec \
+ dumb-init \
+ npm \
+ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \
+ && echo "${TZ}" > /etc/timezone \
+ && ln -sf /usr/bin/python3 /usr/bin/python \
+ && pip install --upgrade pip setuptools wheel \
+ && pip install cython \
+ && pip install -r https://raw.githubusercontent.com/NAStool/nas-tools/master/requirements.txt \
+ && npm install pm2 -g \
+ && apk del --purge libffi-dev gcc musl-dev libxml2-dev libxslt-dev \
+ && pip uninstall -y cython \
+ && rm -rf /tmp/* /root/.cache /var/cache/apk/*
+ENV LANG="C.UTF-8" \
+ TZ="Asia/Shanghai" \
+ NASTOOL_CONFIG="/config/config.yaml" \
+ NASTOOL_AUTO_UPDATE=false \
+ NASTOOL_CN_UPDATE=true \
+ NASTOOL_VERSION=lite \
+ PS1="\u@\h:\w \$ " \
+ REPO_URL="https://github.com/NAStool/nas-tools.git" \
+ PYPI_MIRROR="https://pypi.tuna.tsinghua.edu.cn/simple" \
+ ALPINE_MIRROR="mirrors.ustc.edu.cn" \
+ PUID=0 \
+ PGID=0 \
+ UMASK=000 \
+ WORKDIR="/nas-tools"
+WORKDIR ${WORKDIR}
+RUN python_ver=$(python3 -V | awk '{print $2}') \
+ && echo "${WORKDIR}/" > /usr/lib/python${python_ver%.*}/site-packages/nas-tools.pth \
+ && echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf \
+ && echo 'fs.inotify.max_user_instances=524288' >> /etc/sysctl.conf \
+ && git config --global pull.ff only \
+ && git clone -b master ${REPO_URL} ${WORKDIR} --depth=1 --recurse-submodule \
+ && git config --global --add safe.directory ${WORKDIR}
+EXPOSE 3000
+VOLUME ["/config"]
+ENTRYPOINT ["/nas-tools/docker/entrypoint.sh"]
\ No newline at end of file
diff --git a/docker/compose.yml b/docker/compose.yml
new file mode 100644
index 0000000..18e2eb8
--- /dev/null
+++ b/docker/compose.yml
@@ -0,0 +1,19 @@
+version: "3"
+services:
+ nas-tools:
+ image: jxxghp/nas-tools:latest
+ ports:
+ - 3000:3000 # 默认的webui控制端口
+ volumes:
+ - ./config:/config # 冒号左边请修改为你想保存配置的路径
+ - /你的媒体目录:/你想设置的容器内能见到的目录 # 媒体目录,多个目录需要分别映射进来,需要满足配置文件说明中的要求
+ environment:
+ - PUID=0 # 想切换为哪个用户来运行程序,该用户的uid
+ - PGID=0 # 想切换为哪个用户来运行程序,该用户的gid
+ - UMASK=000 # 掩码权限,默认000,可以考虑设置为022
+ - NASTOOL_AUTO_UPDATE=false # 如需在启动容器时自动升级程序请设置为true
+ #- REPO_URL=https://ghproxy.com/https://github.com/NAStool/nas-tools.git # 当你访问github网络很差时,可以考虑取消本行注释
+ restart: always
+ network_mode: bridge
+ hostname: nas-tools
+ container_name: nas-tools
\ No newline at end of file
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
new file mode 100755
index 0000000..d6fd324
--- /dev/null
+++ b/docker/entrypoint.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+
+cd ${WORKDIR}
+if [ "${NASTOOL_AUTO_UPDATE}" = "true" ]; then
+ if [ ! -s /tmp/requirements.txt.sha256sum ]; then
+ sha256sum requirements.txt > /tmp/requirements.txt.sha256sum
+ fi
+ if [ ! -s /tmp/third_party.txt.sha256sum ]; then
+ sha256sum third_party.txt > /tmp/third_party.txt.sha256sum
+ fi
+ if [ "${NASTOOL_VERSION}" != "lite" ]; then
+ if [ ! -s /tmp/package_list.txt.sha256sum ]; then
+ sha256sum package_list.txt > /tmp/package_list.txt.sha256sum
+ fi
+ fi
+ echo "更新程序..."
+ git remote set-url origin "${REPO_URL}" &> /dev/null
+ echo "windows/" > .gitignore
+ if [ "${NASTOOL_VERSION}" == "dev" ]; then
+ branch="dev"
+ else
+ branch="master"
+ fi
+ git clean -dffx
+ git fetch --depth 1 origin ${branch}
+ git reset --hard origin/${branch}
+ if [ $? -eq 0 ]; then
+ echo "更新成功..."
+ # Python依赖包更新
+ hash_old=$(cat /tmp/requirements.txt.sha256sum)
+ hash_new=$(sha256sum requirements.txt)
+ if [ "${hash_old}" != "${hash_new}" ]; then
+ echo "检测到requirements.txt有变化,重新安装依赖..."
+ if [ "${NASTOOL_CN_UPDATE}" = "true" ]; then
+ pip install --upgrade pip setuptools wheel -i "${PYPI_MIRROR}"
+ pip install -r requirements.txt -i "${PYPI_MIRROR}"
+ else
+ pip install --upgrade pip setuptools wheel
+ pip install -r requirements.txt
+ fi
+ if [ $? -ne 0 ]; then
+ echo "无法安装依赖,请更新镜像..."
+ else
+ echo "依赖安装成功..."
+ sha256sum requirements.txt > /tmp/requirements.txt.sha256sum
+ hash_old=$(cat /tmp/third_party.txt.sha256sum)
+ hash_new=$(sha256sum third_party.txt)
+ if [ "${hash_old}" != "${hash_new}" ]; then
+ echo "检测到third_party.txt有变化,更新第三方组件..."
+ git submodule update --init --recursive
+ if [ $? -ne 0 ]; then
+ echo "无法更新第三方组件,请更新镜像..."
+ else
+ echo "第三方组件安装成功..."
+ sha256sum third_party.txt > /tmp/third_party.txt.sha256sum
+ fi
+ fi
+ fi
+ fi
+ # 系统软件包更新
+ if [ "${NASTOOL_VERSION}" != "lite" ]; then
+ hash_old=$(cat /tmp/package_list.txt.sha256sum)
+ hash_new=$(sha256sum package_list.txt)
+ if [ "${hash_old}" != "${hash_new}" ]; then
+ echo "检测到package_list.txt有变化,更新软件包..."
+ if [ "${NASTOOL_CN_UPDATE}" = "true" ]; then
+ sed -i "s/dl-cdn.alpinelinux.org/${ALPINE_MIRROR}/g" /etc/apk/repositories
+ apk update -f
+ fi
+ apk add --no-cache libffi-dev
+ apk add --no-cache $(echo $(cat package_list.txt))
+ if [ $? -ne 0 ]; then
+ echo "无法更新软件包,请更新镜像..."
+ else
+ apk del libffi-dev
+ echo "软件包安装成功..."
+ sha256sum package_list.txt > /tmp/package_list.txt.sha256sum
+ fi
+ fi
+ fi
+ else
+ echo "更新失败,继续使用旧的程序来启动..."
+ fi
+else
+ echo "程序自动升级已关闭,如需自动升级请在创建容器时设置环境变量:NASTOOL_AUTO_UPDATE=true"
+fi
+
+echo "以PUID=${PUID},PGID=${PGID}的身份启动程序..."
+
+if [ "${NASTOOL_VERSION}" = "lite" ]; then
+ mkdir -p /.pm2
+ chown -R "${PUID}":"${PGID}" "${WORKDIR}" /config /.pm2
+else
+ mkdir -p /.local
+ mkdir -p /.pm2
+ chown -R "${PUID}":"${PGID}" "${WORKDIR}" /config /usr/lib/chromium /.local /.pm2
+ export PATH=${PATH}:/usr/lib/chromium
+fi
+umask "${UMASK}"
+exec su-exec "${PUID}":"${PGID}" "$(which dumb-init)" "$(which pm2-runtime)" start run.py -n NAStool --interpreter python3
diff --git a/docker/readme.md b/docker/readme.md
new file mode 100644
index 0000000..c18713b
--- /dev/null
+++ b/docker/readme.md
@@ -0,0 +1,93 @@
+## 特点
+
+- 基于alpine实现,镜像体积小;
+
+- 镜像层数少;
+
+- 支持 amd64/arm64 架构;
+
+- 重启即可更新程序,如果依赖有变化,会自动尝试重新安装依赖,若依赖自动安装不成功,会提示更新镜像;
+
+- 可以以非root用户执行任务,降低程序权限和潜在风险;
+
+- 可以设置文件掩码权限umask。
+
+- lite 版本不包含浏览器内核及xvfb,不支持浏览器仿真;不支持Rclone/Minio转移方式;不支持复杂依赖变更时的自动安装升级;但是体积更小。
+
+## 创建
+
+**注意**
+
+- 媒体目录的设置必须符合 [配置说明](https://github.com/NAStool/nas-tools#%E9%85%8D%E7%BD%AE) 的要求。
+
+- umask含义详见:http://www.01happy.com/linux-umask-analyze 。
+
+- 创建后请根据 [配置说明](https://github.com/NAStool/nas-tools#%E9%85%8D%E7%BD%AE) 及该文件本身的注释,修改`config/config.yaml`,修改好后再重启容器,最后访问`http://
",
+ re.sub(r"<[^>]+>", "",
+ re.sub(r"
", "####", message.get("content"), flags=re.IGNORECASE)))
+ message_html.append(f"""
+
".join(rule_htmls)
+
+ @staticmethod
+ def __clear_tmdb_cache(data):
+ """
+ 清空TMDB缓存
+ """
+ try:
+ MetaHelper().clear_meta_data()
+ os.remove(MetaHelper().get_meta_data_path())
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 0, "msg": str(e)}
+ return {"code": 0}
+
+ @staticmethod
+ def __check_site_attr(data):
+ """
+ 检查站点标识
+ """
+ site_attr = Sites().get_grapsite_conf(data.get("url"))
+ site_free = site_2xfree = site_hr = False
+ if site_attr.get("FREE"):
+ site_free = True
+ if site_attr.get("2XFREE"):
+ site_2xfree = True
+ if site_attr.get("HR"):
+ site_hr = True
+ return {"code": 0, "site_free": site_free, "site_2xfree": site_2xfree, "site_hr": site_hr}
+
+ @staticmethod
+ def __refresh_process(data):
+ """
+ 刷新进度条
+ """
+ detail = ProgressHelper().get_process(data.get("type"))
+ if detail:
+ return {"code": 0, "value": detail.get("value"), "text": detail.get("text")}
+ else:
+ return {"code": 1, "value": 0, "text": "正在处理..."}
+
+ @staticmethod
+ def __restory_backup(data):
+ """
+ 解压恢复备份文件
+ """
+ filename = data.get("file_name")
+ if filename:
+ config_path = Config().get_config_path()
+ temp_path = Config().get_temp_path()
+ file_path = os.path.join(temp_path, filename)
+ try:
+ shutil.unpack_archive(file_path, config_path, format='zip')
+ return {"code": 0, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+ finally:
+ if os.path.exists(file_path):
+ os.remove(file_path)
+
+ return {"code": 1, "msg": "文件不存在"}
+
+ @staticmethod
+ def __start_mediasync(data):
+ """
+ 开始媒体库同步
+ """
+ ThreadHelper().start_thread(MediaServer().sync_mediaserver, ())
+ return {"code": 0}
+
+ @staticmethod
+ def __mediasync_state(data):
+ """
+ 获取媒体库同步数据情况
+ """
+ status = MediaServer().get_mediasync_status()
+ if not status:
+ return {"code": 0, "text": "未同步"}
+ else:
+ return {"code": 0, "text": "电影:%s,电视剧:%s,同步时间:%s" %
+ (status.get("movie_count"),
+ status.get("tv_count"),
+ status.get("time"))}
+
+ @staticmethod
+ def __get_tvseason_list(data):
+ """
+ 获取剧集季列表
+ """
+ tmdbid = data.get("tmdbid")
+ title = data.get("title")
+ if title:
+ title_season = MetaInfo(title=title).begin_season
+ else:
+ title_season = None
+ if not str(tmdbid).isdigit():
+ media_info = WebUtils.get_mediainfo_from_id(mtype=MediaType.TV,
+ mediaid=tmdbid)
+ season_infos = Media().get_tmdb_tv_seasons(media_info.tmdb_info)
+ else:
+ season_infos = Media().get_tmdb_tv_seasons_byid(tmdbid=tmdbid)
+ if title_season:
+ seasons = [
+ {
+ "text": "第%s季" % title_season,
+ "num": title_season
+ }
+ ]
+ else:
+ seasons = [
+ {
+ "text": "第%s季" % cn2an.an2cn(season.get("season_number"), mode='low'),
+ "num": season.get("season_number")
+ }
+ for season in season_infos
+ ]
+ return {"code": 0, "seasons": seasons}
+
+ @staticmethod
+ def __get_userrss_task(data):
+ """
+ 获取自定义订阅详情
+ """
+ taskid = data.get("id")
+ return {"code": 0, "detail": RssChecker().get_rsstask_info(taskid=taskid)}
+
+ def __delete_userrss_task(self, data):
+ """
+ 删除自定义订阅
+ """
+ if self.dbhelper.delete_userrss_task(data.get("id")):
+ RssChecker().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ def __update_userrss_task(self, data):
+ """
+ 新增或修改自定义订阅
+ """
+ uses = data.get("uses")
+ params = {
+ "id": data.get("id"),
+ "name": data.get("name"),
+ "address": data.get("address"),
+ "parser": data.get("parser"),
+ "interval": data.get("interval"),
+ "uses": uses,
+ "include": data.get("include"),
+ "exclude": data.get("exclude"),
+ "filter_rule": data.get("rule"),
+ "state": data.get("state"),
+ "save_path": data.get("save_path"),
+ "download_setting": data.get("download_setting"),
+ }
+ if uses == "D":
+ params.update({
+ "recognization": data.get("recognization")
+ })
+ elif uses == "R":
+ params.update({
+ "over_edition": data.get("over_edition"),
+ "sites": data.get("sites"),
+ "filter_args": {
+ "restype": data.get("restype"),
+ "pix": data.get("pix"),
+ "team": data.get("team")
+ }
+ })
+ else:
+ return {"code": 1}
+ if self.dbhelper.update_userrss_task(params):
+ RssChecker().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ @staticmethod
+ def __get_rssparser(data):
+ """
+ 获取订阅解析器详情
+ """
+ pid = data.get("id")
+ return {"code": 0, "detail": RssChecker().get_userrss_parser(pid=pid)}
+
+ def __delete_rssparser(self, data):
+ """
+ 删除订阅解析器
+ """
+ if self.dbhelper.delete_userrss_parser(data.get("id")):
+ RssChecker().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ def __update_rssparser(self, data):
+ """
+ 新增或更新订阅解析器
+ """
+ params = {
+ "id": data.get("id"),
+ "name": data.get("name"),
+ "type": data.get("type"),
+ "format": data.get("format"),
+ "params": data.get("params")
+ }
+ if self.dbhelper.update_userrss_parser(params):
+ RssChecker().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ @staticmethod
+ def __run_userrss(data):
+ RssChecker().check_task_rss(data.get("id"))
+ return {"code": 0}
+
+ @staticmethod
+ def __run_brushtask(data):
+ BrushTask().check_task_rss(data.get("id"))
+ return {"code": 0}
+
+ @staticmethod
+ def __list_site_resources(data):
+ resources = Indexer().list_builtin_resources(index_id=data.get("id"),
+ page=data.get("page"),
+ keyword=data.get("keyword"))
+ if not resources:
+ return {"code": 1, "msg": "获取站点资源出现错误,无法连接到站点!"}
+ else:
+ return {"code": 0, "data": resources}
+
+ @staticmethod
+ def __list_rss_articles(data):
+ uses = RssChecker().get_rsstask_info(taskid=data.get("id")).get("uses")
+ articles = RssChecker().get_rss_articles(data.get("id"))
+ count = len(articles)
+ if articles:
+ return {"code": 0, "data": articles, "count": count, "uses": uses}
+ else:
+ return {"code": 1, "msg": "未获取到报文"}
+
+ def __rss_article_test(self, data):
+ taskid = data.get("taskid")
+ title = data.get("title")
+ if not taskid:
+ return {"code": -1}
+ if not title:
+ return {"code": -1}
+ media_info, match_flag, exist_flag = RssChecker(
+ ).test_rss_articles(taskid=taskid, title=title)
+ if not media_info:
+ return {"code": 0, "data": {"name": "无法识别"}}
+ media_dict = self.mediainfo_dict(media_info)
+ media_dict.update({"match_flag": match_flag, "exist_flag": exist_flag})
+ return {"code": 0, "data": media_dict}
+
+ def __list_rss_history(self, data):
+ downloads = []
+ historys = self.dbhelper.get_userrss_task_history(data.get("id"))
+ count = len(historys)
+ for history in historys:
+ params = {
+ "title": history.TITLE,
+ "downloader": history.DOWNLOADER,
+ "date": history.DATE
+ }
+ downloads.append(params)
+ if downloads:
+ return {"code": 0, "data": downloads, "count": count}
+ else:
+ return {"code": 1, "msg": "无下载记录"}
+
+ @staticmethod
+ def __rss_articles_check(data):
+ if not data.get("articles"):
+ return {"code": 2}
+ res = RssChecker().check_rss_articles(
+ flag=data.get("flag"), articles=data.get("articles"))
+ if res:
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ @staticmethod
+ def __rss_articles_download(data):
+ if not data.get("articles"):
+ return {"code": 2}
+ res = RssChecker().download_rss_articles(
+ taskid=data.get("taskid"), articles=data.get("articles"))
+ if res:
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ def __add_custom_word_group(self, data):
+ try:
+ tmdb_id = data.get("tmdb_id")
+ tmdb_type = data.get("tmdb_type")
+ if tmdb_type == "tv":
+ if not self.dbhelper.is_custom_word_group_existed(tmdbid=tmdb_id, gtype=2):
+ tmdb_info = Media().get_tmdb_info(mtype=MediaType.TV, tmdbid=tmdb_id)
+ if not tmdb_info:
+ return {"code": 1, "msg": "添加失败,无法查询到TMDB信息"}
+ self.dbhelper.insert_custom_word_groups(title=tmdb_info.get("name"),
+ year=tmdb_info.get(
+ "first_air_date")[0:4],
+ gtype=2,
+ tmdbid=tmdb_id,
+ season_count=tmdb_info.get("number_of_seasons"))
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词组(TMDB ID)已存在"}
+ elif tmdb_type == "movie":
+ if not self.dbhelper.is_custom_word_group_existed(tmdbid=tmdb_id, gtype=1):
+ tmdb_info = Media().get_tmdb_info(mtype=MediaType.MOVIE, tmdbid=tmdb_id)
+ if not tmdb_info:
+ return {"code": 1, "msg": "添加失败,无法查询到TMDB信息"}
+ self.dbhelper.insert_custom_word_groups(title=tmdb_info.get("title"),
+ year=tmdb_info.get(
+ "release_date")[0:4],
+ gtype=1,
+ tmdbid=tmdb_id,
+ season_count=0)
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词组(TMDB ID)已存在"}
+ else:
+ return {"code": 1, "msg": "无法识别媒体类型"}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ def __delete_custom_word_group(self, data):
+ try:
+ gid = data.get("gid")
+ self.dbhelper.delete_custom_word_group(gid=gid)
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ def __add_or_edit_custom_word(self, data):
+ try:
+ wid = data.get("id")
+ gid = data.get("gid")
+ group_type = data.get("group_type")
+ replaced = data.get("new_replaced")
+ replace = data.get("new_replace")
+ front = data.get("new_front")
+ back = data.get("new_back")
+ offset = data.get("new_offset")
+ whelp = data.get("new_help")
+ wtype = data.get("type")
+ season = data.get("season")
+ enabled = data.get("enabled")
+ regex = data.get("regex")
+ # 集数偏移格式检查
+ if wtype in ["3", "4"]:
+ if not re.findall(r'EP', offset):
+ return {"code": 1, "msg": "偏移集数格式有误"}
+ if re.findall(r'(?!-|\+|\*|/|[0-9]).', re.sub(r'EP', "", offset)):
+ return {"code": 1, "msg": "偏移集数格式有误"}
+ if wid:
+ self.dbhelper.delete_custom_word(wid=wid)
+ # 电影
+ if group_type == "1":
+ season = -2
+ # 屏蔽
+ if wtype == "1":
+ if not self.dbhelper.is_custom_words_existed(replaced=replaced):
+ self.dbhelper.insert_custom_word(replaced=replaced,
+ replace="",
+ front="",
+ back="",
+ offset="",
+ wtype=wtype,
+ gid=gid,
+ season=season,
+ enabled=enabled,
+ regex=regex,
+ whelp=whelp if whelp else "")
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
+ # 替换
+ elif wtype == "2":
+ if not self.dbhelper.is_custom_words_existed(replaced=replaced):
+ self.dbhelper.insert_custom_word(replaced=replaced,
+ replace=replace,
+ front="",
+ back="",
+ offset="",
+ wtype=wtype,
+ gid=gid,
+ season=season,
+ enabled=enabled,
+ regex=regex,
+ whelp=whelp if whelp else "")
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
+ # 集偏移
+ elif wtype == "4":
+ if not self.dbhelper.is_custom_words_existed(front=front, back=back):
+ self.dbhelper.insert_custom_word(replaced="",
+ replace="",
+ front=front,
+ back=back,
+ offset=offset,
+ wtype=wtype,
+ gid=gid,
+ season=season,
+ enabled=enabled,
+ regex=regex,
+ whelp=whelp if whelp else "")
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词已存在\n(前后定位词:%s@%s)" % (front, back)}
+ # 替换+集偏移
+ elif wtype == "3":
+ if not self.dbhelper.is_custom_words_existed(replaced=replaced):
+ self.dbhelper.insert_custom_word(replaced=replaced,
+ replace=replace,
+ front=front,
+ back=back,
+ offset=offset,
+ wtype=wtype,
+ gid=gid,
+ season=season,
+ enabled=enabled,
+ regex=regex,
+ whelp=whelp if whelp else "")
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ else:
+ return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
+ else:
+ return {"code": 1, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ def __get_custom_word(self, data):
+ try:
+ wid = data.get("wid")
+ word_info = self.dbhelper.get_custom_words(wid=wid)
+ if word_info:
+ word_info = word_info[0]
+ word = {"id": word_info.ID,
+ "replaced": word_info.REPLACED,
+ "replace": word_info.REPLACE,
+ "front": word_info.FRONT,
+ "back": word_info.BACK,
+ "offset": word_info.OFFSET,
+ "type": word_info.TYPE,
+ "group_id": word_info.GROUP_ID,
+ "season": word_info.SEASON,
+ "enabled": word_info.ENABLED,
+ "regex": word_info.REGEX,
+ "help": word_info.HELP, }
+ else:
+ word = {}
+ return {"code": 0, "data": word}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": "查询识别词失败"}
+
+ def __delete_custom_word(self, data):
+ try:
+ wid = data.get("id")
+ self.dbhelper.delete_custom_word(wid)
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ def __check_custom_words(self, data):
+ try:
+ flag_dict = {"enable": 1, "disable": 0}
+ ids_info = data.get("ids_info")
+ enabled = flag_dict.get(data.get("flag"))
+ ids = [id_info.split("_")[1] for id_info in ids_info]
+ for wid in ids:
+ self.dbhelper.check_custom_word(wid=wid, enabled=enabled)
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": "识别词状态设置失败"}
+
+ def __export_custom_words(self, data):
+ try:
+ note = data.get("note")
+ ids_info = data.get("ids_info").split("@")
+ group_ids = []
+ word_ids = []
+ for id_info in ids_info:
+ wid = id_info.split("_")
+ group_ids.append(wid[0])
+ word_ids.append(wid[1])
+ export_dict = {}
+ for group_id in group_ids:
+ if group_id == "-1":
+ export_dict["-1"] = {"id": -1,
+ "title": "通用",
+ "type": 1,
+ "words": {}, }
+ else:
+ group_info = self.dbhelper.get_custom_word_groups(
+ gid=group_id)
+ if group_info:
+ group_info = group_info[0]
+ export_dict[str(group_info.ID)] = {"id": group_info.ID,
+ "title": group_info.TITLE,
+ "year": group_info.YEAR,
+ "type": group_info.TYPE,
+ "tmdbid": group_info.TMDBID,
+ "season_count": group_info.SEASON_COUNT,
+ "words": {}, }
+ for word_id in word_ids:
+ word_info = self.dbhelper.get_custom_words(wid=word_id)
+ if word_info:
+ word_info = word_info[0]
+ export_dict[str(word_info.GROUP_ID)]["words"][str(word_info.ID)] = {"id": word_info.ID,
+ "replaced": word_info.REPLACED,
+ "replace": word_info.REPLACE,
+ "front": word_info.FRONT,
+ "back": word_info.BACK,
+ "offset": word_info.OFFSET,
+ "type": word_info.TYPE,
+ "season": word_info.SEASON,
+ "regex": word_info.REGEX,
+ "help": word_info.HELP, }
+ export_string = json.dumps(export_dict) + "@@@@@@" + str(note)
+ string = base64.b64encode(
+ export_string.encode("utf-8")).decode('utf-8')
+ return {"code": 0, "string": string}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ @staticmethod
+ def __analyse_import_custom_words_code(data):
+ try:
+ import_code = data.get('import_code')
+ string = base64.b64decode(import_code.encode(
+ "utf-8")).decode('utf-8').split("@@@@@@")
+ note_string = string[1]
+ import_dict = json.loads(string[0])
+ groups = []
+ for group in import_dict.values():
+ wid = group.get('id')
+ title = group.get("title")
+ year = group.get("year")
+ wtype = group.get("type")
+ tmdbid = group.get("tmdbid")
+ season_count = group.get("season_count") or ""
+ words = group.get("words")
+ if tmdbid:
+ link = "https://www.themoviedb.org/%s/%s" % (
+ "movie" if int(wtype) == 1 else "tv", tmdbid)
+ else:
+ link = ""
+ groups.append({"id": wid,
+ "name": "%s(%s)" % (title, year) if year else title,
+ "link": link,
+ "type": wtype,
+ "seasons": season_count,
+ "words": words})
+ return {"code": 0, "groups": groups, "note_string": note_string}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ def __import_custom_words(self, data):
+ try:
+ import_code = data.get('import_code')
+ ids_info = data.get('ids_info')
+ string = base64.b64decode(import_code.encode(
+ "utf-8")).decode('utf-8').split("@@@@@@")
+ import_dict = json.loads(string[0])
+ import_group_ids = [id_info.split("_")[0] for id_info in ids_info]
+ group_id_dict = {}
+ for import_group_id in import_group_ids:
+ import_group_info = import_dict.get(import_group_id)
+ if int(import_group_info.get("id")) == -1:
+ group_id_dict["-1"] = -1
+ continue
+ title = import_group_info.get("title")
+ year = import_group_info.get("year")
+ gtype = import_group_info.get("type")
+ tmdbid = import_group_info.get("tmdbid")
+ season_count = import_group_info.get("season_count")
+ if not self.dbhelper.is_custom_word_group_existed(tmdbid=tmdbid, gtype=gtype):
+ self.dbhelper.insert_custom_word_groups(title=title,
+ year=year,
+ gtype=gtype,
+ tmdbid=tmdbid,
+ season_count=season_count)
+ group_info = self.dbhelper.get_custom_word_groups(
+ tmdbid=tmdbid, gtype=gtype)
+ if group_info:
+ group_id_dict[import_group_id] = group_info[0].ID
+ for id_info in ids_info:
+ id_info = id_info.split('_')
+ import_group_id = id_info[0]
+ import_word_id = id_info[1]
+ import_word_info = import_dict.get(
+ import_group_id).get("words").get(import_word_id)
+ gid = group_id_dict.get(import_group_id)
+ replaced = import_word_info.get("replaced")
+ replace = import_word_info.get("replace")
+ front = import_word_info.get("front")
+ back = import_word_info.get("back")
+ offset = import_word_info.get("offset")
+ whelp = import_word_info.get("help")
+ wtype = int(import_word_info.get("type"))
+ season = import_word_info.get("season")
+ regex = import_word_info.get("regex")
+ # 屏蔽, 替换, 替换+集偏移
+ if wtype in [1, 2, 3]:
+ if self.dbhelper.is_custom_words_existed(replaced=replaced):
+ return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
+ # 集偏移
+ elif wtype == 4:
+ if self.dbhelper.is_custom_words_existed(front=front, back=back):
+ return {"code": 1, "msg": "识别词已存在\n(前后定位词:%s@%s)" % (front, back)}
+ self.dbhelper.insert_custom_word(replaced=replaced,
+ replace=replace,
+ front=front,
+ back=back,
+ offset=offset,
+ wtype=wtype,
+ gid=gid,
+ season=season,
+ enabled=1,
+ regex=regex,
+ whelp=whelp if whelp else "")
+ WordsHelper().init_config()
+ return {"code": 0, "msg": ""}
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": 1, "msg": str(e)}
+
+ @staticmethod
+ def __get_categories(data):
+ if data.get("type") == "电影":
+ categories = Category().get_movie_categorys()
+ elif data.get("type") == "电视剧":
+ categories = Category().get_tv_categorys()
+ else:
+ categories = Category().get_anime_categorys()
+ return {"code": 0, "category": list(categories), "id": data.get("id"), "value": data.get("value")}
+
+ def __delete_rss_history(self, data):
+ rssid = data.get("rssid")
+ self.dbhelper.delete_rss_history(rssid=rssid)
+ return {"code": 0}
+
+ def __re_rss_history(self, data):
+ rssid = data.get("rssid")
+ rtype = data.get("type")
+ rssinfo = self.dbhelper.get_rss_history(rtype=rtype, rid=rssid)
+ if rssinfo:
+ if rtype == "MOV":
+ mtype = MediaType.MOVIE
+ else:
+ mtype = MediaType.TV
+ if rssinfo[0].SEASON:
+ season = int(str(rssinfo[0].SEASON).replace("S", ""))
+ else:
+ season = None
+ code, msg, _ = Subscribe().add_rss_subscribe(mtype=mtype,
+ name=rssinfo[0].NAME,
+ year=rssinfo[0].YEAR,
+ season=season,
+ mediaid=rssinfo[0].TMDBID,
+ total_ep=rssinfo[0].TOTAL,
+ current_ep=rssinfo[0].START)
+ return {"code": code, "msg": msg}
+ else:
+ return {"code": 1, "msg": "订阅历史记录不存在"}
+
+ def __share_filtergroup(self, data):
+ gid = data.get("id")
+ group_info = self.dbhelper.get_config_filter_group(gid=gid)
+ if not group_info:
+ return {"code": 1, "msg": "规则组不存在"}
+ group_rules = self.dbhelper.get_config_filter_rule(groupid=gid)
+ if not group_rules:
+ return {"code": 1, "msg": "规则组没有对应规则"}
+ rules = []
+ for rule in group_rules:
+ rules.append({
+ "name": rule.ROLE_NAME,
+ "pri": rule.PRIORITY,
+ "include": rule.INCLUDE,
+ "exclude": rule.EXCLUDE,
+ "size": rule.SIZE_LIMIT,
+ "free": rule.NOTE
+ })
+ rule_json = {
+ "name": group_info[0].GROUP_NAME,
+ "rules": rules
+ }
+ json_string = base64.b64encode(json.dumps(
+ rule_json).encode("utf-8")).decode('utf-8')
+ return {"code": 0, "string": json_string}
+
+ def __import_filtergroup(self, data):
+ content = data.get("content")
+ try:
+ json_str = base64.b64decode(
+ str(content).encode("utf-8")).decode('utf-8')
+ json_obj = json.loads(json_str)
+ if json_obj:
+ if not json_obj.get("name"):
+ return {"code": 1, "msg": "数据格式不正确"}
+ self.dbhelper.add_filter_group(name=json_obj.get("name"))
+ group_id = self.dbhelper.get_filter_groupid_by_name(
+ json_obj.get("name"))
+ if not group_id:
+ return {"code": 1, "msg": "数据内容不正确"}
+ if json_obj.get("rules"):
+ for rule in json_obj.get("rules"):
+ self.dbhelper.insert_filter_rule(item={
+ "group": group_id,
+ "name": rule.get("name"),
+ "pri": rule.get("pri"),
+ "include": rule.get("include"),
+ "exclude": rule.get("exclude"),
+ "size": rule.get("size"),
+ "free": rule.get("free")
+ })
+ Filter().init_config()
+ return {"code": 0, "msg": ""}
+ except Exception as err:
+ ExceptionUtils.exception_traceback(err)
+ return {"code": 1, "msg": "数据格式不正确,%s" % str(err)}
+
+    @staticmethod
+    def get_library_spacesize(data=None):
+        """
+        Query media library storage space (movie/TV/anime directories) and
+        return formatted totals.  NOTE(review): the response keys use the
+        original "UsedSapce" spelling — kept as-is because the frontend
+        consumes these exact keys.
+        """
+        # Disk space
+        UsedPercent = 0
+        TotalSpaceList = []
+        media = Config().get_config('media')
+        if media:
+            # Movie directories
+            movie_paths = media.get('movie_path')
+            if not isinstance(movie_paths, list):
+                movie_paths = [movie_paths]
+            movie_used, movie_total = 0, 0
+            for movie_path in movie_paths:
+                if not movie_path:
+                    continue
+                used, total = SystemUtils.get_used_of_partition(movie_path)
+                # "used-total" string identifies a partition; skip duplicates so
+                # several paths on the same partition are only counted once
+                if "%s-%s" % (used, total) not in TotalSpaceList:
+                    TotalSpaceList.append("%s-%s" % (used, total))
+                    movie_used += used
+                    movie_total += total
+            # TV directories
+            tv_paths = media.get('tv_path')
+            if not isinstance(tv_paths, list):
+                tv_paths = [tv_paths]
+            tv_used, tv_total = 0, 0
+            for tv_path in tv_paths:
+                if not tv_path:
+                    continue
+                used, total = SystemUtils.get_used_of_partition(tv_path)
+                if "%s-%s" % (used, total) not in TotalSpaceList:
+                    TotalSpaceList.append("%s-%s" % (used, total))
+                    tv_used += used
+                    tv_total += total
+            # Anime directories
+            anime_paths = media.get('anime_path')
+            if not isinstance(anime_paths, list):
+                anime_paths = [anime_paths]
+            anime_used, anime_total = 0, 0
+            for anime_path in anime_paths:
+                if not anime_path:
+                    continue
+                used, total = SystemUtils.get_used_of_partition(anime_path)
+                if "%s-%s" % (used, total) not in TotalSpaceList:
+                    TotalSpaceList.append("%s-%s" % (used, total))
+                    anime_used += used
+                    anime_total += total
+            # Total space — equal totals are assumed to be the same partition
+            # and only counted once (a heuristic; distinct same-size partitions
+            # would be under-counted — TODO confirm acceptable)
+            TotalSpaceAry = []
+            if movie_total not in TotalSpaceAry:
+                TotalSpaceAry.append(movie_total)
+            if tv_total not in TotalSpaceAry:
+                TotalSpaceAry.append(tv_total)
+            if anime_total not in TotalSpaceAry:
+                TotalSpaceAry.append(anime_total)
+            TotalSpace = sum(TotalSpaceAry)
+            # Used space — same dedup-by-equal-value heuristic as above
+            UsedSapceAry = []
+            if movie_used not in UsedSapceAry:
+                UsedSapceAry.append(movie_used)
+            if tv_used not in UsedSapceAry:
+                UsedSapceAry.append(tv_used)
+            if anime_used not in UsedSapceAry:
+                UsedSapceAry.append(anime_used)
+            UsedSapce = sum(UsedSapceAry)
+            # Usage percentage (one decimal place)
+            if TotalSpace:
+                UsedPercent = "%0.1f" % ((UsedSapce / TotalSpace) * 100)
+            # Free space, formatted in TB
+            FreeSpace = "{:,} TB".format(
+                round((TotalSpace - UsedSapce) / 1024 / 1024 / 1024 / 1024, 2))
+            # Used space, formatted in TB
+            UsedSapce = "{:,} TB".format(
+                round(UsedSapce / 1024 / 1024 / 1024 / 1024, 2))
+            # Total space, formatted in TB
+            TotalSpace = "{:,} TB".format(
+                round(TotalSpace / 1024 / 1024 / 1024 / 1024, 2))
+
+        return {"code": 0,
+                "UsedPercent": UsedPercent,
+                "FreeSpace": FreeSpace,
+                "UsedSapce": UsedSapce,
+                "TotalSpace": TotalSpace}
+
+    def get_transfer_statistics(self, data=None):
+        """
+        Query transfer-history statistics and shape them for the dashboard
+        charts: one label/number series for movies, and a shared label axis
+        with separate TV/anime series.
+        NOTE(review): each statistic row appears to be (media type, label,
+        count) — label presumably a date; confirm against dbhelper.
+        """
+        MovieChartLabels = []
+        MovieNums = []
+        TvChartData = {}
+        TvNums = []
+        AnimeNums = []
+        for statistic in self.dbhelper.get_transfer_statistics():
+            if statistic[0] == "电影":
+                MovieChartLabels.append(statistic[1])
+                MovieNums.append(statistic[2])
+            else:
+                # TV and anime share one label axis; bucket by label first
+                if not TvChartData.get(statistic[1]):
+                    TvChartData[statistic[1]] = {"tv": 0, "anime": 0}
+                if statistic[0] == "电视剧":
+                    TvChartData[statistic[1]]["tv"] += statistic[2]
+                elif statistic[0] == "动漫":
+                    TvChartData[statistic[1]]["anime"] += statistic[2]
+        TvChartLabels = list(TvChartData)
+        for tv_data in TvChartData.values():
+            TvNums.append(tv_data.get("tv"))
+            AnimeNums.append(tv_data.get("anime"))
+
+        return {
+            "code": 0,
+            "MovieChartLabels": MovieChartLabels,
+            "MovieNums": MovieNums,
+            "TvChartLabels": TvChartLabels,
+            "TvNums": TvNums,
+            "AnimeNums": AnimeNums
+        }
+
+    @staticmethod
+    def get_library_mediacount(data=None):
+        """
+        Query media-library item counts (movies, series, episodes, music)
+        and user count from the configured media server.
+        Returns code -1 when the media server cannot be reached.
+        """
+        MediaServerClient = MediaServer()
+        media_counts = MediaServerClient.get_medias_count()
+        UserCount = MediaServerClient.get_user_count()
+        if media_counts:
+            return {
+                "code": 0,
+                "Movie": "{:,}".format(media_counts.get('MovieCount')),
+                "Series": "{:,}".format(media_counts.get('SeriesCount')),
+                "Episodes": "{:,}".format(media_counts.get('EpisodeCount')) if media_counts.get(
+                    'EpisodeCount') else "",
+                "Music": "{:,}".format(media_counts.get('SongCount')),
+                "User": UserCount
+            }
+        else:
+            return {"code": -1, "msg": "媒体库服务器连接失败"}
+
+ @staticmethod
+ def get_library_playhistory(data=None):
+ """
+ 查询媒体库播放记录
+ """
+ return {"code": 0, "result": MediaServer().get_activity_log(30)}
+
+    def get_search_result(self, data=None):
+        """
+        Return all stored search results, grouped for the frontend:
+        title -> season/episode key -> (resolution, source) group ->
+        unique torrent key -> torrent list, plus per-title filter facets
+        (site, promotion, video encode, season).
+        """
+        SearchResults = {}
+        res = self.dbhelper.get_search_results()
+        total = len(res)
+        for item in res:
+            # Quality (source, effect) and resolution, stored as JSON
+            if item.RES_TYPE:
+                try:
+                    res_mix = json.loads(item.RES_TYPE)
+                except Exception as err:
+                    ExceptionUtils.exception_traceback(err)
+                    continue
+                respix = res_mix.get("respix") or ""
+                video_encode = res_mix.get("video_encode") or ""
+                restype = res_mix.get("restype") or ""
+                reseffect = res_mix.get("reseffect") or ""
+            else:
+                restype = ""
+                respix = ""
+                reseffect = ""
+                video_encode = ""
+            # Group key (resolution, source), normalized for comparison
+            group_key = re.sub(r"[-.\s@|]", "", f"{respix}_{restype}").lower()
+            # Group display info
+            group_info = {
+                "respix": respix,
+                "restype": restype,
+            }
+            # Unique torrent key (size, quality, effect, release group)
+            unique_key = re.sub(r"[-.\s@|]", "",
+                                f"{respix}_{restype}_{video_encode}_{reseffect}_{item.SIZE}_{item.OTHERINFO}").lower()
+            # Unique-key display info
+            unique_info = {
+                "video_encode": video_encode,
+                "size": item.SIZE,
+                "reseffect": reseffect,
+                "releasegroup": item.OTHERINFO
+            }
+            # Display title, with year when available
+            title_string = f"{item.TITLE}"
+            if item.YEAR:
+                title_string = f"{title_string} ({item.YEAR})"
+            # Season/episode key; movies collapse to the sentinel "MOV"
+            mtype = item.TYPE or ""
+            SE_key = item.ES_STRING if item.ES_STRING and mtype != "MOV" else "MOV"
+            media_type = {"MOV": "电影", "TV": "电视剧", "ANI": "动漫"}.get(mtype)
+            # Torrent detail record
+            torrent_item = {
+                "id": item.ID,
+                "seeders": item.SEEDERS,
+                "enclosure": item.ENCLOSURE,
+                "site": item.SITE,
+                "torrent_name": item.TORRENT_NAME,
+                "description": item.DESCRIPTION,
+                "pageurl": item.PAGEURL,
+                "uploadvalue": item.UPLOAD_VOLUME_FACTOR,
+                "downloadvalue": item.DOWNLOAD_VOLUME_FACTOR,
+                "size": item.SIZE,
+                "respix": respix,
+                "restype": restype,
+                "reseffect": reseffect,
+                "releasegroup": item.OTHERINFO,
+                "video_encode": video_encode
+            }
+            # Promotion (free-leech) facet entry
+            free_item = {
+                "value": f"{item.UPLOAD_VOLUME_FACTOR} {item.DOWNLOAD_VOLUME_FACTOR}",
+                "name": MetaBase.get_free_string(item.UPLOAD_VOLUME_FACTOR, item.DOWNLOAD_VOLUME_FACTOR)
+            }
+            # Season facet (first token of the SE string)
+            filter_season = SE_key.split()[0] if SE_key and SE_key not in [
+                "MOV", "TV"] else None
+            # Merge this row into the accumulated results
+            if SearchResults.get(title_string):
+                # Title already seen: descend SE key -> group -> unique key
+                result_item = SearchResults[title_string]
+                torrent_dict = SearchResults[title_string].get("torrent_dict")
+                SE_dict = torrent_dict.get(SE_key)
+                if SE_dict:
+                    group = SE_dict.get(group_key)
+                    if group:
+                        unique = group.get("group_torrents").get(unique_key)
+                        if unique:
+                            unique["torrent_list"].append(torrent_item)
+                            group["group_total"] += 1
+                        else:
+                            group["group_total"] += 1
+                            group.get("group_torrents")[unique_key] = {
+                                "unique_info": unique_info,
+                                "torrent_list": [torrent_item]
+                            }
+                    else:
+                        SE_dict[group_key] = {
+                            "group_info": group_info,
+                            "group_total": 1,
+                            "group_torrents": {
+                                unique_key: {
+                                    "unique_info": unique_info,
+                                    "torrent_list": [torrent_item]
+                                }
+                            }
+                        }
+                else:
+                    torrent_dict[SE_key] = {
+                        group_key: {
+                            "group_info": group_info,
+                            "group_total": 1,
+                            "group_torrents": {
+                                unique_key: {
+                                    "unique_info": unique_info,
+                                    "torrent_list": [torrent_item]
+                                }
+                            }
+                        }
+                    }
+                # Update the per-title filter facets
+                torrent_filter = dict(result_item.get("filter"))
+                if free_item not in torrent_filter.get("free"):
+                    torrent_filter["free"].append(free_item)
+                if item.SITE not in torrent_filter.get("site"):
+                    torrent_filter["site"].append(item.SITE)
+                if video_encode \
+                        and video_encode not in torrent_filter.get("video"):
+                    torrent_filter["video"].append(video_encode)
+                if filter_season \
+                        and filter_season not in torrent_filter.get("season"):
+                    torrent_filter["season"].append(filter_season)
+            else:
+                # First occurrence of this title: check media-server existence
+                if item.TMDBID:
+                    exist_flag = MediaServer().check_item_exists(
+                        title=item.TITLE, year=item.YEAR, tmdbid=item.TMDBID)
+                else:
+                    exist_flag = False
+                SearchResults[title_string] = {
+                    "key": item.ID,
+                    "title": item.TITLE,
+                    "year": item.YEAR,
+                    "type_key": mtype,
+                    "image": item.IMAGE,
+                    "type": media_type,
+                    "vote": item.VOTE,
+                    "tmdbid": item.TMDBID,
+                    "backdrop": item.IMAGE,
+                    "poster": item.POSTER,
+                    "overview": item.OVERVIEW,
+                    "exist": exist_flag,
+                    "torrent_dict": {
+                        SE_key: {
+                            group_key: {
+                                "group_info": group_info,
+                                "group_total": 1,
+                                "group_torrents": {
+                                    unique_key: {
+                                        "unique_info": unique_info,
+                                        "torrent_list": [torrent_item]
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "filter": {
+                        "site": [item.SITE],
+                        "free": [free_item],
+                        "video": [video_encode] if video_encode else [],
+                        "season": [filter_season] if filter_season else []
+                    }
+                }
+
+        # Sort key: promote whole-season entries above single episodes
+        def se_sort(k):
+            k = re.sub(r" +|(?<=s\d)\D*?(?=e)|(?<=s\d\d)\D*?(?=e)",
+                       " ", k[0], flags=re.I).split()
+            # entries without an episode part sort after ("Z" prefix sentinel)
+            return (k[0], k[1]) if len(k) > 1 else ("Z" + k[0], "ZZZ")
+
+        # Order seasons/episodes within each title
+        for title, item in SearchResults.items():
+            # Sort the season filter facet, newest season first
+            item["filter"]["season"].sort(reverse=True)
+            # Sort the torrent dict by season/episode key
+            item["torrent_dict"] = sorted(item["torrent_dict"].items(),
+                                          key=se_sort,
+                                          reverse=True)
+        return {"code": 0, "total": total, "result": SearchResults}
+
+    @staticmethod
+    def search_media_infos(data):
+        """
+        Search media entries similar to a keyword via WebUtils.
+        NOTE(review): returns a bare list ([]) when no keyword is given but a
+        {"code", "result"} dict otherwise — inconsistent return shape; verify
+        callers before unifying.
+        """
+        SearchWord = data.get("keyword")
+        if not SearchWord:
+            return []
+        SearchSourceType = data.get("searchtype")
+        medias = WebUtils.search_media_infos(keyword=SearchWord,
+                                             source=SearchSourceType)
+
+        return {"code": 0, "result": [media.to_dict() for media in medias]}
+
+ @staticmethod
+ def get_movie_rss_list(data=None):
+ """
+ 查询所有电影订阅
+ """
+ return {"code": 0, "result": Subscribe().get_subscribe_movies()}
+
+ @staticmethod
+ def get_tv_rss_list(data=None):
+ """
+ 查询所有电视剧订阅
+ """
+ return {"code": 0, "result": Subscribe().get_subscribe_tvs()}
+
+ def get_rss_history(self, data):
+ """
+ 查询所有订阅历史
+ """
+ mtype = data.get("type")
+ return {"code": 0, "result": [rec.as_dict() for rec in self.dbhelper.get_rss_history(rtype=mtype)]}
+
+    @staticmethod
+    def get_downloading(data=None):
+        """
+        List active download tasks, enriching each with a recognized display
+        title and poster image when the torrent name can be identified.
+        """
+        torrents = Downloader().get_downloading_progress()
+        MediaHander = Media()
+        for torrent in torrents:
+            # Identify the media from the torrent name
+            name = torrent.get("name")
+            media_info = MediaHander.get_media_info(title=name)
+            if not media_info:
+                # unidentified: fall back to the raw torrent name, no image
+                torrent.update({
+                    "title": name,
+                    "image": ""
+                })
+                continue
+            if not media_info.tmdb_info:
+                # parsed but no TMDB match: compose title from parsed fields
+                year = media_info.year
+                if year:
+                    title = "%s (%s) %s" % (media_info.get_name(),
+                                            year, media_info.get_season_episode_string())
+                else:
+                    title = "%s %s" % (media_info.get_name(),
+                                       media_info.get_season_episode_string())
+            else:
+                title = "%s %s" % (media_info.get_title_string(
+                ), media_info.get_season_episode_string())
+            poster_path = media_info.get_poster_image()
+            torrent.update({
+                "title": title,
+                "image": poster_path or ""
+            })
+        return {"code": 0, "result": torrents}
+
+ def get_transfer_history(self, data):
+ """
+ 查询媒体整理历史记录
+ """
+ PageNum = data.get("pagenum")
+ if not PageNum:
+ PageNum = 30
+ SearchStr = data.get("keyword")
+ CurrentPage = data.get("page")
+ if not CurrentPage:
+ CurrentPage = 1
+ else:
+ CurrentPage = int(CurrentPage)
+ totalCount, historys = self.dbhelper.get_transfer_history(
+ SearchStr, CurrentPage, PageNum)
+ historys_list = []
+ for history in historys:
+ history = history.as_dict()
+ sync_mode = history.get("MODE")
+ rmt_mode = ModuleConf.get_dictenum_key(
+ ModuleConf.RMT_MODES, sync_mode) if sync_mode else ""
+ history.update({
+ "SYNC_MODE": sync_mode,
+ "RMT_MODE": rmt_mode
+ })
+ historys_list.append(history)
+ TotalPage = floor(totalCount / PageNum) + 1
+
+ return {
+ "code": 0,
+ "total": totalCount,
+ "result": historys_list,
+ "totalPage": TotalPage,
+ "pageNum": PageNum,
+ "currentPage": CurrentPage
+ }
+
+    def get_unknown_list(self, data=None):
+        """
+        List all unidentified transfer records (paths that could not be
+        recognized), normalizing path separators for display.
+        """
+        Items = []
+        Records = self.dbhelper.get_transfer_unknown_paths()
+        for rec in Records:
+            if not rec.PATH:
+                continue
+            path = rec.PATH.replace("\\", "/") if rec.PATH else ""
+            path_to = rec.DEST.replace("\\", "/") if rec.DEST else ""
+            sync_mode = rec.MODE or ""
+            rmt_mode = ModuleConf.get_dictenum_key(ModuleConf.RMT_MODES,
+                                                   sync_mode) if sync_mode else ""
+            Items.append({
+                "id": rec.ID,
+                "path": path,
+                "to": path_to,
+                "name": path,
+                "sync_mode": sync_mode,
+                "rmt_mode": rmt_mode,
+            })
+
+        return {"code": 0, "items": Items}
+
+ def unidentification(self):
+ """
+ 重新识别所有未识别记录
+ """
+ ItemIds = []
+ Records = self.dbhelper.get_transfer_unknown_paths()
+ for rec in Records:
+ if not rec.PATH:
+ continue
+ ItemIds.append(rec.ID)
+
+ if len(ItemIds) > 0:
+ WebAction.re_identification(self, {"flag": "unidentification", "ids": ItemIds})
+
+    def get_customwords(self, data=None):
+        """
+        List all custom identification words, grouped by word group.
+        Group id -1 is the built-in "generic" group; the remaining groups are
+        tied to a TMDB movie (type 1) or TV entry and link to its TMDB page.
+        """
+        words = []
+        # gid=-1 fetches the generic (ungrouped) words
+        words_info = self.dbhelper.get_custom_words(gid=-1)
+        for word_info in words_info:
+            words.append({"id": word_info.ID,
+                          "replaced": word_info.REPLACED,
+                          "replace": word_info.REPLACE,
+                          "front": word_info.FRONT,
+                          "back": word_info.BACK,
+                          "offset": word_info.OFFSET,
+                          "type": word_info.TYPE,
+                          "group_id": word_info.GROUP_ID,
+                          "season": word_info.SEASON,
+                          "enabled": word_info.ENABLED,
+                          "regex": word_info.REGEX,
+                          "help": word_info.HELP, })
+        groups = [{"id": "-1",
+                   "name": "通用",
+                   "link": "",
+                   "type": "1",
+                   "seasons": "0",
+                   "words": words}]
+        groups_info = self.dbhelper.get_custom_word_groups()
+        for group_info in groups_info:
+            gid = group_info.ID
+            name = "%s (%s)" % (group_info.TITLE, group_info.YEAR)
+            gtype = group_info.TYPE
+            # type 1 = movie, otherwise TV
+            if gtype == 1:
+                link = "https://www.themoviedb.org/movie/%s" % group_info.TMDBID
+            else:
+                link = "https://www.themoviedb.org/tv/%s" % group_info.TMDBID
+            words = []
+            words_info = self.dbhelper.get_custom_words(gid=gid)
+            for word_info in words_info:
+                words.append({"id": word_info.ID,
+                              "replaced": word_info.REPLACED,
+                              "replace": word_info.REPLACE,
+                              "front": word_info.FRONT,
+                              "back": word_info.BACK,
+                              "offset": word_info.OFFSET,
+                              "type": word_info.TYPE,
+                              "group_id": word_info.GROUP_ID,
+                              "season": word_info.SEASON,
+                              "enabled": word_info.ENABLED,
+                              "regex": word_info.REGEX,
+                              "help": word_info.HELP, })
+            groups.append({"id": gid,
+                           "name": name,
+                           "link": link,
+                           "type": group_info.TYPE,
+                           "seasons": group_info.SEASON_COUNT,
+                           "words": words})
+        return {
+            "code": 0,
+            "result": groups
+        }
+
+    def get_directorysync(self, data=None):
+        """
+        List all configured sync directories, sorted by source path.
+        """
+        sync_paths = self.dbhelper.get_config_sync_paths()
+        SyncPaths = []
+        if sync_paths:
+            for sync_item in sync_paths:
+                SyncPath = {'id': sync_item.ID,
+                            'from': sync_item.SOURCE,
+                            'to': sync_item.DEST or "",
+                            'unknown': sync_item.UNKNOWN or "",
+                            'syncmod': sync_item.MODE,
+                            # human-readable transfer mode name from the enum
+                            'syncmod_name': RmtMode[sync_item.MODE.upper()].value,
+                            'rename': sync_item.RENAME,
+                            'enabled': sync_item.ENABLED}
+                SyncPaths.append(SyncPath)
+        SyncPaths = sorted(SyncPaths, key=lambda o: o.get("from"))
+        return {"code": 0, "result": SyncPaths}
+
+ def get_users(self, data=None):
+ """
+ 查询所有用户
+ """
+ user_list = self.dbhelper.get_users()
+ Users = []
+ for user in user_list:
+ pris = str(user.PRIS).split(",")
+ Users.append({"id": user.ID, "name": user.NAME, "pris": pris})
+ return {"code": 0, "result": Users}
+
+    @staticmethod
+    def get_filterrules(data=None):
+        """
+        List all configured filter rule groups plus the built-in presets
+        parsed from init_filter.sql.
+        NOTE(review): the preset parsing assumes the SQL file strictly
+        alternates group-INSERT / rule-INSERT statements separated by ';\\n'
+        and relies on a regex over the VALUES clause — brittle against any
+        reformatting of that file.
+        """
+        RuleGroups = Filter().get_rule_infos()
+        sql_file = os.path.join(Config().get_script_path(), "init_filter.sql")
+        with open(sql_file, "r", encoding="utf-8") as f:
+            sql_list = f.read().split(';\n')
+        Init_RuleGroups = []
+        i = 0
+        while i < len(sql_list):
+            rulegroup = {}
+            # first statement of the pair: the group row (id, 'name', ... NULL)
+            rulegroup_info = re.findall(
+                r"[0-9]+,'[^\"]+NULL", sql_list[i], re.I)[0].split(",")
+            rulegroup['id'] = int(rulegroup_info[0])
+            rulegroup['name'] = rulegroup_info[1][1:-1]
+            rulegroup['rules'] = []
+            rulegroup['sql'] = [sql_list[i]]
+            if i + 1 < len(sql_list):
+                # second statement of the pair: the rule rows for this group
+                rules = re.findall(
+                    r"[0-9]+,'[^\"]+NULL", sql_list[i + 1], re.I)[0].split("),\n (")
+                for rule in rules:
+                    rule_info = {}
+                    rule = rule.split(",")
+                    rule_info['name'] = rule[2][1:-1]
+                    rule_info['include'] = rule[4][1:-1]
+                    rule_info['exclude'] = rule[5][1:-1]
+                    rulegroup['rules'].append(rule_info)
+                rulegroup["sql"].append(sql_list[i + 1])
+            Init_RuleGroups.append(rulegroup)
+            i = i + 2
+        return {
+            "code": 0,
+            "ruleGroups": RuleGroups,
+            "initRules": Init_RuleGroups
+        }
+
+    def __update_directory(self, data):
+        """
+        Maintain (add/delete/replace) a media-library directory entry in the
+        configuration and persist the result.
+        """
+        cfg = self.set_config_directory(Config().get_config(),
+                                        data.get("oper"),
+                                        data.get("key"),
+                                        data.get("value"),
+                                        data.get("replace_value"))
+        # Persist the updated configuration
+        Config().save_config(cfg)
+        return {"code": 0}
+
+ @staticmethod
+ def __test_site(data):
+ """
+ 测试站点连通性
+ """
+ flag, msg, times = Sites().test_connection(data.get("id"))
+ code = 0 if flag else -1
+ return {"code": code, "msg": msg, "time": times}
+
+    @staticmethod
+    def __get_sub_path(data):
+        """
+        List sub-directories/files of a directory for the path picker.
+        data["filter"] selects the entry types: ALL, ONLYDIR, ONLYFILE,
+        MEDIAFILE (media extensions) or SUBFILE (subtitle extensions).
+        An empty or "/" dir lists drive roots on Windows or "/" on POSIX.
+        """
+        r = []
+        try:
+            ft = data.get("filter") or "ALL"
+            d = data.get("dir")
+            if not d or d == "/":
+                if SystemUtils.get_system() == OsType.WINDOWS:
+                    partitions = SystemUtils.get_windows_drives()
+                    if partitions:
+                        dirs = [os.path.join(partition, "/")
+                                for partition in partitions]
+                    else:
+                        # drive enumeration failed: fall back to C:/
+                        dirs = [os.path.join("C:/", f)
+                                for f in os.listdir("C:/")]
+                else:
+                    dirs = [os.path.join("/", f) for f in os.listdir("/")]
+            else:
+                # the dir value arrives URL-encoded from the frontend
+                d = os.path.normpath(unquote(d))
+                if not os.path.isdir(d):
+                    d = os.path.dirname(d)
+                dirs = [os.path.join(d, f) for f in os.listdir(d)]
+            dirs.sort()
+            for ff in dirs:
+                if os.path.isdir(ff):
+                    if 'ONLYDIR' in ft or 'ALL' in ft:
+                        r.append({
+                            "path": ff.replace("\\", "/"),
+                            "name": os.path.basename(ff),
+                            "type": "dir",
+                            "rel": os.path.dirname(ff).replace("\\", "/")
+                        })
+                else:
+                    ext = os.path.splitext(ff)[-1][1:]
+                    flag = False
+                    if 'ONLYFILE' in ft or 'ALL' in ft:
+                        flag = True
+                    elif "MEDIAFILE" in ft and f".{str(ext).lower()}" in RMT_MEDIAEXT:
+                        flag = True
+                    elif "SUBFILE" in ft and f".{str(ext).lower()}" in RMT_SUBEXT:
+                        flag = True
+                    if flag:
+                        r.append({
+                            "path": ff.replace("\\", "/"),
+                            "name": os.path.basename(ff),
+                            "type": "file",
+                            "rel": os.path.dirname(ff).replace("\\", "/"),
+                            "ext": ext,
+                            "size": StringUtils.str_filesize(os.path.getsize(ff))
+                        })
+
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            return {
+                "code": -1,
+                "message": '加载路径失败: %s' % str(e)
+            }
+        return {
+            "code": 0,
+            "count": len(r),
+            "data": r
+        }
+
+ @staticmethod
+ def __rename_file(data):
+ """
+ 文件重命名
+ """
+ path = data.get("path")
+ name = data.get("name")
+ if path and name:
+ try:
+ shutil.move(path, os.path.join(os.path.dirname(path), name))
+ except Exception as e:
+ ExceptionUtils.exception_traceback(e)
+ return {"code": -1, "msg": str(e)}
+ return {"code": 0}
+
+    def __delete_files(self, data):
+        """
+        Delete a list of media files; each deletion outcome is logged and
+        failures do not abort the remaining deletions.
+        """
+        files = data.get("files")
+        if files:
+            # Delete each file independently
+            for file in files:
+                del_flag, del_msg = self.delete_media_file(filedir=os.path.dirname(file),
+                                                           filename=os.path.basename(file))
+                if not del_flag:
+                    log.error(f"【MediaFile】{del_msg}")
+                else:
+                    log.info(f"【MediaFile】{del_msg}")
+        return {"code": 0}
+
+    @staticmethod
+    def __download_subtitle(data):
+        """
+        Download a subtitle for a single media file from the configured
+        subtitle service, after identifying the file via TMDB.
+        """
+        path = data.get("path")
+        name = data.get("name")
+        media = Media().get_media_info(title=name)
+        if not media or not media.tmdb_info:
+            return {"code": -1, "msg": f"{name} 无法从TMDB查询到媒体信息"}
+        if not media.imdb_id:
+            # some subtitle providers need the IMDB id; refresh TMDB info to get it
+            media.set_tmdb_info(Media().get_tmdb_info(mtype=media.type,
+                                                      tmdbid=media.tmdb_id))
+        subtitle_item = [{"type": media.type,
+                          "file": os.path.splitext(path)[0],
+                          "file_ext": os.path.splitext(name)[-1],
+                          "name": media.en_name if media.en_name else media.cn_name,
+                          "title": media.title,
+                          "year": media.year,
+                          "season": media.begin_season,
+                          "episode": media.begin_episode,
+                          "bluray": False,
+                          "imdbid": media.imdb_id}]
+        success, retmsg = Subtitle().download_subtitle(items=subtitle_item)
+        if success:
+            return {"code": 0, "msg": retmsg}
+        else:
+            return {"code": -1, "msg": retmsg}
+
+ @staticmethod
+ def __get_download_setting(data):
+ sid = data.get("sid")
+ if sid:
+ download_setting = Downloader().get_download_setting(sid=sid)
+ else:
+ download_setting = list(
+ Downloader().get_download_setting().values())
+ return {"code": 0, "data": download_setting}
+
+ def __update_download_setting(self, data):
+ sid = data.get("sid")
+ name = data.get("name")
+ category = data.get("category")
+ tags = data.get("tags")
+ content_layout = data.get("content_layout")
+ is_paused = data.get("is_paused")
+ upload_limit = data.get("upload_limit")
+ download_limit = data.get("download_limit")
+ ratio_limit = data.get("ratio_limit")
+ seeding_time_limit = data.get("seeding_time_limit")
+ downloader = data.get("downloader")
+ self.dbhelper.update_download_setting(sid=sid,
+ name=name,
+ category=category,
+ tags=tags,
+ content_layout=content_layout,
+ is_paused=is_paused,
+ upload_limit=upload_limit or 0,
+ download_limit=download_limit or 0,
+ ratio_limit=ratio_limit or 0,
+ seeding_time_limit=seeding_time_limit or 0,
+ downloader=downloader)
+ Downloader().init_config()
+ return {"code": 0}
+
+ def __delete_download_setting(self, data):
+ sid = data.get("sid")
+ self.dbhelper.delete_download_setting(sid=sid)
+ Downloader().init_config()
+ return {"code": 0}
+
+ def __update_message_client(self, data):
+ """
+ 更新消息设置
+ """
+ name = data.get("name")
+ cid = data.get("cid")
+ ctype = data.get("type")
+ config = data.get("config")
+ switchs = data.get("switchs")
+ interactive = data.get("interactive")
+ enabled = data.get("enabled")
+ if cid:
+ self.dbhelper.delete_message_client(cid=cid)
+ self.dbhelper.insert_message_client(name=name,
+ ctype=ctype,
+ config=config,
+ switchs=switchs,
+ interactive=interactive,
+ enabled=enabled)
+ Message().init_config()
+ return {"code": 0}
+
+ def __delete_message_client(self, data):
+ """
+ 删除消息设置
+ """
+ if self.dbhelper.delete_message_client(cid=data.get("cid")):
+ Message().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+    def __check_message_client(self, data):
+        """
+        Toggle a message client flag: "interactive" (exclusive — only one
+        client may be interactive at a time) or "enable".
+        Returns code 1 for an unknown flag.
+        """
+        flag = data.get("flag")
+        cid = data.get("cid")
+        ctype = data.get("type")
+        checked = data.get("checked")
+        if flag == "interactive":
+            # Only one client of this type may have interactive mode on
+            # (e.g. Telegram/WeChat); clear the flag on all others first
+            if checked:
+                self.dbhelper.check_message_client(interactive=0, ctype=ctype)
+            self.dbhelper.check_message_client(cid=cid,
+                                               interactive=1 if checked else 0)
+            Message().init_config()
+            return {"code": 0}
+        elif flag == "enable":
+            self.dbhelper.check_message_client(cid=cid,
+                                               enabled=1 if checked else 0)
+            Message().init_config()
+            return {"code": 0}
+        else:
+            return {"code": 1}
+
+ @staticmethod
+ def __get_message_client(data):
+ """
+ 获取消息设置
+ """
+ cid = data.get("cid")
+ return {"code": 0, "detail": Message().get_message_client_info(cid=cid)}
+
+ @staticmethod
+ def __test_message_client(data):
+ """
+ 测试消息设置
+ """
+ ctype = data.get("type")
+ config = json.loads(data.get("config"))
+ res = Message().get_status(ctype=ctype, config=config)
+ if res:
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+ @staticmethod
+ def __get_indexers(data=None):
+ """
+ 获取索引器
+ """
+ return {"code": 0, "indexers": Indexer().get_indexer_dict()}
+
+    @staticmethod
+    def __get_download_dirs(data):
+        """
+        Resolve the download directories for a download setting, either by
+        setting id ("sid") or via a site name's associated setting.
+        """
+        sid = data.get("sid")
+        site = data.get("site")
+        if not sid and site:
+            # no explicit setting: look it up from the site configuration
+            sid = Sites().get_site_download_setting(site_name=site)
+        dirs = Downloader().get_download_dirs(setting=sid)
+        return {"code": 0, "paths": dirs}
+
+    @staticmethod
+    def __find_hardlinks(data):
+        """
+        Find hardlinks of the given files under a search directory.
+        When no directory is given (non-Windows), the first path component
+        under the files' common root is used.
+        NOTE(review): returns a bare list ([]) on the early-exit paths but a
+        {"code", ...} dict otherwise — inconsistent return shape; verify
+        callers before unifying.
+        """
+        files = data.get("files")
+        file_dir = data.get("dir")
+        if not files:
+            return []
+        if not file_dir and os.name != "nt":
+            # Use the first directory level under the common root as scan scope
+            file_dir = os.path.commonpath(files).replace("\\", "/")
+            if file_dir != "/":
+                file_dir = "/" + str(file_dir).split("/")[1]
+            else:
+                return []
+        hardlinks = {}
+        if files:
+            try:
+                for file in files:
+                    hardlinks[os.path.basename(file)] = SystemUtils(
+                    ).find_hardlinks(file=file, fdir=file_dir)
+            except Exception as e:
+                ExceptionUtils.exception_traceback(e)
+                return {"code": 1}
+        return {"code": 0, "data": hardlinks}
+
+    @staticmethod
+    def __update_sites_cookie_ua(data):
+        """
+        Refresh the Cookie and User-Agent of one site (or all sites) by
+        logging in with the supplied credentials; persists the credentials
+        for later reuse and reloads site config on success.
+        """
+        siteid = data.get("siteid")
+        username = data.get("username")
+        password = data.get("password")
+        twostepcode = data.get("two_step_code")
+        ocrflag = data.get("ocrflag")
+        # Persist the login credentials for subsequent refreshes
+        SystemConfig().set_system_config(key="CookieUserInfo",
+                                         value={
+                                             "username": username,
+                                             "password": password,
+                                             "two_step_code": twostepcode
+                                         })
+        retcode, messages = SiteCookie().update_sites_cookie_ua(siteid=siteid,
+                                                                username=username,
+                                                                password=password,
+                                                                twostepcode=twostepcode,
+                                                                ocrflag=ocrflag)
+        if retcode == 0:
+            # reload site configuration to pick up the new cookies
+            Sites().init_config()
+        return {"code": retcode, "messages": messages}
+
+ @staticmethod
+ def __set_site_captcha_code(data):
+ """
+ 设置站点验证码
+ """
+ code = data.get("code")
+ value = data.get("value")
+ SiteCookie().set_code(code=code, value=value)
+ return {"code": 0}
+
+ @staticmethod
+ def __update_torrent_remove_task(data):
+ """
+ 更新自动删种任务
+ """
+ flag, msg = TorrentRemover().update_torrent_remove_task(data=data)
+ if not flag:
+ return {"code": 1, "msg": msg}
+ else:
+ TorrentRemover().init_config()
+ return {"code": 0}
+
+ @staticmethod
+ def __get_torrent_remove_task(data=None):
+ """
+ 获取自动删种任务
+ """
+ if data:
+ tid = data.get("tid")
+ else:
+ tid = None
+ return {"code": 0, "detail": TorrentRemover().get_torrent_remove_tasks(taskid=tid)}
+
+ @staticmethod
+ def __delete_torrent_remove_task(data):
+ """
+ 删除自动删种任务
+ """
+ tid = data.get("tid")
+ flag = TorrentRemover().delete_torrent_remove_task(taskid=tid)
+ if flag:
+ TorrentRemover().init_config()
+ return {"code": 0}
+ else:
+ return {"code": 1}
+
+    @staticmethod
+    def __get_remove_torrents(data):
+        """
+        Preview the torrents that currently match an auto-removal task.
+        """
+        tid = data.get("tid")
+        flag, torrents = TorrentRemover().get_remove_torrents(taskid=tid)
+        if not flag or not torrents:
+            return {"code": 1, "msg": "未获取到符合处理条件种子"}
+        return {"code": 0, "data": torrents}
+
+ @staticmethod
+ def __auto_remove_torrents(data):
+ """
+ 执行自动删种任务
+ """
+ tid = data.get("tid")
+ TorrentRemover().auto_remove_torrents(taskids=tid)
+ return {"code": 0}
+
+ @staticmethod
+ def __get_site_favicon(data):
+ """
+ 获取站点图标
+ """
+ sitename = data.get("name")
+ return {"code": 0, "icon": Sites().get_site_favicon(site_name=sitename)}
+
+ def get_douban_history(self, data=None):
+ """
+ 查询豆瓣同步历史
+ """
+ results = self.dbhelper.get_douban_history()
+ return {"code": 0, "result": [item.as_dict() for item in results]}
+
+    def __delete_douban_history(self, data):
+        """
+        Delete one Douban synchronization history record by id.
+        """
+        self.dbhelper.delete_douban_history(data.get("id"))
+        return {"code": 0}
+
+    def __list_brushtask_torrents(self, data):
+        """
+        List the torrent details of a seeding (brush) task, including
+        inactive torrents.
+        """
+        results = self.dbhelper.get_brushtask_torrents(brush_id=data.get("id"),
+                                                       active=False)
+        if not results:
+            return {"code": 1, "msg": "未下载种子或未获取到种子明细"}
+        return {"code": 0, "data": [item.as_dict() for item in results]}
+
+    @staticmethod
+    def __set_system_config(data):
+        """
+        Persist a system configuration key/value pair in the database;
+        reloads the speed limiter when its key changes.
+        NOTE(review): `not value` also rejects legitimate falsy values
+        (0, False, "") — confirm whether callers ever need those.
+        """
+        key = data.get("key")
+        value = data.get("value")
+        if not key or not value:
+            return {"code": 1}
+        try:
+            SystemConfig().set_system_config(key=key, value=value)
+            if key == "SpeedLimit":
+                SpeedLimiter().init_config()
+            return {"code": 0}
+        except Exception as e:
+            ExceptionUtils.exception_traceback(e)
+            return {"code": 1}
+
+    @staticmethod
+    def get_site_user_statistics(data):
+        """
+        Fetch per-site user statistics, with optional sorting (sort_by field,
+        sort_on asc/desc) and optional MD5 hashing of site names when
+        site_hash == "Y" (used to anonymize site identifiers).
+        """
+        sites = data.get("sites")
+        encoding = data.get("encoding") or "RAW"
+        sort_by = data.get("sort_by")
+        sort_on = data.get("sort_on")
+        site_hash = data.get("site_hash")
+        statistics = SiteUserInfo().get_site_user_statistics(sites=sites, encoding=encoding)
+        if sort_by and sort_on in ["asc", "desc"]:
+            if sort_on == "asc":
+                statistics.sort(key=lambda x: x[sort_by])
+            else:
+                statistics.sort(key=lambda x: x[sort_by], reverse=True)
+        if site_hash == "Y":
+            for item in statistics:
+                item["site_hash"] = StringUtils.md5_hash(item.get("site"))
+        return {"code": 0, "data": statistics}
+
+ @staticmethod
+ def send_custom_message(data):
+ """
+ 发送自定义消息
+ """
+ title = data.get("title")
+ text = data.get("text") or ""
+ image = data.get("image") or ""
+ Message().send_custom_message(title=title, text=text, image=image)
+ return {"code": 0}
+
+ @staticmethod
+ def get_rmt_modes():
+ RmtModes = ModuleConf.RMT_MODES_LITE if SystemUtils.is_lite_version(
+ ) else ModuleConf.RMT_MODES
+ return [{
+ "value": value,
+ "name": name.value
+ } for value, name in RmtModes.items()]
+
+    def __cookiecloud_sync(self, data):
+        """
+        Synchronize site cookies from a CookieCloud server: persist the
+        connection settings, download the cookie payload, and update the
+        cookie of every matching configured site.
+        """
+        server = data.get("server")
+        key = data.get("key")
+        password = data.get("password")
+        # Persist the CookieCloud connection settings
+        SystemConfig().set_system_config(key="CookieCloud",
+                                         value={
+                                             "server": server,
+                                             "key": key,
+                                             "password": password
+                                         })
+        # Download the cookie data
+        contents, retmsg = CookieCloudHelper(server=server,
+                                             key=key,
+                                             password=password).download_data()
+        if not contents:
+            return {"code": 1, "msg": retmsg}
+        success_count = 0
+        for domain, content_list in contents.items():
+            # strip the leading dot of wildcard cookie domains
+            if domain.startswith('.'):
+                domain = domain[1:]
+            cookie_str = ""
+            for content in content_list:
+                cookie_str += content.get("name") + \
+                              "=" + content.get("value") + ";"
+            if not cookie_str:
+                continue
+            site_info = Sites().get_sites(siteurl=domain)
+            if not site_info:
+                continue
+            self.dbhelper.update_site_cookie_ua(tid=site_info.get("id"),
+                                                cookie=cookie_str)
+            success_count += 1
+        if success_count:
+            # Reload site configuration to pick up the new cookies
+            Sites().init_config()
+            return {"code": 0, "msg": f"成功更新 {success_count} 个站点的Cookie数据"}
+        return {"code": 0, "msg": "同步完成,但未更新任何站点的Cookie!"}
+
+    @staticmethod
+    def media_detail(data):
+        """
+        Fetch full media details for the detail page: TMDB info, library
+        existence flag and subscription state.
+        """
+        # "tmdbid" may also carry a "DB:<douban id>" style identifier
+        tmdbid = data.get("tmdbid")
+        mtype = MediaType.MOVIE if data.get(
+            "type") in MovieTypes else MediaType.TV
+        if not tmdbid:
+            return {"code": 1, "msg": "未指定媒体ID"}
+        media_info = WebUtils.get_mediainfo_from_id(
+            mtype=mtype, mediaid=tmdbid)
+        # Verify the TMDB lookup succeeded
+        if not media_info or not media_info.tmdb_info:
+            return {
+                "code": 1,
+                "msg": "无法查询到TMDB信息"
+            }
+        # Library existence flag and subscription id
+        fav, rssid = FileTransfer().get_media_exists_flag(mtype=mtype,
+                                                          title=media_info.title,
+                                                          year=media_info.year,
+                                                          mediaid=media_info.tmdb_id)
+        MediaHander = Media()
+        return {
+            "code": 0,
+            "data": {
+                "tmdbid": media_info.tmdb_id,
+                "douban_id": media_info.douban_id,
+                "background": MediaHander.get_tmdb_backdrops(tmdbinfo=media_info.tmdb_info),
+                "image": media_info.get_poster_image(),
+                "vote": media_info.vote_average,
+                "year": media_info.year,
+                "title": media_info.title,
+                "genres": MediaHander.get_tmdb_genres_names(tmdbinfo=media_info.tmdb_info),
+                "overview": media_info.overview,
+                "runtime": StringUtils.str_timehours(media_info.runtime),
+                "fact": MediaHander.get_tmdb_factinfo(media_info),
+                "crews": MediaHander.get_tmdb_crews(tmdbinfo=media_info.tmdb_info, nums=6),
+                "actors": MediaHander.get_tmdb_cats(mtype=mtype, tmdbid=media_info.tmdb_id),
+                "link": media_info.get_detail_url(),
+                "douban_link": media_info.get_douban_detail_url(),
+                "fav": fav,
+                "rssid": rssid
+            }
+        }
+
+ @staticmethod
+ def __media_similar(data):
+ """
+ 查询TMDB相似媒体
+ """
+ tmdbid = data.get("tmdbid")
+ page = data.get("page") or 1
+ mtype = MediaType.MOVIE if data.get(
+ "type") in MovieTypes else MediaType.TV
+ if not tmdbid:
+ return {"code": 1, "msg": "未指定TMDBID"}
+ if mtype == MediaType.MOVIE:
+ result = Media().get_movie_similar(tmdbid=tmdbid, page=page)
+ else:
+ result = Media().get_tv_similar(tmdbid=tmdbid, page=page)
+ return {"code": 0, "data": result}
+
+ @staticmethod
+ def __media_recommendations(data):
+ """
+ 查询TMDB同类推荐媒体
+ """
+ tmdbid = data.get("tmdbid")
+ page = data.get("page") or 1
+ mtype = MediaType.MOVIE if data.get(
+ "type") in MovieTypes else MediaType.TV
+ if not tmdbid:
+ return {"code": 1, "msg": "未指定TMDBID"}
+ if mtype == MediaType.MOVIE:
+ result = Media().get_movie_recommendations(tmdbid=tmdbid, page=page)
+ else:
+ result = Media().get_tv_recommendations(tmdbid=tmdbid, page=page)
+ return {"code": 0, "data": result}
+
+    @staticmethod
+    def __media_person(data):
+        """
+        Query the full cast of a TMDB media entry.
+        """
+        tmdbid = data.get("tmdbid")
+        mtype = MediaType.MOVIE if data.get(
+            "type") in MovieTypes else MediaType.TV
+        if not tmdbid:
+            return {"code": 1, "msg": "未指定TMDBID"}
+        return {"code": 0, "data": Media().get_tmdb_cats(tmdbid=tmdbid,
+                                                         mtype=mtype)}
+
+    @staticmethod
+    def __person_medias(data):
+        """
+        Query the works a cast member appeared in, by media type (paged).
+        """
+        personid = data.get("personid")
+        page = data.get("page") or 1
+        mtype = MediaType.MOVIE if data.get(
+            "type") in MovieTypes else MediaType.TV
+        if not personid:
+            return {"code": 1, "msg": "未指定演员ID"}
+        return {"code": 0, "data": Media().get_person_medias(personid=personid,
+                                                             mtype=mtype,
+                                                             page=page)}
+
+ @staticmethod
+ def __save_user_script(data):
+ """
+ 保存用户自定义脚本
+ """
+ script = data.get("javascript") or ""
+ css = data.get("css") or ""
+ SystemConfig().set_system_config(key="CustomScript",
+ value={
+ "css": css,
+ "javascript": script
+ })
+ return {"code": 0, "msg": "保存成功"}
+
+ @staticmethod
+ def __run_directory_sync(data):
+ """
+ 执行单个目录的目录同步
+ """
+ Sync().transfer_all_sync(sid=data.get("sid"))
+ return {"code": 0, "msg": "执行成功"}
diff --git a/web/apiv1.py b/web/apiv1.py
new file mode 100644
index 0000000..7f457d4
--- /dev/null
+++ b/web/apiv1.py
@@ -0,0 +1,2278 @@
+from flask import Blueprint, request
+from flask_restx import Api, reqparse, Resource
+
+from app.brushtask import BrushTask
+from app.rsschecker import RssChecker
+from app.sites import Sites
+from app.utils import TokenCache
+from config import Config
+from web.action import WebAction
+from web.backend.user import User
+from web.security import require_auth, login_required, generate_access_token
+
+# Blueprint hosting the v1 REST API plus its static/Swagger assets
+apiv1_bp = Blueprint("apiv1",
+ __name__,
+ static_url_path='',
+ static_folder='./frontend/static/',
+ template_folder='./frontend/', )
+# Flask-RESTX Api: Bearer-token header auth scheme, Swagger UI served at "/"
+Apiv1 = Api(apiv1_bp,
+ version="1.0",
+ title="NAStool Api",
+ description="POST接口调用 /user/login 获取Token,GET接口使用 基础设置->安全->Api Key 调用",
+ doc="/",
+ security='Bearer Auth',
+ authorizations={"Bearer Auth": {"type": "apiKey", "name": "Authorization", "in": "header"}},
+ )
+# API分组
+# Namespaces group the endpoints by feature area in the generated docs
+user = Apiv1.namespace('user', description='用户')
+system = Apiv1.namespace('system', description='系统')
+config = Apiv1.namespace('config', description='设置')
+site = Apiv1.namespace('site', description='站点')
+service = Apiv1.namespace('service', description='服务')
+subscribe = Apiv1.namespace('subscribe', description='订阅')
+rss = Apiv1.namespace('rss', description='自定义RSS')
+recommend = Apiv1.namespace('recommend', description='推荐')
+search = Apiv1.namespace('search', description='搜索')
+download = Apiv1.namespace('download', description='下载')
+organization = Apiv1.namespace('organization', description='整理')
+torrentremover = Apiv1.namespace('torrentremover', description='自动删种')
+library = Apiv1.namespace('library', description='媒体库')
+brushtask = Apiv1.namespace('brushtask', description='刷流')
+media = Apiv1.namespace('media', description='媒体')
+sync = Apiv1.namespace('sync', description='目录同步')
+filterrule = Apiv1.namespace('filterrule', description='过滤规则')
+words = Apiv1.namespace('words', description='识别词')
+message = Apiv1.namespace('message', description='消息通知')
+douban = Apiv1.namespace('douban', description='豆瓣')
+
+class ApiResource(Resource):
+ """
+ API 认证
+ """
+ # Base class: every HTTP method on subclasses is wrapped by require_auth
+ # (API-key authentication).
+ method_decorators = [require_auth]
+
+
+class ClientResource(Resource):
+ """
+ 登录认证
+ """
+ # Base class: every HTTP method on subclasses is wrapped by login_required
+ # (logged-in user token authentication).
+ method_decorators = [login_required]
+
+
+def Failed():
+ """
+ Return the standard failure response payload.
+ """
+ return {
+ "code": -1,
+ "success": False,
+ "data": {}
+ }
+
+
+@user.route('/login')
+class UserLogin(Resource):
+ # Extends plain Resource (not ClientResource/ApiResource): login itself
+ # must be reachable without prior authentication.
+ parser = reqparse.RequestParser()
+ parser.add_argument('username', type=str, help='用户名', location='form', required=True)
+ parser.add_argument('password', type=str, help='密码', location='form', required=True)
+
+ @user.doc(parser=parser)
+ def post(self):
+ """
+ 用户登录
+ """
+ args = self.parser.parse_args()
+ username = args.get('username')
+ password = args.get('password')
+ # Same message for every failure mode so callers cannot distinguish
+ # unknown users from wrong passwords
+ if not username or not password:
+ return {"code": 1, "success": False, "message": "用户名或密码错误"}
+ user_info = User().get_user(username)
+ if not user_info:
+ return {"code": 1, "success": False, "message": "用户名或密码错误"}
+ # 校验密码
+ if not user_info.verify_password(password):
+ return {"code": 1, "success": False, "message": "用户名或密码错误"}
+ # 缓存Token
+ # Issue an access token and cache it so login_required can validate it
+ token = generate_access_token(username)
+ TokenCache.set(token, token)
+ return {
+ "code": 0,
+ "success": True,
+ "data": {
+ "token": token,
+ "apikey": Config().get_config("security").get("api_key"),
+ "userinfo": {
+ "userid": user_info.id,
+ "username": user_info.username,
+ "userpris": str(user_info.pris).split(",")
+ }
+ }
+ }
+
+
+@user.route('/info')
+class UserInfo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('username', type=str, help='用户名', location='form', required=True)
+
+ @user.doc(parser=parser)
+ def post(self):
+ """
+ 获取用户信息
+ """
+ # Return id, name and privilege list for the given username.
+ args = self.parser.parse_args()
+ username = args.get('username')
+ user_info = User().get_user(username)
+ if not user_info:
+ return {"code": 1, "success": False, "message": "用户名不正确"}
+ return {
+ "code": 0,
+ "success": True,
+ "data": {
+ "userid": user_info.id,
+ "username": user_info.username,
+ # pris is stored as a comma-separated string; split into a list
+ "userpris": str(user_info.pris).split(",")
+ }
+ }
+
+
+@user.route('/manage')
+# Add or delete a user account (oper=add/del)
+class UserManage(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('oper', type=str, help='操作类型(add 新增/del删除)', location='form', required=True)
+ parser.add_argument('name', type=str, help='用户名', location='form', required=True)
+ parser.add_argument('pris', type=str, help='权限', location='form')
+
+ @user.doc(parser=parser)
+ def post(self):
+ """
+ 用户管理
+ """
+ return WebAction().api_action(cmd='user_manager', data=self.parser.parse_args())
+
+
+@user.route('/list')
+# List all user accounts
+class UserList(ClientResource):
+ @staticmethod
+ def post():
+ """
+ 查询所有用户
+ """
+ return WebAction().api_action(cmd='get_users')
+
+
+@service.route('/mediainfo')
+# Recognize media info from a name (API-key auth, GET)
+class ServiceMediaInfo(ApiResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='名称', location='args', required=True)
+
+ @service.doc(parser=parser)
+ def get(self):
+ """
+ 识别媒体信息(密钥认证)
+ """
+ # Same backend command as /service/name/test but key-authenticated
+ return WebAction().api_action(cmd='name_test', data=self.parser.parse_args())
+
+
+@service.route('/name/test')
+# Name-recognition test (login auth, POST)
+class ServiceNameTest(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='名称', location='form', required=True)
+
+ @service.doc(parser=parser)
+ def post(self):
+ """
+ 名称识别测试
+ """
+ return WebAction().api_action(cmd='name_test', data=self.parser.parse_args())
+
+
+@service.route('/rule/test')
+# Dry-run a torrent title/subtitle/size against the filter rules
+class ServiceRuleTest(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('title', type=str, help='名称', location='form', required=True)
+ parser.add_argument('subtitle', type=str, help='描述', location='form')
+ parser.add_argument('size', type=float, help='大小(GB)', location='form')
+
+ @service.doc(parser=parser)
+ def post(self):
+ """
+ 过滤规则测试
+ """
+ return WebAction().api_action(cmd='rule_test', data=self.parser.parse_args())
+
+
+@service.route('/network/test')
+# Connectivity test against an arbitrary URL
+class ServiceNetworkTest(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('url', type=str, help='URL地址', location='form', required=True)
+
+ @service.doc(parser=parser)
+ def post(self):
+ """
+ 网络连接性测试
+ """
+ # Unlike the other endpoints this passes only the URL string, not the dict
+ return WebAction().api_action(cmd='net_test', data=self.parser.parse_args().get("url"))
+
+
+@service.route('/run')
+# Trigger a named scheduled service immediately
+class ServiceRun(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('item', type=str,
+ help='服务名称(autoremovetorrents、pttransfer、ptsignin、sync、rssdownload、douban、subscribe_search_all)',
+ location='form',
+ required=True)
+
+ @service.doc(parser=parser)
+ def post(self):
+ """
+ 运行服务
+ """
+ return WebAction().api_action(cmd='sch', data=self.parser.parse_args())
+
+
+@site.route('/statistics')
+# Per-site user statistics (API-key auth)
+class SiteStatistic(ApiResource):
+ @staticmethod
+ def get():
+ """
+ 获取站点数据明细(密钥认证)
+ """
+ # 返回站点信息
+ return {
+ "code": 0,
+ "success": True,
+ "data": {
+ "user_statistics": WebAction().get_site_user_statistics({"encoding": "DICT"}).get("data")
+ }
+ }
+
+
+@site.route('/sites')
+# Full site configuration dump (API-key auth)
+class SiteSites(ApiResource):
+ @staticmethod
+ def get():
+ """
+ 获取所有站点配置(密钥认证)
+ """
+ return {
+ "code": 0,
+ "success": True,
+ "data": {
+ "user_sites": Sites().get_sites()
+ }
+ }
+
+
+@site.route('/update')
+# Create or update a site entry (site_id present => update)
+class SiteUpdate(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('site_name', type=str, help='站点名称', location='form', required=True)
+ parser.add_argument('site_id', type=int, help='更新站点ID', location='form')
+ parser.add_argument('site_pri', type=str, help='优先级', location='form')
+ parser.add_argument('site_rssurl', type=str, help='RSS地址', location='form')
+ parser.add_argument('site_signurl', type=str, help='站点地址', location='form')
+ parser.add_argument('site_cookie', type=str, help='Cookie', location='form')
+ parser.add_argument('site_note', type=str, help='站点属性', location='form')
+ parser.add_argument('site_include', type=str, help='站点用途', location='form')
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 新增/删除站点
+ """
+ return WebAction().api_action(cmd='update_site', data=self.parser.parse_args())
+
+
+@site.route('/info')
+# Detail of a single site by id
+class SiteInfo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 查询单个站点详情
+ """
+ return WebAction().api_action(cmd='get_site', data=self.parser.parse_args())
+
+
+@site.route('/favicon')
+# Site favicon as Base64, looked up by site name
+class SiteFavicon(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 获取站点图标(Base64)
+ """
+ return WebAction().api_action(cmd='get_site_favicon', data=self.parser.parse_args())
+
+
+@site.route('/test')
+# Connectivity test for one site
+class SiteTest(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 测试站点连通性
+ """
+ return WebAction().api_action(cmd='test_site', data=self.parser.parse_args())
+
+
+@site.route('/delete')
+# Delete one site by id
+class SiteDelete(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 删除站点
+ """
+ return WebAction().api_action(cmd='del_site', data=self.parser.parse_args())
+
+
+@site.route('/statistics/activity')
+# Upload/download/seeding activity for one site
+class SiteStatisticsActivity(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 查询站点 上传/下载/做种数据
+ """
+ return WebAction().api_action(cmd='get_site_activity', data=self.parser.parse_args())
+
+
+@site.route('/check')
+# Check whether a site URL supports FREE/HR detection
+class SiteCheck(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('url', type=str, help='站点地址', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 检查站点是否支持FREE/HR检测
+ """
+ return WebAction().api_action(cmd='check_site_attr', data=self.parser.parse_args())
+
+
+@site.route('/statistics/history')
+# History data for all sites over the last N days
+class SiteStatisticsHistory(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('days', type=int, help='时间范围(天)', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 查询所有站点历史数据
+ """
+ return WebAction().api_action(cmd='get_site_history', data=self.parser.parse_args())
+
+
+@site.route('/statistics/seedinfo')
+# Seeding distribution for one site
+class SiteStatisticsSeedinfo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 查询站点做种分布
+ """
+ return WebAction().api_action(cmd='get_site_seeding_info', data=self.parser.parse_args())
+
+
+@site.route('/resources')
+# Paged resource listing from one site's indexer
+class SiteResources(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='站点索引ID', location='form', required=True)
+ parser.add_argument('page', type=int, help='页码', location='form')
+ parser.add_argument('keyword', type=str, help='站点名称', location='form')
+
+ @site.doc(parser=parser)
+ def post(self):
+ """
+ 查询站点资源列表
+ """
+ return WebAction().api_action(cmd='list_site_resources', data=self.parser.parse_args())
+
+
+@site.route('/list')
+class SiteList(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('basic', type=int, help='只查询基本信息(0-否/1-是)', location='form')
+ parser.add_argument('rss', type=int, help='订阅(0-否/1-是)', location='form')
+ parser.add_argument('brush', type=int, help='刷流(0-否/1-是)', location='form')
+ parser.add_argument('signin', type=int, help='签到(0-否/1-是)', location='form')
+ parser.add_argument('statistic', type=int, help='数据统计(0-否/1-是)', location='form')
+
+ def post(self):
+ """
+ 查询站点列表
+ """
+ return WebAction().api_action(cmd='get_sites', data=self.parser.parse_args())
+
+
+@site.route('/indexers')
+# List all site indexers
+class SiteIndexers(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询站点索引列表
+ """
+ return WebAction().api_action(cmd='get_indexers')
+
+
+@search.route('/keyword')
+# Search by keyword or TMDBID across configured sites
+class SearchKeyword(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('search_word', type=str, help='搜索关键字', location='form', required=True)
+ parser.add_argument('unident', type=int, help='快速模式(0-否/1-是)', location='form')
+ parser.add_argument('filters', type=str, help='过滤条件', location='form')
+ parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
+ parser.add_argument('media_type', type=str, help='类型(电影/电视剧)', location='form')
+
+ @search.doc(parser=parser)
+ def post(self):
+ """
+ 根据关键字/TMDBID搜索
+ """
+ return WebAction().api_action(cmd='search', data=self.parser.parse_args())
+
+
+@search.route('/result')
+# Fetch results of the last search
+class SearchResult(ClientResource):
+ @staticmethod
+ def post():
+ """
+ 查询搜索结果
+ """
+ return WebAction().api_action(cmd='get_search_result')
+
+
+@download.route('/search')
+# Download a previously returned search result by its id
+class DownloadSearch(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='搜索结果ID', location='form', required=True)
+ parser.add_argument('dir', type=str, help='保存目录', location='form')
+ parser.add_argument('setting', type=str, help='下载设置', location='form')
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 下载搜索结果
+ """
+ return WebAction().api_action(cmd='download', data=self.parser.parse_args())
+
+
+@download.route('/item')
+# Download directly from an enclosure/torrent link with explicit metadata
+class DownloadItem(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('enclosure', type=str, help='链接URL', location='form', required=True)
+ parser.add_argument('title', type=str, help='标题', location='form', required=True)
+ parser.add_argument('site', type=str, help='站点名称', location='form')
+ parser.add_argument('description', type=str, help='描述', location='form')
+ parser.add_argument('page_url', type=str, help='详情页面URL', location='form')
+ parser.add_argument('size', type=str, help='大小', location='form')
+ parser.add_argument('seeders', type=str, help='做种数', location='form')
+ parser.add_argument('uploadvolumefactor', type=float, help='上传因子', location='form')
+ parser.add_argument('downloadvolumefactor', type=float, help='下载因子', location='form')
+ parser.add_argument('dl_dir', type=str, help='保存目录', location='form')
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 下载链接
+ """
+ return WebAction().api_action(cmd='download_link', data=self.parser.parse_args())
+
+
+@download.route('/start')
+# Resume a download task
+class DownloadStart(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 开始下载任务
+ """
+ return WebAction().api_action(cmd='pt_start', data=self.parser.parse_args())
+
+
+@download.route('/stop')
+# Pause a download task
+class DownloadStop(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 暂停下载任务
+ """
+ return WebAction().api_action(cmd='pt_stop', data=self.parser.parse_args())
+
+
+@download.route('/info')
+# Progress info for a set of task ids
+class DownloadInfo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('ids', type=str, help='任务IDS', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 查询下载进度
+ """
+ return WebAction().api_action(cmd='pt_info', data=self.parser.parse_args())
+
+
+@download.route('/remove')
+# Remove a download task
+class DownloadRemove(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 删除下载任务
+ """
+ return WebAction().api_action(cmd='pt_remove', data=self.parser.parse_args())
+
+
+@download.route('/history')
+# Paged download history
+class DownloadHistory(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('page', type=str, help='第几页', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 查询下载历史
+ """
+ return WebAction().api_action(cmd='get_downloaded', data=self.parser.parse_args())
+
+
+@download.route('/now')
+# Currently active downloads
+class DownloadNow(ClientResource):
+ @staticmethod
+ def post():
+ """
+ 查询正在下载的任务
+ """
+ return WebAction().api_action(cmd='get_downloading')
+
+
+@download.route('/config/info')
+# Fetch one download-settings profile by sid
+class DownloadConfigInfo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 查询下载设置
+ """
+ return WebAction().api_action(cmd='get_download_setting', data=self.parser.parse_args())
+
+
+@download.route('/config/update')
+# Create or update a download-settings profile
+class DownloadConfigUpdate(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
+ parser.add_argument('name', type=str, help='名称', location='form', required=True)
+ parser.add_argument('category', type=str, help='分类', location='form')
+ parser.add_argument('tags', type=str, help='标签', location='form')
+ parser.add_argument('content_layout', type=int, help='布局(0-全局/1-原始/2-创建子文件夹/3-不建子文件夹)',
+ location='form')
+ parser.add_argument('is_paused', type=int, help='动作(0-添加后开始/1-添加后暂停)', location='form')
+ parser.add_argument('upload_limit', type=int, help='上传速度限制', location='form')
+ parser.add_argument('download_limit', type=int, help='下载速度限制', location='form')
+ parser.add_argument('ratio_limit', type=int, help='分享率限制', location='form')
+ parser.add_argument('seeding_time_limit', type=int, help='做种时间限制', location='form')
+ parser.add_argument('downloader', type=str, help='下载器(Qbittorrent/Transmission)', location='form')
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 新增/修改下载设置
+ """
+ return WebAction().api_action(cmd='update_download_setting', data=self.parser.parse_args())
+
+
+@download.route('/config/delete')
+# Delete a download-settings profile
+class DownloadConfigDelete(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
+
+ @download.doc(parser=parser)
+ def post(self):
+ """
+ 删除下载设置
+ """
+ return WebAction().api_action(cmd='delete_download_setting', data=self.parser.parse_args())
+
+
+@download.route('/config/list')
+class DownloadConfigList(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('sid', type=str, help='ID', location='form')
+
+ def post(self):
+ """
+ 查询下载设置
+ """
+ return WebAction().api_action(cmd="get_download_setting", data=self.parser.parse_args())
+
+
+@download.route('/config/directory')
+class DownloadConfigDirectory(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('sid', type=str, help='下载设置ID', location='form')
+
+ def post(self):
+ """
+ 查询下载保存目录
+ """
+ return WebAction().api_action(cmd="get_download_dirs", data=self.parser.parse_args())
+
+
+@organization.route('/unknown/delete')
+# Delete an unidentified-media record
+class UnknownDelete(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='未识别记录ID', location='form', required=True)
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 删除未识别记录
+ """
+ return WebAction().api_action(cmd='del_unknown_path', data=self.parser.parse_args())
+
+
+@organization.route('/unknown/rename')
+# Manually identify a record (from transfer history or the unknown list)
+class UnknownRename(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('logid', type=str, help='转移历史记录ID', location='form')
+ parser.add_argument('unknown_id', type=str, help='未识别记录ID', location='form')
+ parser.add_argument('syncmod', type=str, help='转移模式', location='form', required=True)
+ parser.add_argument('tmdb', type=int, help='TMDB ID', location='form')
+ parser.add_argument('title', type=str, help='标题', location='form')
+ parser.add_argument('year', type=str, help='年份', location='form')
+ parser.add_argument('type', type=str, help='类型(MOV/TV/ANIME)', location='form')
+ parser.add_argument('season', type=int, help='季号', location='form')
+ parser.add_argument('episode_format', type=str, help='集数定位', location='form')
+ parser.add_argument('min_filesize', type=int, help='最小文件大小', location='form')
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 手动识别
+ """
+ return WebAction().api_action(cmd='rename', data=self.parser.parse_args())
+
+
+@organization.route('/unknown/renameudf')
+# User-defined identification with explicit source/destination paths
+class UnknownRenameUDF(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('inpath', type=str, help='源目录', location='form', required=True)
+ parser.add_argument('outpath', type=str, help='目的目录', location='form', required=True)
+ parser.add_argument('syncmod', type=str, help='转移模式', location='form', required=True)
+ parser.add_argument('tmdb', type=int, help='TMDB ID', location='form')
+ parser.add_argument('title', type=str, help='标题', location='form')
+ parser.add_argument('year', type=str, help='年份', location='form')
+ parser.add_argument('type', type=str, help='类型(MOV/TV/ANIME)', location='form')
+ parser.add_argument('season', type=int, help='季号', location='form')
+ parser.add_argument('episode_format', type=str, help='集数定位', location='form')
+ parser.add_argument('episode_details', type=str, help='集数范围', location='form')
+ parser.add_argument('episode_offset', type=str, help='集数偏移', location='form')
+ parser.add_argument('min_filesize', type=int, help='最小文件大小', location='form')
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 自定义识别
+ """
+ return WebAction().api_action(cmd='rename_udf', data=self.parser.parse_args())
+
+
+@organization.route('/unknown/redo')
+# Re-run identification for a set of records
+class UnknownRedo(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('flag', type=str, help='类型(unknow/history)', location='form', required=True)
+ parser.add_argument('ids', type=list, help='记录ID', location='form', required=True)
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 重新识别
+ """
+ return WebAction().api_action(cmd='re_identification', data=self.parser.parse_args())
+
+
+@organization.route('/history/delete')
+# Delete media transfer history records
+class TransferHistoryDelete(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('logids', type=list, help='记录IDS', location='form', required=True)
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 删除媒体整理历史记录
+ """
+ return WebAction().api_action(cmd='delete_history', data=self.parser.parse_args())
+
+
+@organization.route('/unknown/list')
+# List all unidentified records
+class TransferUnknownList(ClientResource):
+ @staticmethod
+ def post():
+ """
+ 查询所有未识别记录
+ """
+ return WebAction().api_action(cmd='get_unknown_list')
+
+
+@organization.route('/history/list')
+# Paged transfer history, optionally filtered by keyword
+class TransferHistoryList(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('page', type=int, help='页码', location='form', required=True)
+ parser.add_argument('pagenum', type=int, help='每页条数', location='form', required=True)
+ parser.add_argument('keyword', type=str, help='过滤关键字', location='form')
+
+ @organization.doc(parser=parser)
+ def post(self):
+ """
+ 查询媒体整理历史记录
+ """
+ return WebAction().api_action(cmd='get_transfer_history', data=self.parser.parse_args())
+
+
+@organization.route('/history/statistics')
+# Aggregate statistics over the transfer history
+class HistoryStatistics(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询转移历史统计数据
+ """
+ return WebAction().api_action(cmd='get_transfer_statistics')
+
+
+@organization.route('/cache/empty')
+# Clear the file-transfer blacklist cache
+class TransferCacheEmpty(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 清空文件转移缓存
+ """
+ return WebAction().api_action(cmd='truncate_blacklist')
+
+
+@library.route('/sync/start')
+# Start a media-library sync
+class LibrarySyncStart(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 开始媒体库同步
+ """
+ return WebAction().api_action(cmd='start_mediasync')
+
+
+@library.route('/sync/status')
+# Current state of the media-library sync
+class LibrarySyncStatus(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询媒体库同步状态
+ """
+ return WebAction().api_action(cmd='mediasync_state')
+
+
+@library.route('/mediaserver/playhistory')
+# Media-server playback history
+class LibraryPlayHistory(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询媒体库播放历史
+ """
+ return WebAction().api_action(cmd='get_library_playhistory')
+
+
+@library.route('/mediaserver/statistics')
+# Media-library item counts
+class LibraryStatistics(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询媒体库统计数据
+ """
+ return WebAction().api_action(cmd="get_library_mediacount")
+
+
+@library.route('/space')
+# Media-library storage usage
+class LibrarySpace(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询媒体库存储空间
+ """
+ return WebAction().api_action(cmd='get_library_spacesize')
+
+
+@system.route('/logging')
+# Tail the live application log (optionally incremental)
+class SystemLogging(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('refresh_new', type=int, help='是否刷新增量日志(0-否/1-是)', location='form', required=True)
+
+ @system.doc(parser=parser)
+ def post(self):
+ """
+ 获取实时日志
+ """
+ return WebAction().api_action(cmd='logging', data=self.parser.parse_args())
+
+
+@system.route('/version')
+# Latest available version number
+class SystemVersion(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 查询最新版本号
+ """
+ return WebAction().api_action(cmd='version')
+
+
+@system.route('/path')
+# List sub-directories/files of a path, filtered by kind
+class SystemPath(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('dir', type=str, help='路径', location='form', required=True)
+ parser.add_argument('filter', type=str, help='过滤器(ONLYFILE/ONLYDIR/MEDIAFILE/SUBFILE/ALL)', location='form',
+ required=True)
+
+ @system.doc(parser=parser)
+ def post(self):
+ """
+ 查询目录的子目录/文件
+ """
+ return WebAction().api_action(cmd='get_sub_path', data=self.parser.parse_args())
+
+
+@system.route('/restart')
+# Restart the application
+class SystemRestart(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 重启
+ """
+ return WebAction().api_action(cmd='restart')
+
+
+@system.route('/update')
+# Upgrade the application
+class SystemUpdate(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 升级
+ """
+ return WebAction().api_action(cmd='update_system')
+
+
+@system.route('/logout')
+class SystemUpdate(ClientResource):
+
+ @staticmethod
+ def post():
+ """
+ 注销
+ """
+ token = request.headers.get("Authorization", default=None)
+ if token:
+ TokenCache.delete(token)
+ return {
+ "code": 0,
+ "success": True
+ }
+
+
+@system.route('/message')
+# Message-center entries since an optional timestamp
+class SystemMessage(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('lst_time', type=str, help='时间(YYYY-MM-DD HH24:MI:SS)', location='form')
+
+ @system.doc(parser=parser)
+ def post(self):
+ """
+ 查询消息中心消息
+ """
+ return WebAction().get_system_message(lst_time=self.parser.parse_args().get("lst_time"))
+
+
+@system.route('/progress')
+# Progress of long-running jobs (search, media sync, ...)
+class SystemProgress(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('type', type=str, help='类型(search/mediasync)', location='form', required=True)
+
+ @system.doc(parser=parser)
+ def post(self):
+ """
+ 查询搜索/媒体同步等进度
+ """
+ return WebAction().api_action(cmd='refresh_process', data=self.parser.parse_args())
+
+
+@config.route('/update')
+# Add or modify configuration items (dict of key/value pairs)
+class ConfigUpdate(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('items', type=dict, help='配置项', location='form', required=True)
+
+ @config.doc(parser=parser)
+ def post(self):
+ """
+ 新增/修改配置
+ """
+ return WebAction().api_action(cmd='update_config', data=self.parser.parse_args().get("items"))
+
+
+@config.route('/test')
+# Test connectivity of a configured component
+class ConfigTest(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('command', type=str, help='测试命令', location='form', required=True)
+
+ @config.doc(parser=parser)
+ def post(self):
+ """
+ 测试配置连通性
+ """
+ return WebAction().api_action(cmd='test_connection', data=self.parser.parse_args())
+
+
+@config.route('/restore')
+# Restore configuration from a named backup file
+class ConfigRestore(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('file_name', type=str, help='备份文件名', location='form', required=True)
+
+ @config.doc(parser=parser)
+ def post(self):
+ """
+ 恢复备份的配置
+ """
+ return WebAction().api_action(cmd='restory_backup', data=self.parser.parse_args())
+
+
+@config.route('/info')
+# Dump the full configuration
+class ConfigInfo(ClientResource):
+ @staticmethod
+ def post():
+ """
+ 获取所有配置信息
+ """
+ return {
+ "code": 0,
+ "success": True,
+ "data": Config().get_config()
+ }
+
+
+@config.route('/directory')
+# Add/remove/set a media-library directory entry
+class ConfigDirectory(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('oper', type=str, help='操作类型(add/sub/set)', location='form', required=True)
+ parser.add_argument('key', type=str, help='配置项', location='form', required=True)
+ parser.add_argument('value', type=str, help='配置值', location='form', required=True)
+
+ @config.doc(parser=parser)
+ def post(self):
+ """
+ 配置媒体库目录
+ """
+ return WebAction().api_action(cmd='update_directory', data=self.parser.parse_args())
+
+
+@subscribe.route('/delete')
+# Remove a subscription (by rssid, or by name/type/year/season/tmdbid)
+class SubscribeDelete(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='名称', location='form')
+ parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form')
+ parser.add_argument('year', type=str, help='发行年份', location='form')
+ parser.add_argument('season', type=int, help='季号', location='form')
+ parser.add_argument('rssid', type=int, help='已有订阅ID', location='form')
+ parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
+
+ @subscribe.doc(parser=parser)
+ def post(self):
+ """
+ 删除订阅
+ """
+ return WebAction().api_action(cmd='remove_rss_media', data=self.parser.parse_args())
+
+
+@subscribe.route('/add')
+# Create or update a subscription with its filter/download options
+class SubscribeAdd(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('name', type=str, help='名称', location='form', required=True)
+ parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
+ parser.add_argument('year', type=str, help='发行年份', location='form')
+ parser.add_argument('keyword', type=str, help='自定义搜索词', location='form')
+ parser.add_argument('season', type=int, help='季号', location='form')
+ parser.add_argument('rssid', type=int, help='已有订阅ID', location='form')
+ parser.add_argument('mediaid', type=str, help='TMDBID/DB:豆瓣ID', location='form')
+ parser.add_argument('fuzzy_match', type=int, help='模糊匹配(0-否/1-是)', location='form')
+ parser.add_argument('rss_sites', type=list, help='RSS站点', location='form')
+ parser.add_argument('search_sites', type=list, help='搜索站点', location='form')
+ parser.add_argument('over_edition', type=int, help='洗版(0-否/1-是)', location='form')
+ parser.add_argument('filter_restype', type=str, help='资源类型', location='form')
+ parser.add_argument('filter_pix', type=str, help='分辨率', location='form')
+ parser.add_argument('filter_team', type=str, help='字幕组/发布组', location='form')
+ parser.add_argument('filter_rule', type=int, help='过滤规则', location='form')
+ parser.add_argument('download_setting', type=int, help='下载设置', location='form')
+ parser.add_argument('save_path', type=str, help='保存路径', location='form')
+ parser.add_argument('total_ep', type=int, help='总集数', location='form')
+ parser.add_argument('current_ep', type=int, help='开始集数', location='form')
+
+ @subscribe.doc(parser=parser)
+ def post(self):
+ """
+ 新增/修改订阅
+ """
+ return WebAction().api_action(cmd='add_rss_media', data=self.parser.parse_args())
+
+
+@subscribe.route('/movie/date')
+# Release-date calendar data for a movie
+class SubscribeMovieDate(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form', required=True)
+
+ @subscribe.doc(parser=parser)
+ def post(self):
+ """
+ 电影上映日期
+ """
+ return WebAction().api_action(cmd='movie_calendar_data', data=self.parser.parse_args())
+
+
+@subscribe.route('/tv/date')
+# Air-date calendar data for a TV season
+class SubscribeTVDate(ClientResource):
+ parser = reqparse.RequestParser()
+ parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form', required=True)
+ parser.add_argument('season', type=int, help='季号', location='form', required=True)
+ parser.add_argument('name', type=str, help='名称', location='form')
+
+ @subscribe.doc(parser=parser)
+ def post(self):
+ """
+ 电视剧上映日期
+ """
+ return WebAction().api_action(cmd='tv_calendar_data', data=self.parser.parse_args())
+
+
@subscribe.route('/search')
class SubscribeSearch(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)

    @subscribe.doc(parser=parser)
    def post(self):
        """
        Refresh and search for a subscription (delegates to WebAction cmd 'refresh_rss').
        """
        return WebAction().api_action(cmd='refresh_rss', data=self.parser.parse_args())
+
+
@subscribe.route('/info')
class SubscribeInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)
    parser.add_argument('type', type=str, help='订阅类型(MOV/TV)', location='form', required=True)

    @subscribe.doc(parser=parser)
    def post(self):
        """
        Subscription detail (delegates to WebAction cmd 'rss_detail').
        """
        return WebAction().api_action(cmd='rss_detail', data=self.parser.parse_args())
+
+
@subscribe.route('/redo')
class SubscribeRedo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅历史ID', location='form', required=True)
    parser.add_argument('type', type=str, help='订阅类型(MOV/TV)', location='form', required=True)

    @subscribe.doc(parser=parser)
    def post(self):
        """
        Re-subscribe from a history entry (delegates to WebAction cmd 're_rss_history').
        """
        return WebAction().api_action(cmd='re_rss_history', data=self.parser.parse_args())
+
+
@subscribe.route('/history/delete')
class SubscribeHistoryDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)

    @subscribe.doc(parser=parser)
    def post(self):
        """
        Delete a subscription history entry (delegates to WebAction cmd 'delete_rss_history').
        """
        return WebAction().api_action(cmd='delete_rss_history', data=self.parser.parse_args())
+
+
@subscribe.route('/history')
class SubscribeHistory(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)

    @subscribe.doc(parser=parser)
    def post(self):
        """
        Query subscription history (delegates to WebAction cmd 'get_rss_history').
        """
        return WebAction().api_action(cmd='get_rss_history', data=self.parser.parse_args())
+
+
@subscribe.route('/cache/delete')
class SubscribeCacheDelete(ClientResource):
    @staticmethod
    def post():
        """
        Clear the subscription (RSS) cache (delegates to WebAction cmd 'truncate_rsshistory').
        """
        return WebAction().api_action(cmd='truncate_rsshistory')
+
+
@subscribe.route('/movie/list')
class SubscribeMovieList(ClientResource):
    @staticmethod
    def post():
        """
        List all movie subscriptions (delegates to WebAction cmd 'get_movie_rss_list').
        """
        return WebAction().api_action(cmd='get_movie_rss_list')
+
+
@subscribe.route('/tv/list')
class SubscribeTvList(ClientResource):
    @staticmethod
    def post():
        """
        List all TV series subscriptions (delegates to WebAction cmd 'get_tv_rss_list').
        """
        return WebAction().api_action(cmd='get_tv_rss_list')
+
+
@recommend.route('/list')
class RecommendList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str,
                        help='类型(hm/ht/nm/nt/dbom/dbhm/dbht/dbdh/dbnm/dbtop/dbzy/bangumi)',
                        location='form', required=True)
    parser.add_argument('page', type=int, help='页码', location='form', required=True)

    @recommend.doc(parser=parser)
    def post(self):
        """
        Paged recommendation list for a given source type (delegates to WebAction cmd 'get_recommend').
        """
        return WebAction().api_action(cmd='get_recommend', data=self.parser.parse_args())
+
+
@rss.route('/info')
class RssInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Custom RSS task detail (delegates to WebAction cmd 'get_userrss_task').
        """
        return WebAction().api_action(cmd='get_userrss_task', data=self.parser.parse_args())
+
+
@rss.route('/delete')
class RssDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Delete a custom RSS task (delegates to WebAction cmd 'delete_userrss_task').
        """
        return WebAction().api_action(cmd='delete_userrss_task', data=self.parser.parse_args())
+
+
@rss.route('/update')
class RssUpdate(ClientResource):
    parser = reqparse.RequestParser()
    # 'id' is optional: omitted -> create a new task; present -> update that task.
    parser.add_argument('id', type=int, help='任务ID', location='form')
    parser.add_argument('name', type=str, help='任务名称', location='form', required=True)
    parser.add_argument('address', type=str, help='RSS地址', location='form', required=True)
    parser.add_argument('parser', type=int, help='解析器ID', location='form', required=True)
    parser.add_argument('interval', type=int, help='刷新间隔(分钟)', location='form', required=True)
    parser.add_argument('uses', type=str, help='动作', location='form', required=True)
    parser.add_argument('state', type=str, help='状态(Y/N)', location='form', required=True)
    parser.add_argument('include', type=str, help='包含', location='form')
    parser.add_argument('exclude', type=str, help='排除', location='form')
    parser.add_argument('filterrule', type=int, help='过滤规则', location='form')
    parser.add_argument('note', type=str, help='备注', location='form')

    @rss.doc(parser=parser)
    def post(self):
        """
        Add or update a custom RSS task (delegates to WebAction cmd 'update_userrss_task').
        """
        return WebAction().api_action(cmd='update_userrss_task', data=self.parser.parse_args())
+
+
@rss.route('/parser/info')
class RssParserInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        RSS parser detail (delegates to WebAction cmd 'get_rssparser').
        """
        return WebAction().api_action(cmd='get_rssparser', data=self.parser.parse_args())
+
+
@rss.route('/parser/delete')
class RssParserDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Delete an RSS parser (delegates to WebAction cmd 'delete_rssparser').
        """
        return WebAction().api_action(cmd='delete_rssparser', data=self.parser.parse_args())
+
+
@rss.route('/parser/update')
class RssParserUpdate(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(JSON/XML)', location='form', required=True)
    parser.add_argument('format', type=str, help='解析格式', location='form', required=True)
    parser.add_argument('params', type=str, help='附加参数', location='form')

    @rss.doc(parser=parser)
    def post(self):
        """
        Add or update an RSS parser (delegates to WebAction cmd 'update_rssparser').
        """
        return WebAction().api_action(cmd='update_rssparser', data=self.parser.parse_args())
+
+
@rss.route('/parser/list')
class RssParserList(ClientResource):
    @staticmethod
    def post():
        """
        List all RSS parsers (queries RssChecker directly instead of WebAction).
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "parsers": RssChecker().get_userrss_parser()
            }
        }
+
+
@rss.route('/list')
class RssList(ClientResource):
    @staticmethod
    def post():
        """
        List all custom RSS tasks and parsers (queries RssChecker directly).
        """
        # Fix: previously reported "success": False even though the query
        # succeeded; every sibling list endpoint in this module
        # (/rss/parser/list, /brushtask/list, /brushtask/downloader/list)
        # returns "success": True on success.
        return {
            "code": 0,
            "success": True,
            "data": {
                "tasks": RssChecker().get_rsstask_info(),
                "parsers": RssChecker().get_userrss_parser()
            }
        }
+
+
@rss.route('/preview')
class RssPreview(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Preview the articles of a custom RSS task (delegates to WebAction cmd 'list_rss_articles').
        """
        return WebAction().api_action(cmd='list_rss_articles', data=self.parser.parse_args())
+
+
@rss.route('/name/test')
class RssNameTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('taskid', type=int, help='任务ID', location='form', required=True)
    parser.add_argument('title', type=str, help='名称', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Test title recognition for a custom RSS task (delegates to WebAction cmd 'rss_article_test').
        """
        return WebAction().api_action(cmd='rss_article_test', data=self.parser.parse_args())
+
+
@rss.route('/item/history')
class RssItemHistory(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Item processing history of a custom RSS task (delegates to WebAction cmd 'list_rss_history').
        """
        return WebAction().api_action(cmd='list_rss_history', data=self.parser.parse_args())
+
+
@rss.route('/item/set')
class RssItemSet(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('flag', type=str, help='操作类型(set_finished/set_unfinish)', location='form', required=True)
    parser.add_argument('articles', type=list, help='条目({title/enclosure})', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Mark custom RSS task items finished/unfinished (delegates to WebAction cmd 'rss_articles_check').
        """
        return WebAction().api_action(cmd='rss_articles_check', data=self.parser.parse_args())
+
+
@rss.route('/item/download')
class RssItemDownload(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('taskid', type=int, help='任务ID', location='form', required=True)
    parser.add_argument('articles', type=list, help='条目({title/enclosure})', location='form', required=True)

    @rss.doc(parser=parser)
    def post(self):
        """
        Download custom RSS task items (delegates to WebAction cmd 'rss_articles_download').
        """
        return WebAction().api_action(cmd='rss_articles_download', data=self.parser.parse_args())
+
+
@media.route('/search')
class MediaSearch(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('keyword', type=str, help='关键字', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Search TMDB/Douban entries by keyword (delegates to WebAction cmd 'search_media_infos').
        """
        return WebAction().api_action(cmd='search_media_infos', data=self.parser.parse_args())
+
+
@media.route('/cache/update')
class MediaCacheUpdate(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('key', type=str, help='缓存Key值', location='form', required=True)
    parser.add_argument('title', type=str, help='标题', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Modify the title of a TMDB cache entry (delegates to WebAction cmd 'modify_tmdb_cache').
        """
        return WebAction().api_action(cmd='modify_tmdb_cache', data=self.parser.parse_args())
+
+
@media.route('/cache/delete')
class MediaCacheDelete(ClientResource):
    parser = reqparse.RequestParser()
    # NOTE(review): this endpoint names the key 'cache_key' while /cache/update
    # uses 'key' — intentional per the backing WebAction commands, presumably;
    # confirm before unifying.
    parser.add_argument('cache_key', type=str, help='缓存Key值', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Delete a TMDB cache entry (delegates to WebAction cmd 'delete_tmdb_cache').
        """
        return WebAction().api_action(cmd='delete_tmdb_cache', data=self.parser.parse_args())
+
+
@media.route('/cache/clear')
class MediaCacheClear(ClientResource):

    @staticmethod
    def post():
        """
        Clear the entire TMDB cache (delegates to WebAction cmd 'clear_tmdb_cache').
        """
        return WebAction().api_action(cmd='clear_tmdb_cache')
+
+
@media.route('/tv/seasons')
class MediaTvSeasons(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Query the season list of a TV series (delegates to WebAction cmd 'get_tvseason_list').
        """
        return WebAction().api_action(cmd='get_tvseason_list', data=self.parser.parse_args())
+
+
@media.route('/category/list')
class MediaCategoryList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(电影/电视剧/动漫)', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Query the secondary-category configuration (delegates to WebAction cmd 'get_categories').
        """
        return WebAction().api_action(cmd='get_categories', data=self.parser.parse_args())
+
+
@media.route('/info')
class MediaInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form')
    parser.add_argument('title', type=str, help='标题', location='form')
    parser.add_argument('year', type=str, help='年份', location='form')
    parser.add_argument('rssid', type=str, help='订阅ID', location='form')

    @media.doc(parser=parser)
    def post(self):
        """
        Identify media information (delegates to WebAction cmd 'media_info').
        """
        return WebAction().api_action(cmd='media_info', data=self.parser.parse_args())
+
+
@media.route('/detail')
class MediaDetail(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID/DB:豆瓣ID', location='form')

    @media.doc(parser=parser)
    def post(self):
        """
        Query TMDB media detail (delegates to WebAction cmd 'media_detail').
        """
        return WebAction().api_action(cmd='media_detail', data=self.parser.parse_args())
+
+
@media.route('/similar')
class MediaSimilar(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')

    @media.doc(parser=parser)
    def post(self):
        """
        Query similar media by TMDB ID (delegates to WebAction cmd 'media_similar').
        """
        return WebAction().api_action(cmd='media_similar', data=self.parser.parse_args())
+
+
@media.route('/recommendations')
class MediaRecommendations(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')

    @media.doc(parser=parser)
    def post(self):
        """
        Query recommended media by TMDB ID (delegates to WebAction cmd 'media_recommendations').
        """
        return WebAction().api_action(cmd='media_recommendations', data=self.parser.parse_args())
+
+
@media.route('/person')
class MediaPersonList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('personid', type=str, help='演员ID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')

    @media.doc(parser=parser)
    def post(self):
        """
        Query works a TMDB person appeared in (delegates to WebAction cmd 'person_medias').
        """
        return WebAction().api_action(cmd='person_medias', data=self.parser.parse_args())
+
+
@media.route('/subtitle/download')
class MediaSubtitleDownload(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('path', type=str, help='文件路径(含文件名)', location='form', required=True)
    parser.add_argument('name', type=str, help='名称(用于识别)', location='form', required=True)

    @media.doc(parser=parser)
    def post(self):
        """
        Download subtitles for a single file (delegates to WebAction cmd 'download_subtitle').
        """
        return WebAction().api_action(cmd='download_subtitle', data=self.parser.parse_args())
+
+
@brushtask.route('/update')
class BrushTaskUpdate(ClientResource):
    parser = reqparse.RequestParser()
    # 'brushtask_id' optional: omitted -> create; present -> update.
    parser.add_argument('brushtask_id', type=str, help='刷流任务ID', location='form')
    parser.add_argument('brushtask_name', type=str, help='任务名称', location='form', required=True)
    parser.add_argument('brushtask_site', type=int, help='站点', location='form', required=True)
    parser.add_argument('brushtask_interval', type=int, help='刷新间隔(分钟)', location='form', required=True)
    parser.add_argument('brushtask_downloader', type=int, help='下载器', location='form', required=True)
    parser.add_argument('brushtask_totalsize', type=int, help='保种体积(GB)', location='form', required=True)
    parser.add_argument('brushtask_state', type=str, help='状态(Y/N)', location='form', required=True)
    parser.add_argument('brushtask_transfer', type=str, help='转移到媒体库(Y/N)', location='form')
    parser.add_argument('brushtask_sendmessage', type=str, help='消息推送(Y/N)', location='form')
    parser.add_argument('brushtask_forceupload', type=str, help='强制做种(Y/N)', location='form')
    parser.add_argument('brushtask_free', type=str, help='促销(FREE/2XFREE)', location='form')
    parser.add_argument('brushtask_hr', type=str, help='Hit&Run(HR)', location='form')
    parser.add_argument('brushtask_torrent_size', type=int, help='种子大小(GB)', location='form')
    parser.add_argument('brushtask_include', type=str, help='包含', location='form')
    parser.add_argument('brushtask_exclude', type=str, help='排除', location='form')
    parser.add_argument('brushtask_dlcount', type=int, help='同时下载任务数', location='form')
    parser.add_argument('brushtask_peercount', type=int, help='做种人数限制', location='form')
    parser.add_argument('brushtask_seedtime', type=float, help='做种时间(小时)', location='form')
    parser.add_argument('brushtask_seedratio', type=float, help='分享率', location='form')
    parser.add_argument('brushtask_seedsize', type=int, help='上传量(GB)', location='form')
    parser.add_argument('brushtask_dltime', type=float, help='下载耗时(小时)', location='form')
    parser.add_argument('brushtask_avg_upspeed', type=int, help='平均上传速度(KB/S)', location='form')
    parser.add_argument('brushtask_iatime', type=float, help='未活动时间(小时)', location='form')
    parser.add_argument('brushtask_pubdate', type=int, help='发布时间(小时)', location='form')
    parser.add_argument('brushtask_upspeed', type=int, help='上传限速(KB/S)', location='form')
    parser.add_argument('brushtask_downspeed', type=int, help='下载限速(KB/S)', location='form')

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Add or update a brush (ratio-boosting) task (delegates to WebAction cmd 'add_brushtask').
        """
        return WebAction().api_action(cmd='add_brushtask', data=self.parser.parse_args())
+
+
@brushtask.route('/delete')
class BrushTaskDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Delete a brush task (delegates to WebAction cmd 'del_brushtask').
        """
        return WebAction().api_action(cmd='del_brushtask', data=self.parser.parse_args())
+
+
@brushtask.route('/info')
class BrushTaskInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Brush task detail (delegates to WebAction cmd 'brushtask_detail').
        """
        return WebAction().api_action(cmd='brushtask_detail', data=self.parser.parse_args())
+
+
@brushtask.route('/list')
class BrushTaskList(ClientResource):
    @staticmethod
    def post():
        """
        List all brush tasks (queries BrushTask directly instead of WebAction).
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "tasks": BrushTask().get_brushtask_info()
            }
        }
+
+
@brushtask.route('/torrents')
class BrushTaskTorrents(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Query the torrent details of a brush task (delegates to WebAction cmd 'list_brushtask_torrents').
        """
        return WebAction().api_action(cmd='list_brushtask_torrents', data=self.parser.parse_args())
+
+
@brushtask.route('/downloader/update')
class BrushTaskDownloaderUpdate(ClientResource):
    parser = reqparse.RequestParser()
    # 'test'=1 only tests the connection; 'id' optional (omitted -> create).
    parser.add_argument('test', type=int, help='测试(0-否/1-是)', location='form', required=True)
    parser.add_argument('id', type=int, help='下载器ID', location='form')
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(qbittorrent/transmission)', location='form', required=True)
    parser.add_argument('host', type=str, help='地址', location='form', required=True)
    parser.add_argument('port', type=int, help='端口', location='form', required=True)
    parser.add_argument('username', type=str, help='用户名', location='form')
    parser.add_argument('password', type=str, help='密码', location='form')
    parser.add_argument('save_dir', type=str, help='保存目录', location='form')

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Add or update a brush-task downloader (delegates to WebAction cmd 'add_downloader').
        """
        return WebAction().api_action(cmd='add_downloader', data=self.parser.parse_args())
+
+
@brushtask.route('/downloader/delete')
class BrushTaskDownloaderDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='下载器ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Delete a brush-task downloader (delegates to WebAction cmd 'delete_downloader').
        """
        return WebAction().api_action(cmd='delete_downloader', data=self.parser.parse_args())
+
+
@brushtask.route('/downloader/info')
class BrushTaskDownloaderInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='下载器ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Brush-task downloader detail (delegates to WebAction cmd 'get_downloader').
        """
        return WebAction().api_action(cmd='get_downloader', data=self.parser.parse_args())
+
+
@brushtask.route('/downloader/list')
class BrushTaskDownloaderList(ClientResource):
    @staticmethod
    def post():
        """
        List all brush-task downloaders (queries BrushTask directly instead of WebAction).
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "downloaders": BrushTask().get_downloader_info()
            }
        }
+
+
@brushtask.route('/run')
class BrushTaskRun(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='刷流任务ID', location='form', required=True)

    @brushtask.doc(parser=parser)
    def post(self):
        """
        Run a brush task immediately (delegates to WebAction cmd 'run_brushtask').

        Docstring fix: the original said "刷流下载器详情" (downloader detail),
        an apparent copy-paste from BrushTaskDownloaderInfo; the dispatched
        command actually runs the task.
        """
        return WebAction().api_action(cmd='run_brushtask', data=self.parser.parse_args())
+
+
@filterrule.route('/list')
class FilterRuleList(ClientResource):
    @staticmethod
    def post():
        """
        List all filter rules (delegates to WebAction cmd 'get_filterrules').
        """
        return WebAction().api_action(cmd='get_filterrules')
+
+
@filterrule.route('/group/add')
class FilterRuleGroupAdd(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('default', type=str, help='默认(Y/N)', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Add a filter-rule group (delegates to WebAction cmd 'add_filtergroup').
        """
        return WebAction().api_action(cmd='add_filtergroup', data=self.parser.parse_args())
+
+
@filterrule.route('/group/restore')
class FilterRuleGroupRestore(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('groupids', type=list, help='规则组ID', location='form', required=True)
    parser.add_argument('init_rulegroups', type=list, help='规则组脚本', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Restore default filter-rule groups (delegates to WebAction cmd 'restore_filtergroup').
        """
        return WebAction().api_action(cmd='restore_filtergroup', data=self.parser.parse_args())
+
+
@filterrule.route('/group/default')
class FilterRuleGroupDefault(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='规则组ID', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Set the default filter-rule group (delegates to WebAction cmd 'set_default_filtergroup').
        """
        return WebAction().api_action(cmd='set_default_filtergroup', data=self.parser.parse_args())
+
+
@filterrule.route('/group/delete')
class FilterRuleGroupDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='规则组ID', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Delete a filter-rule group (delegates to WebAction cmd 'del_filtergroup').
        """
        return WebAction().api_action(cmd='del_filtergroup', data=self.parser.parse_args())
+
+
@filterrule.route('/rule/update')
class FilterRuleUpdate(ClientResource):
    parser = reqparse.RequestParser()
    # 'rule_id' optional: omitted -> create; present -> update.
    parser.add_argument('rule_id', type=int, help='规则ID', location='form')
    parser.add_argument('group_id', type=int, help='规则组ID', location='form', required=True)
    parser.add_argument('rule_name', type=str, help='规则名称', location='form', required=True)
    parser.add_argument('rule_pri', type=str, help='优先级', location='form', required=True)
    parser.add_argument('rule_include', type=str, help='包含', location='form')
    parser.add_argument('rule_exclude', type=str, help='排除', location='form')
    parser.add_argument('rule_sizelimit', type=str, help='大小限制', location='form')
    parser.add_argument('rule_free', type=str, help='促销(FREE/2XFREE)', location='form')

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Add or update a filter rule (delegates to WebAction cmd 'add_filterrule').
        """
        return WebAction().api_action(cmd='add_filterrule', data=self.parser.parse_args())
+
+
@filterrule.route('/rule/delete')
class FilterRuleDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='规则ID', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Delete a filter rule (delegates to WebAction cmd 'del_filterrule').
        """
        return WebAction().api_action(cmd='del_filterrule', data=self.parser.parse_args())
+
+
@filterrule.route('/rule/info')
class FilterRuleInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('ruleid', type=int, help='规则ID', location='form', required=True)
    parser.add_argument('groupid', type=int, help='规则组ID', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Filter-rule detail (delegates to WebAction cmd 'filterrule_detail').
        """
        return WebAction().api_action(cmd='filterrule_detail', data=self.parser.parse_args())
+
+
@filterrule.route('/rule/share')
class FilterRuleShare(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='规则组ID', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Share a filter-rule group (delegates to WebAction cmd 'share_filtergroup').
        """
        return WebAction().api_action(cmd='share_filtergroup', data=self.parser.parse_args())
+
+
@filterrule.route('/rule/import')
class FilterRuleImport(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('content', type=str, help='规则内容', location='form', required=True)

    @filterrule.doc(parser=parser)
    def post(self):
        """
        Import a filter-rule group (delegates to WebAction cmd 'import_filtergroup').
        """
        return WebAction().api_action(cmd='import_filtergroup', data=self.parser.parse_args())
+
+
@words.route('/group/add')
class WordsGroupAdd(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('tmdb_id', type=str, help='TMDBID', location='form', required=True)
    parser.add_argument('tmdb_type', type=str, help='类型(movie/tv)', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Add a custom-word group (delegates to WebAction cmd 'add_custom_word_group').
        """
        return WebAction().api_action(cmd='add_custom_word_group', data=self.parser.parse_args())
+
+
@words.route('/group/delete')
class WordsGroupDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('gid', type=int, help='识别词组ID', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Delete a custom-word group (delegates to WebAction cmd 'delete_custom_word_group').
        """
        return WebAction().api_action(cmd='delete_custom_word_group', data=self.parser.parse_args())
+
+
@words.route('/item/update')
class WordItemUpdate(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='识别词ID', location='form', required=True)
    parser.add_argument('gid', type=int, help='识别词组ID', location='form', required=True)
    parser.add_argument('group_type', type=str, help='媒体类型(1-电影/2-电视剧)', location='form', required=True)
    parser.add_argument('new_replaced', type=str, help='被替换词', location='form')
    parser.add_argument('new_replace', type=str, help='替换词', location='form')
    parser.add_argument('new_front', type=str, help='前定位词', location='form')
    parser.add_argument('new_back', type=str, help='后定位词', location='form')
    parser.add_argument('new_offset', type=str, help='偏移集数', location='form')
    parser.add_argument('new_help', type=str, help='备注', location='form')
    parser.add_argument('type', type=str, help='识别词类型(1-屏蔽/2-替换/3-替换+集偏移/4-集偏移)', location='form',
                        required=True)
    parser.add_argument('season', type=str, help='季', location='form')
    parser.add_argument('enabled', type=str, help='状态(1-启用/0-停用)', location='form', required=True)
    parser.add_argument('regex', type=str, help='正则表达式(1-使用/0-不使用)', location='form')

    @words.doc(parser=parser)
    def post(self):
        """
        Add or update a custom word (delegates to WebAction cmd 'add_or_edit_custom_word').
        """
        return WebAction().api_action(cmd='add_or_edit_custom_word', data=self.parser.parse_args())
+
+
@words.route('/item/info')
class WordItemInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('wid', type=int, help='识别词ID', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Custom-word detail (delegates to WebAction cmd 'get_custom_word').
        """
        return WebAction().api_action(cmd='get_custom_word', data=self.parser.parse_args())
+
+
@words.route('/item/delete')
class WordItemDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='识别词ID', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Delete a custom word (delegates to WebAction cmd 'delete_custom_word').
        """
        return WebAction().api_action(cmd='delete_custom_word', data=self.parser.parse_args())
+
+
@words.route('/item/status')
class WordItemStatus(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('ids_info', type=list, help='识别词IDS', location='form', required=True)
    parser.add_argument('flag', type=int, help='状态(1/0)', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Enable/disable custom words (delegates to WebAction cmd 'check_custom_words').
        """
        return WebAction().api_action(cmd='check_custom_words', data=self.parser.parse_args())
+
+
@words.route('/item/export')
class WordItemExport(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('note', type=str, help='备注', location='form', required=True)
    parser.add_argument('ids_info', type=str, help='识别词IDS(@_)', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Export custom words (delegates to WebAction cmd 'export_custom_words').
        """
        return WebAction().api_action(cmd='export_custom_words', data=self.parser.parse_args())
+
+
@words.route('/item/analyse')
class WordItemAnalyse(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('import_code', type=str, help='识别词代码', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Analyse a custom-word import code (delegates to WebAction cmd 'analyse_import_custom_words_code').
        """
        return WebAction().api_action(cmd='analyse_import_custom_words_code', data=self.parser.parse_args())
+
+
@words.route('/item/import')
class WordItemImport(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('import_code', type=str, help='识别词代码', location='form', required=True)
    parser.add_argument('ids_info', type=list, help='识别词IDS', location='form', required=True)

    @words.doc(parser=parser)
    def post(self):
        """
        Import custom words (delegates to WebAction cmd 'import_custom_words').
        """
        return WebAction().api_action(cmd='import_custom_words', data=self.parser.parse_args())
+
+
@words.route('/list')
class WordList(ClientResource):
    @staticmethod
    def post():
        """
        List all custom words (delegates to WebAction cmd 'get_customwords').
        """
        return WebAction().api_action(cmd='get_customwords')
+
+
@sync.route('/directory/update')
class SyncDirectoryUpdate(ClientResource):
    parser = reqparse.RequestParser()
    # 'sid' optional: omitted -> create; present -> update.
    parser.add_argument('sid', type=int, help='同步目录ID', location='form')
    parser.add_argument('from', type=str, help='源目录', location='form', required=True)
    parser.add_argument('to', type=str, help='目的目录', location='form')
    parser.add_argument('unknown', type=str, help='未知目录', location='form')
    parser.add_argument('syncmod', type=str, help='同步模式', location='form')
    parser.add_argument('rename', type=str, help='重命名', location='form')
    parser.add_argument('enabled', type=str, help='开启', location='form')

    @sync.doc(parser=parser)
    def post(self):
        """
        Add or update a sync directory (delegates to WebAction cmd 'add_or_edit_sync_path').
        """
        return WebAction().api_action(cmd='add_or_edit_sync_path', data=self.parser.parse_args())
+
+
@sync.route('/directory/info')
class SyncDirectoryInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)

    @sync.doc(parser=parser)
    def post(self):
        """
        Sync directory detail (delegates to WebAction cmd 'get_sync_path').
        """
        return WebAction().api_action(cmd='get_sync_path', data=self.parser.parse_args())
+
+
@sync.route('/directory/delete')
class SyncDirectoryDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)

    @sync.doc(parser=parser)
    def post(self):
        """
        Delete a sync directory (delegates to WebAction cmd 'delete_sync_path').
        """
        return WebAction().api_action(cmd='delete_sync_path', data=self.parser.parse_args())
+
+
@sync.route('/directory/status')
class SyncDirectoryStatus(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)
    parser.add_argument('flag', type=str, help='操作(rename/enable)', location='form', required=True)
    parser.add_argument('checked', type=int, help='状态(0-否/1-是)', location='form', required=True)

    @sync.doc(parser=parser)
    def post(self):
        """
        Set the rename/enable status of a sync directory (delegates to WebAction cmd 'check_sync_path').
        """
        return WebAction().api_action(cmd='check_sync_path', data=self.parser.parse_args())
+
+
@sync.route('/directory/list')
class SyncDirectoryList(ClientResource):
    @staticmethod
    def post():
        """
        List all sync directories (delegates to WebAction cmd 'get_directorysync').
        """
        return WebAction().api_action(cmd='get_directorysync')
+
+
@sync.route('/directory/run')
class SyncDirectoryRun(ApiResource):
    # Unlike the other sync endpoints this extends ApiResource (API-key auth)
    # and reads 'sid' from the query string via GET, not a form POST.
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='args', required=True)

    @sync.doc(parser=parser)
    def get(self):
        """
        Immediately run sync for a single directory (API-key auth; delegates to
        WebAction cmd 'run_directory_sync').
        """
        return WebAction().api_action(cmd='run_directory_sync', data=self.parser.parse_args())
+
+
@sync.route('/run')
class SyncRun(ApiResource):

    @staticmethod
    def get():
        """
        Immediately run sync for all directories (API-key auth; delegates to
        WebAction scheduler cmd 'sch' with item 'sync').
        """
        return WebAction().api_action(cmd='sch', data={"item": "sync"})
+
+
+# POST /message/client/update — create or update a notification channel.
+# 'cid' is not required while the other fields are, which suggests:
+# cid omitted => create, cid given => update — confirm in WebAction
+# 'update_message_client'. 'config' is a JSON string; NOTE(review):
+# 'switchs' is declared type=list on a form location — verify reqparse
+# actually delivers a list here rather than a single value.
+@message.route('/client/update')
+class MessageClientUpdate(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('cid', type=int, help='ID', location='form')
+    parser.add_argument('name', type=str, help='名称', location='form', required=True)
+    parser.add_argument('type', type=str, help='类型(wechat/telegram/serverchan/bark/pushplus/iyuu/slack/gotify)',
+                        location='form', required=True)
+    parser.add_argument('config', type=str, help='配置项(JSON)', location='form', required=True)
+    parser.add_argument('switchs', type=list, help='开关', location='form', required=True)
+    parser.add_argument('interactive', type=int, help='是否开启交互(0/1)', location='form', required=True)
+    parser.add_argument('enabled', type=int, help='是否启用(0/1)', location='form', required=True)
+
+    @message.doc(parser=parser)
+    def post(self):
+        """
+        新增/修改通知消息服务渠道
+        """
+        return WebAction().api_action(cmd='update_message_client', data=self.parser.parse_args())
+
+
+# POST /message/client/delete — delete the notification channel 'cid'
+# via WebAction 'delete_message_client'.
+@message.route('/client/delete')
+class MessageClientDelete(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
+
+    @message.doc(parser=parser)
+    def post(self):
+        """
+        删除通知消息服务渠道
+        """
+        return WebAction().api_action(cmd='delete_message_client', data=self.parser.parse_args())
+
+
+# POST /message/client/status — toggle a switch on a notification channel.
+# 'flag' picks which switch (interactive/enable per the help text);
+# handled by WebAction 'check_message_client'.
+@message.route('/client/status')
+class MessageClientStatus(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('flag', type=str, help='操作类型(interactive/enable)', location='form', required=True)
+    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
+
+    @message.doc(parser=parser)
+    def post(self):
+        """
+        设置通知消息服务渠道状态
+        """
+        return WebAction().api_action(cmd='check_message_client', data=self.parser.parse_args())
+
+
+# POST /message/client/info — fetch the stored configuration of the
+# notification channel 'cid' via WebAction 'get_message_client'.
+@message.route('/client/info')
+class MessageClientInfo(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
+
+    @message.doc(parser=parser)
+    def post(self):
+        """
+        查询通知消息服务渠道设置
+        """
+        return WebAction().api_action(cmd='get_message_client', data=self.parser.parse_args())
+
+
+# POST /message/client/test — test a channel configuration without saving it.
+# 'type' is the channel kind, 'config' is the JSON config string; delegates
+# to WebAction 'test_message_client'.
+@message.route('/client/test')
+class MessageClientTest(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('type', type=str, help='类型(wechat/telegram/serverchan/bark/pushplus/iyuu/slack/gotify)',
+                        location='form', required=True)
+    parser.add_argument('config', type=str, help='配置(JSON)', location='form', required=True)
+
+    @message.doc(parser=parser)
+    def post(self):
+        """
+        测试通知消息服务配置正确性
+        """
+        return WebAction().api_action(cmd='test_message_client', data=self.parser.parse_args())
+
+
+# POST /torrentremover/task/info — fetch one auto-removal task by 'tid'
+# via WebAction 'get_torrent_remove_task'.
+@torrentremover.route('/task/info')
+class TorrentRemoverTaskInfo(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('tid', type=int, help='任务ID', location='form', required=True)
+
+    @torrentremover.doc(parser=parser)
+    def post(self):
+        """
+        查询自动删种任务详情
+        """
+        return WebAction().api_action(cmd='get_torrent_remove_task', data=self.parser.parse_args())
+
+
+# POST /torrentremover/task/list — list all auto-removal tasks. Uses the same
+# WebAction command as TorrentRemoverTaskInfo but with no data, which
+# presumably means "return all tasks" — confirm in WebAction
+# 'get_torrent_remove_task'.
+@torrentremover.route('/task/list')
+class TorrentRemoverTaskList(ClientResource):
+    @staticmethod
+    @torrentremover.doc()
+    def post():
+        """
+        查询所有自动删种任务
+        """
+        return WebAction().api_action(cmd='get_torrent_remove_task')
+
+
+# POST /torrentremover/task/delete — delete the auto-removal task 'tid'
+# via WebAction 'delete_torrent_remove_task'.
+@torrentremover.route('/task/delete')
+class TorrentRemoverTaskDelete(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('tid', type=int, help='任务ID', location='form', required=True)
+
+    @torrentremover.doc(parser=parser)
+    def post(self):
+        """
+        删除自动删种任务
+        """
+        return WebAction().api_action(cmd='delete_torrent_remove_task', data=self.parser.parse_args())
+
+
+# POST /torrentremover/task/update — create or update an auto-removal task.
+# 'tid' is the only non-required core field, suggesting: tid omitted =>
+# create, tid given => update — confirm in WebAction
+# 'update_torrent_remove_task'. Required fields define the task itself
+# (name, action, interval, enabled, samedata, onlynastool); the optional
+# fields below are removal-condition filters (ratio, seeding time, speed,
+# size, path/tracker keywords, downloader-specific state filters).
+@torrentremover.route('/task/update')
+class TorrentRemoverTaskUpdate(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('tid', type=int, help='任务ID', location='form')
+    parser.add_argument('name', type=str, help='名称', location='form', required=True)
+    parser.add_argument('action', type=int, help='动作(1-暂停/2-删除种子/3-删除种子及文件)', location='form',
+                        required=True)
+    parser.add_argument('interval', type=int, help='运行间隔(分钟)', location='form', required=True)
+    parser.add_argument('enabled', type=int, help='状态(0-停用/1-启用)', location='form', required=True)
+    parser.add_argument('samedata', type=int, help='处理辅种(0-否/1-是)', location='form', required=True)
+    parser.add_argument('onlynastool', type=int, help='只管理NASTool添加的下载(0-否/1-是)', location='form',
+                        required=True)
+    parser.add_argument('ratio', type=float, help='分享率', location='form')
+    parser.add_argument('seeding_time', type=int, help='做种时间(小时)', location='form')
+    parser.add_argument('upload_avs', type=int, help='平均上传速度(KB/S)', location='form')
+    parser.add_argument('size', type=str, help='种子大小(GB)', location='form')
+    parser.add_argument('savepath_key', type=str, help='保存路径关键词', location='form')
+    parser.add_argument('tracker_key', type=str, help='tracker关键词', location='form')
+    parser.add_argument('downloader', type=str, help='下载器(Qb/Tr)', location='form')
+    parser.add_argument('qb_state', type=str, help='Qb种子状态(多个用;分隔)', location='form')
+    parser.add_argument('qb_category', type=str, help='Qb分类(多个用;分隔)', location='form')
+    parser.add_argument('tr_state', type=str, help='Tr种子状态(多个用;分隔)', location='form')
+    parser.add_argument('tr_error_key', type=str, help='Tr错误信息关键词', location='form')
+
+    @torrentremover.doc(parser=parser)
+    def post(self):
+        """
+        新增/修改自动删种任务
+        """
+        return WebAction().api_action(cmd='update_torrent_remove_task', data=self.parser.parse_args())
+
+
+# POST /douban/history/list — list Douban sync history records via
+# WebAction 'get_douban_history'. No parameters.
+@douban.route('/history/list')
+class DoubanHistoryList(ClientResource):
+
+    @staticmethod
+    def post():
+        """
+        查询豆瓣同步历史记录
+        """
+        return WebAction().api_action(cmd='get_douban_history')
+
+
+# POST /douban/history/delete — delete one Douban sync history record by 'id'
+# via WebAction 'delete_douban_history'.
+@douban.route('/history/delete')
+class DoubanHistoryDelete(ClientResource):
+    parser = reqparse.RequestParser()
+    parser.add_argument('id', type=int, help='ID', location='form', required=True)
+
+    @douban.doc(parser=parser)
+    def post(self):
+        """
+        删除豆瓣同步历史记录
+        """
+        return WebAction().api_action(cmd='delete_douban_history', data=self.parser.parse_args())
+
+
+# POST /douban/run — trigger an immediate Douban data sync through the
+# scheduler ('sch' command with item "douban").
+@douban.route('/run')
+class DoubanRun(ClientResource):
+    @staticmethod
+    def post():
+        """
+        立即同步豆瓣数据
+        """
+        # Kick the "douban" scheduler item to run the sync now.
+        return WebAction().api_action(cmd='sch', data={"item": "douban"})
diff --git a/web/backend/WXBizMsgCrypt3.py b/web/backend/WXBizMsgCrypt3.py
new file mode 100644
index 0000000..ce10d0c
--- /dev/null
+++ b/web/backend/WXBizMsgCrypt3.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python
+# -*- encoding:utf-8 -*-
+
+""" 对企业微信发送给企业后台的消息加解密示例代码.
+@copyright: Copyright (c) 1998-2014 Tencent Inc.
+
+"""
+import base64
+import hashlib
+# ------------------------------------------------------------------------
+import logging
+import random
+import socket
+import struct
+import time
+import xml.etree.cElementTree as ET
+
+from Crypto.Cipher import AES
+
+# Error codes returned by the WeChat Work (企业微信) message crypto helpers.
+# 0 means success; the negative values identify which step failed
+# (signature validation, XML parsing, AES key check, encrypt/decrypt,
+# base64 encode/decode, reply-XML generation).
+#########################################################################
+WXBizMsgCrypt_OK = 0
+WXBizMsgCrypt_ValidateSignature_Error = -40001
+WXBizMsgCrypt_ParseXml_Error = -40002
+WXBizMsgCrypt_ComputeSignature_Error = -40003
+WXBizMsgCrypt_IllegalAesKey = -40004
+WXBizMsgCrypt_ValidateCorpid_Error = -40005
+WXBizMsgCrypt_EncryptAES_Error = -40006
+WXBizMsgCrypt_DecryptAES_Error = -40007
+WXBizMsgCrypt_IllegalBuffer = -40008
+WXBizMsgCrypt_EncodeBase64_Error = -40009
+WXBizMsgCrypt_DecodeBase64_Error = -40010
+WXBizMsgCrypt_GenReturnXml_Error = -40011
+
+"""
+关于Crypto.Cipher模块,ImportError: No module named 'Crypto'解决方案
+请到官方网站 https://www.dlitz.net/software/pycrypto/ 下载pycrypto。
+下载后,按照README中的“Installation”小节的提示进行pycrypto安装。
+"""
+
+
+class FormatException(Exception):
+    """Raised by this module's helpers when message data is malformed."""
+    pass
+
+
+def throw_exception(message, exception_class=FormatException):
+    """Raise ``exception_class`` (default FormatException) with ``message``.
+
+    Small indirection helper; always raises, never returns.
+    """
+    raise exception_class(message)
+
+
+class SHA1:
+    """计算企业微信的消息签名接口
+
+    Computes the WeChat Work callback message signature.
+    """
+
+    @staticmethod
+    def getSHA1(token, timestamp, nonce, encrypt):
+        """用SHA1算法生成安全签名 (generate the security signature with SHA1).
+
+        The four inputs are sorted lexicographically, concatenated, and
+        SHA1-hashed.
+        @param token: 票据 (callback token)
+        @param timestamp: 时间戳 (timestamp string)
+        @param encrypt: 密文 (ciphertext)
+        @param nonce: 随机字符串 (random nonce string)
+        @return: tuple (WXBizMsgCrypt_OK, hex digest) on success, or
+            (WXBizMsgCrypt_ComputeSignature_Error, None) if anything raises
+            (e.g. a non-string argument breaking sort/join).
+        """
+        try:
+            # Sort, join and hash — per the WeChat Work signature algorithm.
+            sortlist = [token, timestamp, nonce, encrypt]
+            sortlist.sort()
+            sha = hashlib.sha1()
+            sha.update("".join(sortlist).encode())
+            return WXBizMsgCrypt_OK, sha.hexdigest()
+        except Exception as e:
+            # Broad catch is deliberate: callers branch on the error code
+            # instead of handling exceptions.
+            logger = logging.getLogger()
+            logger.error(e)
+            return WXBizMsgCrypt_ComputeSignature_Error, None
+
+
+class XMLParse:
+ """提供提取消息格式中的密文及生成回复消息格式的接口"""
+
+ # xml消息模板
+ AES_TEXT_RESPONSE_TEMPLATE = """
微信回调配置步聚:
1、在微信企业应用接收消息设置页面生成Token和EncodingAESKey并填入设置->消息通知->微信对应项,打开微信交互开关。
2、保存并重启本工具,保存并重启本工具,保存并重启本工具。
3、在微信企业应用接收消息设置页面输入此地址:http(s)://IP:PORT/wechat(IP、PORT替换为本工具的外网访问地址及端口,需要有公网IP并做好端口转发,最好有域名)。"
+ sVerifyEchoStr = request.args.get("echostr")
+ log.debug("收到微信验证请求: echostr= %s" % sVerifyEchoStr)
+ ret, sEchoStr = wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce, sVerifyEchoStr)
+ if ret != 0:
+ log.error("微信请求验证失败 VerifyURL ret: %s" % str(ret))
+ # 验证URL成功,将sEchoStr返回给企业号
+ return sEchoStr
+ else:
+ try:
+ sReqData = request.data
+ log.debug("收到微信消息:%s" % str(sReqData))
+ ret, sMsg = wxcpt.DecryptMsg(sReqData, sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce)
+ if ret != 0:
+ log.error("解密微信消息失败 DecryptMsg ret = %s" % str(ret))
+ return make_response("ok", 200)
+ # 解析XML报文
+ """
+ 1、消息格式:
+
+ ${this.person_name}
+
+
+
+ ${this.media_info.overview ?? this._render_placeholder("200px", "", "col-12", 7)}
+
+
+ ${Object.keys(item)[0]}
+
+ ${this.page_title}
+ =1&&t.renderer.isScrollableBy(e.wheelX*e.speed,0)&&(f=!0),a<=1&&t.renderer.isScrollableBy(0,e.wheelY*e.speed)&&(f=!0);if(f)n.allowed=r;else if(r-n.allowedt.session.documentToScreenRow(l.row,l.column))return c()}if(f==s)return;f=s.text.join("
"),i.setHtml(f);var p=s.className;p&&i.setClassName(p.trim()),i.show(),t._signal("showGutterTooltip",i),t.on("mousewheel",c);if(e.$tooltipFollowsMouse)h(u);else{var d=u.domEvent.target,v=d.getBoundingClientRect(),m=i.getElement().style;m.left=v.right+"px",m.top=v.bottom+"px"}}function c(){o&&(o=clearTimeout(o)),f&&(i.hide(),f=null,t._signal("hideGutterTooltip",i),t.off("mousewheel",c))}function h(e){i.setPosition(e.x,e.y)}var t=e.editor,n=t.renderer.$gutterLayer,i=new a(t.container);e.editor.setDefaultHandler("guttermousedown",function(r){if(!t.isFocused()||r.getButton()!=0)return;var i=n.getRegion(r);if(i=="foldWidgets")return;var s=r.getDocumentPosition().row,o=t.session.selection;if(r.getShiftKey())o.selectTo(s,0);else{if(r.domEvent.detail==2)return t.selectAll(),r.preventDefault();e.$clickSelection=t.selection.getLineRange(s)}return e.setState("selectByLines"),e.captureMouse(r),r.preventDefault()});var o,u,f;e.editor.setDefaultHandler("guttermousemove",function(t){var n=t.domEvent.target||t.domEvent.srcElement;if(r.hasCssClass(n,"ace_fold-widget"))return c();f&&e.$tooltipFollowsMouse&&h(t),u=t;if(o)return;o=setTimeout(function(){o=null,u&&!e.isMousePressed?l():c()},50)}),s.addListener(t.renderer.$gutter,"mouseout",function(e){u=null;if(!f||o)return;o=setTimeout(function(){o=null,c()},50)},t),t.on("changeSession",c)}function a(e){o.call(this,e)}var r=e("../lib/dom"),i=e("../lib/oop"),s=e("../lib/event"),o=e("../tooltip").Tooltip;i.inherits(a,o),function(){this.setPosition=function(e,t){var n=window.innerWidth||document.documentElement.clientWidth,r=window.innerHeight||document.documentElement.clientHeight,i=this.getWidth(),s=this.getHeight();e+=15,t+=15,e+i>n&&(e-=e+i-n),t+s>r&&(t-=20+s),o.prototype.setPosition.call(this,e,t)}}.call(a.prototype),t.GutterHandler=u}),ace.define("ace/mouse/mouse_event",["require","exports","module","ace/lib/event","ace/lib/useragent"],function(e,t,n){"use strict";var 
r=e("../lib/event"),i=e("../lib/useragent"),s=t.MouseEvent=function(e,t){this.domEvent=e,this.editor=t,this.x=this.clientX=e.clientX,this.y=this.clientY=e.clientY,this.$pos=null,this.$inSelection=null,this.propagationStopped=!1,this.defaultPrevented=!1};(function(){this.stopPropagation=function(){r.stopPropagation(this.domEvent),this.propagationStopped=!0},this.preventDefault=function(){r.preventDefault(this.domEvent),this.defaultPrevented=!0},this.stop=function(){this.stopPropagation(),this.preventDefault()},this.getDocumentPosition=function(){return this.$pos?this.$pos:(this.$pos=this.editor.renderer.screenToTextCoordinates(this.clientX,this.clientY),this.$pos)},this.inSelection=function(){if(this.$inSelection!==null)return this.$inSelection;var e=this.editor,t=e.getSelectionRange();if(t.isEmpty())this.$inSelection=!1;else{var n=this.getDocumentPosition();this.$inSelection=t.contains(n.row,n.column)}return this.$inSelection},this.getButton=function(){return r.getButton(this.domEvent)},this.getShiftKey=function(){return this.domEvent.shiftKey},this.getAccelKey=i.isMac?function(){return this.domEvent.metaKey}:function(){return this.domEvent.ctrlKey}}).call(s.prototype)}),ace.define("ace/mouse/dragdrop_handler",["require","exports","module","ace/lib/dom","ace/lib/event","ace/lib/useragent"],function(e,t,n){"use strict";function f(e){function T(e,n){var r=Date.now(),i=!n||e.row!=n.row,s=!n||e.column!=n.column;if(!S||i||s)t.moveCursorToPosition(e),S=r,x={x:p,y:d};else{var o=l(x.x,x.y,p,d);o>a?S=null:r-S>=u&&(t.renderer.scrollCursorIntoView(),S=null)}}function N(e,n){var r=Date.now(),i=t.renderer.layerConfig.lineHeight,s=t.renderer.layerConfig.characterWidth,u=t.renderer.scroller.getBoundingClientRect(),a={x:{left:p-u.left,right:u.right-p},y:{top:d-u.top,bottom:u.bottom-d}},f=Math.min(a.x.left,a.x.right),l=Math.min(a.y.top,a.y.bottom),c={row:e.row,column:e.column};f/s<=2&&(c.column+=a.x.left0&&/^\s*$/.test(r));t=r.length,/\s+$/.test(r)||(r="")}var 
s=i.stringReverse(r),o=this.$shortWordEndIndex(s);return this.moveCursorTo(e,t-o)},this.moveCursorWordRight=function(){this.session.$selectLongWords?this.moveCursorLongWordRight():this.moveCursorShortWordRight()},this.moveCursorWordLeft=function(){this.session.$selectLongWords?this.moveCursorLongWordLeft():this.moveCursorShortWordLeft()},this.moveCursorBy=function(e,t){var n=this.session.documentToScreenPosition(this.lead.row,this.lead.column),r;t===0&&(e!==0&&(this.session.$bidiHandler.isBidiRow(n.row,this.lead.row)?(r=this.session.$bidiHandler.getPosLeft(n.column),n.column=Math.round(r/this.session.$bidiHandler.charWidths[0])):r=n.column*this.session.$bidiHandler.charWidths[0]),this.$desiredColumn?n.column=this.$desiredColumn:this.$desiredColumn=n.column);if(e!=0&&this.session.lineWidgets&&this.session.lineWidgets[this.lead.row]){var i=this.session.lineWidgets[this.lead.row];e<0?e-=i.rowsAbove||0:e>0&&(e+=i.rowCount-(i.rowsAbove||0))}var s=this.session.screenToDocumentPosition(n.row+e,n.column,r);e!==0&&t===0&&s.row===this.lead.row&&s.column===this.lead.column,this.moveCursorTo(s.row,s.column+t,t===0)},this.moveCursorToPosition=function(e){this.moveCursorTo(e.row,e.column)},this.moveCursorTo=function(e,t,n){var r=this.session.getFoldAt(e,t,1);r&&(e=r.start.row,t=r.start.column),this.$keepDesiredColumnOnChange=!0;var i=this.session.getLine(e);/[\uDC00-\uDFFF]/.test(i.charAt(t))&&i.charAt(t-1)&&(this.lead.row==e&&this.lead.column==t+1?t-=1:t+=1),this.lead.setPosition(e,t),this.$keepDesiredColumnOnChange=!1,n||(this.$desiredColumn=null)},this.moveCursorToScreen=function(e,t,n){var r=this.session.screenToDocumentPosition(e,t);this.moveCursorTo(r.row,r.column,n)},this.detach=function(){this.lead.detach(),this.anchor.detach()},this.fromOrientedRange=function(e){this.setSelectionRange(e,e.cursor==e.start),this.$desiredColumn=e.desiredColumn||this.$desiredColumn},this.toOrientedRange=function(e){var t=this.getRange();return 
e?(e.start.column=t.start.column,e.start.row=t.start.row,e.end.column=t.end.column,e.end.row=t.end.row):e=t,e.cursor=this.isBackwards()?e.start:e.end,e.desiredColumn=this.$desiredColumn,e},this.getRangeOfMovements=function(e){var t=this.getCursor();try{e(this);var n=this.getCursor();return o.fromPoints(t,n)}catch(r){return o.fromPoints(t,t)}finally{this.moveCursorToPosition(t)}},this.toJSON=function(){if(this.rangeCount)var e=this.ranges.map(function(e){var t=e.clone();return t.isBackwards=e.cursor==e.start,t});else{var e=this.getRange();e.isBackwards=this.isBackwards()}return e},this.fromJSON=function(e){if(e.start==undefined){if(this.rangeList&&e.length>1){this.toSingleRange(e[0]);for(var t=e.length;t--;){var n=o.fromPoints(e[t].start,e[t].end);e[t].isBackwards&&(n.cursor=n.start),this.addRange(n,!0)}return}e=e[0]}this.rangeList&&this.toSingleRange(e),this.setSelectionRange(e,e.isBackwards)},this.isEqual=function(e){if((e.length||this.rangeCount)&&e.length!=this.rangeCount)return!1;if(!e.length||!this.ranges)return this.getRange().isEqual(e);for(var t=this.ranges.length;t--;)if(!this.ranges[t].isEqual(e[t]))return!1;return!0}}).call(u.prototype),t.Selection=u}),ace.define("ace/tokenizer",["require","exports","module","ace/config"],function(e,t,n){"use strict";var r=e("./config"),i=2e3,s=function(e){this.states=e,this.regExps={},this.matchMappings={};for(var t in this.states){var n=this.states[t],r=[],i=0,s=this.matchMappings[t]={defaultToken:"text"},o="g",u=[];for(var a=0;al){var g=e.substring(l,m-v.length);h.type==p?h.value+=g:(h.type&&f.push(h),h={type:p,value:g})}for(var y=0;y=t)break}return r=n[s],r?(r.index=s,r.start=i-r.value.length,r):null},this.setUndoManager=function(e){this.$undoManager=e,this.$informUndoManager&&this.$informUndoManager.cancel();if(e){var t=this;e.addSession(this),this.$syncInformUndoManager=function(){t.$informUndoManager.cancel(),t.mergeUndoDeltas=!1},this.$informUndoManager=i.delayedCall(this.$syncInformUndoManager)}else 
this.$syncInformUndoManager=function(){}},this.markUndoGroup=function(){this.$syncInformUndoManager&&this.$syncInformUndoManager()},this.$defaultUndoManager={undo:function(){},redo:function(){},hasUndo:function(){},hasRedo:function(){},reset:function(){},add:function(){},addSelection:function(){},startNewGroup:function(){},addSession:function(){}},this.getUndoManager=function(){return this.$undoManager||this.$defaultUndoManager},this.getTabString=function(){return this.getUseSoftTabs()?i.stringRepeat(" ",this.getTabSize()):" "},this.setUseSoftTabs=function(e){this.setOption("useSoftTabs",e)},this.getUseSoftTabs=function(){return this.$useSoftTabs&&!this.$mode.$indentWithTabs},this.setTabSize=function(e){this.setOption("tabSize",e)},this.getTabSize=function(){return this.$tabSize},this.isTabStop=function(e){return this.$useSoftTabs&&e.column%this.$tabSize===0},this.setNavigateWithinSoftTabs=function(e){this.setOption("navigateWithinSoftTabs",e)},this.getNavigateWithinSoftTabs=function(){return this.$navigateWithinSoftTabs},this.$overwrite=!1,this.setOverwrite=function(e){this.setOption("overwrite",e)},this.getOverwrite=function(){return this.$overwrite},this.toggleOverwrite=function(){this.setOverwrite(!this.$overwrite)},this.addGutterDecoration=function(e,t){this.$decorations[e]||(this.$decorations[e]=""),this.$decorations[e]+=" "+t,this._signal("changeBreakpoint",{})},this.removeGutterDecoration=function(e,t){this.$decorations[e]=(this.$decorations[e]||"").replace(" "+t,""),this._signal("changeBreakpoint",{})},this.getBreakpoints=function(){return this.$breakpoints},this.setBreakpoints=function(e){this.$breakpoints=[];for(var 
t=0;t=1-this.scrollMargin.top)return!0;if(t>0&&this.session.getScrollTop()+this.$size.scrollerHeight-this.layerConfig.maxHeight<-1+this.scrollMargin.bottom)return!0;if(e<0&&this.session.getScrollLeft()>=1-this.scrollMargin.left)return!0;if(e>0&&this.session.getScrollLeft()+this.$size.scrollerWidth-this.layerConfig.width<-1+this.scrollMargin.right)return!0},this.pixelToScreenCoordinates=function(e,t){var n;if(this.$hasCssTransforms){n={top:0,left:0};var r=this.$fontMetrics.transformCoordinates([e,t]);e=r[1]-this.gutterWidth-this.margin.left,t=r[0]}else n=this.scroller.getBoundingClientRect();var i=e+this.scrollLeft-n.left-this.$padding,s=i/this.characterWidth,o=Math.floor((t+this.scrollTop-n.top)/this.lineHeight),u=this.$blockCursor?Math.floor(s):Math.round(s);return{row:o,column:u,side:s-u>0?1:-1,offsetX:i}},this.screenToTextCoordinates=function(e,t){var n;if(this.$hasCssTransforms){n={top:0,left:0};var r=this.$fontMetrics.transformCoordinates([e,t]);e=r[1]-this.gutterWidth-this.margin.left,t=r[0]}else n=this.scroller.getBoundingClientRect();var i=e+this.scrollLeft-n.left-this.$padding,s=i/this.characterWidth,o=this.$blockCursor?Math.floor(s):Math.round(s),u=Math.floor((t+this.scrollTop-n.top)/this.lineHeight);return this.session.screenToDocumentPosition(u,Math.max(o,0),i)},this.textToScreenCoordinates=function(e,t){var 
n=this.scroller.getBoundingClientRect(),r=this.session.documentToScreenPosition(e,t),i=this.$padding+(this.session.$bidiHandler.isBidiRow(r.row,e)?this.session.$bidiHandler.getPosLeft(r.column):Math.round(r.column*this.characterWidth)),s=r.row*this.lineHeight;return{pageX:n.left+i-this.scrollLeft,pageY:n.top+s-this.scrollTop}},this.visualizeFocus=function(){i.addCssClass(this.container,"ace_focus")},this.visualizeBlur=function(){i.removeCssClass(this.container,"ace_focus")},this.showComposition=function(e){this.$composition=e,e.cssText||(e.cssText=this.textarea.style.cssText),e.useTextareaForIME==undefined&&(e.useTextareaForIME=this.$useTextareaForIME),this.$useTextareaForIME?(i.addCssClass(this.textarea,"ace_composition"),this.textarea.style.cssText="",this.$moveTextAreaToCursor(),this.$cursorLayer.element.style.display="none"):e.markerId=this.session.addMarker(e.markerRange,"ace_composition_marker","text")},this.setCompositionText=function(e){var t=this.session.selection.cursor;this.addToken(e,"composition_placeholder",t.row,t.column),this.$moveTextAreaToCursor()},this.hideComposition=function(){if(!this.$composition)return;this.$composition.markerId&&this.session.removeMarker(this.$composition.markerId),i.removeCssClass(this.textarea,"ace_composition"),this.textarea.style.cssText=this.$composition.cssText;var e=this.session.selection.cursor;this.removeExtraToken(e.row,e.column),this.$composition=null,this.$cursorLayer.element.style.display=""},this.setGhostText=function(e,t){var n=this.session.selection.cursor,r=t||{row:n.row,column:n.column};this.removeGhostText();var i=e.split("\n");this.addToken(i[0],"ghost_text",r.row,r.column),this.$ghostText={text:e,position:{row:r.row,column:r.column}},i.length>1&&(this.$ghostTextWidget={text:i.slice(1).join("\n"),row:r.row,column:r.column,className:"ace_ghost_text"},this.session.widgetManager.addLineWidget(this.$ghostTextWidget))},this.removeGhostText=function(){if(!this.$ghostText)return;var 
e=this.$ghostText.position;this.removeExtraToken(e.row,e.column),this.$ghostTextWidget&&(this.session.widgetManager.removeLineWidget(this.$ghostTextWidget),this.$ghostTextWidget=null),this.$ghostText=null},this.addToken=function(e,t,n,r){var i=this.session;i.bgTokenizer.lines[n]=null;var s={type:t,value:e},o=i.getTokens(n);if(r==null)o.push(s);else{var u=0;for(var a=0;a
"),p.appendChild(i.createElement("div"));var m=function(e,t,n){if(t===0&&(n==="esc"||n==="return"))return h.destroy(),{command:"null"}};h.destroy=function(){if(e.$mouseHandler.isMousePressed)return;e.keyBinding.removeKeyboardHandler(m),n.widgetManager.removeLineWidget(h),e.off("changeSelection",h.destroy),e.off("changeSession",h.destroy),e.off("mouseup",h.destroy),e.off("change",h.destroy)},e.keyBinding.addKeyboardHandler(m),e.on("changeSelection",h.destroy),e.on("changeSession",h.destroy),e.on("mouseup",h.destroy),e.on("change",h.destroy),e.session.widgetManager.addLineWidget(h),h.el.onmousedown=e.focus.bind(e),e.renderer.scrollCursorIntoView(null,.5,{bottom:h.el.offsetHeight})},i.importCssString("\n .error_widget_wrapper {\n background: inherit;\n color: inherit;\n border:none\n }\n .error_widget {\n border-top: solid 2px;\n border-bottom: solid 2px;\n margin: 5px 0;\n padding: 10px 40px;\n white-space: pre-wrap;\n }\n .error_widget.ace_error, .error_widget_arrow.ace_error{\n border-color: #ff5a5a\n }\n .error_widget.ace_warning, .error_widget_arrow.ace_warning{\n border-color: #F1D817\n }\n .error_widget.ace_info, .error_widget_arrow.ace_info{\n border-color: #5a5a5a\n }\n .error_widget.ace_ok, .error_widget_arrow.ace_ok{\n border-color: #5aaa5a\n }\n .error_widget_arrow {\n position: absolute;\n border: solid 5px;\n border-top-color: transparent!important;\n border-right-color: transparent!important;\n border-left-color: transparent!important;\n top: -5px;\n }\n","error_marker.css",!1)}),ace.define("ace/ace",["require","exports","module","ace/lib/dom","ace/lib/event","ace/range","ace/editor","ace/edit_session","ace/undomanager","ace/virtual_renderer","ace/worker/worker_client","ace/keyboard/hash_handler","ace/placeholder","ace/multi_select","ace/mode/folding/fold_mode","ace/theme/textmate","ace/ext/error_marker","ace/config","ace/loader_build"],function(e,t,n){"use strict";e("./loader_build")(t);var 
r=e("./lib/dom"),i=e("./lib/event"),s=e("./range").Range,o=e("./editor").Editor,u=e("./edit_session").EditSession,a=e("./undomanager").UndoManager,f=e("./virtual_renderer").VirtualRenderer;e("./worker/worker_client"),e("./keyboard/hash_handler"),e("./placeholder"),e("./multi_select"),e("./mode/folding/fold_mode"),e("./theme/textmate"),e("./ext/error_marker"),t.config=e("./config"),t.edit=function(e,n){if(typeof e=="string"){var s=e;e=document.getElementById(s);if(!e)throw new Error("ace.edit can't find div #"+s)}if(e&&e.env&&e.env.editor instanceof o)return e.env.editor;var u="";if(e&&/input|textarea/i.test(e.tagName)){var a=e;u=a.value,e=r.createElement("pre"),a.parentNode.replaceChild(e,a)}else e&&(u=e.textContent,e.innerHTML="");var l=t.createEditSession(u),c=new o(new f(e),l,n),h={document:l,editor:c,onResize:c.resize.bind(c,null)};return a&&(h.textarea=a),i.addListener(window,"resize",h.onResize),c.on("destroy",function(){i.removeListener(window,"resize",h.onResize),h.editor.container.env=null}),c.container.env=c.env=h,c},t.createEditSession=function(e,t){var n=new u(e,t);return n.setUndoManager(new a),n},t.Range=s,t.Editor=o,t.EditSession=u,t.UndoManager=a,t.VirtualRenderer=f,t.version=t.config.version}); (function() {
+ ace.require(["ace/ace"], function(a) {
+ if (a) {
+ a.config.init(true);
+ a.define = ace.define;
+ }
+ if (!window.ace)
+ window.ace = a;
+ for (var key in a) if (a.hasOwnProperty(key))
+ window.ace[key] = a[key];
+ window.ace["default"] = window.ace;
+ if (typeof module == "object" && typeof exports == "object" && module) {
+ module.exports = window.ace;
+ }
+ });
+ })();
+
\ No newline at end of file
diff --git a/web/static/js/demo-theme.min.js b/web/static/js/demo-theme.min.js
new file mode 100644
index 0000000..89ed4fb
--- /dev/null
+++ b/web/static/js/demo-theme.min.js
@@ -0,0 +1,9 @@
+/*!
+* Tabler v1.0.0-beta16 (https://tabler.io)
+* @version 1.0.0-beta16
+* @link https://tabler.io
+* Copyright 2018-2022 The Tabler Authors
+* Copyright 2018-2022 codecalm.net Paweł Kuna
+* Licensed under MIT (https://github.com/tabler/tabler/blob/master/LICENSE)
+*/
+!function(e){"function"==typeof define&&define.amd?define(e):e()}((function(){"use strict";var e,t="tablerTheme",n=new Proxy(new URLSearchParams(window.location.search),{get:function(e,t){return e.get(t)}});if(n.theme)localStorage.setItem(t,n.theme),e=n.theme;else{var o=localStorage.getItem(t);e=o||"light"}document.body.classList.remove("theme-dark","theme-light"),document.body.classList.add("theme-".concat(e))}));
\ No newline at end of file
diff --git a/web/static/js/demo.min.js b/web/static/js/demo.min.js
new file mode 100644
index 0000000..3630fea
--- /dev/null
+++ b/web/static/js/demo.min.js
@@ -0,0 +1,9 @@
+/*!
+* Tabler v1.0.0-beta16 (https://tabler.io)
+* @version 1.0.0-beta16
+* @link https://tabler.io
+* Copyright 2018-2022 The Tabler Authors
+* Copyright 2018-2022 codecalm.net Paweł Kuna
+* Licensed under MIT (https://github.com/tabler/tabler/blob/master/LICENSE)
+*/
+!function(t){"function"==typeof define&&define.amd?define(t):t()}((function(){"use strict";function t(t,r){return function(t){if(Array.isArray(t))return t}(t)||function(t,e){var r=null==t?null:"undefined"!=typeof Symbol&&t[Symbol.iterator]||t["@@iterator"];if(null==r)return;var n,o,a=[],l=!0,i=!1;try{for(r=r.call(t);!(l=(n=r.next()).done)&&(a.push(n.value),!e||a.length!==e);l=!0);}catch(t){i=!0,o=t}finally{try{l||null==r.return||r.return()}finally{if(i)throw o}}return a}(t,r)||function(t,r){if(!t)return;if("string"==typeof t)return e(t,r);var n=Object.prototype.toString.call(t).slice(8,-1);"Object"===n&&t.constructor&&(n=t.constructor.name);if("Map"===n||"Set"===n)return Array.from(t);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return e(t,r)}(t,r)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function e(t,e){(null==e||e>t.length)&&(e=t.length);for(var r=0,n=new Array(e);r
=a)}}for(var h=this.__startIndex;h t.unconstrainedWidth?null:d:null;i.setStyle("width",f)}var g=i.getBoundingRect();o.width=g.width;var y=(i.style.margin||0)+2.1;o.height=g.height+y,o.y-=(o.height-c)/2}}}function rM(t){return"center"===t.position}function oM(t){var e,n,i=t.getData(),r=[],o=!1,a=(t.get("minShowLabelAngle")||0)*eM,s=i.getLayout("viewRect"),l=i.getLayout("r"),u=s.width,h=s.x,c=s.y,p=s.height;function d(t){t.ignore=!0}i.each((function(t){var s=i.getItemGraphicEl(t),c=s.shape,p=s.getTextContent(),f=s.getTextGuideLine(),g=i.getItemModel(t),y=g.getModel("label"),v=y.get("position")||g.get(["emphasis","label","position"]),m=y.get("distanceToLabelLine"),x=y.get("alignTo"),_=Er(y.get("edgeDistance"),u),b=y.get("bleedMargin"),w=g.getModel("labelLine"),S=w.get("length");S=Er(S,u);var M=w.get("length2");if(M=Er(M,u),Math.abs(c.endAngle-c.startAngle)0?"right":"left":k>0?"left":"right"}var B=Math.PI,F=0,G=y.get("rotate");if(j(G))F=G*(B/180);else if("center"===v)F=0;else if("radial"===G||!0===G){F=k<0?-A+B:-A}else if("tangential"===G&&"outside"!==v&&"outer"!==v){var W=Math.atan2(k,L);W<0&&(W=2*B+W),L>0&&(W=B+W),F=W-B}if(o=!!F,p.x=I,p.y=T,p.rotation=F,p.setStyle({verticalAlign:"middle"}),P){p.setStyle({align:D});var H=p.states.select;H&&(H.x+=p.x,H.y+=p.y)}else{var Y=p.getBoundingRect().clone();Y.applyTransform(p.getComputedTransform());var U=(p.style.margin||0)+2.1;Y.y-=U/2,Y.height+=U,r.push({label:p,labelLine:f,position:v,len:S,len2:M,minTurnAngle:w.get("minTurnAngle"),maxSurfaceAngle:w.get("maxSurfaceAngle"),surfaceNormal:new Ji(k,L),linePoints:C,textAlign:D,labelDistance:m,labelAlignTo:x,edgeDistance:_,bleedMargin:b,rect:Y,unconstrainedWidth:Y.width,labelStyleWidth:p.style.width})}s.setTextConfig({inside:P})}})),!o&&t.get("avoidLabelOverlap")&&function(t,e,n,i,r,o,a,s){for(var l=[],u=[],h=Number.MAX_VALUE,c=-Number.MAX_VALUE,p=0;p i&&(i=e);var o=i%2?i+2:i+3;r=[];for(var a=0;a5)return;var 
i=this._model.coordinateSystem.getSlidedAxisExpandWindow([t.offsetX,t.offsetY]);"none"!==i.behavior&&this._dispatchExpand({axisExpandWindow:i.axisExpandWindow})}this._mouseDownPoint=null},mousemove:function(t){if(!this._mouseDownPoint&&ok(this,"mousemove")){var e=this._model,n=e.coordinateSystem.getSlidedAxisExpandWindow([t.offsetX,t.offsetY]),i=n.behavior;"jump"===i&&this._throttledDispatchExpand.debounceNextCall(e.get("axisExpandDebounce")),this._throttledDispatchExpand("none"===i?null:{axisExpandWindow:n.axisExpandWindow,animation:"jump"===i?null:{duration:0}})}}};function ok(t,e){var n=t._model;return n.get("axisExpandable")&&n.get("axisExpandTriggerOn")===e}var ak=function(t){function e(){var n=null!==t&&t.apply(this,arguments)||this;return n.type=e.type,n}return n(e,t),e.prototype.init=function(){t.prototype.init.apply(this,arguments),this.mergeOption({})},e.prototype.mergeOption=function(t){var e=this.option;t&&C(e,t,!0),this._initDimensions()},e.prototype.contains=function(t,e){var n=t.get("parallelIndex");return null!=n&&e.getComponent("parallel",n)===this},e.prototype.setAxisExpand=function(t){E(["axisExpandable","axisExpandCenter","axisExpandCount","axisExpandWidth","axisExpandWindow"],(function(e){t.hasOwnProperty(e)&&(this.option[e]=t[e])}),this)},e.prototype._initDimensions=function(){var t=this.dimensions=[],e=this.parallelAxisIndex=[];E(B(this.ecModel.queryComponents({mainType:"parallelAxis"}),(function(t){return(t.get("parallelIndex")||0)===this.componentIndex}),this),(function(n){t.push("dim"+n.get("dim")),e.push(n.componentIndex)}))},e.type="parallel",e.dependencies=["parallelAxis"],e.layoutMode="box",e.defaultOption={z:0,left:80,top:60,right:80,bottom:60,layout:"horizontal",axisExpandable:!1,axisExpandCenter:null,axisExpandCount:0,axisExpandWidth:50,axisExpandRate:17,axisExpandDebounce:50,axisExpandSlideTriggerArea:[-.15,.05,.4],axisExpandTriggerOn:"click",parallelAxisDefault:null},e}(Tp),sk=function(t){function e(e,n,i,r,o){var 
a=t.call(this,e,n,i)||this;return a.type=r||"value",a.axisIndex=o,a}return n(e,t),e.prototype.isHorizontal=function(){return"horizontal"!==this.coordinateSystem.getModel().get("layout")},e}(H_);function lk(t,e,n,i,r,o){t=t||0;var a=n[1]-n[0];if(null!=r&&(r=hk(r,[0,a])),null!=o&&(o=Math.max(o,null!=r?r:0)),"all"===i){var s=Math.abs(e[1]-e[0]);s=hk(s,[0,a]),r=o=hk(s,[r,o]),i=0}e[0]=hk(e[0],n),e[1]=hk(e[1],n);var l=uk(e,i);e[i]+=t;var u,h=r||0,c=n.slice();return l.sign<0?c[0]+=h:c[1]-=h,e[i]=hk(e[i],c),u=uk(e,i),null!=r&&(u.sign!==l.sign||u.span