Merge pull request #174 from aymene69/test_rtn
Test rtn
aymene69 authored Nov 27, 2024
2 parents d4aa5c2 + e72bb61 commit 80c0798
Showing 19 changed files with 258 additions and 269 deletions.
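The change set swaps the addon's hand-rolled quality / quality_spec detection for the rank-torrent-name (RTN) library: Jackett titles are now kept as raw_title, parsed once with RTN's parse(), and the parsed object is carried through as parsed_data on JackettResult and TorrentItem. A minimal sketch of that flow, using only the calls visible in this diff (the sample title and the print call are illustrative, not taken from the repository):

from RTN import parse  # provided by the new rank-torrent-name requirement

raw_title = "Some.Show.S01E02.1080p.WEB-DL.x264"  # hypothetical Jackett title
parsed_data = parse(raw_title)  # same call used in jackett_service.py and jackett_result.py

# The diff stores the parsed object as-is; downstream filtering and the
# Stremio stream parser read from parsed_data instead of quality / quality_spec.
print(parsed_data)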
3 changes: 0 additions & 3 deletions source/debrid/alldebrid.py
@@ -79,9 +79,6 @@ def get_stream_link(self, query_string, ip):
                     matching_files.append(file)
                 rank += 1
 
-            if len(strict_matching_files) > 0:
-                matching_files = strict_matching_files
-
             if len(matching_files) == 0:
                 logger.error(f"No matching files for {season} {episode} in torrent.")
                 return f"Error: No matching files for {season} {episode} in torrent."
2 changes: 1 addition & 1 deletion source/debrid/get_debrid_service.py
@@ -19,4 +19,4 @@ def get_debrid_service(config):
     else:
         raise HTTPException(status_code=500, detail="Invalid service configuration.")
 
-    return debrid_service
+    return debrid_service
8 changes: 1 addition & 7 deletions source/debrid/premiumize.py
@@ -101,18 +101,12 @@ def get_stream_link(self, query, ip=None):
         season = query["season"]
         episode = query["episode"]
         files = details.get("content", [])
-        strict_matching_files = []
         matching_files = []
 
         for file in files:
-            if season_episode_in_filename(file["name"], season, episode, strict=True):
-                strict_matching_files.append(file)
-            elif season_episode_in_filename(file["name"], season, episode, strict=False):
+            if season_episode_in_filename(file["name"], season, episode):
                 matching_files.append(file)
 
-        if len(strict_matching_files) > 0:
-            matching_files = strict_matching_files
-
         if len(matching_files) == 0:
             logger.error(f"No matching files for {season} {episode} in torrent.")
             return f"Error: No matching files for {season} {episode} in torrent."
2 changes: 1 addition & 1 deletion source/debrid/realdebrid.py
@@ -286,4 +286,4 @@ def __find_appropiate_link(self, torrent_info, links, file_index, season, episod
             logger.debug(f"From selected files {selected_files}, index: {index} is out of range for {links}.")
             return NO_CACHE_VIDEO_URL
 
-        return links[index]
+        return links[index]
1 change: 0 additions & 1 deletion source/debrid/torbox.py
@@ -182,4 +182,3 @@ def get_availability_bulk(self, hashes_or_magnets, ip=None):
                 continue
 
         return available_torrents
-
35 changes: 13 additions & 22 deletions source/jackett/jackett_result.py
@@ -1,3 +1,5 @@
+from RTN import parse
+
 from models.series import Series
 from torrent.torrent_item import TorrentItem
 from utils.logger import setup_logger
@@ -6,7 +8,7 @@
 
 class JackettResult:
     def __init__(self):
-        self.title = None # Title of the torrent
+        self.raw_title = None # Raw title of the torrent
         self.size = None # Size of the torrent
         self.link = None # Download link for the torrent file or magnet url
         self.indexer = None # Indexer
@@ -17,51 +19,40 @@ def __init__(self):
 
         # Extra processed details for further filtering
         self.languages = None # Language of the torrent
-        self.quality = None # Quality of the torrent
-        self.quality_spec = None # Quality specifications of the torrent
         self.type = None # series or movie
 
-        # Not sure about these
-        self.season = None # Season, if the media is a series
-        self.episode = None # Episode, if the media is a series
+        self.parsed_data = None # Ranked result
 
     def convert_to_torrent_item(self):
         return TorrentItem(
-            self.title,
+            self.raw_title,
             self.size,
             self.magnet,
             self.info_hash.lower() if self.info_hash is not None else None,
             self.link,
             self.seeders,
             self.languages,
-            self.quality,
-            self.quality_spec,
             self.indexer,
             self.privacy,
-            self.episode,
-            self.season,
-            self.type
+            self.type,
+            self.parsed_data
         )
 
     def from_cached_item(self, cached_item, media):
         if type(cached_item) is not dict:
             logger.error(cached_item)
-        self.title = cached_item['title']
+
+        parsed_result = parse(cached_item['title'])
+
+        self.raw_title = cached_item['title']
         self.indexer = "Cache" # Cache doesn't return an indexer sadly (It stores it tho)
         self.magnet = cached_item['magnet']
         self.link = cached_item['magnet']
         self.info_hash = cached_item['hash']
         self.languages = cached_item['language'].split(";") if cached_item['language'] is not None else []
-        self.quality = cached_item['quality']
-        self.quality_spec = cached_item['qualitySpec'].split(";") if cached_item['qualitySpec'] is not None else []
         self.seeders = cached_item['seeders']
         self.size = cached_item['size']
 
-        if isinstance(media, Series):
-            self.season = media.season
-            self.episode = media.episode
-            self.type = media.type
-        else:
-            self.type = media.type
+        self.type = media.type
+        self.parsed_data = parsed_result
 
         return self
27 changes: 15 additions & 12 deletions source/jackett/jackett_service.py
@@ -5,12 +5,13 @@
 import xml.etree.ElementTree as ET
 
 import requests
+from RTN import parse
 
 from jackett.jackett_indexer import JackettIndexer
 from jackett.jackett_result import JackettResult
 from models.movie import Movie
 from models.series import Series
-from utils import detection
+from utils.detection import detect_languages
 from utils.logger import setup_logger

@@ -43,7 +44,7 @@ def thread_target(media, indexer):
                 raise TypeError("Only Movie and Series is allowed as media!")
 
             self.logger.info(
-                f"Search on {indexer.title} took {time.time() - start_time} seconds and found {len(result)} results")
+                f"Search on {indexer.title} took {time.time() - start_time} seconds and found {len([result for sublist in result for result in sublist])} results")
 
             results_queue.put(result) # Put the result in the queue

@@ -82,7 +83,8 @@ def __search_movie_indexer(self, movie, indexer):
             languages = movie.languages
             titles = movie.titles
         else:
-            index_of_language = [index for index, lang in enumerate(movie.languages) if lang == indexer.language or lang == 'en']
+            index_of_language = [index for index, lang in enumerate(movie.languages) if
+                                 lang == indexer.language or lang == 'en']
             languages = [movie.languages[index] for index in index_of_language]
             titles = [movie.titles[index] for index in index_of_language]

@@ -118,7 +120,6 @@ def __search_series_indexer(self, series, indexer):
         season = str(int(series.season.replace('S', '')))
         episode = str(int(series.episode.replace('E', '')))
 
-
         has_imdb_search_capability = (os.getenv("DISABLE_JACKETT_IMDB_SEARCH") != "true"
                                       and indexer.tv_search_capatabilities is not None
                                       and 'imdbid' in indexer.tv_search_capatabilities)
@@ -243,7 +244,7 @@ def __get_torrent_links_from_xml(self, xml_content):
             if int(result.seeders) <= 0:
                 continue
 
-            result.title = item.find('title').text
+            result.raw_title = item.find('title').text
             result.size = item.find('size').text
             result.link = item.find('link').text
             result.indexer = item.find('jackettindexer').text
@@ -265,13 +266,15 @@ def __get_torrent_links_from_xml(self, xml_content):
 
     def __post_process_results(self, results, media):
         for result in results:
-            result.languages = detection.detect_languages(result.title)
-            result.quality = detection.detect_quality(result.title)
-            result.quality_spec = detection.detect_quality_spec(result.title)
-            result.type = media.type
-
-            if isinstance(media, Series):
-                result.season = media.season
-                result.episode = media.episode
+            # self.logger.info(result.title)
+            # self.logger.info(parse(result.title))
+
+            parsed_result = parse(result.raw_title)
+            # result.languages = [languages.get(name=language).alpha2 for language in parsed_result.language]
+            result.parsed_data = parsed_result
+            # TODO: replace with parsed_result.lang_codes when RTN is updated
+            result.languages = detect_languages(result.raw_title)
+            result.type = media.type
 
         return results
13 changes: 6 additions & 7 deletions source/main.py
@@ -24,8 +24,7 @@
 from torrent.torrent_service import TorrentService
 from torrent.torrent_smart_container import TorrentSmartContainer
 from utils.cache import search_cache
-from utils.filter_results import filter_items
-from utils.filter_results import sort_items
+from utils.filter_results import filter_items, sort_items
 from utils.logger import setup_logger
 from utils.parse_config import parse_config
 from utils.stremio_parser import parse_to_stremio_streams
@@ -79,8 +78,6 @@ async def root():
 @app.get("/configure")
 @app.get("/{config}/configure")
 async def configure(request: Request):
-    print(request.headers.get("X-Real-IP"))
-    print(request.headers.get("X-Forwarded-For"))
     return templates.TemplateResponse(
         "index.html",
         {"request": request, "isCommunityVersion": COMMUNITY_VERSION},
@@ -138,7 +135,7 @@ async def get_results(config: str, stream_type: str, stream_id: str, request: Re
     debrid_service = get_debrid_service(config)
 
     search_results = []
-    if COMMUNITY_VERSION or config['cache']:
+    if COMMUNITY_VERSION and config['cache']:
         logger.info("Getting cached results")
         cached_results = search_cache(media)
         cached_results = [JackettResult().from_cached_item(torrent, media) for torrent in cached_results]
@@ -175,13 +172,14 @@ async def get_results(config: str, stream_type: str, stream_id: str, request: Re
     logger.debug("Converted result to TorrentItems (results: " + str(len(torrent_results)) + ")")
 
     torrent_smart_container = TorrentSmartContainer(torrent_results, media)
+
     if config['debrid']:
         if config['service'] == "torbox":
             logger.debug("Checking availability")
             hashes = torrent_smart_container.get_hashes()
             ip = request.client.host
             result = debrid_service.get_availability_bulk(hashes, ip)
-            torrent_smart_container.update_availability(result, type(debrid_service))
+            torrent_smart_container.update_availability(result, type(debrid_service), media)
             logger.debug("Checked availability (results: " + str(len(result.items())) + ")")
 
     # TODO: Maybe add an if to only save to cache if caching is enabled?
@@ -193,14 +191,15 @@ async def get_results(config: str, stream_type: str, stream_id: str, request: Re
     logger.debug("Got best matching results (results: " + str(len(best_matching_results)) + ")")
 
     logger.info("Processing results")
-    stream_list = parse_to_stremio_streams(best_matching_results, config)
+    stream_list = parse_to_stremio_streams(best_matching_results, config, media)
     logger.info("Processed results (results: " + str(len(stream_list)) + ")")
 
     logger.info("Total time: " + str(time.time() - start) + "s")
 
     return {"streams": stream_list}
 
 
+# @app.head("/playback/{config}/{query}")
 @app.get("/playback/{config}/{query}")
 async def get_playback(config: str, query: str, request: Request):
     try:
1 change: 1 addition & 0 deletions source/requirements.txt
@@ -6,3 +6,4 @@ bencode.py
 jinja2
 aiocron
 python-dotenv
+rank-torrent-name
27 changes: 13 additions & 14 deletions source/torrent/torrent_item.py
@@ -1,43 +1,42 @@
 from urllib.parse import quote
 
+from models.media import Media
+from models.series import Series
 from utils.logger import setup_logger
 
 
 class TorrentItem:
-    def __init__(self, title, size, magnet, info_hash, link, seeders, languages, quality, quality_spec, indexer,
-                 privacy,
-                 episode=None, season=None, type=None):
+    def __init__(self, raw_title, size, magnet, info_hash, link, seeders, languages, indexer,
+                 privacy, type=None, parsed_data=None):
         self.logger = setup_logger(__name__)
 
-        self.title = title # Title of the torrent
-        self.size = size # Size of the video file inside of the torrent - it may be updated durring __process_torrent()
+        self.raw_title = raw_title # Raw title of the torrent
+        self.size = size # Size of the video file inside the torrent - it may be updated during __process_torrent()
         self.magnet = magnet # Magnet to torrent
         self.info_hash = info_hash # Hash of the torrent
         self.link = link # Link to download torrent file or magnet link
         self.seeders = seeders # The number of seeders
         self.languages = languages # Language of the torrent
-        self.quality = quality # Quality of the torrent
-        self.quality_spec = quality_spec if quality_spec is not None else [] # Quality specifications of the torrent
         self.indexer = indexer # Indexer of the torrent
-        self.episode = episode # Episode if its a series (for example: "E01" or "E14")
-        self.season = season # Season if its a series (for example: "S01" or "S14")
         self.type = type # "series" or "movie"
         self.privacy = privacy # "public" or "private"
 
-        self.file_name = None # it may be updated durring __process_torrent()
+        self.file_name = None # it may be updated during __process_torrent()
         self.files = None # The files inside of the torrent. If it's None, it means that there is only one file inside of the torrent
         self.torrent_download = None # The torrent jackett download url if its None, it means that there is only a magnet link provided by Jackett. It also means, that we cant do series file filtering before debrid.
         self.trackers = [] # Trackers of the torrent
         self.file_index = None # Index of the file inside of the torrent - it may be updated durring __process_torrent() and update_availability(). If the index is None and torrent is not None, it means that the series episode is not inside of the torrent.
 
-        self.availability = False # If its instantly available on the debrid service
+        self.availability = False # If it's instantly available on the debrid service
 
-    def to_debrid_stream_query(self) -> dict:
+        self.parsed_data = parsed_data # Ranked result
+
+    def to_debrid_stream_query(self, media: Media) -> dict:
         return {
             "magnet": self.magnet,
             "type": self.type,
             "file_index": self.file_index,
-            "season": self.season,
-            "episode": self.episode,
+            "season": media.season if isinstance(media, Series) else None,
+            "episode": media.episode if isinstance(media, Series) else None,
             "torrent_download": quote(self.torrent_download) if self.torrent_download is not None else None
         }
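For context, with the new signature a stream query for a series request ends up shaped like this; the values below are hypothetical, while the keys and the Series-or-None fallback mirror the dict built above:

# hypothetical output of torrent_item.to_debrid_stream_query(media) when media is a Series
{
    "magnet": "magnet:?xt=urn:btih:...",
    "type": "series",
    "file_index": None,
    "season": "S01",    # media.season when media is a Series, else None
    "episode": "E02",   # media.episode when media is a Series, else None
    "torrent_download": None,
}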