Merge pull request #544 from rix1337/dev
v.11.0.18
rix1337 authored Oct 30, 2021
2 parents 5a1c067 + 3a7e9f7 commit 69534a4
Showing 11 changed files with 201 additions and 127 deletions.
13 changes: 7 additions & 6 deletions .github/Changelog.md
@@ -9,20 +9,21 @@
 ---

 ### Changelog:
+- **11.0.18** Restored the FX search (feed and web)
+- **11.0.18** FC packages are always offered last for decryption
+- **11.0.18** Reload data in the web interface less often (improves performance)
+- **11.0.18** Prevent a crash on Ombi requests without an IMDb ID #543 Thanks @postboy99
 - **11.0.17** Adjusted link detection for FX
-- **11.0.16** Prevent the
-  [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper) from opening the
+- **11.0.16** Prevent the [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper) from opening the
   same link more than once
-- **11.0.15** Release of the new version 2.0.1
-  of the [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper)
+- **11.0.15** Release of the new version 2.0.1 of the [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper)
   - The helper is now based on Chromium instead of Firefox.
   - Captchas on DW are therefore solved automatically and reliably again.
   - All scripts were also updated to run more stably.
   - The following changes to the helper's Docker configuration are recommended:
     - remove the `--privileged` flag
     - add the `--restart unless-stopped` flag
-- **11.0.15** Adjusted the start page
-  of the [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper)
+- **11.0.15** Adjusted the start page of the [FeedCrawler Sponsors Helper](https://github.com/rix1337/RSScrawler/wiki/5.-FeedCrawler-Sponsors-Helper)
   - The page now refreshes twice per minute instead of once.
   - If a captcha was not solved, the helper reopens the page instead of waiting.
 - **11.0.14** Bugfixes
27 changes: 26 additions & 1 deletion feedcrawler/common.py
@@ -227,10 +227,18 @@ def fullhd_title(key):

 def get_to_decrypt():
     try:
-        to_decrypt = FeedDb('to_decrypt').retrieve_all_titles()
+        to_decrypt = FeedDb('to_decrypt').retrieve_all_titles_unordered()
         if to_decrypt:
+            easy_decrypt_exists = False
+            fx = CrawlerConfig('Hostnames').get('fx')
+            for package in to_decrypt:
+                if not "filecrypt." in package[1] and not fx in package[1]:
+                    easy_decrypt_exists = True
+
             packages = []
             for package in to_decrypt:
+                if easy_decrypt_exists and ("filecrypt." in package[1] or fx in package[1]):
+                    continue
                 title = package[0]
                 try:
                     details = package[1].split('|')
@@ -244,6 +252,23 @@
                     'url': url,
                     'password': password
                 })
+
+            for package in to_decrypt:
+                if easy_decrypt_exists and ("filecrypt." in package[1] or fx in package[1]):
+                    title = package[0]
+                    try:
+                        details = package[1].split('|')
+                        url = details[0]
+                        password = details[1]
+                    except:
+                        url = package[1]
+                        password = ""
+                    packages.append({
+                        'name': title,
+                        'url': url,
+                        'password': password
+                    })
+
             return packages
         else:
             return False
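Taken together, the two new loops in `get_to_decrypt()` implement a stable partition: if at least one queued package lives on a hoster other than filecrypt or FX, those easier packages are emitted first and all filecrypt/FX packages are appended afterwards, which is what the changelog means by FC packages being offered last. A minimal equivalent sketch (the helper name is illustrative; packages are `[title, "url|password"]` pairs as stored in the `to_decrypt` table):

```python
# Sketch: the same ordering expressed as a stable sort. sorted() is stable,
# so packages keep their relative order within each group; filecrypt/FX
# packages simply move to the back.
def order_to_decrypt(packages, fx_hostname):
    def needs_manual_decryption(package):
        url = package[1]
        return "filecrypt." in url or fx_hostname in url

    return sorted(packages, key=needs_manual_decryption)  # False sorts first
```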
8 changes: 8 additions & 0 deletions feedcrawler/db.py
@@ -58,6 +58,14 @@ def retrieve_all_titles(self):
             items.append([str(r[0]), str(r[1])])
         return items if items else None

+    def retrieve_all_titles_unordered(self):
+        res = self._conn.execute(
+            "SELECT distinct key, value FROM %s" % self._table)
+        items = []
+        for r in res:
+            items.append([str(r[0]), str(r[1])])
+        return items if items else None
+
     def store(self, key, value):
         self._conn.execute("INSERT INTO '%s' VALUES ('%s', '%s')" %
                            (self._table, key, value))
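`retrieve_all_titles_unordered()` mirrors `retrieve_all_titles()` but, as the name suggests, without an ordering clause, returning rows in plain table order so that `get_to_decrypt()` can impose its own FC-last ordering. A short usage sketch (data illustrative):

```python
from feedcrawler.db import FeedDb

# Rows come back as [key, value] string pairs, or None when the table is empty.
rows = FeedDb('to_decrypt').retrieve_all_titles_unordered()
if rows:
    for key, value in rows:
        print(key, value)
```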
10 changes: 8 additions & 2 deletions feedcrawler/ombi.py
@@ -27,7 +27,10 @@ def imdb_movie(imdb_id):
         year = str(output.data['year'])
         return title + " " + year
     except:
-        print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
+        if imdb_id is None:
+            internal.logger.debug("Ein Film ohne IMDb-ID wurde angefordert.")
+        else:
+            print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
     return False, False


@@ -49,7 +52,10 @@ def imdb_show(imdb_id):

         return title, eps
     except:
-        print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
+        if imdb_id is None:
+            internal.logger.debug("Eine Serie ohne IMDb-ID wurde angefordert.")
+        else:
+            print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
     return False, False, False


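The `None` check matters because the `except` handler itself was the crash site: when Ombi sends a request without an IMDb ID, concatenating `None` onto the log string raises a `TypeError` that escapes the handler. A minimal reproduction (illustrative, Python 3):

```python
# Illustrative: the old handler crashed on string + None concatenation.
imdb_id = None
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
# TypeError: can only concatenate str (not "NoneType") to str
```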
1 change: 1 addition & 0 deletions feedcrawler/search/shared/content_all.py
@@ -137,6 +137,7 @@ def download(payload):
         for hoster in hosters:
             url_hosters.append(['https://' + nk + hoster["href"], hoster.text])
     elif "FX" in site:
+        download_method = add_decrypt_instead_of_download
         key = payload[1]
         password = payload[2]
     else:
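As elsewhere in these search paths, downloading is dispatched through per-site strategy variables: each branch selects a link-extraction callable and a download callable, and the function later invokes whichever pair was chosen. With this addition the FX branch routes into the decrypt queue instead of JDownloader. A condensed, runnable sketch of the pattern, with stubs standing in for the real FeedCrawler functions:

```python
# Stubs standing in for feedcrawler's real extractor/downloader callables.
def fx_get_download_links(page):
    return ["https://safe.fx.example/abc"]

def add_decrypt_instead_of_download(links):
    print("queued for decryption:", links)

def myjd_download(links):
    print("sent to JDownloader:", links)

site = "FX"
if "FX" in site:
    get_links, download = fx_get_download_links, add_decrypt_instead_of_download
else:
    get_links, download = (lambda page: []), myjd_download

# The caller never needs to know which site it is handling.
download(get_links("<html>...</html>"))
```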
4 changes: 2 additions & 2 deletions feedcrawler/sites/content_all_fx.py
@@ -5,7 +5,7 @@
 import feedcrawler.sites.shared.content_all as shared_blogs
 from feedcrawler.config import CrawlerConfig
 from feedcrawler.db import FeedDb
-from feedcrawler.myjd import myjd_download
+from feedcrawler.sites.shared.internal_feed import add_decrypt_instead_of_download
 from feedcrawler.sites.shared.internal_feed import fx_feed_enricher
 from feedcrawler.sites.shared.internal_feed import fx_get_download_links
 from feedcrawler.url import get_url
@@ -64,7 +64,7 @@ def __init__(self, filename):
         self.get_url_method = get_url
         self.get_url_headers_method = get_url_headers
         self.get_download_links_method = fx_get_download_links
-        self.download_method = myjd_download
+        self.download_method = add_decrypt_instead_of_download

         try:
             self.imdb = float(self.config.get('imdb'))
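With the import and the `download_method` default swapped, FX packages found via the feed are no longer pushed straight to JDownloader; they are parked in the `to_decrypt` queue that `get_to_decrypt()` (see common.py above) reads back out. Judging from the `value.split('|')` parsing there, the queued value is the URL and password joined by a pipe. A sketch of the assumed effect, not the real implementation of `add_decrypt_instead_of_download`:

```python
from feedcrawler.db import FeedDb

# Assumed effect: store the package under its title, with the value in the
# "url|password" format that get_to_decrypt() splits apart.
def queue_for_decryption(title, url, password=""):
    FeedDb('to_decrypt').store(title, url + "|" + password)
```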
12 changes: 7 additions & 5 deletions feedcrawler/sites/shared/content_all.py
@@ -365,7 +365,7 @@ def download_hevc(self, title):
     for result in search_results:
         i += 1

-        key = result[0]
+        key = result[0].replace(" ", ".")

         if feedsearch_title in key:
             payload = result[1].split("|")
@@ -387,7 +387,7 @@ def download_hevc(self, title):
                 link = get_url(link)
                 link_grabbed = True
                 get_download_links_method = fx_get_download_links
-                download_method = myjd_download
+                download_method = add_decrypt_instead_of_download
             elif "NK" in site:
                 get_download_links_method = nk_page_download_link
                 download_method = myjd_download
@@ -502,15 +502,15 @@ def download_dual_language(self, title, hevc=False):

     hevc_found = False
     for result in search_results:
-        key = result[0]
+        key = result[0].replace(" ", ".")
         if feedsearch_title in key and ".dl." in key.lower() and (hevc and is_hevc(key)):
             hevc_found = True

     i = 0
     for result in search_results:
         i += 1

-        key = result[0]
+        key = result[0].replace(" ", ".")

         if feedsearch_title in key:
             payload = result[1].split("|")
@@ -529,7 +529,7 @@
elif "FX" in site:
link = get_url(link)
get_download_links_method = fx_get_download_links
download_method = myjd_download
download_method = add_decrypt_instead_of_download
elif "NK" in site:
get_download_links_method = nk_page_download_link
download_method = myjd_download
@@ -605,6 +605,7 @@


 def download_imdb(self, key, download_links, score, imdb_id, hevc_retail, site, download_method):
+    key = key.replace(" ", ".")
     added_items = []
     if not hevc_retail:
         if self.hevc_retail:
@@ -677,6 +678,7 @@ def download_imdb(self, key, download_links, score, imdb_id, hevc_retail, site,


 def download_feed(self, key, content, hevc_retail):
+    key = key.replace(" ", ".")
     added_items = []
     if not hevc_retail:
         if self.hevc_retail:
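All five edits in this file are the same normalization: FX result titles may now contain spaces, while the matching logic compares against dot-separated release names, so every key is normalized before the substring checks. For example (title invented for illustration):

```python
# Space-separated titles would never match a dotted feed-search title.
key = "Some Movie 2021 German DL 1080p BluRay x264-GROUP".replace(" ", ".")
feedsearch_title = "Some.Movie.2021"
assert feedsearch_title in key  # "Some.Movie.2021.German.DL..."
```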
12 changes: 7 additions & 5 deletions feedcrawler/sites/shared/internal_feed.py
@@ -513,15 +513,16 @@ def fx_get_download_links(self, content, title):

 def fx_feed_enricher(feed):
     feed = BeautifulSoup(feed, 'lxml')
+    fx = CrawlerConfig('Hostnames').get('fx')
     articles = feed.findAll("article")
     entries = []

     for article in articles:
         try:
             article = BeautifulSoup(str(article), 'lxml')
-            titles = article.findAll("a", href=re.compile("filecrypt"))
+            titles = article.findAll("a", href=re.compile("(filecrypt|safe." + fx + ")"))
             for title in titles:
-                title = title.text.encode("ascii", errors="ignore").decode().replace("/", "")
+                title = title.text.encode("ascii", errors="ignore").decode().replace("/", "").replace(" ", ".")
                 if title:
                     if "download" in title.lower():
                         try:
@@ -550,6 +551,7 @@ def fx_feed_enricher(feed):


 def fx_search_results(content):
+    fx = CrawlerConfig('Hostnames').get('fx')
     articles = content.find("main").find_all("article")
     result_urls = []
     for article in articles:
@@ -566,11 +568,11 @@

     for result in results:
         article = BeautifulSoup(str(result), 'lxml')
-        titles = article.find_all("a", href=re.compile("filecrypt"))
+        titles = article.find_all("a", href=re.compile("(filecrypt|safe." + fx + ")"))
         for title in titles:
             link = article.find("link", rel="canonical")["href"]
-            title = title.text.encode("ascii", errors="ignore").decode().replace("/", "")
-            if title:
+            title = title.text.encode("ascii", errors="ignore").decode().replace("/", "").replace(" ", ".")
+            if title and "-fun" in title.lower():
                 if "download" in title.lower():
                     try:
                         title = str(content.find("strong", text=re.compile(r".*Release.*")).nextSibling)
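The widened `href` patterns make the feed and search parsers pick up FX's own `safe.<fx-host>` crypter links alongside filecrypt ones, and `fx_search_results()` additionally keeps only `-fun` releases. Note that the hostname is interpolated into the regex unescaped, so its dots match any character; harmless in practice, but a stricter equivalent would escape it:

```python
import re

fx = "fx-host.example"  # stand-in for CrawlerConfig('Hostnames').get('fx')

loose = re.compile("(filecrypt|safe." + fx + ")")                # committed form
strict = re.compile(r"(filecrypt|safe\." + re.escape(fx) + ")")  # escaped

href = "https://safe.fx-host.example/container/abc"
assert loose.search(href) and strict.search(href)
```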
2 changes: 1 addition & 1 deletion feedcrawler/version.py
@@ -10,7 +10,7 @@


 def get_version():
-    return "11.0.17"
+    return "11.0.18"


 def create_version_file():