fix 500
Her Email authored and alphatownsman committed Dec 13, 2023
1 parent 49c59ea commit 6a62a11
Showing 2 changed files with 60 additions and 50 deletions.
107 changes: 57 additions & 50 deletions catalog/search/external.py
@@ -162,31 +162,34 @@ def search(cls, q, page=1):
         api_url = f"https://api.themoviedb.org/3/search/multi?query={quote_plus(q)}&page={page}&api_key={settings.TMDB_API3_KEY}&language=zh-CN&include_adult=true"
         try:
             j = requests.get(api_url, timeout=2).json()
-            for m in j["results"]:
-                if m["media_type"] in ["tv", "movie"]:
-                    url = f"https://www.themoviedb.org/{m['media_type']}/{m['id']}"
-                    if m["media_type"] == "tv":
-                        cat = ItemCategory.TV
-                        title = m["name"]
-                        subtitle = f"{m.get('first_air_date', '')} {m.get('original_name', '')}"
-                    else:
-                        cat = ItemCategory.Movie
-                        title = m["title"]
-                        subtitle = (
-                            f"{m.get('release_date', '')} {m.get('original_name', '')}"
-                        )
-                    cover = f"https://image.tmdb.org/t/p/w500/{m.get('poster_path')}"
-                    results.append(
-                        SearchResultItem(
-                            cat,
-                            SiteName.TMDB,
-                            url,
-                            title,
-                            subtitle,
-                            m.get("overview"),
-                            cover,
-                        )
-                    )
+            if j.get("results"):
+                for m in j["results"]:
+                    if m["media_type"] in ["tv", "movie"]:
+                        url = f"https://www.themoviedb.org/{m['media_type']}/{m['id']}"
+                        if m["media_type"] == "tv":
+                            cat = ItemCategory.TV
+                            title = m["name"]
+                            subtitle = f"{m.get('first_air_date', '')} {m.get('original_name', '')}"
+                        else:
+                            cat = ItemCategory.Movie
+                            title = m["title"]
+                            subtitle = f"{m.get('release_date', '')} {m.get('original_name', '')}"
+                        cover = (
+                            f"https://image.tmdb.org/t/p/w500/{m.get('poster_path')}"
+                        )
+                        results.append(
+                            SearchResultItem(
+                                cat,
+                                SiteName.TMDB,
+                                url,
+                                title,
+                                subtitle,
+                                m.get("overview"),
+                                cover,
+                            )
+                        )
+            else:
+                logger.warning(f"TMDB search '{q}' no results found.")
         except requests.exceptions.RequestException as e:
             logger.warning(f"Search {api_url} error: {e}")
         except Exception as e:
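
For context: a TMDB error payload (rate limit, bad API key, etc.) typically carries no "results" key, so the unguarded j["results"] lookup raised KeyError. Below is a minimal standalone sketch of the guard pattern this hunk adds; tmdb_titles and the payloads are hypothetical, not the project's code or real TMDB responses.

import logging

logger = logging.getLogger(__name__)

def tmdb_titles(j: dict, q: str) -> list:
    # Illustration only: dict.get() returns None for a missing or empty
    # "results" key, so an error payload takes the warning branch instead
    # of raising KeyError.
    titles = []
    if j.get("results"):
        for m in j["results"]:
            if m.get("media_type") in ["tv", "movie"]:
                titles.append(m.get("name") or m.get("title") or "")
    else:
        logger.warning(f"TMDB search '{q}' no results found.")
    return titles

print(tmdb_titles({"success": False, "status_message": "Invalid API key"}, "dune"))  # []
print(tmdb_titles({"results": [{"media_type": "movie", "title": "Dune"}]}, "dune"))  # ['Dune']
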
@@ -202,24 +205,27 @@ def search(cls, q, page=1):
         try:
             headers = {"Authorization": f"Bearer {get_spotify_token()}"}
             j = requests.get(api_url, headers=headers, timeout=2).json()
-            for a in j["albums"]["items"]:
-                title = a["name"]
-                subtitle = a["release_date"]
-                for artist in a["artists"]:
-                    subtitle += " " + artist["name"]
-                url = a["external_urls"]["spotify"]
-                cover = a["images"][0]["url"]
-                results.append(
-                    SearchResultItem(
-                        ItemCategory.Music,
-                        SiteName.Spotify,
-                        url,
-                        title,
-                        subtitle,
-                        "",
-                        cover,
-                    )
-                )
+            if j.get("albums"):
+                for a in j["albums"]["items"]:
+                    title = a["name"]
+                    subtitle = a["release_date"]
+                    for artist in a["artists"]:
+                        subtitle += " " + artist["name"]
+                    url = a["external_urls"]["spotify"]
+                    cover = a["images"][0]["url"]
+                    results.append(
+                        SearchResultItem(
+                            ItemCategory.Music,
+                            SiteName.Spotify,
+                            url,
+                            title,
+                            subtitle,
+                            "",
+                            cover,
+                        )
+                    )
+            else:
+                logger.warning(f"Spotify search '{q}' no results found.")
         except requests.exceptions.RequestException as e:
             logger.warning(f"Search {api_url} error: {e}")
         except Exception as e:
@@ -271,17 +277,18 @@ def search(cls, q, page=1):
         try:
             r = requests.get(search_url, timeout=2).json()
             for p in r["results"][(page - 1) * SEARCH_PAGE_SIZE :]:
-                results.append(
-                    SearchResultItem(
-                        ItemCategory.Podcast,
-                        SiteName.RSS,
-                        p["feedUrl"],
-                        p["trackName"],
-                        p["artistName"],
-                        "",
-                        p["artworkUrl600"],
-                    )
-                )
+                if p.get("feedUrl"):
+                    results.append(
+                        SearchResultItem(
+                            ItemCategory.Podcast,
+                            SiteName.RSS,
+                            p["feedUrl"],
+                            p["trackName"],
+                            p["artistName"],
+                            "",
+                            p["artworkUrl600"],
+                        )
+                    )
         except requests.exceptions.RequestException as e:
             logger.warning(f"Search {search_url} error: {e}")
         except Exception as e:
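
Similarly, the podcast search backend (the field names suggest the iTunes Search API) can return entries without a feedUrl, and indexing p["feedUrl"] on such an entry raised KeyError. A rough sketch of the skip-on-missing-key behaviour, using made-up sample entries rather than real API output:

sample_results = [
    {"trackName": "Some Show", "artistName": "Host",
     "feedUrl": "https://example.org/feed.xml",
     "artworkUrl600": "https://example.org/art.jpg"},
    {"trackName": "Entry without a feed", "artistName": "Host"},
]

# Entries lacking a usable feed URL are skipped instead of aborting the search.
feeds = [p["feedUrl"] for p in sample_results if p.get("feedUrl")]
print(feeds)  # ['https://example.org/feed.xml']
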
3 changes: 3 additions & 0 deletions journal/importers/douban.py
@@ -312,6 +312,9 @@ def import_review_sheet(self, worksheet, sheet_name):
 
     def get_item_by_url(self, url):
         item = None
+        if not url:
+            logger.warning(f"URL empty")
+            return None
         try:
             site = SiteManager.get_site_by_url(url)
             if not site:
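
In the Douban importer, an imported row can presumably carry an empty URL cell, and the new guard returns early instead of handing None or "" to the site resolver. A tiny sketch of that guard with a stand-in resolver; resolve is a placeholder, not the project's SiteManager API:

import logging

logger = logging.getLogger(__name__)

def get_item_by_url(url, resolve=lambda u: f"<item for {u}>"):
    # 'resolve' stands in for the real lookup; only the empty-URL guard
    # added by this commit is illustrated here.
    if not url:  # catches both None and ""
        logger.warning("URL empty")
        return None
    return resolve(url)

print(get_item_by_url(""))                       # None (plus a logged warning)
print(get_item_by_url("https://example.org/x"))  # <item for https://example.org/x>
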