Goodreads search fix #433

Merged
merged 3 commits, Dec 11, 2023
40 changes: 26 additions & 14 deletions catalog/search/external.py
@@ -55,27 +55,25 @@ class Goodreads:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        search_url = f"https://www.goodreads.com/search?page={page}&q={quote_plus(q)}"
         try:
-            search_url = (
-                f"https://www.goodreads.com/search?page={page}&q={quote_plus(q)}"
-            )
-            r = requests.get(search_url, timeout=2)
+            r = requests.get(search_url, timeout=3)
             if r.url.startswith("https://www.goodreads.com/book/show/"):
                 # Goodreads will 302 if only one result matches ISBN
                 site = SiteManager.get_site_by_url(r.url)
                 if site:
                     res = site.get_resource_ready()
                     if res:
-                        subtitle = f"{res.metadata['pub_year']} {', '.join(res.metadata['author'])} {', '.join(res.metadata['translator'] if res.metadata['translator'] else [])}"
+                        subtitle = f"{res.metadata.get('pub_year')} {', '.join(res.metadata.get('author', []))} {', '.join(res.metadata.get('translator', []))}"
                         results.append(
                             SearchResultItem(
                                 ItemCategory.Book,
                                 SiteName.Goodreads,
                                 res.url,
                                 res.metadata["title"],
                                 subtitle,
-                                res.metadata["brief"],
-                                res.metadata["cover_image_url"],
+                                res.metadata.get("brief"),
+                                res.metadata.get("cover_image_url"),
                             )
                         )
                     else:
@@ -101,6 +99,8 @@ def search(cls, q, page=1):
                             cover,
                         )
                     )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {search_url} error: {e}")
         except Exception as e:
             logger.error(f"Goodreads search '{q}' error: {e}")
         return results
@@ -110,8 +110,8 @@ class GoogleBooks:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        api_url = f"https://www.googleapis.com/books/v1/volumes?country=us&q={quote_plus(q)}&startIndex={SEARCH_PAGE_SIZE*(page-1)}&maxResults={SEARCH_PAGE_SIZE}&maxAllowedMaturityRating=MATURE"
         try:
-            api_url = f"https://www.googleapis.com/books/v1/volumes?country=us&q={quote_plus(q)}&startIndex={SEARCH_PAGE_SIZE*(page-1)}&maxResults={SEARCH_PAGE_SIZE}&maxAllowedMaturityRating=MATURE"
             j = requests.get(api_url, timeout=2).json()
             if "items" in j:
                 for b in j["items"]:
@@ -148,6 +148,8 @@ def search(cls, q, page=1):
                             cover,
                         )
                     )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {api_url} error: {e}")
         except Exception as e:
             logger.error(f"GoogleBooks search '{q}' error: {e}")
         return results
@@ -157,8 +159,8 @@ class TheMovieDatabase:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        api_url = f"https://api.themoviedb.org/3/search/multi?query={quote_plus(q)}&page={page}&api_key={settings.TMDB_API3_KEY}&language=zh-CN&include_adult=true"
         try:
-            api_url = f"https://api.themoviedb.org/3/search/multi?query={quote_plus(q)}&page={page}&api_key={settings.TMDB_API3_KEY}&language=zh-CN&include_adult=true"
             j = requests.get(api_url, timeout=2).json()
             for m in j["results"]:
                 if m["media_type"] in ["tv", "movie"]:
@@ -185,6 +187,8 @@ def search(cls, q, page=1):
                             cover,
                         )
                     )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {api_url} error: {e}")
         except Exception as e:
             logger.error(f"TMDb search '{q}' error: {e}")
         return results
@@ -194,8 +198,8 @@ class Spotify:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        api_url = f"https://api.spotify.com/v1/search?q={q}&type=album&limit={SEARCH_PAGE_SIZE}&offset={page*SEARCH_PAGE_SIZE}"
         try:
-            api_url = f"https://api.spotify.com/v1/search?q={q}&type=album&limit={SEARCH_PAGE_SIZE}&offset={page*SEARCH_PAGE_SIZE}"
             headers = {"Authorization": f"Bearer {get_spotify_token()}"}
             j = requests.get(api_url, headers=headers, timeout=2).json()
             for a in j["albums"]["items"]:
@@ -216,6 +220,8 @@ def search(cls, q, page=1):
                         cover,
                     )
                 )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {api_url} error: {e}")
         except Exception as e:
             logger.error(f"Spotify search '{q}' error: {e}")
         return results
@@ -225,8 +231,8 @@ class Bandcamp:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        search_url = f"https://bandcamp.com/search?from=results&item_type=a&page={page}&q={quote_plus(q)}"
         try:
-            search_url = f"https://bandcamp.com/search?from=results&item_type=a&page={page}&q={quote_plus(q)}"
             r = requests.get(search_url, timeout=2)
             h = html.fromstring(r.content.decode("utf-8"))
             albums = h.xpath('//li[@class="searchresult data-search"]')
@@ -250,6 +256,8 @@ def search(cls, q, page=1):
                         cover,
                     )
                 )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {search_url} error: {e}")
         except Exception as e:
             logger.error(f"Goodreads search '{q}' error: {e}")
         return results
@@ -259,8 +267,8 @@ class ApplePodcast:
     @classmethod
     def search(cls, q, page=1):
         results = []
+        search_url = f"https://itunes.apple.com/search?entity=podcast&limit={page*SEARCH_PAGE_SIZE}&term={quote_plus(q)}"
         try:
-            search_url = f"https://itunes.apple.com/search?entity=podcast&limit={page*SEARCH_PAGE_SIZE}&term={quote_plus(q)}"
             r = requests.get(search_url, timeout=2).json()
             for p in r["results"][(page - 1) * SEARCH_PAGE_SIZE :]:
                 results.append(
@@ -274,6 +282,8 @@ def search(cls, q, page=1):
                         p["artworkUrl600"],
                     )
                 )
+        except requests.exceptions.RequestException as e:
+            logger.warning(f"Search {search_url} error: {e}")
         except Exception as e:
             logger.error(f"ApplePodcast search '{q}' error: {e}")
         return results
@@ -282,15 +292,17 @@ def search(cls, q, page=1):
 class Fediverse:
     @staticmethod
     async def search_task(host, q, category=None):
+        api_url = f"https://{host}/api/catalog/search?query={quote_plus(q)}{'&category='+category if category else ''}"
         async with httpx.AsyncClient() as client:
             results = []
             try:
                 response = await client.get(
-                    f"https://{host}/api/catalog/search?query={q}&category={category or ''}",
+                    api_url,
                     timeout=2,
                 )
                 r = response.json()
-            except:
+            except Exception as e:
+                logger.warning(f"Search {api_url} error: {e}")
                 return []
             if "data" in r:
                 for item in r["data"]:
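The same defensive shape now repeats in every provider above: the request URL is built before the try block so both handlers can reference it, network failures are logged as warnings, and anything else still falls through to an error log. Below is a minimal standalone sketch of that pattern; the fetch_search_page helper, the response shape, and the 3-second timeout are illustrative assumptions, not code from this PR.

import requests
from loguru import logger


def fetch_search_page(url: str) -> list:
    # The URL is known before entering try, so every handler can log it.
    results = []
    try:
        r = requests.get(url, timeout=3)
        results = r.json().get("items", [])  # response shape is a made-up example
    except requests.exceptions.RequestException as e:
        # Timeouts, DNS failures, connection resets: expected occasionally, warn only.
        logger.warning(f"Search {url} error: {e}")
    except Exception as e:
        # Anything else (bad JSON, unexpected payload) is still reported as an error.
        logger.error(f"Search {url} error: {e}")
    return results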
5 changes: 4 additions & 1 deletion takahe/views.py
@@ -8,11 +8,14 @@
 from django.utils.http import http_date
 from loguru import logger

+from common.utils import user_identity_required
+
 from .models import TakaheSession
 from .utils import Takahe


 @login_required
+@user_identity_required
 def auth_login(request):
     """Redirect to the login page if not yet, otherwise sync login info to takahe session"""
     Takahe.sync_password(request.user)
@@ -24,7 +27,7 @@ def auth_login(request):
     session["_auth_user_backend"] = "django.contrib.auth.backends.ModelBackend"
     session_key: str = session._get_session_key()  # type: ignore

-    # if SESSION_ENGINE = "django.contrib.sessions.backends.db"
+    # if SESSION_ENGINE = "django.contrib.sessions.backends.db" in Takahe
     # sess = request.session._session
     # sess["_auth_user_backend"] = "django.contrib.auth.backends.ModelBackend"
     # logger.info(f"session: {sess}")
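The user_identity_required decorator stacked onto auth_login comes from common.utils, which is not part of this diff. A hedged sketch of how such a guard is commonly written follows; the identity attribute and the redirect target are assumptions, not the project's actual implementation.

from functools import wraps

from django.shortcuts import redirect


def user_identity_required(view_func):
    # Reject requests whose authenticated user has no local identity yet.
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        identity = getattr(request.user, "identity", None)  # attribute name is an assumption
        if identity is None:
            return redirect("/account/register")  # redirect target is an assumption
        return view_func(request, *args, **kwargs)

    return wrapper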