Commit

simplify auth headers retrieval

v-rocheleau committed Aug 28, 2023
1 parent a08727c commit 2fa3ed1
Showing 2 changed files with 8 additions and 12 deletions.
4 changes: 0 additions & 4 deletions bento_aggregation_service/search/dataset_search.py
@@ -5,7 +5,6 @@
 
 from aiohttp import ClientSession
 from bento_lib.search.queries import Query
-from fastapi import Request
 
 from bento_aggregation_service.config import Config
 from bento_aggregation_service.search import query_utils
@@ -173,7 +172,6 @@ async def _run_search(
     dataset_linked_fields_results: list[set],
     config: Config,
     http_session: ClientSession,
-    request: Request,
     service_manager: ServiceManager,
     headers: dict[str, str]
 ):
@@ -315,7 +313,6 @@ async def run_search_on_dataset(
     config: Config,
     http_session: ClientSession,
     logger: logging.Logger,
-    request: Request,
     service_manager: ServiceManager,
     headers: dict[str, str]
 ) -> dict[str, list]:
@@ -412,7 +409,6 @@ async def run_search_on_dataset(
         dataset_linked_fields_results,
         config,
         http_session,
-        request,
         service_manager,
         headers
     )
16 changes: 8 additions & 8 deletions bento_aggregation_service/search/handlers/datasets.py
@@ -42,8 +42,8 @@ async def search_worker(
     config: Config,
     http_session: ClientSession,
     logger: logging.Logger,
-    request: Request,
     service_manager: ServiceManager,
+    headers: dict[str, str],
 
     # Flags
     include_internal_results: bool = False,
@@ -61,9 +61,8 @@ async def _search_dataset(dataset: dict) -> tuple[str, dict[str, list] | None]:
             config,
             http_session,
             logger,
-            request,
             service_manager,
-            headers=service_request_headers(request)
+            headers
         )
         return dataset_id, dataset_results
 
@@ -114,9 +113,10 @@ async def all_datasets_search_handler(
     # TODO: Why fetch projects instead of datasets? Is it to avoid "orphan" datasets? Is that even possible?
 
     logger.debug("fetching projects from Katsu")
+    headers = service_request_headers(request=request)
     res = await http_session.get(
         urljoin(config.katsu_url, "api/projects"),
-        headers=service_request_headers(request),
+        headers=headers,
         raise_for_status=True,
     )
 
@@ -141,8 +141,8 @@ async def all_datasets_search_handler(
         config,
         http_session,
         logger,
-        request,
         service_manager,
+        headers,
     )
 
     logger.info("Done fetching individual service search results.")
@@ -189,9 +189,10 @@ async def dataset_search_handler(
     try:
 
         logger.debug(f"fetching dataset {dataset_id} from Katsu")
+        headers = service_request_headers(request)
         res = await http_session.get(
             urljoin(config.katsu_url, f"api/datasets/{dataset_id}"),
-            headers=service_request_headers(request),
+            headers=headers,
             raise_for_status=True,
         )
 
@@ -214,9 +215,8 @@ async def dataset_search_handler(
             config=config,
             http_session=http_session,
             logger=logger,
-            request=request,
             service_manager=service_manager,
-            headers=service_request_headers(request)
+            headers=headers,
         )
 
         return {**dataset, **dataset_results}
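In short, each handler now builds the auth headers dict once via service_request_headers(request) and threads that dict down the call chain, so run_search_on_dataset and its helpers no longer depend on the FastAPI Request object. A minimal runnable sketch of the pattern, under stated assumptions: the body of service_request_headers below is a hypothetical stand-in (the real helper comes from bento_lib and may forward more than the Authorization header), and the route path is illustrative.

from fastapi import FastAPI, Request

app = FastAPI()


def service_request_headers(request: Request) -> dict[str, str]:
    # Hypothetical stand-in for the real bento_lib helper: forward only
    # the incoming Authorization header to downstream Bento services.
    auth = request.headers.get("Authorization")
    return {"Authorization": auth} if auth else {}


async def run_search_on_dataset(dataset_id: str, headers: dict[str, str]) -> dict:
    # Lower layers receive the plain dict; they no longer need the Request.
    return {"dataset": dataset_id, "forwarded_headers": sorted(headers)}


@app.get("/datasets/{dataset_id}/search")  # illustrative route
async def dataset_search_handler(dataset_id: str, request: Request) -> dict:
    # Compute the auth headers once, at the handler boundary...
    headers = service_request_headers(request)
    # ...then pass the dict down instead of the whole Request object.
    return await run_search_on_dataset(dataset_id, headers)

Passing the plain dict keeps the lower search layers framework-agnostic and avoids recomputing the same headers at every call site.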
