Skip to content

Commit

Permalink
Merge pull request #110 from uktrade/feature/resolve-celery
Browse files Browse the repository at this point in the history
feat: add CSV download functionality for search results
  • Loading branch information
hareshkainthdbt authored Dec 11, 2024
2 parents b442179 + b92a628 commit 7816614
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 62 deletions.
4 changes: 2 additions & 2 deletions app/search/utils/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def search_database(


def search(
context: dict, request: HttpRequest
context: dict, request: HttpRequest, ignore_pagination=False
) -> dict | QuerySet[DataResponseModel]:
logger.debug("received search request: %s", request)
start_time = time.time()
Expand Down Expand Up @@ -176,7 +176,7 @@ def search(
# Search across specific fields
results = search_database(config)

if config.limit == "*":
if ignore_pagination:
return results

# convert search_results into json
Expand Down
45 changes: 45 additions & 0 deletions app/search/views.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import csv
import logging

from django.conf import settings
Expand Down Expand Up @@ -78,3 +79,47 @@ def search_react(request: HttpRequest) -> HttpResponse:
}

return render(request, template_name="react-fbr.html", context=context)


def download_csv(request):
    """
    Download CSV view.

    Handles the GET request to download the search results in CSV format.
    Runs the search with pagination disabled so the full result set is
    exported, and returns an HTTP 500 response if anything fails.
    """
    context = {
        "service_name": settings.SERVICE_NAME_SEARCH,
    }

    # Fixed column order for the CSV header. Previously the header came
    # from search_results[0].keys(), which raised IndexError when the
    # search returned no results; with a constant list an empty result
    # set still yields a valid, header-only CSV.
    fieldnames = ["title", "publisher", "description", "type", "date_valid"]

    try:
        response_data = search(context, request, ignore_pagination=True)

        # Lazy %-style args avoid formatting when INFO is disabled.
        logger.info("response_data length: %s", len(response_data))

        search_results = [
            {
                "title": result.title,
                "publisher": result.publisher,
                "description": result.description,
                "type": result.type,
                "date_valid": result.date_valid,
            }
            for result in response_data
        ]

        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = (
            'attachment; filename="search_results.csv"'
        )

        writer = csv.DictWriter(response, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(search_results)
        return response
    except Exception as e:
        # Broad catch is deliberate at this view boundary: any failure
        # should produce a 500 response rather than an unhandled error.
        logger.error("error downloading CSV: %s", e)
        return HttpResponse(
            content="error downloading CSVs",
            status=500,
        )
63 changes: 3 additions & 60 deletions fbr/urls.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,13 @@
"""Find business regulations URL configuration."""

import csv
import logging

import pandas as pd

from rest_framework import routers, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response

from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse
from django.urls import include, path

import app.core.views as core_views
Expand Down Expand Up @@ -78,71 +74,18 @@ def publishers(self, request, *args, **kwargs):
)


class DownloadDataResponseViewSet(viewsets.ModelViewSet):
    """Viewset exposing a CSV export of the search results."""

    @action(detail=False, methods=["get"], url_path="download_csv")
    def download_csv(self, request, *args, **kwargs):
        """Return all search results as a CSV file attachment.

        Forces limit="*" so search() returns the full, unpaginated
        result set, then streams it as text/csv.
        """
        context = {
            "service_name": settings.SERVICE_NAME_SEARCH,
        }

        urls_logger.debug("download_csv - request: %s", request)

        # Fixed column order for the CSV header; the header can be
        # written even when the result set is empty (the old
        # DataFrame-derived header was empty for zero results).
        fieldnames = ["title", "publisher", "description", "type", "date"]

        try:
            # set the limit to '*' to get all results
            request.GET = request.GET.copy()
            request.GET["limit"] = "*"

            response_data = search(context, request)
            urls_logger.debug("response_data: %s", response_data)

            search_results = [
                {
                    "title": result["title"],
                    "publisher": result["publisher"],
                    "description": result["description"],
                    "type": result["type"],
                    "date": result["date_valid"],
                }
                for result in response_data
            ]

            response = HttpResponse(content_type="text/csv")
            response["Content-Disposition"] = (
                'attachment; filename="search_results.csv"'
            )

            # Write rows directly with csv.DictWriter; the pandas
            # DataFrame round-trip added nothing but overhead.
            writer = csv.DictWriter(response, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(search_results)

            # BUG FIX: the original returned Response(response, ...),
            # wrapping the HttpResponse inside a DRF Response, so the
            # client received the object's repr instead of the CSV
            # payload. Return the HttpResponse itself so the download
            # headers and body reach the client intact.
            return response
        except Exception as e:
            return Response(
                data={"message": f"error building csv download response: {e}"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )


# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()

router.register(r"v1", DataResponseViewSet, basename="search")
router.register(
r"v1/download_csv", DownloadDataResponseViewSet, basename="download_csv"
)
router.register(r"v1/retrieve", PublishersViewSet, basename="publishers")


urlpatterns = [
path("api/", include(router.urls)),
path("", search_views.search_react, name="search_react"),
path("nojs/", search_views.search_django, name="search_django"),
path("download_csv/", search_views.download_csv, name="csvdata"),
# If we choose to have a start page with green button, this is it:
# path("", core_views.home, name="home"),
path("document/<str:id>", search_views.document, name="document"),
Expand Down

0 comments on commit 7816614

Please sign in to comment.