api: refactor pagination
* indexer: rename GetListAccounts -> AccountsList
* indexer: AccountsList, ProcessList and EntityList now return a TotalCount
* indexer: EntityList inverted order of args (from, max) to be consistent with others

* test: add TestAPIAccountsList and TestAPIElectionsList

* api: unify hardcoded structs into new types:
  * AccountsList
  * ElectionsList
  * OrganizationsList
  * CountResult

* api: add `pagination` field to endpoints:
  * GET /elections
  * GET /accounts
  * GET /chain/organizations

* api: refactor filtered endpoints to unify pagination logic (and add `pagination` field):
  * GET /accounts/{organizationID}/elections/status/{status}/page/{page}
  * GET /accounts/{organizationID}/elections/page/{page}
  * GET /elections/page/{page}
  * POST /elections/filter/page/{page}
  * GET /chain/organizations/page/{page}
  * POST /chain/organizations/filter/page/{page}
  * GET /accounts/page/{page}
also, marked all of these endpoints as deprecated in the swagger docs

* api: return ErrPageNotFound on paginated endpoints when page is negative or higher than last_page (see the pagination sketch after this list)

* api: deduplicate several code snippets, with marshalAndSend and parse* helpers

* api: fix strings in errors returned to client, replacing "ID" -> "Id"
* api: fix swagger docs, replace "electionID" -> "electionId"

* rename api.MaxPageSize -> api.ItemsPerPage

* fixed lots of swagger docs
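
The helpers behind this refactor are not visible on this page, but the behavior the message describes (a shared Pagination struct, an ItemsPerPage page size, and ErrPageNotFound for out-of-range pages) can be sketched roughly as below. The name calcPagination, the zero-based page numbering, and the local ErrPageNotFound definition are assumptions for illustration, not the committed code.

// Minimal sketch (not the committed code) of the unified pagination logic:
// given a total item count and a requested page, compute last_page, reject
// out-of-range pages, and fill the Pagination fields used by the UI.
package main

import (
	"errors"
	"fmt"
)

// ItemsPerPage as defined in api/api.go after this commit.
const ItemsPerPage = 10

type Pagination struct {
	TotalItems   uint64  `json:"total_items"`
	PreviousPage *uint64 `json:"previous_page"`
	CurrentPage  uint64  `json:"current_page"`
	NextPage     *uint64 `json:"next_page"`
	LastPage     uint64  `json:"last_page"`
}

// ErrPageNotFound is declared locally here; the real API uses its own error type.
var ErrPageNotFound = errors.New("page not found")

// calcPagination is a hypothetical helper; pages are assumed zero-based.
func calcPagination(page int, totalItems uint64) (*Pagination, error) {
	lastPage := uint64(0)
	if totalItems > 0 {
		lastPage = (totalItems - 1) / ItemsPerPage
	}
	if page < 0 || uint64(page) > lastPage {
		return nil, ErrPageNotFound
	}
	p := &Pagination{
		TotalItems:  totalItems,
		CurrentPage: uint64(page),
		LastPage:    lastPage,
	}
	if p.CurrentPage > 0 {
		prev := p.CurrentPage - 1
		p.PreviousPage = &prev
	}
	if p.CurrentPage < lastPage {
		next := p.CurrentPage + 1
		p.NextPage = &next
	}
	return p, nil
}

func main() {
	p, err := calcPagination(2, 35) // 35 items, 10 per page -> pages 0..3
	if err != nil {
		panic(err)
	}
	fmt.Printf("current=%d previous=%d next=%d last=%d\n",
		p.CurrentPage, *p.PreviousPage, *p.NextPage, p.LastPage)
}
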
altergui committed Jul 15, 2024
1 parent f79d57b commit 9b0d842
Showing 24 changed files with 1,003 additions and 630 deletions.
275 changes: 129 additions & 146 deletions api/accounts.go

Large diffs are not rendered by default.

17 changes: 15 additions & 2 deletions api/api.go
@@ -47,9 +47,22 @@ import (

// @securityDefinitions.basic BasicAuth

// MaxPageSize defines the maximum number of results returned by the paginated endpoints
const MaxPageSize = 10
// ItemsPerPage defines how many items per page are returned by the paginated endpoints
const ItemsPerPage = 10

// These consts define the keywords for query (?param=), url (/url/param/) and POST params.
// Note: In JS/TS acronyms like "ID" are camelCased as in "Id".
//
//nolint:revive
const (
ParamPage = "page"
ParamStatus = "status"
ParamOrganizationId = "organizationId"
ParamElectionId = "electionId"
ParamWithResults = "withResults"
)

// Errors used by EnableHandlers
var (
ErrMissingModulesForHandler = fmt.Errorf("missing modules attached for enabling handler")
ErrHandlerUnknown = fmt.Errorf("handler unknown")
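For illustration, a handler could combine the Param* constants and ItemsPerPage along these lines; parsePage is a hypothetical stand-in for the parse* helpers mentioned in the commit message, which are not shown in this view.

// Hypothetical sketch of turning the ?page= query value into an offset/limit
// pair for the indexer queries, using the constants added in api/api.go.
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

const (
	ItemsPerPage = 10
	ParamPage    = "page"
)

// parsePage reads the "page" query parameter, defaulting to page 0 when absent.
func parsePage(q url.Values) (int, error) {
	v := q.Get(ParamPage)
	if v == "" {
		return 0, nil
	}
	return strconv.Atoi(v)
}

func main() {
	u, _ := url.Parse("https://example.com/elections?page=3")
	page, err := parsePage(u.Query())
	if err != nil {
		panic(err)
	}
	offset, limit := page*ItemsPerPage, ItemsPerPage
	fmt.Println("offset:", offset, "limit:", limit) // offset: 30 limit: 10
}
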
47 changes: 39 additions & 8 deletions api/api_types.go
@@ -12,18 +12,32 @@ import (
"google.golang.org/protobuf/encoding/protojson"
)

type Organization struct {
OrganizationID types.HexBytes `json:"organizationID,omitempty" `
Elections []*ElectionSummary `json:"elections,omitempty"`
Organizations []*OrganizationList `json:"organizations,omitempty"`
Count *uint64 `json:"count,omitempty" example:"1"`
// CountResult wraps a count inside an object
type CountResult struct {
Count uint64 `json:"count" example:"10"`
}

type OrganizationList struct {
// Pagination contains all the values needed for the UI to easily organize the returned data
type Pagination struct {
TotalItems uint64 `json:"total_items"`
PreviousPage *uint64 `json:"previous_page"`
CurrentPage uint64 `json:"current_page"`
NextPage *uint64 `json:"next_page"`
LastPage uint64 `json:"last_page"`
}

type OrganizationSummary struct {
OrganizationID types.HexBytes `json:"organizationID" example:"0x370372b92514d81a0e3efb8eba9d036ae0877653"`
ElectionCount uint64 `json:"electionCount" example:"1"`
}

// OrganizationsList wraps the organizations list to consistently return the list inside an object,
// and return an empty object if the list does not contain any results
type OrganizationsList struct {
Organizations []OrganizationSummary `json:"organizations"`
Pagination *Pagination `json:"pagination"`
}

type ElectionSummary struct {
ElectionID types.HexBytes `json:"electionId" `
OrganizationID types.HexBytes `json:"organizationId" `
@@ -37,6 +51,13 @@ type ElectionSummary struct {
ChainID string `json:"chainId"`
}

// ElectionsList wraps the elections list to consistently return the list inside an object,
// and return an empty object if the list does not contain any results
type ElectionsList struct {
Elections []ElectionSummary `json:"elections"`
Pagination *Pagination `json:"pagination"`
}

// ElectionResults is the struct used to wrap the results of an election
type ElectionResults struct {
// ABIEncoded is the abi encoded election results
@@ -101,11 +122,16 @@ type ElectionDescription struct {
}

type ElectionFilter struct {
OrganizationID types.HexBytes `json:"organizationId,omitempty" `
ElectionID types.HexBytes `json:"electionId,omitempty" `
Page int `json:"page,omitempty"`
OrganizationID types.HexBytes `json:"organizationId,omitempty"`
ElectionID types.HexBytes `json:"electionId,omitempty"`
WithResults *bool `json:"withResults,omitempty"`
Status string `json:"status,omitempty"`
}
type OrganizationFilter struct {
Page int `json:"page,omitempty"`
OrganizationID types.HexBytes `json:"organizationId,omitempty"`
}

type Key struct {
Index int `json:"index"`
@@ -228,6 +254,11 @@ type Account struct {
SIK types.HexBytes `json:"sik"`
}

type AccountsList struct {
Accounts []indexertypes.Account `json:"accounts"`
Pagination *Pagination `json:"pagination"`
}

type AccountSet struct {
TxPayload []byte `json:"txPayload,omitempty" swaggerignore:"true"`
Metadata []byte `json:"metadata,omitempty" swaggerignore:"true"`
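As a rough illustration of why the lists are wrapped in objects: marshaling an empty ElectionsList still produces a JSON object with an elections array and a pagination block, and the pointer fields such as previous_page serialize as null. The types below are copied from the diff above, trimmed to what the example needs.

// Sketch showing how the new wrapper types serialize; not part of the commit.
package main

import (
	"encoding/json"
	"fmt"
)

type Pagination struct {
	TotalItems   uint64  `json:"total_items"`
	PreviousPage *uint64 `json:"previous_page"`
	CurrentPage  uint64  `json:"current_page"`
	NextPage     *uint64 `json:"next_page"`
	LastPage     uint64  `json:"last_page"`
}

// ElectionSummary trimmed to a single field for brevity.
type ElectionSummary struct {
	ChainID string `json:"chainId"`
}

type ElectionsList struct {
	Elections  []ElectionSummary `json:"elections"`
	Pagination *Pagination       `json:"pagination"`
}

func main() {
	out, _ := json.MarshalIndent(&ElectionsList{
		Elections:  []ElectionSummary{}, // empty page still marshals as []
		Pagination: &Pagination{TotalItems: 0, CurrentPage: 0, LastPage: 0},
	}, "", "  ")
	fmt.Println(string(out))
	// Output (abbreviated):
	// {"elections": [], "pagination": {"total_items": 0, "previous_page": null, ...}}
}
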
10 changes: 7 additions & 3 deletions api/censuses.go
@@ -631,8 +631,10 @@ func (a *API) censusDeleteHandler(msg *apirest.APIdata, ctx *httprouter.HTTPCont
// @Security BasicAuth
// @Success 200 {object} object{census=object{censusID=string,uri=string}} "It returns the published censusID and the IPFS URI where it is uploaded"
// @Param censusID path string true "Census id"
// @Param root path string false "Specific root where to publish the census. Not required"
// @Router /censuses/{censusID}/publish [post]
// @Router /censuses/{censusID}/publish/async [post]
// @Router /censuses/{censusID}/publish/{root} [post]
func (a *API) censusPublishHandler(msg *apirest.APIdata, ctx *httprouter.HTTPContext) error {
token, err := uuid.Parse(msg.AuthToken)
if err != nil {
@@ -957,7 +959,7 @@ func (a *API) censusVerifyHandler(msg *apirest.APIdata, ctx *httprouter.HTTPCont
// @Accept json
// @Produce json
// @Success 200 {object} object{valid=bool}
// @Router /censuses/list/ [get]
// @Router /censuses/list [get]
func (a *API) censusListHandler(_ *apirest.APIdata, ctx *httprouter.HTTPContext) error {
list, err := a.censusdb.List()
if err != nil {
@@ -979,7 +981,8 @@ func (a *API) censusListHandler(_ *apirest.APIdata, ctx *httprouter.HTTPContext)
// @Produce json
// @Param ipfs path string true "Export to IPFS. Blank to return the JSON file"
// @Success 200 {object} object{valid=bool}
// @Router /censuses/export/{ipfs} [get]
// @Router /censuses/export/ipfs [get]
// @Router /censuses/export [get]
func (a *API) censusExportDBHandler(_ *apirest.APIdata, ctx *httprouter.HTTPContext) error {
isIPFSExport := strings.HasSuffix(ctx.Request.URL.Path, "ipfs")
buf := bytes.Buffer{}
@@ -1012,7 +1015,8 @@ func (a *API) censusExportDBHandler(_ *apirest.APIdata, ctx *httprouter.HTTPCont
// @Accept json
// @Produce json
// @Success 200 {object} object{valid=bool}
// @Router /censuses/import/{ipfscid} [post]
// @Router /censuses/import/{ipfscid} [get]
// @Router /censuses/import [post]
func (a *API) censusImportDBHandler(msg *apirest.APIdata, ctx *httprouter.HTTPContext) error {
ipfscid := ctx.URLParam("ipfscid")
if ipfscid == "" {
