diff --git a/v2/CHANGELOG.md b/v2/CHANGELOG.md index 45fc33a4..27cdb674 100644 --- a/v2/CHANGELOG.md +++ b/v2/CHANGELOG.md @@ -3,6 +3,7 @@ ## [master](https://github.com/arangodb/go-driver/tree/master) (N/A) - Add tasks endpoints to v2 - Add missing endpoints from collections to v2 +- Add missing endpoints from query to v2 ## [2.1.3](https://github.com/arangodb/go-driver/tree/v2.1.3) (2025-02-21) - Switch to Go 1.22.11 diff --git a/v2/arangodb/database_query.go b/v2/arangodb/database_query.go index 56c639af..6b7b8847 100644 --- a/v2/arangodb/database_query.go +++ b/v2/arangodb/database_query.go @@ -43,6 +43,81 @@ type DatabaseQuery interface { // ExplainQuery explains an AQL query and return information about it. ExplainQuery(ctx context.Context, query string, bindVars map[string]interface{}, opts *ExplainQueryOptions) (ExplainQueryResult, error) + + // GetQueryProperties returns the properties of the query system. + GetQueryProperties(ctx context.Context) (QueryProperties, error) + + // UpdateQueryProperties updates the properties of the query system. + // The properties are updated with the provided options. + // The updated properties are returned. + UpdateQueryProperties(ctx context.Context, options QueryProperties) (QueryProperties, error) + + // ListOfRunningAQLQueries returns a list of currently running AQL queries. + // If the all parameter is set to true, it returns all queries, otherwise only the queries that are currently running. + // The result is a list of RunningAQLQuery objects. + ListOfRunningAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error) + + // ListOfSlowAQLQueries returns a list of slow AQL queries. + // If the all parameter is set to true, it returns all slow queries, otherwise only the queries that are currently running. + // The result is a list of RunningAQLQuery objects. + // Slow queries are defined as queries that have been running longer than the configured slow query threshold. 
+	// The slow query threshold can be configured in the query properties.
+	// Use ClearSlowAQLQueries to reset the collected list.
+	ListOfSlowAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error)
+
+	// ClearSlowAQLQueries clears the list of slow AQL queries.
+	// If the all parameter is set to true, it clears the lists of all databases, otherwise only
+	// the list of the current database.
+	ClearSlowAQLQueries(ctx context.Context, all *bool) error
+
+	// KillAQLQuery kills a running AQL query.
+	// The queryId is the unique identifier of the query.
+	KillAQLQuery(ctx context.Context, queryId string, all *bool) error
+
+	// GetAllOptimizerRules returns all optimizer rules available in the database.
+	// The result is a list of OptimizerRules objects.
+	GetAllOptimizerRules(ctx context.Context) ([]OptimizerRules, error)
+
+	// GetQueryPlanCache returns a list of cached query plans.
+	// The result is a list of QueryPlanCacheRespObject objects.
+	GetQueryPlanCache(ctx context.Context) ([]QueryPlanCacheRespObject, error)
+
+	// ClearQueryPlanCache clears the query plan cache.
+	ClearQueryPlanCache(ctx context.Context) error
+
+	// GetQueryEntriesCache returns a list of cached query entries.
+	// The result is a list of QueryCacheEntriesRespObject objects.
+	GetQueryEntriesCache(ctx context.Context) ([]QueryCacheEntriesRespObject, error)
+
+	// ClearQueryCache clears the query cache.
+	// This will remove all cached query entries.
+	ClearQueryCache(ctx context.Context) error
+
+	// GetQueryCacheProperties returns the properties of the query cache.
+	// The result is a QueryCacheProperties object.
+	GetQueryCacheProperties(ctx context.Context) (QueryCacheProperties, error)
+
+	// SetQueryCacheProperties sets the properties of the query cache.
+	// The properties are updated with the provided options.
+ SetQueryCacheProperties(ctx context.Context, options QueryCacheProperties) (QueryCacheProperties, error) + + // CreateUserDefinedFunction creates a user-defined function in the database. + // The function is created with the provided options. + // The function is created in the system collection `_aqlfunctions`. + // The function is created with the provided code and name. + // If the function already exists, it will be updated with the new code. + CreateUserDefinedFunction(ctx context.Context, options UserDefinedFunctionObject) (bool, error) + + // DeleteUserDefinedFunction removes a user-defined AQL function from the current database. + // If group is true, all functions with the given name as a namespace prefix will be deleted. + // If group is false, only the function with the fully qualified name will be removed. + // It returns the number of functions deleted. + DeleteUserDefinedFunction(ctx context.Context, name *string, group *bool) (*int, error) + + // GetUserDefinedFunctions retrieves all user-defined AQL functions registered in the current database. + // It returns a list of UserDefinedFunctionObject, each containing the function's name, code, and isDeterministic. + // The returned list may be empty array if no user-defined functions are registered. + GetUserDefinedFunctions(ctx context.Context) ([]UserDefinedFunctionObject, error) } type QuerySubOptions struct { @@ -330,3 +405,129 @@ type ExplainQueryResult struct { // This attribute is not present when allPlans is set to true. Cacheable *bool `json:"cacheable,omitempty"` } + +type QueryProperties struct { + Enabled *bool `json:"enabled"` + TrackSlowQueries *bool `json:"trackSlowQueries"` + TrackBindVars *bool `json:"trackBindVars"` + MaxSlowQueries *int `json:"maxSlowQueries"` + SlowQueryThreshold *float64 `json:"slowQueryThreshold"` + MaxQueryStringLength *int `json:"maxQueryStringLength"` +} + +type RunningAQLQuery struct { + // The unique identifier of the query. 
+ Id *string `json:"id,omitempty"` + // The database in which the query is running. + Database *string `json:"database,omitempty"` + // The user who executed the query. + // This is the user who executed the query, not the user who is currently running the + User *string `json:"user,omitempty"` + // The query string. + // This is the AQL query string that was executed. + Query *string `json:"query,omitempty"` + // The bind variables used in the query. + BindVars *map[string]interface{} `json:"bindVars,omitempty"` + // The time when the query started executing. + // This is the time when the query started executing on the server. + Started *string `json:"started,omitempty"` + // The time when the query was last updated. + // This is the time when the query was last updated on the server. + RunTime *float64 `json:"runTime,omitempty"` + // The PeakMemoryUsage is the peak memory usage of the query in bytes. + PeakMemoryUsage *uint64 `json:"peakMemoryUsage,omitempty"` + // The State of the query. + // This is the current state of the query, e.g. "running", "finished", "executing", etc. + State *string `json:"state,omitempty"` + // The stream option indicates whether the query is executed in streaming mode. + Stream *bool `json:"stream,omitempty"` +} + +type Flags struct { + // CanBeDisabled indicates whether the query can be disabled. + CanBeDisabled *bool `json:"canBeDisabled,omitempty"` + // CanBeExecuted indicates whether the query can be executed. + CanCreateAdditionalPlans *bool `json:"canCreateAdditionalPlans,omitempty"` + //ClusterOnly indicates whether the query is only available in a cluster environment. + ClusterOnly *bool `json:"clusterOnly,omitempty"` + // DisabledByDefault indicates whether the query is disabled by default. + // This means that the query is not executed unless explicitly enabled. + DisabledByDefault *bool `json:"disabledByDefault,omitempty"` + // EnterpriseOnly indicates whether the query is only available in the Enterprise Edition. 
+ EnterpriseOnly *bool `json:"enterpriseOnly,omitempty"` + // Hidden indicates whether the query is hidden from the user. + Hidden *bool `json:"hidden,omitempty"` +} + +type OptimizerRules struct { + // Name of the optimizer rule. + Name string `json:"name,omitempty"` + Flags `json:"flags,omitempty"` +} + +type CacheRespObject struct { + // BindVars are the bind variables used in the query. + BindVars map[string]interface{} `json:"bindVars,omitempty"` + // DataSources is a list of data sources used in the query. + DataSources *[]string `json:"dataSources,omitempty"` + // Hash is the plan cache key. + Hash *string `json:"hash,omitempty"` + // Hits is the number of times the cached plan has been utilized so far. + Hits *uint32 `json:"hits,omitempty"` + // Query is the AQL query string. + Query *string `json:"query,omitempty"` +} + +type QueryPlanCacheRespObject struct { + CacheRespObject `json:",inline"` + // QueryHash is the hash of the AQL query string. + QueryHash *uint32 `json:"queryHash,omitempty"` + // FullCount indicates whether the query result contains the full count of documents. + FullCount *bool `json:"fullCount,omitempty"` + // Created is the time when the query plan has been added to the cache. + Created *string `json:"created,omitempty"` + // MemoryUsage is the memory usage of the cached plan in bytes. + // This is the amount of memory used by the cached plan on the server. + MemoryUsage *uint64 `json:"memoryUsage,omitempty"` +} + +type QueryCacheEntriesRespObject struct { + CacheRespObject `json:",inline"` + // Result is the number of documents in the query result. + Results *uint32 `json:"results,omitempty"` + // RunTime is the time it took to execute the query in seconds. + RunTime string `json:"runTime,omitempty"` + // Size is the size of the query result in bytes. + Size *uint64 `json:"size,omitempty"` + // Started is the time when the query has been started. + // Date and time at which the query result has been added to the cache. 
+ Started *string `json:"started,omitempty"` +} + +type QueryCacheProperties struct { + // IncludeSystem indicates whether the query cache includes system collections. + IncludeSystem *bool `json:"includeSystem,omitempty"` + // MaxEntrySize is the maximum size of a single query cache entry in bytes. + MaxEntrySize *uint64 `json:"maxEntrySize,omitempty"` + // MaxResults is the maximum number of results that can be stored in the query cache. + MaxResults *uint16 `json:"maxResults,omitempty"` + // MaxResultsSize is the maximum size of the query cache in bytes. + MaxResultsSize *uint64 `json:"maxResultsSize,omitempty"` + // Mode is the query cache mode. + // The mode can be one of the following values: + // "on" - the query cache is enabled and will be used for all queries. + // "off" - the query cache is disabled and will not be used for any queries. + // "demand" - the query cache is enabled, but will only be used for queries that explicitly request it. + Mode *string `json:"mode,omitempty"` +} + +type UserDefinedFunctionObject struct { + // Code is the JavaScript function body as a string. + Code *string `json:"code"` + + // Name is the fully qualified name of the user-defined function, including namespace. + Name *string `json:"name"` + + // IsDeterministic indicates whether the function always produces the same output for identical input. 
+ IsDeterministic *bool `json:"isDeterministic"` +} diff --git a/v2/arangodb/database_query_impl.go b/v2/arangodb/database_query_impl.go index 0386c1bd..4fb6359b 100644 --- a/v2/arangodb/database_query_impl.go +++ b/v2/arangodb/database_query_impl.go @@ -23,9 +23,12 @@ package arangodb import ( "context" "encoding/json" + "fmt" "net/http" + "path" "github.com/arangodb/go-driver/v2/arangodb/shared" + "github.com/arangodb/go-driver/v2/utils" "github.com/arangodb/go-driver/v2/connection" ) @@ -134,3 +137,397 @@ func (d databaseQuery) ExplainQuery(ctx context.Context, query string, bindVars return ExplainQueryResult{}, response.AsArangoErrorWithCode(code) } } + +func (d databaseQuery) GetQueryProperties(ctx context.Context) (QueryProperties, error) { + url := d.db.url("_api", "query", "properties") + + var response struct { + shared.ResponseStruct `json:",inline"` + QueryProperties `json:",inline"` + } + resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return QueryProperties{}, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response.QueryProperties, nil + default: + return QueryProperties{}, response.AsArangoErrorWithCode(code) + } +} + +// Validation for all fields is required by the ArangoDB API spec for PUT /_api/query/properties. +// Partial updates are not supported; all fields must be included in the request. 
+func validateQueryPropertiesFields(options QueryProperties) error { + if options.Enabled == nil { + return RequiredFieldError("enabled") + } + if options.TrackSlowQueries == nil { + return RequiredFieldError("trackSlowQueries") + } + if options.TrackBindVars == nil { + return RequiredFieldError("trackBindVars") + } + if options.MaxSlowQueries == nil { + return RequiredFieldError("maxSlowQueries") + } + if options.SlowQueryThreshold == nil { + return RequiredFieldError("slowQueryThreshold") + } + if options.MaxQueryStringLength == nil { + return RequiredFieldError("maxQueryStringLength") + } + return nil +} + +func (d databaseQuery) UpdateQueryProperties(ctx context.Context, options QueryProperties) (QueryProperties, error) { + url := d.db.url("_api", "query", "properties") + + // Validate all fields are set + if err := validateQueryPropertiesFields(options); err != nil { + return QueryProperties{}, err + } + + var response struct { + shared.ResponseStruct `json:",inline"` + QueryProperties `json:",inline"` + } + resp, err := connection.CallPut(ctx, d.db.connection(), url, &response, options, d.db.modifiers...) + if err != nil { + return QueryProperties{}, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response.QueryProperties, nil + default: + return QueryProperties{}, response.AsArangoErrorWithCode(code) + } +} + +func (d databaseQuery) listAQLQueries(ctx context.Context, endpoint string, all *bool) ([]RunningAQLQuery, error) { + url := d.db.url("_api", "query", endpoint) + if all != nil && *all { + url += "?all=true" + } + + // Use json.RawMessage to capture raw response for debugging + var rawResult json.RawMessage + resp, err := connection.CallGet(ctx, d.db.connection(), url, &rawResult, d.db.modifiers...) 
+ if err != nil { + return nil, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + // Try to unmarshal as array first + var result []RunningAQLQuery + if err := json.Unmarshal(rawResult, &result); err == nil { + return result, nil + } + + // If array unmarshaling fails, try as object with result field + var objResult struct { + Result []RunningAQLQuery `json:"result"` + Error bool `json:"error"` + Code int `json:"code"` + } + + if err := json.Unmarshal(rawResult, &objResult); err == nil { + if objResult.Error { + return nil, fmt.Errorf("ArangoDB API error: code %d", objResult.Code) + } + return objResult.Result, nil + } + + // If both fail, return the unmarshal error + return nil, fmt.Errorf("cannot unmarshal response into []RunningAQLQuery or object with result field: %s", string(rawResult)) + case http.StatusForbidden: + // Add custom 403 error message here + return nil, fmt.Errorf("403 Forbidden: likely insufficient permissions to access /_api/query/%s. Make sure the user has admin rights", endpoint) + default: + return nil, (&shared.ResponseStruct{}).AsArangoErrorWithCode(code) + } +} + +func (d databaseQuery) ListOfRunningAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error) { + return d.listAQLQueries(ctx, "current", all) +} + +func (d databaseQuery) ListOfSlowAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error) { + return d.listAQLQueries(ctx, "slow", all) +} + +func (d databaseQuery) deleteQueryEndpoint(ctx context.Context, path string, all *bool) error { + url := d.db.url(path) + + if all != nil && *all { + url += "?all=true" + } + + var response struct { + shared.ResponseStruct `json:",inline"` + } + + resp, err := connection.CallDelete(ctx, d.db.connection(), url, &response, d.db.modifiers...) 
+ if err != nil { + return err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return nil + default: + return response.AsArangoErrorWithCode(code) + } +} + +func (d databaseQuery) ClearSlowAQLQueries(ctx context.Context, all *bool) error { + return d.deleteQueryEndpoint(ctx, "_api/query/slow", all) +} + +func (d databaseQuery) KillAQLQuery(ctx context.Context, queryId string, all *bool) error { + return d.deleteQueryEndpoint(ctx, path.Join("_api/query", queryId), all) +} + +func (d databaseQuery) GetAllOptimizerRules(ctx context.Context) ([]OptimizerRules, error) { + url := d.db.url("_api", "query", "rules") + + var response []OptimizerRules + + resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return nil, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response, nil + default: + return nil, fmt.Errorf("API returned status %d", code) + } +} + +func (d databaseQuery) GetQueryPlanCache(ctx context.Context) ([]QueryPlanCacheRespObject, error) { + url := d.db.url("_api", "query-plan-cache") + + var response []QueryPlanCacheRespObject + + resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return nil, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response, nil + default: + return nil, fmt.Errorf("API returned status %d", code) + } +} + +func (d databaseQuery) ClearQueryPlanCache(ctx context.Context) error { + url := d.db.url("_api", "query-plan-cache") + + var response struct { + shared.ResponseStruct `json:",inline"` + } + + resp, err := connection.CallDelete(ctx, d.db.connection(), url, &response, d.db.modifiers...) 
+ if err != nil { + return err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return nil + default: + return response.AsArangoErrorWithCode(code) + } +} + +func (d databaseQuery) GetQueryEntriesCache(ctx context.Context) ([]QueryCacheEntriesRespObject, error) { + url := d.db.url("_api", "query-cache", "entries") + + var response []QueryCacheEntriesRespObject + + resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return nil, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response, nil + default: + return nil, fmt.Errorf("API returned status %d", code) + } +} + +func (d databaseQuery) ClearQueryCache(ctx context.Context) error { + url := d.db.url("_api", "query-cache") + + var response struct { + shared.ResponseStruct `json:",inline"` + } + + resp, err := connection.CallDelete(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return nil + default: + return response.AsArangoErrorWithCode(code) + } +} + +func (d databaseQuery) GetQueryCacheProperties(ctx context.Context) (QueryCacheProperties, error) { + url := d.db.url("_api", "query-cache", "properties") + + var response struct { + shared.ResponseStruct `json:",inline"` + QueryCacheProperties `json:",inline"` + } + + resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...) + if err != nil { + return QueryCacheProperties{}, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response.QueryCacheProperties, nil + default: + return QueryCacheProperties{}, response.AsArangoErrorWithCode(code) + } +} + +func validateQueryCachePropertiesFields(options QueryCacheProperties) error { + if options.Mode != nil { + validModes := map[string]bool{"on": true, "off": true, "demand": true} + if !validModes[*options.Mode] { + return fmt.Errorf("invalid mode: %s. 
Valid values are 'on', 'off', or 'demand'", *options.Mode) + } + } + return nil +} + +func (d databaseQuery) SetQueryCacheProperties(ctx context.Context, options QueryCacheProperties) (QueryCacheProperties, error) { + url := d.db.url("_api", "query-cache", "properties") + // Validate all fields are set + if err := validateQueryCachePropertiesFields(options); err != nil { + return QueryCacheProperties{}, err + } + var response struct { + shared.ResponseStruct `json:",inline"` + QueryCacheProperties `json:",inline"` + } + + resp, err := connection.CallPut(ctx, d.db.connection(), url, &response, options, d.db.modifiers...) + if err != nil { + return QueryCacheProperties{}, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response.QueryCacheProperties, nil + default: + return QueryCacheProperties{}, response.AsArangoErrorWithCode(code) + } +} + +func validateUserDefinedFunctionFields(options UserDefinedFunctionObject) error { + if options.Code == nil { + return RequiredFieldError("code") + } + if options.IsDeterministic == nil { + return RequiredFieldError("isDeterministic") + } + if options.Name == nil { + return RequiredFieldError("name") + } + return nil + +} + +func (d databaseQuery) CreateUserDefinedFunction(ctx context.Context, options UserDefinedFunctionObject) (bool, error) { + url := d.db.url("_api", "aqlfunction") + // Validate all fields are set + if err := validateUserDefinedFunctionFields(options); err != nil { + return false, err + } + var response struct { + shared.ResponseStruct `json:",inline"` + IsNewlyCreated bool `json:"isNewlyCreated,omitempty"` + } + + resp, err := connection.CallPost(ctx, d.db.connection(), url, &response, options, d.db.modifiers...) 
+	if err != nil {
+		return false, err
+	}
+
+	switch code := resp.Code(); code {
+	case http.StatusOK, http.StatusCreated:
+		return response.IsNewlyCreated, nil
+	default:
+		return false, response.AsArangoErrorWithCode(code)
+	}
+}
+
+func (d databaseQuery) DeleteUserDefinedFunction(ctx context.Context, name *string, group *bool) (*int, error) {
+	// Validate 'name' is required
+	if name == nil || *name == "" {
+		return nil, RequiredFieldError("name")
+	}
+
+	// Construct URL with name
+	url := d.db.url("_api", "aqlfunction", *name)
+
+	// Append optional group query parameter
+	if group != nil {
+		url = fmt.Sprintf("%s?group=%t", url, *group)
+	}
+
+	var response struct {
+		shared.ResponseStruct `json:",inline"`
+		DeletedCount          *int `json:"deletedCount,omitempty"`
+	}
+
+	resp, err := connection.CallDelete(ctx, d.db.connection(), url, &response, d.db.modifiers...)
+	if err != nil {
+		return nil, err
+	}
+
+	switch code := resp.Code(); code {
+	case http.StatusOK, http.StatusCreated:
+		return response.DeletedCount, nil
+	default:
+		return nil, response.AsArangoErrorWithCode(code)
+	}
+}
+
+func (d databaseQuery) GetUserDefinedFunctions(ctx context.Context) ([]UserDefinedFunctionObject, error) {
+	url := d.db.url("_api", "aqlfunction")
+
+	var response struct {
+		shared.ResponseStruct `json:",inline"`
+		Result                []UserDefinedFunctionObject `json:"result"`
+	}
+
+	resp, err := connection.CallGet(ctx, d.db.connection(), url, &response, d.db.modifiers...)
+ if err != nil { + return nil, err + } + + switch code := resp.Code(); code { + case http.StatusOK: + return response.Result, nil + default: + return nil, response.AsArangoErrorWithCode(code) + } +} diff --git a/v2/arangodb/utils.go b/v2/arangodb/utils.go index a3a72a5d..0a599eec 100644 --- a/v2/arangodb/utils.go +++ b/v2/arangodb/utils.go @@ -24,6 +24,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "reflect" @@ -108,3 +109,7 @@ func CreateDocuments(ctx context.Context, col Collection, docCount int, generato _, err := col.CreateDocuments(ctx, docs) return err } + +func RequiredFieldError(field string) error { + return fmt.Errorf("%s field must be set", field) +} diff --git a/v2/tests/database_query_test.go b/v2/tests/database_query_test.go index c5e093ab..0423f2d4 100644 --- a/v2/tests/database_query_test.go +++ b/v2/tests/database_query_test.go @@ -23,7 +23,9 @@ package tests import ( "context" "fmt" + "strings" "testing" + "time" "github.com/stretchr/testify/require" @@ -229,3 +231,1079 @@ func Test_QueryBatchWithRetries(t *testing.T) { }) }) } + +func Test_GetQueryProperties(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + WithDatabase(t, client, nil, func(db arangodb.Database) { + res, err := db.GetQueryProperties(context.Background()) + require.NoError(t, err) + jsonResp, err := utils.ToJSONString(res) + require.NoError(t, err) + t.Logf("Query Properties: %s", jsonResp) + // Check that the response contains expected fields + require.NotNil(t, res) + require.IsType(t, true, *res.Enabled) + require.IsType(t, true, *res.TrackSlowQueries) + require.IsType(t, true, *res.TrackBindVars) + require.GreaterOrEqual(t, *res.MaxSlowQueries, 0) + require.Greater(t, *res.SlowQueryThreshold, 0.0) + require.Greater(t, *res.MaxQueryStringLength, 0) + }) + }) +} + +func Test_UpdateQueryProperties(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + WithDatabase(t, client, nil, func(db arangodb.Database) { + res, err := 
db.GetQueryProperties(context.Background()) + require.NoError(t, err) + require.NotNil(t, res) + options := arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + TrackBindVars: utils.NewType(false), // optional but useful for debugging + MaxSlowQueries: utils.NewType(*res.MaxSlowQueries + 1), + SlowQueryThreshold: utils.NewType(*res.SlowQueryThreshold + 0.1), + MaxQueryStringLength: utils.NewType(*res.MaxQueryStringLength + 100), + } + updateResp, err := db.UpdateQueryProperties(context.Background(), options) + require.NoError(t, err) + require.NotNil(t, updateResp) + require.Equal(t, *options.Enabled, *updateResp.Enabled) + require.Equal(t, *options.TrackSlowQueries, *updateResp.TrackSlowQueries) + require.Equal(t, *options.TrackBindVars, *updateResp.TrackBindVars) + require.Equal(t, *options.MaxSlowQueries, *updateResp.MaxSlowQueries) + require.Equal(t, *options.SlowQueryThreshold, *updateResp.SlowQueryThreshold) + require.Equal(t, *options.MaxQueryStringLength, *updateResp.MaxQueryStringLength) + res, err = db.GetQueryProperties(context.Background()) + require.NoError(t, err) + require.NotNil(t, res) + }) + }) +} + +func Test_ListOfRunningAQLQueries(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Enable query tracking AND plan caching + _, err = db.UpdateQueryProperties(ctx, arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackBindVars: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + SlowQueryThreshold: utils.NewType(0.0001), + MaxSlowQueries: utils.NewType(54), + MaxQueryStringLength: utils.NewType(4094), + }) + require.NoError(t, err) + + // Test that the endpoint works (should return empty list or some queries) + queries, err := db.ListOfRunningAQLQueries(ctx, utils.NewType(false)) + require.NoError(t, err) + require.NotNil(t, queries) + 
t.Logf("Current running queries (all=false): %d\n", len(queries)) + t.Run("Test that queries are not empty", func(t *testing.T) { + + // Create a context we can cancel + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Start a transaction with a long-running query + queryStarted := make(chan struct{}) + go func() { + defer close(queryStarted) + + // Use a streaming query that processes results slowly + bindVars := map[string]interface{}{ + "max": 10000000, + } + + cursor, err := db.Query(ctx, ` + FOR i IN 1..@max + LET computation = ( + FOR x IN 1..100 + RETURN x * i + ) + RETURN {i: i, sum: SUM(computation)} +`, &arangodb.QueryOptions{ + BindVars: bindVars, + }) + + if err != nil { + if !strings.Contains(err.Error(), "canceled") { + t.Logf("Query error: %v", err) + } + return + } + + // Process results slowly to keep query active longer + if cursor != nil { + for cursor.HasMore() { + var result interface{} + _, err := cursor.ReadDocument(ctx, &result) + if err != nil { + break + } + // Add small delay to keep query running longer + time.Sleep(10 * time.Millisecond) + } + cursor.Close() + } + }() + + // Wait for query to start and be registered + time.Sleep(2 * time.Second) + + // Check for running queries multiple times + var foundRunningQuery bool + for attempt := 0; attempt < 15; attempt++ { + queries, err := db.ListOfRunningAQLQueries(ctx, utils.NewType(false)) + require.NoError(t, err) + + t.Logf("Attempt %d: Found %d queries", attempt+1, len(queries)) + + if len(queries) > 0 { + foundRunningQuery = true + t.Logf("SUCCESS: Found %d running queries on attempt %d\n", len(queries), attempt+1) + // Log query details + for i, query := range queries { + bindVarsJSON, _ := utils.ToJSONString(*query.BindVars) + t.Logf("Query %d: ID=%s, State=%s, BindVars=%s", + i, *query.Id, *query.State, bindVarsJSON) + } + break + } + + time.Sleep(300 * time.Millisecond) + } + + // Cancel the query + cancel() + + // Assert we found running queries + require.True(t, 
foundRunningQuery, "Should have found at least one running query") + }) + }) +} + +func Test_ListOfSlowAQLQueries(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + // Get the database + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Get the query properties + res, err := db.GetQueryProperties(ctx) + require.NoError(t, err) + + jsonResp, err := utils.ToJSONString(res) + require.NoError(t, err) + t.Logf("Query Properties: %s", jsonResp) + // Check that the response contains expected fields + require.NotNil(t, res) + // Update query properties to ensure slow queries are tracked + t.Logf("Updating query properties to track slow queries") + // Set a low threshold to ensure we capture slow queries + // and limit the number of slow queries to 1 for testing + options := arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + TrackBindVars: utils.NewType(true), // optional but useful for debugging + MaxSlowQueries: utils.NewType(1), + SlowQueryThreshold: utils.NewType(0.0001), + MaxQueryStringLength: utils.NewType(4096), + } + // Update the query properties + _, err = db.UpdateQueryProperties(ctx, options) + require.NoError(t, err) + + t.Run("Test that queries are not empty", func(t *testing.T) { + + _, err := db.Query(ctx, "FOR i IN 1..1000000 COLLECT WITH COUNT INTO length RETURN length", nil) + require.NoError(t, err) + + // Wait for query to start and be registered + time.Sleep(2 * time.Second) + + // Check for running queries multiple times + var foundRunningQuery bool + for attempt := 0; attempt < 15; attempt++ { + queries, err := db.ListOfSlowAQLQueries(ctx, utils.NewType(false)) + require.NoError(t, err) + + t.Logf("Attempt %d: Found %d queries", attempt+1, len(queries)) + + if len(queries) > 0 { + foundRunningQuery = true + t.Logf("SUCCESS: Found %d running queries on attempt %d\n", len(queries), attempt+1) + // Log query details + 
for i, query := range queries { + t.Logf("Query %d: ID=%s, State=%s", i, *query.Id, *query.State) + } + break + } + + time.Sleep(300 * time.Millisecond) + } + + // Assert we found running queries + require.True(t, foundRunningQuery, "Should have found at least one running query") + }) + }) +} + +func Test_KillAQLQuery(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + // Get the database + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + options := arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + TrackBindVars: utils.NewType(true), // optional but useful for debugging + MaxSlowQueries: utils.NewType(1), + SlowQueryThreshold: utils.NewType(0.0001), + MaxQueryStringLength: utils.NewType(3096), + } + // Update the query properties + _, err = db.UpdateQueryProperties(ctx, options) + require.NoError(t, err) + // Channel to signal when query has started + // Create a context we can cancel + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start a transaction with a long-running query + queryStarted := make(chan struct{}) + go func() { + defer close(queryStarted) + + // Use a streaming query that processes results slowly + bindVars := map[string]interface{}{ + "max": 10000000, + } + + cursor, err := db.Query(ctx, ` + FOR i IN 1..@max + LET computation = ( + FOR x IN 1..100 + RETURN x * i + ) + RETURN {i: i, sum: SUM(computation)} +`, &arangodb.QueryOptions{ + BindVars: bindVars, + }) + + if err != nil { + if !strings.Contains(err.Error(), "canceled") { + t.Logf("Query error: %v", err) + } + return + } + + // Process results slowly to keep query active longer + if cursor != nil { + for cursor.HasMore() { + var result interface{} + _, err := cursor.ReadDocument(ctx, &result) + if err != nil { + break + } + // Add small delay to keep query running longer + time.Sleep(10 * time.Millisecond) + } + cursor.Close() 
+ } + }() + + // Wait for query to start and be registered + time.Sleep(2 * time.Second) + + // Check for running queries multiple times + var foundRunningQuery bool + for attempt := 0; attempt < 15; attempt++ { + queries, err := db.ListOfRunningAQLQueries(context.Background(), utils.NewType(false)) + + // Enhanced error logging to help debug the issue + if err != nil { + t.Logf("Attempt %d: Error getting queries: %v", attempt+1, err) + + // Log additional context about the error + if strings.Contains(err.Error(), "cannot unmarshal") { + t.Logf("This suggests a response format mismatch between local and CI environments") + t.Logf("Consider checking ArangoDB version differences or server configuration") + } + + // Continue to next attempt instead of failing immediately + time.Sleep(300 * time.Millisecond) + continue + } + + t.Logf("Attempt %d: Found %d queries", attempt+1, len(queries)) + + if len(queries) > 0 { + foundRunningQuery = true + t.Logf("SUCCESS: Found %d running queries on attempt %d\n", len(queries), attempt+1) + // Log query details + for i, query := range queries { + bindVarsJSON, _ := utils.ToJSONString(*query.BindVars) + t.Logf("Query %d: ID=%s, State=%s, BindVars=%s", + i, *query.Id, *query.State, bindVarsJSON) + // Kill the query + err := db.KillAQLQuery(ctx, *query.Id, utils.NewType(false)) + require.NoError(t, err, "Failed to kill query %s", *query.Id) + t.Logf("Killed query %s", *query.Id) + } + break + } + + time.Sleep(300 * time.Millisecond) + } + + // Cancel the query + cancel() + + // More detailed assertion message + if !foundRunningQuery { + t.Logf("FAILURE ANALYSIS:") + t.Logf("- No running queries were found during any of the 15 attempts") + t.Logf("- This could indicate:") + t.Logf(" 1. Query executed too quickly in CI environment") + t.Logf(" 2. Query tracking is disabled in CI ArangoDB configuration") + t.Logf(" 3. Different ArangoDB version/configuration in CI") + t.Logf(" 4. 
Resource constraints causing immediate query completion") + } + + // Assert we found running queries + require.True(t, foundRunningQuery, "Should have found at least one running query") + }) +} + +func Test_GetAllOptimizerRules(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + WithDatabase(t, client, nil, func(db arangodb.Database) { + res, err := db.GetAllOptimizerRules(context.Background()) + require.NoError(t, err) + // Check that the response contains expected fields + require.NotNil(t, res) + require.GreaterOrEqual(t, len(res), 1, "Should return at least one optimizer rule") + require.NotNil(t, res[0].Name, "Optimizer rule name should not be empty") + require.NotNil(t, res[0].Flags, "Optimizer rule flags should not be empty") + require.NotNil(t, res[0].Flags.CanBeDisabled, "Optimizer flags canBeDisabled should not be empty") + require.NotNil(t, res[0].Flags.CanCreateAdditionalPlans, "Optimizer flags canCreateAdditionalPlans should not be empty") + require.NotNil(t, res[0].Flags.ClusterOnly, "Optimizer flags clusterOnly should not be empty") + require.NotNil(t, res[0].Flags.DisabledByDefault, "Optimizer flags disabledByDefault should not be empty") + require.NotNil(t, res[0].Flags.EnterpriseOnly, "Optimizer flags enterpriseOnly should not be empty") + require.NotNil(t, res[0].Flags.Hidden, "Optimizer flags hidden should not be empty") + }) + }) +} + +func Test_GetQueryPlanCache(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Enable query tracking AND plan caching + _, err = db.UpdateQueryProperties(ctx, arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackBindVars: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + SlowQueryThreshold: utils.NewType(0.0001), + MaxSlowQueries: utils.NewType(54), + MaxQueryStringLength: utils.NewType(3904), + }) + 
require.NoError(t, err)
+
+	plansBefore, _ := db.GetQueryPlanCache(ctx)
+	t.Logf("Before: %d plans", len(plansBefore))
+
+	// Create test collection
+	WithCollectionV2(t, db, nil, func(col arangodb.Collection) {
+		// Insert more data to make query more complex
+		docs := make([]map[string]interface{}, 100)
+		for i := 0; i < 100; i++ {
+			docs[i] = map[string]interface{}{
+				"value":    i,
+				"category": fmt.Sprintf("cat_%d", i%5),
+				"active":   i%2 == 0,
+			}
+		}
+		_, err := col.CreateDocuments(ctx, docs)
+		require.NoError(t, err)
+
+		// Use a more complex query that's more likely to be cached
+		query := `
+			FOR d IN @@col
+				FILTER d.value >= @minVal AND d.value <= @maxVal
+				FILTER d.category == @category
+				SORT d.value
+				LIMIT @offset, @count
+				RETURN {
+					id: d._key,
+					value: d.value,
+					category: d.category,
+					computed: d.value * 2
+				}
+		`
+
+		bindVars := map[string]interface{}{
+			"@col":     col.Name(),
+			"minVal":   10,
+			"maxVal":   50,
+			"category": "cat_1",
+			"offset":   0,
+			"count":    10,
+		}
+
+		// Run the same query many more times to encourage caching
+		// ArangoDB typically caches plans after they've been used multiple times
+		for i := 0; i < 100; i++ {
+			cursor, err := db.Query(ctx, query, &arangodb.QueryOptions{
+				BindVars: bindVars,
+				Cache:    true, // Explicitly enable caching if supported
+			})
+			require.NoError(t, err)
+
+			for cursor.HasMore() {
+				var doc interface{}
+				_, err := cursor.ReadDocument(ctx, &doc)
+				require.NoError(t, err)
+			}
+			cursor.Close()
+
+			// Vary the parameters slightly to create different cached plans
+			if i%20 == 0 {
+				bindVars["category"] = fmt.Sprintf("cat_%d", (i/20)%5)
+			}
+		}
+
+		// Also try some different but similar queries
+		queries := []struct {
+			query    string
+			bindVars map[string]interface{}
+		}{
+			{
+				query: `FOR d IN @@col FILTER d.value > @val SORT d.value LIMIT 5 RETURN d`,
+				bindVars: map[string]interface{}{
+					"@col": col.Name(),
+					"val":  25,
+				},
+			},
+			{
+				query: `FOR d IN @@col FILTER d.category == @category RETURN d.value`,
+				
bindVars: map[string]interface{}{ + "@col": col.Name(), + "category": "cat_1", + }, + }, + { + query: `FOR d IN @@col FILTER d.active == @active SORT d.value DESC LIMIT 10 RETURN d`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "active": true, + }, + }, + } + + for _, queryInfo := range queries { + for i := 0; i < 20; i++ { + cursor, err := db.Query(ctx, queryInfo.query, &arangodb.QueryOptions{ + BindVars: queryInfo.bindVars, + Cache: true, // Enable query plan caching + }) + require.NoError(t, err) + + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + } + } + }) + + // Wait a moment for caching to happen + time.Sleep(100 * time.Millisecond) + + plansAfter, err := db.GetQueryPlanCache(ctx) + require.NoError(t, err) + t.Logf("After: %d plans", len(plansAfter)) + + // Check current query properties to verify settings + props, err := db.GetQueryProperties(ctx) + if err == nil { + propsJson, _ := utils.ToJSONString(props) + t.Logf("Query Properties: %s", propsJson) + } + + // Get query plan cache + resp, err := db.GetQueryPlanCache(ctx) + require.NoError(t, err) + require.NotNil(t, resp) + + // If still empty, check if the feature is supported + if len(resp) == 0 { + t.Logf("Query plan cache is empty. This might be because:") + t.Logf("1. Query plan caching is disabled in ArangoDB configuration") + t.Logf("2. Queries are too simple to warrant caching") + t.Logf("3. Not enough executions to trigger caching") + t.Logf("4. 
Feature might not be available in this ArangoDB version") + + // Check ArangoDB version + version, err := client.Version(ctx) + if err == nil { + t.Logf("ArangoDB Version: %s", version.Version) + } + } else { + // Success case - we have cached plans + require.Greater(t, len(resp), 0, "Expected at least one cached plan") + t.Logf("Successfully found %d cached query plans", len(resp)) + } + }) +} + +func Test_ClearQueryPlanCache(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Enable query tracking AND plan caching + _, err = db.UpdateQueryProperties(ctx, arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackBindVars: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + SlowQueryThreshold: utils.NewType(0.0001), + MaxSlowQueries: utils.NewType(54), + MaxQueryStringLength: utils.NewType(3904), + }) + require.NoError(t, err) + + // Create test collection + WithCollectionV2(t, db, nil, func(col arangodb.Collection) { + // Insert more data to make query more complex + docs := make([]map[string]interface{}, 100) + for i := 0; i < 100; i++ { + docs[i] = map[string]interface{}{ + "value": i, + "category": fmt.Sprintf("cat_%d", i%5), + "active": i%2 == 0, + } + } + _, err := col.CreateDocuments(ctx, docs) + require.NoError(t, err) + + // Use a more complex query that's more likely to be cached + query := ` + FOR d IN @@col + FILTER d.value >= @minVal AND d.value <= @maxVal + FILTER d.category == @category + SORT d.value + LIMIT @offset, @count + RETURN { + id: d._key, + value: d.value, + category: d.category, + computed: d.value * 2 + } + ` + + bindVars := map[string]interface{}{ + "@col": col.Name(), + "minVal": 10, + "maxVal": 50, + "category": "cat_1", + "offset": 0, + "count": 10, + } + + // Run the same query many more times to encourage caching + // ArangoDB typically caches plans 
after they've been used multiple times + for i := 0; i < 100; i++ { + cursor, err := db.Query(ctx, query, &arangodb.QueryOptions{ + BindVars: bindVars, + Cache: true, // Explicitly enable caching if supported + }) + require.NoError(t, err) + + // Process all results + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + + // Vary the parameters slightly to create different cached plans + if i%20 == 0 { + bindVars["category"] = fmt.Sprintf("cat_%d", (i/20)%5) + } + } + + // Also try some different but similar queries + queries := []struct { + query string + bindVars map[string]interface{} + }{ + { + query: `FOR d IN @@col FILTER d.value > @val SORT d.value LIMIT 5 RETURN d`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "val": 25, + }, + }, + { + query: `FOR d IN @@col FILTER d.category == @category RETURN d.value`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "category": "cat_1", + }, + }, + { + query: `FOR d IN @@col FILTER d.active == @active SORT d.value DESC LIMIT 10 RETURN d`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "active": true, + }, + }, + } + + for _, queryInfo := range queries { + for i := 0; i < 20; i++ { + cursor, err := db.Query(ctx, queryInfo.query, &arangodb.QueryOptions{ + BindVars: queryInfo.bindVars, + Cache: true, // Enable query plan caching + }) + require.NoError(t, err) + + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + } + } + }) + + // Wait a moment for caching to happen + time.Sleep(100 * time.Millisecond) + + err = db.ClearQueryPlanCache(ctx) + require.NoError(t, err) + }) +} + +func Test_GetQueryEntriesCache(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + 
// Enable query tracking AND plan caching + _, err = db.UpdateQueryProperties(ctx, arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackBindVars: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + SlowQueryThreshold: utils.NewType(0.0001), + MaxSlowQueries: utils.NewType(54), + MaxQueryStringLength: utils.NewType(3904), + }) + require.NoError(t, err) + + plansBefore, _ := db.GetQueryEntriesCache(ctx) + t.Logf("Before: %d plans", len(plansBefore)) + + // Create test collection + WithCollectionV2(t, db, nil, func(col arangodb.Collection) { + // Insert more data to make query more complex + docs := make([]map[string]interface{}, 100) + for i := 0; i < 100; i++ { + docs[i] = map[string]interface{}{ + "value": i, + "category": fmt.Sprintf("cat_%d", i%5), + "active": i%2 == 0, + } + } + _, err := col.CreateDocuments(ctx, docs) + require.NoError(t, err) + + // Use a more complex query that's more likely to be cached + query := ` + FOR d IN @@col + FILTER d.value >= @minVal AND d.value <= @maxVal + FILTER d.category == @category + SORT d.value + LIMIT @offset, @count + RETURN { + id: d._key, + value: d.value, + category: d.category, + computed: d.value * 2 + } + ` + + bindVars := map[string]interface{}{ + "@col": col.Name(), + "minVal": 10, + "maxVal": 50, + "category": "cat_1", + "offset": 0, + "count": 10, + } + + // Run the same query many more times to encourage caching + // ArangoDB typically caches plans after they've been used multiple times + for i := 0; i < 100; i++ { + cursor, err := db.Query(ctx, query, &arangodb.QueryOptions{ + BindVars: bindVars, + Cache: true, // Explicitly enable caching if supported + }) + require.NoError(t, err) + + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + + // Vary the parameters slightly to create different cached plans + if i%20 == 0 { + bindVars["category"] = fmt.Sprintf("cat_%d", (i/20)%5) + } + } + + // Also try 
some different but similar queries
+		queries := []struct {
+			query    string
+			bindVars map[string]interface{}
+		}{
+			{
+				query: `FOR d IN @@col FILTER d.value > @val SORT d.value LIMIT 5 RETURN d`,
+				bindVars: map[string]interface{}{
+					"@col": col.Name(),
+					"val":  25,
+				},
+			},
+			{
+				query: `FOR d IN @@col FILTER d.category == @category RETURN d.value`,
+				bindVars: map[string]interface{}{
+					"@col":     col.Name(),
+					"category": "cat_1",
+				},
+			},
+			{
+				query: `FOR d IN @@col FILTER d.active == @active SORT d.value DESC LIMIT 10 RETURN d`,
+				bindVars: map[string]interface{}{
+					"@col":   col.Name(),
+					"active": true,
+				},
+			},
+		}
+
+		for _, queryInfo := range queries {
+			for i := 0; i < 20; i++ {
+				cursor, err := db.Query(ctx, queryInfo.query, &arangodb.QueryOptions{
+					BindVars: queryInfo.bindVars,
+					Cache:    true, // Enable query entries caching
+				})
+				require.NoError(t, err)
+
+				for cursor.HasMore() {
+					var doc interface{}
+					_, err := cursor.ReadDocument(ctx, &doc)
+					require.NoError(t, err)
+				}
+				cursor.Close()
+			}
+		}
+	})
+
+	// Wait a moment for caching to happen
+	time.Sleep(100 * time.Millisecond)
+
+	plansAfter, err := db.GetQueryEntriesCache(ctx)
+	require.NoError(t, err)
+	t.Logf("After: %d plans", len(plansAfter))
+
+	// Check current query properties to verify settings
+	props, err := db.GetQueryProperties(ctx)
+	if err == nil {
+		propsJson, _ := utils.ToJSONString(props)
+		t.Logf("Query Properties: %s", propsJson)
+	}
+
+	// Get query entries cache
+	resp, err := db.GetQueryEntriesCache(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+
+	// If still empty, check if the feature is supported
+	if len(resp) == 0 {
+		t.Logf("Query entries cache is empty. This might be because:")
+		t.Logf("1. Query entries caching is disabled in ArangoDB configuration")
+		t.Logf("2. Queries are too simple to warrant caching")
+		t.Logf("3. Not enough executions to trigger caching")
+		t.Logf("4. 
Feature might not be available in this ArangoDB version") + + // Check ArangoDB version + version, err := client.Version(ctx) + if err == nil { + t.Logf("ArangoDB Version: %s", version.Version) + } + } else { + // Success case - we have cached query entries + require.Greater(t, len(resp), 0, "Expected at least one query entries") + t.Logf("Successfully found %d cached query entries", len(resp)) + } + }) +} + +func Test_ClearQueryCache(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Enable query tracking AND plan caching + _, err = db.UpdateQueryProperties(ctx, arangodb.QueryProperties{ + Enabled: utils.NewType(true), + TrackBindVars: utils.NewType(true), + TrackSlowQueries: utils.NewType(true), + SlowQueryThreshold: utils.NewType(0.0001), + MaxSlowQueries: utils.NewType(54), + MaxQueryStringLength: utils.NewType(3904), + }) + require.NoError(t, err) + + // Create test collection + WithCollectionV2(t, db, nil, func(col arangodb.Collection) { + // Insert more data to make query more complex + docs := make([]map[string]interface{}, 100) + for i := 0; i < 100; i++ { + docs[i] = map[string]interface{}{ + "value": i, + "category": fmt.Sprintf("cat_%d", i%5), + "active": i%2 == 0, + } + } + _, err := col.CreateDocuments(ctx, docs) + require.NoError(t, err) + + // Use a more complex query that's more likely to be cached + query := ` + FOR d IN @@col + FILTER d.value >= @minVal AND d.value <= @maxVal + FILTER d.category == @category + SORT d.value + LIMIT @offset, @count + RETURN { + id: d._key, + value: d.value, + category: d.category, + computed: d.value * 2 + } + ` + + bindVars := map[string]interface{}{ + "@col": col.Name(), + "minVal": 10, + "maxVal": 50, + "category": "cat_1", + "offset": 0, + "count": 10, + } + + // Run the same query many more times to encourage caching + // ArangoDB typically caches 
plans after they've been used multiple times + for i := 0; i < 100; i++ { + cursor, err := db.Query(ctx, query, &arangodb.QueryOptions{ + BindVars: bindVars, + Cache: true, // Explicitly enable caching if supported + }) + require.NoError(t, err) + + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + + // Vary the parameters slightly to create different cached plans + if i%20 == 0 { + bindVars["category"] = fmt.Sprintf("cat_%d", (i/20)%5) + } + } + + // Also try some different but similar queries + queries := []struct { + query string + bindVars map[string]interface{} + }{ + { + query: `FOR d IN @@col FILTER d.value > @val SORT d.value LIMIT 5 RETURN d`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "val": 25, + }, + }, + { + query: `FOR d IN @@col FILTER d.category == @category RETURN d.value`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "category": "cat_1", + }, + }, + { + query: `FOR d IN @@col FILTER d.active == @active SORT d.value DESC LIMIT 10 RETURN d`, + bindVars: map[string]interface{}{ + "@col": col.Name(), + "active": true, + }, + }, + } + + for _, queryInfo := range queries { + for i := 0; i < 20; i++ { + cursor, err := db.Query(ctx, queryInfo.query, &arangodb.QueryOptions{ + BindVars: queryInfo.bindVars, + Cache: true, // Enable query plan caching + }) + require.NoError(t, err) + + for cursor.HasMore() { + var doc interface{} + _, err := cursor.ReadDocument(ctx, &doc) + require.NoError(t, err) + } + cursor.Close() + } + } + }) + + // Wait a moment for caching to happen + time.Sleep(100 * time.Millisecond) + + err = db.ClearQueryCache(ctx) + require.NoError(t, err) + }) +} + +func Test_GetQueryCacheProperties(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + 
queryCacheProperties, err := db.GetQueryCacheProperties(ctx) + require.NoError(t, err) + propsJson, err := utils.ToJSONString(queryCacheProperties) + require.NoError(t, err) + t.Logf("Query Properties: %s", propsJson) + require.NotNil(t, queryCacheProperties) + require.NotNil(t, queryCacheProperties.IncludeSystem, "IncludeSystem should not be nil") + require.NotNil(t, queryCacheProperties.MaxEntrySize, "MaxEntrySize should not be nil") + require.NotNil(t, queryCacheProperties.MaxResults, "MaxResults should not be nil") + require.NotNil(t, queryCacheProperties.MaxResultsSize, "MaxResultsSize should not be nil") + require.NotNil(t, queryCacheProperties.Mode, "Mode should not be nil") + }) +} + +func Test_SetQueryCacheProperties(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + // Use _system or test DB + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + queryCacheProperties, err := db.GetQueryCacheProperties(ctx) + require.NoError(t, err) + propsJson, err := utils.ToJSONString(queryCacheProperties) + require.NoError(t, err) + t.Logf("Before Query Properties: %s", propsJson) + SetQueryCacheProperties, err := db.SetQueryCacheProperties(ctx, arangodb.QueryCacheProperties{ + IncludeSystem: utils.NewType(true), + MaxResults: utils.NewType(uint16(32)), + }) + require.NoError(t, err) + SetQueryCachePropertiesJson, err := utils.ToJSONString(SetQueryCacheProperties) + require.NoError(t, err) + t.Logf("After Setting - Query Properties: %s", SetQueryCachePropertiesJson) + require.NotNil(t, SetQueryCacheProperties) + require.NotNil(t, SetQueryCacheProperties.IncludeSystem, "IncludeSystem should not be nil") + require.NotNil(t, SetQueryCacheProperties.MaxEntrySize, "MaxEntrySize should not be nil") + require.NotNil(t, SetQueryCacheProperties.MaxResults, "MaxResults should not be nil") + require.NotNil(t, SetQueryCacheProperties.MaxResultsSize, "MaxResultsSize should not be nil") + 
require.NotNil(t, SetQueryCacheProperties.Mode, "Mode should not be nil") + AfterSetQueryCacheProperties, err := db.GetQueryCacheProperties(ctx) + require.NoError(t, err) + AfterSetQueryCachePropertiesJson, err := utils.ToJSONString(AfterSetQueryCacheProperties) + require.NoError(t, err) + t.Logf("After Query Properties: %s", AfterSetQueryCachePropertiesJson) + }) +} + +func Test_UserDefinedFunctions(t *testing.T) { + Wrap(t, func(t *testing.T, client arangodb.Client) { + ctx := context.Background() + + db, err := client.GetDatabase(ctx, "_system", nil) + require.NoError(t, err) + + // Define UDF details + namespace := "myfunctions::temperature" + functionName := namespace + "::celsiustofahrenheit" + code := "function (celsius) { return celsius * 9 / 5 + 32; }" + + // Create UDF + createdFn, err := db.CreateUserDefinedFunction(ctx, arangodb.UserDefinedFunctionObject{ + Name: &functionName, + Code: &code, + IsDeterministic: utils.NewType(true), + }) + require.NoError(t, err) + require.NotNil(t, createdFn) + + // Get all UDFs + fns, err := db.GetUserDefinedFunctions(ctx) + require.NoError(t, err) + require.NotNil(t, fns) + + // Optionally validate that our created function exists in the list + var found bool + for _, fn := range fns { + if fn.Name != nil && *fn.Name == functionName { + found = true + break + } + } + require.True(t, found, "Created function not found in list of user-defined functions") + + // Delete all functions in the namespace + deletedCount, err := db.DeleteUserDefinedFunction(ctx, &namespace, utils.NewType(true)) + require.NoError(t, err) + require.NotNil(t, deletedCount) + t.Logf("Deleted user-defined function(s): %d", *deletedCount) + require.Greater(t, *deletedCount, 0, "Expected at least one function to be deleted") + }) +} diff --git a/v2/utils/type.go b/v2/utils/type.go index d6c37ca5..fb4bd9cd 100644 --- a/v2/utils/type.go +++ b/v2/utils/type.go @@ -20,7 +20,10 @@ package utils -import "reflect" +import ( + "encoding/json" + "reflect" +) 
func IsListPtr(i interface{}) bool { t := reflect.ValueOf(i) @@ -46,3 +49,11 @@ func IsList(i interface{}) bool { func NewType[T any](val T) *T { return &val } + +func ToJSONString(i interface{}) (string, error) { + data, err := json.MarshalIndent(i, "", " ") + if err != nil { + return "", err + } + return string(data), nil +}