chore: Use more optimal iterate hierarchy v2 (#18929) (#18972)
Closes #18929
Helps with #18500
Use IterateHierarchyV2 so that building the resource tree scales roughly linearly in the number of resources instead of up to quadratically.

Signed-off-by: Andrii Korotkov <[email protected]>
andrii-korotkov-verkada authored Jul 19, 2024 · 1 parent 22e498e · commit c59cb52
Showing 6 changed files with 85 additions and 47 deletions.
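
The shape of the change, as a minimal sketch rather than the controller's actual code: collectTree below is a hypothetical helper assuming only the LiveStateCache interface added in this commit (with cache = github.com/argoproj/argo-cd/v2/controller/cache, kube = github.com/argoproj/gitops-engine/pkg/utils/kube, appv1 = github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1).

// collectTree gathers every node reachable from the given keys.
// V1 (removed in this commit) walked the hierarchy once per key:
//     for _, k := range keys { _ = c.IterateHierarchy(server, k, visit) }
// so N keys over a namespace of M resources cost up to O(N*M).
// V2 walks the hierarchy once for the whole batch, roughly O(M).
func collectTree(c cache.LiveStateCache, server string, keys []kube.ResourceKey) ([]appv1.ResourceNode, error) {
    nodes := make([]appv1.ResourceNode, 0)
    err := c.IterateHierarchyV2(server, keys, func(child appv1.ResourceNode, appName string) bool {
        nodes = append(nodes, child)
        return true // keep descending into this node's children
    })
    return nodes, err
}
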
81 changes: 43 additions & 38 deletions controller/appcontroller.go
@@ -537,6 +537,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
 		warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
 	}
 	ts.AddCheckpoint("get_orphaned_resources_ms")
+	managedResourcesKeys := make([]kube.ResourceKey, 0)
 	for i := range managedResources {
 		managedResource := managedResources[i]
 		delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name))
@@ -562,57 +563,61 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
 				},
 			})
 		} else {
-			err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) bool {
-				permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
-					clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
-					if err != nil {
-						return nil, fmt.Errorf("failed to get project clusters: %w", err)
-					}
-					return clusters, nil
-				})
-				if !permitted {
-					return false
-				}
-				nodes = append(nodes, child)
-				return true
-			})
-			if err != nil {
-				return nil, fmt.Errorf("failed to iterate resource hierarchy: %w", err)
-			}
+			managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
 		}
 	}
+	err = ctrl.stateCache.IterateHierarchyV2(a.Spec.Destination.Server, managedResourcesKeys, func(child appv1.ResourceNode, appName string) bool {
+		permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
+			clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
+			if err != nil {
+				return nil, fmt.Errorf("failed to get project clusters: %w", err)
+			}
+			return clusters, nil
+		})
+		if !permitted {
+			return false
+		}
+		nodes = append(nodes, child)
+		return true
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to iterate resource hierarchy v2: %w", err)
+	}
 	ts.AddCheckpoint("process_managed_resources_ms")
 	orphanedNodes := make([]appv1.ResourceNode, 0)
+	orphanedNodesKeys := make([]kube.ResourceKey, 0)
 	for k := range orphanedNodesMap {
 		if k.Namespace != "" && proj.IsGroupKindPermitted(k.GroupKind(), true) && !isKnownOrphanedResourceExclusion(k, proj) {
-			err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) bool {
-				belongToAnotherApp := false
-				if appName != "" {
-					appKey := ctrl.toAppKey(appName)
-					if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey); exists && err == nil {
-						belongToAnotherApp = true
-					}
-				}
-
-				if belongToAnotherApp {
-					return false
-				}
-
-				permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
-					return ctrl.db.GetProjectClusters(context.TODO(), project)
-				})
-
-				if !permitted {
-					return false
-				}
-				orphanedNodes = append(orphanedNodes, child)
-				return true
-			})
-			if err != nil {
-				return nil, err
-			}
+			orphanedNodesKeys = append(orphanedNodesKeys, k)
 		}
 	}
+	err = ctrl.stateCache.IterateHierarchyV2(a.Spec.Destination.Server, orphanedNodesKeys, func(child appv1.ResourceNode, appName string) bool {
+		belongToAnotherApp := false
+		if appName != "" {
+			appKey := ctrl.toAppKey(appName)
+			if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey); exists && err == nil {
+				belongToAnotherApp = true
+			}
+		}
+
+		if belongToAnotherApp {
+			return false
+		}
+
+		permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
+			return ctrl.db.GetProjectClusters(context.TODO(), project)
+		})
+
+		if !permitted {
+			return false
+		}
+		orphanedNodes = append(orphanedNodes, child)
+		return true
+	})
+	if err != nil {
+		return nil, err
+	}
 
 	var conditions []appv1.ApplicationCondition
 	if len(orphanedNodes) > 0 && warnOrphaned {
 		conditions = []appv1.ApplicationCondition{{
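
Both IterateHierarchyV2 callbacks above use the visitor's boolean result to prune rather than abort: returning false for a node that belongs to another application or fails the project permission check skips that node and its entire subtree while the walk continues elsewhere. That reading assumes V2 keeps V1's visitor contract, which the unchanged callback bodies suggest. A hypothetical filtered walk in the same pattern (same assumed imports as the sketch above):

// filteredNodes sketches the pruning pattern used in getResourceTree;
// permit stands in for the project/permission checks above.
func filteredNodes(c cache.LiveStateCache, server string, keys []kube.ResourceKey, permit func(appv1.ResourceNode) bool) ([]appv1.ResourceNode, error) {
    nodes := make([]appv1.ResourceNode, 0)
    err := c.IterateHierarchyV2(server, keys, func(child appv1.ResourceNode, _ string) bool {
        if !permit(child) {
            return false // prune: skip child and everything below it
        }
        nodes = append(nodes, child)
        return true
    })
    return nodes, err
}
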
14 changes: 8 additions & 6 deletions controller/appcontroller_test.go
@@ -193,14 +193,16 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
 	mockStateCache.On("GetNamespaceTopLevelResources", mock.Anything, mock.Anything).Return(response, nil)
 	mockStateCache.On("IterateResources", mock.Anything, mock.Anything).Return(nil)
 	mockStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCacheMock, nil)
-	mockStateCache.On("IterateHierarchy", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
-		key := args[1].(kube.ResourceKey)
+	mockStateCache.On("IterateHierarchyV2", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+		keys := args[1].([]kube.ResourceKey)
 		action := args[2].(func(child v1alpha1.ResourceNode, appName string) bool)
-		appName := ""
-		if res, ok := data.namespacedResources[key]; ok {
-			appName = res.AppName
+		for _, key := range keys {
+			appName := ""
+			if res, ok := data.namespacedResources[key]; ok {
+				appName = res.AppName
+			}
+			_ = action(v1alpha1.ResourceNode{ResourceRef: v1alpha1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
 		}
-		_ = action(v1alpha1.ResourceNode{ResourceRef: v1alpha1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
 	}).Return(nil)
 	return ctrl
 }
13 changes: 13 additions & 0 deletions controller/cache/cache.go
@@ -120,6 +120,8 @@ type LiveStateCache interface {
 	GetClusterCache(server string) (clustercache.ClusterCache, error)
 	// Executes give callback against resource specified by the key and all its children
 	IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
+	// Executes the given callback against the resources specified by the keys and all of their children
+	IterateHierarchyV2(server string, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
 	// Returns state of live nodes which correspond for target nodes of specified application.
 	GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
 	// IterateResources iterates all resource stored in cache
@@ -625,6 +627,17 @@ func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, a
 	return nil
 }
 
+func (c *liveStateCache) IterateHierarchyV2(server string, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
+	clusterInfo, err := c.getSyncedCluster(server)
+	if err != nil {
+		return err
+	}
+	clusterInfo.IterateHierarchyV2(keys, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
+		return action(asResourceNode(resource), getApp(resource, namespaceResources))
+	})
+	return nil
+}
+
 func (c *liveStateCache) IterateResources(server string, callback func(res *clustercache.Resource, info *ResourceInfo)) error {
 	clusterInfo, err := c.getSyncedCluster(server)
 	if err != nil {
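
For intuition about the performance claim, here is a rough, illustrative sketch of why a batched traversal stays roughly linear; these toy types are not gitops-engine's implementation, which operates on its cluster cache. The idea is to visit each node at most once across all requested roots, instead of restarting the walk, and the child lookups it entails, for every key.

// iterateBatch visits every node reachable from roots at most once,
// so total work is O(nodes + edges) no matter how many roots are passed.
type node struct {
    key      string
    children []*node
}

func iterateBatch(roots []*node, action func(*node) bool) {
    visited := make(map[string]bool)
    var dfs func(*node)
    dfs = func(n *node) {
        if visited[n.key] {
            return // already seen while walking another root's subtree
        }
        visited[n.key] = true
        if !action(n) {
            return // pruned: skip this subtree
        }
        for _, c := range n.children {
            dfs(c)
        }
    }
    for _, r := range roots {
        dfs(r)
    }
}

Calling IterateHierarchy separately per key repeats that discovery work for every top-level resource, which is the up-to-quadratic behavior the commit message targets.
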
18 changes: 18 additions & 0 deletions controller/cache/mocks/LiveStateCache.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
 	github.com/alicebob/miniredis/v2 v2.33.0
 	github.com/antonmedv/expr v1.15.2
-	github.com/argoproj/gitops-engine v0.7.1-0.20240714153147-adb68bcaab73
+	github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470
 	github.com/argoproj/notifications-engine v0.4.1-0.20240606074338-0802cd427621
 	github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1
 	github.com/aws/aws-sdk-go v1.50.8
4 changes: 2 additions & 2 deletions go.sum
@@ -694,8 +694,8 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
 github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
-github.com/argoproj/gitops-engine v0.7.1-0.20240714153147-adb68bcaab73 h1:7kyTgFsPjvb6noafslp2pr7fBCS9s8OJ759LdLzrOro=
-github.com/argoproj/gitops-engine v0.7.1-0.20240714153147-adb68bcaab73/go.mod h1:xMIbuLg9Qj2e0egTy+8NcukbhRaVmWwK9vm3aAQZoi4=
+github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470 h1:RUo6je4n+FgNEkGsONhwxUtT67YqyEtrvMNd+t8pKSo=
+github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470/go.mod h1:xMIbuLg9Qj2e0egTy+8NcukbhRaVmWwK9vm3aAQZoi4=
 github.com/argoproj/notifications-engine v0.4.1-0.20240606074338-0802cd427621 h1:Yg1nt+D2uDK1SL2jSlfukA4yc7db184TTN7iWy3voRE=
 github.com/argoproj/notifications-engine v0.4.1-0.20240606074338-0802cd427621/go.mod h1:N0A4sEws2soZjEpY4hgZpQS8mRIEw6otzwfkgc3g9uQ=
 github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 h1:qsHwwOJ21K2Ao0xPju1sNuqphyMnMYkyB3ZLoLtxWpo=
