Add consumer aware slices
Signed-off-by: Mangirdas Judeikis <[email protected]>
On-behalf-of: @SAP [email protected]
mjudeikis committed Jan 28, 2025
1 parent 85ecae8 commit 5fe9f02
Showing 16 changed files with 1,138 additions and 326 deletions.
9 changes: 9 additions & 0 deletions config/crds/apis.kcp.io_apiexportendpointslices.yaml
@@ -148,6 +148,15 @@ spec:
- url
type: object
type: array
x-kubernetes-list-map-keys:
- url
x-kubernetes-list-type: map
shardSelector:
description: |-
  shardSelector is the selector used to filter the shards when determining
  partition scope while deriving the endpoints. It is set by the owning shard
  and is used by follower shards to determine whether they are in scope.
type: string
type: object
type: object
served: true
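The shardSelector description above implies a consumption pattern: the owning shard writes the selector, and each follower shard matches its own labels against it to decide whether it is in scope. A minimal Go sketch of that check, assuming the selector surfaces on the slice status as a plain label-selector string named ShardSelector (field placement and helper name are illustrative, not taken from this commit):

import (
	"k8s.io/apimachinery/pkg/labels"

	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
)

// shardInScope is a hypothetical helper: it reports whether the local shard's
// labels satisfy the slice's shard selector. An empty selector is treated here
// as "every shard is in scope" (an assumption, not stated by this commit).
func shardInScope(slice *apisv1alpha1.APIExportEndpointSlice, shardLabels map[string]string) (bool, error) {
	if slice.Status.ShardSelector == "" {
		return true, nil
	}
	sel, err := labels.Parse(slice.Status.ShardSelector)
	if err != nil {
		return false, err
	}
	return sel.Matches(labels.Set(shardLabels)), nil
}
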
8 changes: 8 additions & 0 deletions pkg/authorization/bootstrap/policy.go
@@ -24,6 +24,7 @@ import (
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"

"github.com/kcp-dev/kcp/sdk/apis/apis"
"github.com/kcp-dev/kcp/sdk/apis/core"
"github.com/kcp-dev/kcp/sdk/apis/tenancy"
)
@@ -101,6 +102,13 @@ func clusterRoles() []rbacv1.ClusterRole {
rbacv1helpers.NewRule("access").URLs("/").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: SystemExternalLogicalClusterAdmin},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("update", "patch", "get").Groups(apis.GroupName).Resources("apiexportendpointslices/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(apis.GroupName).Resources("apiexportendpointslices").RuleOrDie(),
},
},
}
}

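For reference, the first rule added above expands to roughly the following PolicyRule. This is only a sketch of what the rbacv1helpers builder produces; it assumes apis.GroupName resolves to "apis.kcp.io", consistent with the CRD filename earlier in this commit:

rbacv1.PolicyRule{
	Verbs:     []string{"update", "patch", "get"},
	APIGroups: []string{"apis.kcp.io"},
	Resources: []string{"apiexportendpointslices/status"},
}

In effect, SystemExternalLogicalClusterAdmin can read all APIExportEndpointSlices and write their status, presumably so that non-owning shards can publish endpoint information on slices they follow.
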
15 changes: 15 additions & 0 deletions pkg/openapi/zz_generated.openapi.go

Some generated files are not rendered by default.

@@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
@@ -45,7 +45,7 @@ import (
topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1"
kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
apisv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apis/v1alpha1"
apisinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1"
apisv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1"
corev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core/v1alpha1"
topologyinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/topology/v1alpha1"
)
@@ -57,9 +57,9 @@ const (
// NewController returns a new controller for APIExportEndpointSlices.
// Shards and APIExports are read from the cache server.
func NewController(
apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer,
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer,
globalShardClusterInformer corev1alpha1informers.ShardClusterInformer,
globalAPIExportClusterInformer apisinformers.APIExportClusterInformer,
globalAPIExportClusterInformer apisv1alpha1informers.APIExportClusterInformer,
partitionClusterInformer topologyinformers.PartitionClusterInformer,
kcpClusterClient kcpclientset.ClusterInterface,
) (*controller, error) {
@@ -76,8 +76,12 @@ func NewController(
listShards: func(selector labels.Selector) ([]*corev1alpha1.Shard, error) {
return globalShardClusterInformer.Lister().List(selector)
},
getAPIExportEndpointSlice: func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIExportEndpointSlice, error) {
return apiExportEndpointSliceClusterInformer.Lister().Cluster(clusterName).Get(name)
getAPIExportEndpointSlice: func(ctx context.Context, path logicalcluster.Path, name string) (*apisv1alpha1.APIExportEndpointSlice, error) {
obj, err := indexers.ByPathAndName[*apisv1alpha1.APIExportEndpointSlice](apisv1alpha1.Resource("apiexportendpointslices"), apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), path, name)
if err != nil {
return nil, err
}
return obj, err
},
getAPIExport: func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) {
return indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), globalAPIExportClusterInformer.Informer().GetIndexer(), path, name)
@@ -168,20 +172,20 @@ type controller struct {

listShards func(selector labels.Selector) ([]*corev1alpha1.Shard, error)
listAPIExportEndpointSlices func() ([]*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExportEndpointSlice func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExportEndpointSlice func(ctx context.Context, path logicalcluster.Path, name string) (*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error)
getPartition func(clusterName logicalcluster.Name, name string) (*topologyv1alpha1.Partition, error)
getAPIExportEndpointSlicesByPartition func(key string) ([]*apisv1alpha1.APIExportEndpointSlice, error)

apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer
commit CommitFunc
}

// enqueueAPIExportEndpointSlice enqueues an APIExportEndpointSlice.
func (c *controller) enqueueAPIExportEndpointSlice(obj interface{}) {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

@@ -197,7 +201,7 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
}
export, ok := obj.(*apisv1alpha1.APIExport)
if !ok {
runtime.HandleError(fmt.Errorf("obj is supposed to be a APIExport, but is %T", obj))
utilruntime.HandleError(fmt.Errorf("obj is supposed to be a APIExport, but is %T", obj))
return
}

@@ -206,23 +210,23 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
if path := logicalcluster.NewPath(export.Annotations[core.LogicalClusterPathAnnotationKey]); !path.Empty() {
pathKeys, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().IndexKeys(indexAPIExportEndpointSliceByAPIExport, path.Join(export.Name).String())
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}
keys.Insert(pathKeys...)
}

clusterKeys, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().IndexKeys(indexAPIExportEndpointSliceByAPIExport, logicalcluster.From(export).Path().Join(export.Name).String())
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}
keys.Insert(clusterKeys...)

for _, key := range sets.List[string](keys) {
slice, exists, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().GetByKey(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
continue
} else if !exists {
continue
@@ -237,15 +241,15 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
func (c *controller) enqueueAllAPIExportEndpointSlices(shard interface{}) {
list, err := c.listAPIExportEndpointSlices()
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), shard.(*corev1alpha1.Shard))
for i := range list {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(list[i])
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
continue
}

@@ -258,13 +262,13 @@ func (c *controller) enqueueAllAPIExportEndpointSlices(shard interface{}) {
func (c *controller) enqueuePartition(obj interface{}) {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

slices, err := c.getAPIExportEndpointSlicesByPartition(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

@@ -277,7 +281,7 @@ func (c *controller) enqueuePartition(obj interface{}) {

// Start starts the controller, which stops when ctx.Done() is closed.
func (c *controller) Start(ctx context.Context, numThreads int) {
defer runtime.HandleCrash()
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName)
@@ -314,7 +318,7 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool {
defer c.queue.Done(key)

if err := c.process(ctx, key); err != nil {
runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err))
utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err))
c.queue.AddRateLimited(key)
return true
}
@@ -325,10 +329,10 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool {
func (c *controller) process(ctx context.Context, key string) error {
clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return nil
}
obj, err := c.getAPIExportEndpointSlice(clusterName, name)
obj, err := c.getAPIExportEndpointSlice(ctx, clusterName.Path(), name)
if err != nil {
if errors.IsNotFound(err) {
return nil // object deleted before we handled it
@@ -353,6 +357,7 @@ func (c *controller) process(ctx context.Context, key string) error {
// If the object being reconciled changed as a result, update it.
oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status}
newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status}

if err := c.commit(ctx, oldResource, newResource); err != nil {
errs = append(errs, err)
}
@@ -380,15 +385,19 @@ func filterShardEvent(oldObj, newObj interface{}) bool {
}

// InstallIndexers adds the additional indexers that this controller requires to the informers.
func InstallIndexers(globalAPIExportClusterInformer apisinformers.APIExportClusterInformer, apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer) {
func InstallIndexers(
globalAPIExportClusterInformer apisv1alpha1informers.APIExportClusterInformer,
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer,
) {
indexers.AddIfNotPresentOrDie(globalAPIExportClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName,
})

indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName,
})
indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexAPIExportEndpointSliceByAPIExport: indexAPIExportEndpointSliceByAPIExportFunc,
})

indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexAPIExportEndpointSlicesByPartition: indexAPIExportEndpointSlicesByPartitionFunc,
})
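Putting the controller pieces above together, a minimal wiring sketch (not part of this commit) could look like the following. The informer-factory accessors and the split between a local factory and a cache-server ("global") factory are assumptions based on the NewController doc comment, not verified against the kcp codebase:

import (
	"context"

	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
	kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions"
)

// startEndpointSliceController is a hypothetical wiring function: local
// informers serve the shard's own data, global informers are backed by the
// cache server, matching the NewController comment above.
func startEndpointSliceController(
	ctx context.Context,
	localInformers, globalInformers kcpinformers.SharedInformerFactory,
	kcpClusterClient kcpclientset.ClusterInterface,
) error {
	// Register the extra indexers before the informers are started.
	InstallIndexers(
		globalInformers.Apis().V1alpha1().APIExports(),
		localInformers.Apis().V1alpha1().APIExportEndpointSlices(),
	)

	c, err := NewController(
		localInformers.Apis().V1alpha1().APIExportEndpointSlices(),
		globalInformers.Core().V1alpha1().Shards(),
		globalInformers.Apis().V1alpha1().APIExports(),
		localInformers.Topology().V1alpha1().Partitions(),
		kcpClusterClient,
	)
	if err != nil {
		return err
	}

	go c.Start(ctx, 2) // two workers; an arbitrary choice for this sketch
	return nil
}
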
@@ -79,7 +79,7 @@ func TestReconcile(t *testing.T) {
wantPartitionNotValid: true,
wantAPIExportEndpointSliceURLsError: true,
},
"APIExportEndpointSliceURLs set when no issue": {
"APIExportEndpointSliceReadyForURLs set when no issue": {
wantAPIExportEndpointSliceURLsReady: true,
wantAPIExportValid: true,
wantPartitionValid: true,
@@ -189,7 +189,7 @@ func TestReconcile(t *testing.T) {
if tc.wantAPIExportEndpointSliceURLsError {
requireConditionMatches(t, apiExportEndpointSlice,
conditions.FalseCondition(
apisv1alpha1.APIExportEndpointSliceURLsReady,
apisv1alpha1.APIExportEndpointSliceReadyForURLs,
apisv1alpha1.ErrorGeneratingURLsReason,
conditionsv1alpha1.ConditionSeverityError,
"",
@@ -198,17 +198,13 @@ }
}

if tc.wantAPIExportEndpointSliceURLsReady {
requireConditionMatches(t, apiExportEndpointSlice, conditions.TrueCondition(apisv1alpha1.APIExportEndpointSliceURLsReady))
require.Equal(t, []apisv1alpha1.APIExportEndpoint{
{URL: "https://server-1.kcp.dev/services/apiexport/root:org:ws/my-export"},
{URL: "https://server-2.kcp.dev/services/apiexport/root:org:ws/my-export"},
}, apiExportEndpointSlice.Status.APIExportEndpoints)
requireConditionMatches(t, apiExportEndpointSlice, conditions.TrueCondition(apisv1alpha1.APIExportEndpointSliceReadyForURLs))
}

if tc.wantAPIExportEndpointSliceURLsUnknown {
requireConditionMatches(t, apiExportEndpointSlice,
conditions.UnknownCondition(
apisv1alpha1.APIExportEndpointSliceURLsReady,
apisv1alpha1.APIExportEndpointSliceReadyForURLs,
apisv1alpha1.ErrorGeneratingURLsReason,
"",
),