Skip to content

Commit

Permalink
Add consumer aware slices
Browse files Browse the repository at this point in the history
Signed-off-by: Mangirdas Judeikis <[email protected]>
On-behalf-of: @SAP [email protected]
  • Loading branch information
mjudeikis committed Jan 26, 2025
1 parent a1b3400 commit b4f6f7d
Show file tree
Hide file tree
Showing 21 changed files with 1,220 additions and 157 deletions.
2 changes: 1 addition & 1 deletion cmd/sharded-test-server/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,7 @@ func start(proxyFlags, shardFlags []string, logDirPath, workDirPath string, numb
if i >= len(regions) {
break
}
patch := fmt.Sprintf(`{"metadata":{"labels":{"region":%q}}}`, regions[i])
patch := fmt.Sprintf(`{"metadata":{"labels":{"region":%q,"shared": "true"}}}`, regions[i])
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err := client.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().Patch(ctx, name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
return err
Expand Down
9 changes: 9 additions & 0 deletions config/crds/apis.kcp.io_apiexportendpointslices.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,15 @@ spec:
- url
type: object
type: array
x-kubernetes-list-map-keys:
- url
x-kubernetes-list-type: map
shardSelector:
description: |-
shardSelector is the selector used to filter the shards. It is used to filter the shards
when determining partition scope while deriving the endpoints. This is set by the owning
shard, and is used by follower shards to determine whether they are in scope or not.
type: string
type: object
type: object
served: true
Expand Down
8 changes: 8 additions & 0 deletions pkg/authorization/bootstrap/policy.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"

"github.com/kcp-dev/kcp/sdk/apis/apis"
"github.com/kcp-dev/kcp/sdk/apis/core"
"github.com/kcp-dev/kcp/sdk/apis/tenancy"
)
Expand Down Expand Up @@ -101,6 +102,13 @@ func clusterRoles() []rbacv1.ClusterRole {
rbacv1helpers.NewRule("access").URLs("/").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: SystemExternalLogicalClusterAdmin},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("update", "patch", "get").Groups(apis.GroupName).Resources("apiexportendpointslices/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(apis.GroupName).Resources("apiexportendpointslices").RuleOrDie(),
},
},
}
}

Expand Down
1 change: 1 addition & 0 deletions pkg/cache/server/bootstrap/bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ func Bootstrap(ctx context.Context, apiExtensionsClusterClient kcpapiextensionsc
{"apis.kcp.io", "apiresourceschemas"},
{"apis.kcp.io", "apiconversions"},
{"apis.kcp.io", "apiexports"},
{"apis.kcp.io", "apiexportendpointslices"},
{"core.kcp.io", "logicalclusters"},
{"core.kcp.io", "shards"},
{"tenancy.kcp.io", "workspacetypes"},
Expand Down
15 changes: 15 additions & 0 deletions pkg/openapi/zz_generated.openapi.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions pkg/reconciler/apis/apibinding/apibinding_reconcile.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ import (
"k8s.io/apiextensions-apiserver/pkg/apiserver"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilserrors "k8s.io/apimachinery/pkg/util/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"

Expand Down Expand Up @@ -77,7 +77,7 @@ func (c *controller) reconcile(ctx context.Context, apiBinding *apisv1alpha1.API
}
}

return requeue, utilserrors.NewAggregate(errs)
return requeue, utilerrors.NewAggregate(errs)
}

type summaryReconciler struct {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
Expand All @@ -45,7 +45,7 @@ import (
topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1"
kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
apisv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apis/v1alpha1"
apisinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1"
apisv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1"
corev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core/v1alpha1"
topologyinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/topology/v1alpha1"
)
Expand All @@ -57,13 +57,16 @@ const (
// NewController returns a new controller for APIExportEndpointSlices.
// Shards and APIExports are read from the cache server.
func NewController(
apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer,
shardName string,
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer,
apiBindingInformer apisv1alpha1informers.APIBindingClusterInformer,
globalShardClusterInformer corev1alpha1informers.ShardClusterInformer,
globalAPIExportClusterInformer apisinformers.APIExportClusterInformer,
globalAPIExportClusterInformer apisv1alpha1informers.APIExportClusterInformer,
partitionClusterInformer topologyinformers.PartitionClusterInformer,
kcpClusterClient kcpclientset.ClusterInterface,
) (*controller, error) {
c := &controller{
shardName: shardName,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Expand All @@ -76,8 +79,12 @@ func NewController(
listShards: func(selector labels.Selector) ([]*corev1alpha1.Shard, error) {
return globalShardClusterInformer.Lister().List(selector)
},
getAPIExportEndpointSlice: func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIExportEndpointSlice, error) {
return apiExportEndpointSliceClusterInformer.Lister().Cluster(clusterName).Get(name)
getAPIExportEndpointSlice: func(ctx context.Context, path logicalcluster.Path, name string) (*apisv1alpha1.APIExportEndpointSlice, error) {
obj, err := indexers.ByPathAndName[*apisv1alpha1.APIExportEndpointSlice](apisv1alpha1.Resource("apiexportendpointslices"), apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), path, name)
if err != nil {
return nil, err
}
return obj, err
},
getAPIExport: func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) {
return indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), globalAPIExportClusterInformer.Informer().GetIndexer(), path, name)
Expand All @@ -100,6 +107,37 @@ func NewController(
}
return slices, nil
},
listAPIBindingsByAPIExport: func(export *apisv1alpha1.APIExport) ([]*apisv1alpha1.APIBinding, error) {
// binding keys by full path
keys := sets.New[string]()
if path := logicalcluster.NewPath(export.Annotations[core.LogicalClusterPathAnnotationKey]); !path.Empty() {
pathKeys, err := apiBindingInformer.Informer().GetIndexer().IndexKeys(indexers.APIBindingsByAPIExport, path.Join(export.Name).String())
if err != nil {
return nil, err
}
keys.Insert(pathKeys...)
}

clusterKeys, err := apiBindingInformer.Informer().GetIndexer().IndexKeys(indexers.APIBindingsByAPIExport, logicalcluster.From(export).Path().Join(export.Name).String())
if err != nil {
return nil, err
}
keys.Insert(clusterKeys...)

bindings := make([]*apisv1alpha1.APIBinding, 0, keys.Len())
for _, key := range sets.List[string](keys) {
binding, exists, err := apiBindingInformer.Informer().GetIndexer().GetByKey(key)
if err != nil {
utilruntime.HandleError(err)
continue
} else if !exists {
utilruntime.HandleError(fmt.Errorf("APIBinding %q does not exist", key))
continue
}
bindings = append(bindings, binding.(*apisv1alpha1.APIBinding))
}
return bindings, nil
},
apiExportEndpointSliceClusterInformer: apiExportEndpointSliceClusterInformer,
commit: committer.NewCommitter[*APIExportEndpointSlice, Patcher, *APIExportEndpointSliceSpec, *APIExportEndpointSliceStatus](kcpClusterClient.ApisV1alpha1().APIExportEndpointSlices()),
}
Expand Down Expand Up @@ -164,24 +202,26 @@ type CommitFunc = func(context.Context, *Resource, *Resource) error
// controller reconciles APIExportEndpointSlices. It ensures that the shard endpoints are populated
// in the status of every APIExportEndpointSlices.
type controller struct {
queue workqueue.TypedRateLimitingInterface[string]
queue workqueue.TypedRateLimitingInterface[string]
shardName string

listShards func(selector labels.Selector) ([]*corev1alpha1.Shard, error)
listAPIExportEndpointSlices func() ([]*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExportEndpointSlice func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExportEndpointSlice func(ctx context.Context, path logicalcluster.Path, name string) (*apisv1alpha1.APIExportEndpointSlice, error)
getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error)
listAPIBindingsByAPIExport func(apiexport *apisv1alpha1.APIExport) ([]*apisv1alpha1.APIBinding, error)
getPartition func(clusterName logicalcluster.Name, name string) (*topologyv1alpha1.Partition, error)
getAPIExportEndpointSlicesByPartition func(key string) ([]*apisv1alpha1.APIExportEndpointSlice, error)

apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer
commit CommitFunc
}

// enqueueAPIExportEndpointSlice enqueues an APIExportEndpointSlice.
func (c *controller) enqueueAPIExportEndpointSlice(obj interface{}) {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

Expand All @@ -197,7 +237,7 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
}
export, ok := obj.(*apisv1alpha1.APIExport)
if !ok {
runtime.HandleError(fmt.Errorf("obj is supposed to be a APIExport, but is %T", obj))
utilruntime.HandleError(fmt.Errorf("obj is supposed to be a APIExport, but is %T", obj))
return
}

Expand All @@ -206,23 +246,23 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
if path := logicalcluster.NewPath(export.Annotations[core.LogicalClusterPathAnnotationKey]); !path.Empty() {
pathKeys, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().IndexKeys(indexAPIExportEndpointSliceByAPIExport, path.Join(export.Name).String())
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}
keys.Insert(pathKeys...)
}

clusterKeys, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().IndexKeys(indexAPIExportEndpointSliceByAPIExport, logicalcluster.From(export).Path().Join(export.Name).String())
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}
keys.Insert(clusterKeys...)

for _, key := range sets.List[string](keys) {
slice, exists, err := c.apiExportEndpointSliceClusterInformer.Informer().GetIndexer().GetByKey(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
continue
} else if !exists {
continue
Expand All @@ -237,15 +277,15 @@ func (c *controller) enqueueAPIExportEndpointSlicesForAPIExport(obj interface{})
func (c *controller) enqueueAllAPIExportEndpointSlices(shard interface{}) {
list, err := c.listAPIExportEndpointSlices()
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), shard.(*corev1alpha1.Shard))
for i := range list {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(list[i])
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
continue
}

Expand All @@ -258,13 +298,13 @@ func (c *controller) enqueueAllAPIExportEndpointSlices(shard interface{}) {
func (c *controller) enqueuePartition(obj interface{}) {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

slices, err := c.getAPIExportEndpointSlicesByPartition(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}

Expand All @@ -277,7 +317,7 @@ func (c *controller) enqueuePartition(obj interface{}) {

// Start starts the controller, which stops when ctx.Done() is closed.
func (c *controller) Start(ctx context.Context, numThreads int) {
defer runtime.HandleCrash()
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName)
Expand Down Expand Up @@ -314,7 +354,7 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool {
defer c.queue.Done(key)

if err := c.process(ctx, key); err != nil {
runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err))
utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err))
c.queue.AddRateLimited(key)
return true
}
Expand All @@ -325,10 +365,10 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool {
func (c *controller) process(ctx context.Context, key string) error {
clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return nil
}
obj, err := c.getAPIExportEndpointSlice(clusterName, name)
obj, err := c.getAPIExportEndpointSlice(ctx, clusterName.Path(), name)
if err != nil {
if errors.IsNotFound(err) {
return nil // object deleted before we handled it
Expand All @@ -353,6 +393,7 @@ func (c *controller) process(ctx context.Context, key string) error {
// If the object being reconciled changed as a result, update it.
oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status}
newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status}

if err := c.commit(ctx, oldResource, newResource); err != nil {
errs = append(errs, err)
}
Expand Down Expand Up @@ -380,15 +421,19 @@ func filterShardEvent(oldObj, newObj interface{}) bool {
}

// InstallIndexers adds the additional indexers that this controller requires to the informers.
func InstallIndexers(globalAPIExportClusterInformer apisinformers.APIExportClusterInformer, apiExportEndpointSliceClusterInformer apisinformers.APIExportEndpointSliceClusterInformer) {
func InstallIndexers(
globalAPIExportClusterInformer apisv1alpha1informers.APIExportClusterInformer,
apiExportEndpointSliceClusterInformer apisv1alpha1informers.APIExportEndpointSliceClusterInformer,
) {
indexers.AddIfNotPresentOrDie(globalAPIExportClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName,
})

indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName,
})
indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexAPIExportEndpointSliceByAPIExport: indexAPIExportEndpointSliceByAPIExportFunc,
})

indexers.AddIfNotPresentOrDie(apiExportEndpointSliceClusterInformer.Informer().GetIndexer(), cache.Indexers{
indexAPIExportEndpointSlicesByPartition: indexAPIExportEndpointSlicesByPartitionFunc,
})
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ func TestReconcile(t *testing.T) {
wantPartitionNotValid: true,
wantAPIExportEndpointSliceURLsError: true,
},
"APIExportEndpointSliceURLs set when no issue": {
"APIExportEndpointSliceReadyForURLs set when no issue": {
wantAPIExportEndpointSliceURLsReady: true,
wantAPIExportValid: true,
wantPartitionValid: true,
Expand Down Expand Up @@ -189,7 +189,7 @@ func TestReconcile(t *testing.T) {
if tc.wantAPIExportEndpointSliceURLsError {
requireConditionMatches(t, apiExportEndpointSlice,
conditions.FalseCondition(
apisv1alpha1.APIExportEndpointSliceURLsReady,
apisv1alpha1.APIExportEndpointSliceReadyForURLs,
apisv1alpha1.ErrorGeneratingURLsReason,
conditionsv1alpha1.ConditionSeverityError,
"",
Expand All @@ -198,17 +198,13 @@ func TestReconcile(t *testing.T) {
}

if tc.wantAPIExportEndpointSliceURLsReady {
requireConditionMatches(t, apiExportEndpointSlice, conditions.TrueCondition(apisv1alpha1.APIExportEndpointSliceURLsReady))
require.Equal(t, []apisv1alpha1.APIExportEndpoint{
{URL: "https://server-1.kcp.dev/services/apiexport/root:org:ws/my-export"},
{URL: "https://server-2.kcp.dev/services/apiexport/root:org:ws/my-export"},
}, apiExportEndpointSlice.Status.APIExportEndpoints)
requireConditionMatches(t, apiExportEndpointSlice, conditions.TrueCondition(apisv1alpha1.APIExportEndpointSliceReadyForURLs))
}

if tc.wantAPIExportEndpointSliceURLsUnknown {
requireConditionMatches(t, apiExportEndpointSlice,
conditions.UnknownCondition(
apisv1alpha1.APIExportEndpointSliceURLsReady,
apisv1alpha1.APIExportEndpointSliceReadyForURLs,
apisv1alpha1.ErrorGeneratingURLsReason,
"",
),
Expand Down
Loading

0 comments on commit b4f6f7d

Please sign in to comment.