Skip to content

Commit

Permalink
secrets support
Browse files Browse the repository at this point in the history
- Add `secrets` references to **Plan** spec. Changes in referenced `secrets` can trigger re-application of a **Plan**.
- The label that a **Plan** places on an applied `node` is no longer the `status.latestVersion` but is instead a sha224 digest of `status.latestVersion` together with the `data` values of each referenced `secret` (hashed in sorted-key order, with secrets taken in the order specified). This value is represented as `status.latestHash`
  • Loading branch information
dweomer committed Jan 29, 2020
1 parent e80c4c2 commit 0e77c79
Show file tree
Hide file tree
Showing 12 changed files with 139 additions and 29 deletions.
1 change: 0 additions & 1 deletion .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,3 @@
./.dapper
./.cache
./.trash-cache
./dist/
3 changes: 1 addition & 2 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
/.dapper
/.cache
/.trash-cache
.idea/
/bin
/dist
*.swp
.idea
tffy
2 changes: 1 addition & 1 deletion Dockerfile.dapper
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ FROM golang:1.13-alpine
ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH

RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN apk -U add bash expect git gcc jq musl-dev docker vim less file curl wget ca-certificates
RUN go get -d golang.org/x/lint/golint && \
git -C /go/src/golang.org/x/lint/golint checkout -b current 06c8688daad7faa9da5a0c2f163a3d14aac986ca && \
go install golang.org/x/lint/golint && \
Expand Down
5 changes: 3 additions & 2 deletions manifests/system-upgrade-controller.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ metadata:
name: default-controller-env
namespace: system-upgrade
data:
SYSTEM_UPGRADE_CONTROLLER_DEBUG: "true"
SYSTEM_UPGRADE_CONTROLLER_DEBUG: "false"
SYSTEM_UPGRADE_CONTROLLER_THREADS: "2"
SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: "900"
SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: "2"
Expand Down Expand Up @@ -57,7 +57,8 @@ spec:
effect: "NoSchedule"
containers:
- name: system-upgrade-controller
image: rancher/system-upgrade-controller:v0.1.0
image: rancher/system-upgrade-controller:latest
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: default-controller-env
Expand Down
11 changes: 9 additions & 2 deletions pkg/apis/upgrade.cattle.io/v1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ type PlanSpec struct {
Concurrency int64 `json:"concurrency,omitempty"`
NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"`

Channel string `json:"channel,omitempty"`
Version string `json:"version,omitempty"`
Channel string `json:"channel,omitempty"`
Version string `json:"version,omitempty"`
Secrets []SecretSpec `json:"secrets,omitempty"`

Upgrade *UpgradeSpec `json:"upgrade,omitempty"`
Cordon bool `json:"cordon,omitempty"`
Expand All @@ -45,6 +46,7 @@ type PlanSpec struct {
// PlanStatus represents the resulting state from processing Plan events.
type PlanStatus struct {
	// Conditions holds the observed conditions of the Plan (e.g. LatestResolved).
	Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"`
	// LatestVersion is the most recently resolved version, taken from
	// spec.version when set, otherwise resolved via spec.channel.
	LatestVersion string `json:"latestVersion,omitempty"`
	// LatestHash is a hex-encoded sha224 digest of LatestVersion and the data
	// of each secret referenced by the plan; it is used (instead of
	// LatestVersion) as the node-label value marking an applied plan.
	LatestHash string `json:"latestHash,omitempty"`
	// Applying tracks what the plan is currently being applied to —
	// presumably node names; confirm against the controller's select logic.
	Applying []string `json:"applying,omitempty"`
}

Expand All @@ -62,3 +64,8 @@ type DrainSpec struct {
IgnoreDaemonSets *bool `json:"ignoreDaemonSets,omitempty"`
Force bool `json:"force,omitempty"`
}

// SecretSpec references a Secret to be mounted into a plan's upgrade Jobs.
// The referenced Secret's data also factors into the plan's
// status.latestHash, so changes to it can trigger re-application.
type SecretSpec struct {
	// Name of the Secret, looked up in the Plan's namespace.
	Name string `json:"name,omitempty"`
	// Path where the Secret is mounted in the Job's container. When empty or
	// relative, it is rooted at /run/system-upgrade/secrets.
	Path string `json:"path,omitempty"`
}
21 changes: 21 additions & 0 deletions pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

34 changes: 31 additions & 3 deletions pkg/upgrade/job/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package job

import (
"os"
"path/filepath"
"strconv"
"strings"

Expand Down Expand Up @@ -78,13 +79,13 @@ func NewUpgradeJob(plan *upgradeapiv1.Plan, serviceAccountName, nodeName, contro
labelPlanName := upgradeapi.LabelPlanName(plan.Name)
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name.SafeConcatName(plan.Name, nodeName, plan.Status.LatestVersion),
Name: name.SafeConcatName("upgrade", nodeName, "with", plan.Name, "at", plan.Status.LatestHash),
Namespace: plan.Namespace,
Labels: labels.Set{
upgradeapi.LabelController: controllerName,
upgradeapi.LabelNode: nodeName,
upgradeapi.LabelPlan: plan.Name,
labelPlanName: plan.Status.LatestVersion,
labelPlanName: plan.Status.LatestHash,
},
},
Spec: batchv1.JobSpec{
Expand All @@ -95,7 +96,7 @@ func NewUpgradeJob(plan *upgradeapiv1.Plan, serviceAccountName, nodeName, contro
upgradeapi.LabelController: controllerName,
upgradeapi.LabelNode: nodeName,
upgradeapi.LabelPlan: plan.Name,
labelPlanName: plan.Status.LatestVersion,
labelPlanName: plan.Status.LatestHash,
},
},
Spec: corev1.PodSpec{
Expand Down Expand Up @@ -182,6 +183,9 @@ func NewUpgradeJob(plan *upgradeapiv1.Plan, serviceAccountName, nodeName, contro
Env: []corev1.EnvVar{{
Name: "SYSTEM_UPGRADE_PLAN_NAME",
Value: plan.Name,
}, {
Name: "SYSTEM_UPGRADE_PLAN_LATEST_HASH",
Value: plan.Status.LatestHash,
}, {
Name: "SYSTEM_UPGRADE_PLAN_LATEST_VERSION",
Value: plan.Status.LatestVersion,
Expand Down Expand Up @@ -212,6 +216,30 @@ func NewUpgradeJob(plan *upgradeapiv1.Plan, serviceAccountName, nodeName, contro
},
},
}

for _, secret := range plan.Spec.Secrets {
secretVolumeName := name.SafeConcatName("secret", secret.Name)
secretVolumePath := secret.Path
if secretVolumePath == "" {
secretVolumePath = filepath.Join("/run/system-upgrade/secrets", secret.Name)
} else if secretVolumePath[0:1] != "/" {
secretVolumePath = filepath.Join("/run/system-upgrade/secrets", secretVolumePath)
}
job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{
Name: secretVolumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secret.Name,
},
},
})
job.Spec.Template.Spec.Containers[0].VolumeMounts = append(job.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: secretVolumeName,
MountPath: secretVolumePath,
ReadOnly: true,
})
}

cordon, drain := plan.Spec.Cordon, plan.Spec.Drain
if drain != nil {
args := []string{"drain", nodeName, "--pod-selector", `!` + upgradeapi.LabelController}
Expand Down
77 changes: 64 additions & 13 deletions pkg/upgrade/plan/plan.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package plan

import (
"context"
"crypto/sha256"
"fmt"
"net/http"
"os"
Expand Down Expand Up @@ -69,31 +70,52 @@ func RegisterHandlers(ctx context.Context, serviceAccountName, controllerNamespa
plans := upgradeFactory.Upgrade().V1().Plan()
jobs := batchFactory.Batch().V1().Job()
nodes := coreFactory.Core().V1().Node()
secrets := coreFactory.Core().V1().Secret()

// cluster id hack: see https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/mVGobfD4TpY/nkdbkX1iBwAJ
systemNS, err := coreFactory.Core().V1().Namespace().Get(metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
return err
}

// node events with labels that match a plan's selectors (potentially) trigger that plan
nodes.OnChange(ctx, controllerName, func(key string, obj *corev1.Node) (*corev1.Node, error) {
if obj == nil {
return obj, nil
}
if planList, err := plans.Cache().List(controllerNamespace, labels.Everything()); err != nil {
logrus.Error(err)
} else {
for _, plan := range planList {
if selector, err := metav1.LabelSelectorAsSelector(plan.Spec.NodeSelector); err != nil {
logrus.Error(err)
} else if selector.Matches(labels.Set(obj.Labels)) {
planList, err := plans.Cache().List(controllerNamespace, labels.Everything())
if err != nil {
return obj, err
}
for _, plan := range planList {
if selector, err := metav1.LabelSelectorAsSelector(plan.Spec.NodeSelector); err != nil {
return obj, err
} else if selector.Matches(labels.Set(obj.Labels)) {
plans.Enqueue(plan.Namespace, plan.Name)
}
}

return obj, nil
})

// secret events referred to by a plan (potentially) trigger that plan
secrets.OnChange(ctx, controllerName, func(key string, obj *corev1.Secret) (*corev1.Secret, error) {
planList, err := plans.Cache().List(controllerNamespace, labels.Everything())
if err != nil {
return obj, err
}
for _, plan := range planList {
for _, secret := range plan.Spec.Secrets {
if obj.Name == secret.Name {
plans.Enqueue(plan.Namespace, plan.Name)
continue
}
}
}
return obj, nil
})

// job events (successful completions) cause the node the job ran on to be labeled as per the plan
jobs.OnChange(ctx, controllerName, func(key string, obj *batchv1.Job) (*batchv1.Job, error) {
if obj == nil {
return obj, nil
Expand Down Expand Up @@ -121,18 +143,20 @@ func RegisterHandlers(ctx context.Context, serviceAccountName, controllerNamespa
return obj, nil
})

// process plan events, mutating status accordingly
upgradectlv1.RegisterPlanStatusHandler(ctx, plans, "", controllerName,
func(obj *upgradeapiv1.Plan, status upgradeapiv1.PlanStatus) (upgradeapiv1.PlanStatus, error) {
secretsCache := secrets.Cache()
resolved := upgradeapiv1.PlanLatestResolved
resolved.CreateUnknownIfNotExists(obj)
if obj.Spec.Version == "" && obj.Spec.Channel == "" {
resolved.SetError(obj, "Error", fmt.Errorf("missing one of channel or version"))
return obj.Status, nil
return hashPlanLatest(secretsCache, obj)
}
if obj.Spec.Version != "" {
resolved.SetError(obj, "Version", nil)
obj.Status.LatestVersion = obj.Spec.Version
return obj.Status, nil
return hashPlanLatest(secretsCache, obj)
}
if resolved.IsTrue(obj) {
if lastUpdated, err := time.Parse(time.RFC3339, resolved.GetLastUpdated(obj)); err == nil {
Expand All @@ -148,11 +172,12 @@ func RegisterHandlers(ctx context.Context, serviceAccountName, controllerNamespa
}
resolved.SetError(obj, "Channel", nil)
obj.Status.LatestVersion = latest
return obj.Status, nil
return hashPlanLatest(secretsCache, obj)
},
)

upgradectlv1.RegisterPlanGeneratingHandler(ctx, plans, apply.WithCacheTypes(jobs).WithCacheTypes(nodes).WithNoDelete(), "", controllerName,
// process plan events by creating jobs to apply the plan
upgradectlv1.RegisterPlanGeneratingHandler(ctx, plans, apply.WithCacheTypes(jobs, nodes, secrets).WithNoDelete(), "", controllerName,
func(obj *upgradeapiv1.Plan, status upgradeapiv1.PlanStatus) (objects []runtime.Object, _ upgradeapiv1.PlanStatus, _ error) {
concurrentNodeNames, err := selectConcurrentNodeNames(nodes.Cache(), obj)
if err != nil {
Expand All @@ -174,6 +199,32 @@ func RegisterHandlers(ctx context.Context, serviceAccountName, controllerNamespa
return nil
}

// hashPlanLatest computes plan.Status.LatestHash as the hex-encoded sha224
// digest of plan.Status.LatestVersion followed by the data of each secret
// referenced in plan.Spec.Secrets (values hashed in sorted-key order, with
// secrets taken in the order specified). If latest-version resolution ended
// in error, both LatestVersion and LatestHash are cleared instead. The
// (possibly mutated) plan status is returned.
func hashPlanLatest(secretCache corectlv1.SecretCache, plan *upgradeapiv1.Plan) (upgradeapiv1.PlanStatus, error) {
	if upgradeapiv1.PlanLatestResolved.GetReason(plan) == "Error" {
		plan.Status.LatestVersion = ""
		plan.Status.LatestHash = ""
		return plan.Status, nil
	}
	hash := sha256.New224()
	hash.Write([]byte(plan.Status.LatestVersion))
	for _, s := range plan.Spec.Secrets {
		secret, err := secretCache.Get(plan.Namespace, s.Name)
		if err != nil {
			return plan.Status, err
		}
		// map iteration order is random; sort keys so the digest is deterministic
		keys := make([]string, 0, len(secret.Data))
		for k := range secret.Data {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			hash.Write(secret.Data[k])
		}
	}
	plan.Status.LatestHash = fmt.Sprintf("%x", hash.Sum(nil))
	return plan.Status, nil
}

func resolveChannel(ctx context.Context, channelURL, clusterID string) (string, error) {
httpClient := &http.Client{
CheckRedirect: func(*http.Request, []*http.Request) error {
Expand All @@ -185,7 +236,7 @@ func resolveChannel(ctx context.Context, channelURL, clusterID string) (string,
if err != nil {
return "", err
}
request.Header.Set(`x-`+metav1.NamespaceSystem, string(clusterID))
request.Header.Set(`x-`+metav1.NamespaceSystem, clusterID)
logrus.Debugf("Sending %+v", request)
response, err := httpClient.Do(request)
if err != nil {
Expand Down Expand Up @@ -213,7 +264,7 @@ func selectConcurrentNodeNames(nodeCache corectlv1.NodeCache, plan *upgradeapiv1
if err != nil {
return nil, err
}
requirementPlanNotLatest, err := labels.NewRequirement(upgradeapi.LabelPlanName(plan.Name), selection.NotIn, []string{"disabled", plan.Status.LatestVersion})
requirementPlanNotLatest, err := labels.NewRequirement(upgradeapi.LabelPlanName(plan.Name), selection.NotIn, []string{"disabled", plan.Status.LatestHash})
if err != nil {
return nil, err
}
Expand Down
1 change: 1 addition & 0 deletions scripts/build
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,6 @@ cd $(dirname $0)/..
mkdir -p bin
[ "$(uname)" != "Darwin" ] && LINKFLAGS="-extldflags -static -s"
PKG=github.com/rancher/system-upgrade-controller
echo "Building $PKG ..."
VERSIONFLAGS="-X ${PKG}/pkg/version.Version=${VERSION} -X ${PKG}/pkg/version.GitCommit=${COMMIT:0:8}"
CGO_ENABLED=0 go build -ldflags "$VERSIONFLAGS $LINKFLAGS" -o bin/system-upgrade-controller
1 change: 0 additions & 1 deletion scripts/ci
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,5 @@ cd $(dirname $0)

./build
./test
#./e2e
./validate
./package
10 changes: 7 additions & 3 deletions scripts/package-controller
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,10 @@ if echo $TAG | grep -q dirty; then
fi

IMAGE=${REPO}/system-upgrade-controller:${TAG}
docker build -t ${IMAGE} -f ./package/Dockerfile .
echo ${IMAGE} > ./dist/images
echo Built ${IMAGE}
docker image build -t ${IMAGE} -f ./package/Dockerfile .
docker image tag ${IMAGE} ${REPO}/system-upgrade-controller:latest
docker image save --output ./dist/images.tar \
${REPO}/system-upgrade-controller:${TAG} \
${REPO}/system-upgrade-controller:latest
echo ${IMAGE} > ./dist/images.txt
echo Built ${IMAGE} and ${REPO}/system-upgrade-controller:latest
2 changes: 1 addition & 1 deletion scripts/test
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,5 @@ set -e
cd $(dirname $0)/..


echo Running tests
echo "Running tests"
go test -cover -tags=test ./pkg/...

0 comments on commit 0e77c79

Please sign in to comment.