K8SPS-359: Fix rebooting cluster if it's partially online #725

Merged 4 commits on Aug 19, 2024
Changes from 1 commit
13 changes: 13 additions & 0 deletions cmd/bootstrap/group_replication.go
@@ -373,6 +373,19 @@ func bootstrapGroupReplication(ctx context.Context) error {

log.Printf("Cluster status:\n%s", status)

for _, member := range status.DefaultReplicaSet.Topology {
if member.MemberRole == innodbcluster.MemberRolePrimary && member.MemberState != innodbcluster.MemberStateOnline {
log.Printf("Primary (%s) is not ONLINE. Starting full cluster crash recovery...", member.Address)

if err := handleFullClusterCrash(ctx); err != nil {
return errors.Wrap(err, "handle full cluster crash")
}

// force restart container
os.Exit(1)
}
}

member, ok := status.DefaultReplicaSet.Topology[fmt.Sprintf("%s:%d", localShell.host, 3306)]
if !ok {
log.Printf("Adding instance (%s) to InnoDB cluster", localShell.host)
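The check exits the bootstrap container with a non-zero code as soon as it sees a primary that is not ONLINE, after calling handleFullClusterCrash. That helper is outside this diff; what follows is a minimal sketch of the handoff it plausibly performs, assuming a sentinel-file mechanism matching the fullClusterCrashFile check in cmd/bootstrap/main.go (the real helper may record more state):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/pkg/errors"
)

// handleFullClusterCrash is a sketch only: the real helper lives elsewhere in
// cmd/bootstrap. The sentinel path below is an assumption; main.go checks a
// fullClusterCrashFile constant on the next container start.
func handleFullClusterCrash(ctx context.Context) error {
	const fullClusterCrashFile = "/var/lib/mysql/full-cluster-crash" // assumed path

	// Creating the file marks this pod as having seen a full cluster crash,
	// so the operator's reconcileFullClusterCrash (crash_recovery.go) can
	// reboot the cluster from complete outage.
	f, err := os.Create(fullClusterCrashFile)
	if err != nil {
		return errors.Wrap(err, "create full cluster crash file")
	}
	return f.Close()
}

func main() {
	if err := handleFullClusterCrash(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```

Exiting with status 1 then makes the kubelet restart the container, and the next start takes the fullClusterCrash branch in main().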
4 changes: 3 additions & 1 deletion cmd/bootstrap/main.go
@@ -2,6 +2,7 @@ package main

import (
"context"
"io"
"log"
"os"
"path/filepath"
@@ -20,7 +21,8 @@ func main() {
log.Fatalf("error opening file: %v", err)
}
defer f.Close()
log.SetOutput(f)

log.SetOutput(io.MultiWriter(os.Stderr, f))

fullClusterCrash, err := fileExists(fullClusterCrashFile)
if err == nil && fullClusterCrash {
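This is the operator-visibility half of the fix: previously the bootstrap log went only to a file inside the container, so `kubectl logs` showed nothing while recovery was happening. io.MultiWriter fans every log line out to both stderr and the file. A minimal standalone illustration, with a made-up log path:

```go
package main

import (
	"io"
	"log"
	"os"
)

func main() {
	// Illustrative path; the bootstrap binary opens its own log file.
	f, err := os.OpenFile("/tmp/bootstrap.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	defer f.Close()

	// Each line goes to both destinations: stderr is what `kubectl logs`
	// surfaces, the file persists for inspection inside the container.
	log.SetOutput(io.MultiWriter(os.Stderr, f))
	log.Print("bootstrapping group replication")
}
```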
5 changes: 4 additions & 1 deletion e2e-tests/functions
@@ -575,7 +575,10 @@ get_primary_from_haproxy() {
}

get_primary_from_group_replication() {
run_mysql "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" | cut -d'.' -f1
run_mysql \
"SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" \
"-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" \
| cut -d'.' -f1
Comment on lines +579 to +581 (Contributor): [shfmt] reported by reviewdog 🐶, with a whitespace-only suggested change to this run_mysql invocation.
}

verify_certificate_sans() {
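The helper resolves the current primary by querying performance_schema.replication_group_members through the router's read-write port (6446) and trimming the FQDN down to the pod name. The same lookup expressed in Go, where the DSN, service name, and credentials are assumptions taken from the e2e manifests:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Assumed DSN: router service on the read-write port 6446, credentials
	// as used by the e2e tests.
	db, err := sql.Open("mysql", "root:root_password@tcp(gr-self-healing-router:6446)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var host string
	query := "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY'"
	if err := db.QueryRow(query).Scan(&host); err != nil {
		log.Fatal(err)
	}
	// The shell helper keeps only the pod name via `cut -d'.' -f1`.
	fmt.Println("primary:", host)
}
```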
@@ -1,6 +1,5 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 10
commands:
- script: |-
set -o errexit
@@ -9,3 +8,4 @@ commands:
source ../../functions

deploy_chaos_mesh
timeout: 120
2 changes: 1 addition & 1 deletion e2e-tests/tests/gr-self-healing/14-cluster-crash.yaml
@@ -1,6 +1,5 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 30
commands:
- script: |-
set -o errexit
@@ -10,3 +9,4 @@ commands:

kill_pods "${NAMESPACE}" "label" "app.kubernetes.io/instance" "gr-self-healing" "cluster-crash"
sleep 30 # wait for crash
timeout: 40
202 changes: 202 additions & 0 deletions e2e-tests/tests/gr-self-healing/17-assert.yaml
@@ -0,0 +1,202 @@
apiVersion: kuttl.dev/v1beta1
kind: TestAssert
timeout: 480
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: gr-self-healing-mysql
status:
observedGeneration: 1
replicas: 3
readyReplicas: 3
currentReplicas: 3
updatedReplicas: 3
collisionCount: 0
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: gr-self-healing-router
status:
observedGeneration: 1
replicas: 3
readyReplicas: 3
updatedReplicas: 3
---
apiVersion: ps.percona.com/v1alpha1
kind: PerconaServerMySQL
metadata:
name: gr-self-healing
finalizers:
- percona.com/delete-mysql-pods-in-order
status:
mysql:
ready: 3
size: 3
state: ready
router:
ready: 3
size: 3
state: ready
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: mysql
app.kubernetes.io/instance: gr-self-healing
app.kubernetes.io/managed-by: percona-server-operator
app.kubernetes.io/name: percona-server
app.kubernetes.io/part-of: percona-server
name: gr-self-healing-mysql
ownerReferences:
- apiVersion: ps.percona.com/v1alpha1
blockOwnerDeletion: true
controller: true
kind: PerconaServerMySQL
name: gr-self-healing
spec:
clusterIP: None
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: 3306
- name: mysql-admin
port: 33062
protocol: TCP
targetPort: 33062
- name: mysqlx
port: 33060
protocol: TCP
targetPort: 33060
- name: http
port: 6450
protocol: TCP
targetPort: 6450
- name: mysql-gr
port: 33061
protocol: TCP
targetPort: 33061
selector:
app.kubernetes.io/component: mysql
app.kubernetes.io/instance: gr-self-healing
app.kubernetes.io/managed-by: percona-server-operator
app.kubernetes.io/name: percona-server
app.kubernetes.io/part-of: percona-server
sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: router
app.kubernetes.io/instance: gr-self-healing
app.kubernetes.io/managed-by: percona-server-operator
app.kubernetes.io/name: percona-server
app.kubernetes.io/part-of: percona-server
name: gr-self-healing-router
ownerReferences:
- apiVersion: ps.percona.com/v1alpha1
blockOwnerDeletion: true
controller: true
kind: PerconaServerMySQL
name: gr-self-healing
spec:
ports:
- name: http
port: 8443
protocol: TCP
targetPort: 8443
- name: rw-default
port: 3306
protocol: TCP
targetPort: 6446
- name: read-write
port: 6446
protocol: TCP
targetPort: 6446
- name: read-only
port: 6447
protocol: TCP
targetPort: 6447
- name: x-read-write
port: 6448
protocol: TCP
targetPort: 6448
- name: x-read-only
port: 6449
protocol: TCP
targetPort: 6449
- name: x-default
port: 33060
protocol: TCP
targetPort: 33060
- name: rw-admin
port: 33062
protocol: TCP
targetPort: 33062
selector:
app.kubernetes.io/component: router
app.kubernetes.io/instance: gr-self-healing
app.kubernetes.io/managed-by: percona-server-operator
app.kubernetes.io/name: percona-server
app.kubernetes.io/part-of: percona-server
sessionAffinity: None
type: ClusterIP
---
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
name: chaos-kill-label-cluster-crash
spec:
action: pod-kill
mode: all
status:
experiment:
containerRecords:
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
- events:
- operation: Apply
type: Succeeded
injectedCount: 1
phase: Injected
recoveredCount: 0
selectorKey: .
desiredPhase: Run
16 changes: 16 additions & 0 deletions e2e-tests/tests/gr-self-healing/17-quorum-loss.yaml
@@ -0,0 +1,16 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 30
commands:
- script: |-
set -o errexit
set -o xtrace

source ../../functions

primary=$(get_primary_from_group_replication)
a_replica=$(run_mysql \
"SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='SECONDARY' LIMIT 1;" \
"-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" | cut -d'.' -f1)

kubectl -n ${NAMESPACE} delete pod ${primary} ${a_replica} --force --grace-period=0
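The step force-deletes two of the three group members, the primary plus one secondary. Group Replication needs a strict majority to keep quorum, so with three members the majority is two, and losing two leaves the survivor unable to make progress; 17-assert.yaml above then expects the operator to bring the cluster back to ready. A tiny illustration of the majority rule:

```go
package main

import "fmt"

// quorum returns the minimum number of ONLINE members Group Replication
// needs to retain a majority in a group of the given size.
func quorum(groupSize int) int { return groupSize/2 + 1 }

func main() {
	fmt.Println(quorum(3)) // 2: killing 2 of 3 members loses quorum
}
```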
19 changes: 19 additions & 0 deletions pkg/controller/ps/crash_recovery.go
@@ -7,6 +7,9 @@ import (
"strings"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"

apiv1alpha1 "github.com/percona/percona-server-mysql-operator/api/v1alpha1"
@@ -93,6 +96,22 @@ func (r *PerconaServerMySQLReconciler) reconcileFullClusterCrash(ctx context.Con
}
break
}

if strings.Contains(err.Error(), "The Cluster is ONLINE") {
log.Info("Tried to reboot the cluster but MySQL says the cluster is already online")
log.Info("Deleting all MySQL pods")
err := r.Client.DeleteAllOf(ctx, &corev1.Pod{}, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
LabelSelector: labels.SelectorFromSet(mysql.MatchLabels(cr)),
},
})
if err != nil {
return errors.Wrap(err, "failed to delete MySQL pods")
}
break
}

log.Error(err, "failed to reboot cluster from complete outage")
}

return nil
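This branch covers the race the PR fixes: rebootClusterFromCompleteOutage fails with "The Cluster is ONLINE" when part of the cluster recovered on its own, so instead of looping on the error the reconciler deletes every MySQL pod carrying the cluster's labels and lets the StatefulSet recreate them against the already-online group. The label set comes from mysql.MatchLabels; a sketch of what it plausibly returns, inferred from the Service selectors asserted in 17-assert.yaml:

```go
package main

import "fmt"

// matchLabels sketches the selector used by DeleteAllOf above, assuming
// mysql.MatchLabels follows the app.kubernetes.io/* convention visible in the
// Service selectors asserted in e2e-tests/tests/gr-self-healing/17-assert.yaml.
func matchLabels(instance string) map[string]string {
	return map[string]string{
		"app.kubernetes.io/component":  "mysql",
		"app.kubernetes.io/instance":   instance, // e.g. "gr-self-healing"
		"app.kubernetes.io/managed-by": "percona-server-operator",
		"app.kubernetes.io/name":       "percona-server",
		"app.kubernetes.io/part-of":    "percona-server",
	}
}

func main() {
	fmt.Println(matchLabels("gr-self-healing"))
}
```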
8 changes: 8 additions & 0 deletions pkg/innodbcluster/innodbcluster.go
@@ -34,8 +34,16 @@ const (
MemberStateMissing MemberState = "(MISSING)"
)

type MemberRole string

const (
MemberRolePrimary MemberRole = "PRIMARY"
MemberRoleSecondary MemberRole = "SECONDARY"
)

type Member struct {
Address string `json:"address"`
MemberRole MemberRole `json:"memberRole"`
MemberState MemberState `json:"status"`
InstanceErrors []string `json:"instanceErrors"`
}
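The new MemberRole type maps the memberRole field of MySQL Shell's cluster.status() JSON, which is what lets the bootstrap code above tell a crashed primary from a healthy one. A minimal decoding example; the Status and ReplicaSet wrappers are assumptions inferred from how the bootstrap walks status.DefaultReplicaSet.Topology:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type MemberState string
type MemberRole string

type Member struct {
	Address        string      `json:"address"`
	MemberRole     MemberRole  `json:"memberRole"`
	MemberState    MemberState `json:"status"`
	InstanceErrors []string    `json:"instanceErrors"`
}

// Assumed wrappers: the real types live in pkg/innodbcluster; only the fields
// touched by cmd/bootstrap are sketched here.
type ReplicaSet struct {
	Topology map[string]Member `json:"topology"`
}

type Status struct {
	DefaultReplicaSet ReplicaSet `json:"defaultReplicaSet"`
}

func main() {
	raw := `{"defaultReplicaSet":{"topology":{
		"mysql-0:3306":{"address":"mysql-0:3306","memberRole":"PRIMARY","status":"OFFLINE"},
		"mysql-1:3306":{"address":"mysql-1:3306","memberRole":"SECONDARY","status":"ONLINE"}}}}`

	var s Status
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	for _, m := range s.DefaultReplicaSet.Topology {
		// The bootstrap check: a PRIMARY that is not ONLINE triggers full
		// cluster crash recovery.
		fmt.Printf("%s role=%s state=%s\n", m.Address, m.MemberRole, m.MemberState)
	}
}
```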