From 2f281d65597f6c71974551b06a90d4c4b1580f1f Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 6 Sep 2023 12:39:30 +0300 Subject: [PATCH 01/53] * add initial tests for DMT * transition to event driven arch with ITopic --- base/commands/migration/const.go | 5 +- base/commands/migration/stages.go | 183 ------------- base/commands/migration/start_stages.go | 240 ++++++++++++++++++ .../migration/start_stages_it_test.go | 102 ++++++++ base/commands/migration/utils.go | 4 +- 5 files changed, 347 insertions(+), 187 deletions(-) delete mode 100644 base/commands/migration/stages.go create mode 100644 base/commands/migration/start_stages.go create mode 100644 base/commands/migration/start_stages_it_test.go diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 3888287c9..ec63bd481 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -1,6 +1,7 @@ package migration const ( - startQueueName = "__datamigration_start_queue" - statusMapEntryName = "status" + StartQueueName = "__datamigration_start_queue" + StatusMapEntryName = "status" + updateTopic = "__datamigration_updates_" ) diff --git a/base/commands/migration/stages.go b/base/commands/migration/stages.go deleted file mode 100644 index b123d8946..000000000 --- a/base/commands/migration/stages.go +++ /dev/null @@ -1,183 +0,0 @@ -package migration - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" - "github.com/hazelcast/hazelcast-go-client" - "github.com/hazelcast/hazelcast-go-client/serialization" - "golang.org/x/exp/slices" - - "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" - "github.com/hazelcast/hazelcast-commandline-client/internal/plug" -) - -type Stages struct { - migrationID string - configDir string - ci *hazelcast.ClientInternal - startQueue *hazelcast.Queue - statusMap *hazelcast.Map -} - -func NewStages(migrationID, configDir string) *Stages { - if migrationID == "" { - panic("migrationID is required") - } - return &Stages{ - migrationID: migrationID, - configDir: configDir, - } -} - -func (st *Stages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { - return []stage.Stage{ - { - ProgressMsg: "Connecting to the migration cluster", - SuccessMsg: "Connected to the migration cluster", - FailureMsg: "Could not connect to the migration cluster", - Func: st.connectStage(ctx, ec), - }, - { - ProgressMsg: "Starting the migration", - SuccessMsg: "Started the migration", - FailureMsg: "Could not start the migration", - Func: st.startStage(ctx), - }, - { - ProgressMsg: "Migrating the cluster", - SuccessMsg: "Migrated the cluster", - FailureMsg: "Could not migrate the cluster", - Func: st.migrateStage(ctx), - }, - } -} - -func (st *Stages) connectStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { - return func(status stage.Statuser) error { - var err error - st.ci, err = ec.ClientInternal(ctx) - if err != nil { - return err - } - st.startQueue, err = st.ci.Client().GetQueue(ctx, startQueueName) - if err != nil { - return err - } - st.statusMap, err = st.ci.Client().GetMap(ctx, makeStatusMapName(st.migrationID)) - if err != nil { - return err - } - return nil - } -} - -func (st *Stages) startStage(ctx context.Context) func(stage.Statuser) error { - return func(stage.Statuser) error { - if err := st.statusMap.Delete(ctx, statusMapEntryName); err != nil { - return err - } - var cb configBundle - cb.MigrationID = st.migrationID - if err := 
cb.Walk(st.configDir); err != nil { - return err - } - b, err := json.Marshal(cb) - if err != nil { - return err - } - if err = st.startQueue.Put(ctx, serialization.JSON(b)); err != nil { - return err - } - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if err = st.waitForStatus(ctx, time.Second, statusInProgress, statusComplete); err != nil { - return err - } - return nil - } -} - -func (st *Stages) migrateStage(ctx context.Context) func(statuser stage.Statuser) error { - return func(stage.Statuser) error { - return st.waitForStatus(ctx, 5*time.Second, statusComplete) - } -} - -func (st *Stages) waitForStatus(ctx context.Context, waitInterval time.Duration, targetStatuses ...status) error { - timeoutErr := fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ - "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", - context.DeadlineExceeded) - for { - if err := ctx.Err(); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr - } - return fmt.Errorf("migration failed: %w", err) - } - s, err := st.readStatus(ctx) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr - } - return fmt.Errorf("reading status: %w", err) - } - switch s { - case statusComplete: - return nil - case statusCanceled: - return clcerrors.ErrUserCancelled - case statusFailed: - return errors.New("migration failed") - } - if slices.Contains(targetStatuses, s) { - return nil - } - time.Sleep(waitInterval) - } -} - -func (st *Stages) readStatus(ctx context.Context) (status, error) { - v, err := st.statusMap.Get(ctx, statusMapEntryName) - if err != nil { - return statusNone, err - } - if v == nil { - return statusNone, nil - } - var b []byte - if vv, ok := v.(string); ok { - b = []byte(vv) - } else if vv, ok := v.(serialization.JSON); ok { - b = vv - } else { - return statusNone, fmt.Errorf("invalid status value") - } - var ms migrationStatus - if err := json.Unmarshal(b, &ms); err != nil { - return statusNone, fmt.Errorf("unmarshaling status: %w", err) - } - return ms.Status, nil -} - -func makeStatusMapName(migrationID string) string { - return "__datamigration_" + migrationID -} - -type status string - -const ( - statusNone status = "" - statusComplete status = "COMPLETED" - statusCanceled status = "CANCELED" - statusFailed status = "FAILED" - statusInProgress status = "IN_PROGRESS" -) - -type migrationStatus struct { - Status status `json:"status"` -} diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go new file mode 100644 index 000000000..2c6b8a55a --- /dev/null +++ b/base/commands/migration/start_stages.go @@ -0,0 +1,240 @@ +package migration + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" + "github.com/hazelcast/hazelcast-go-client" + "github.com/hazelcast/hazelcast-go-client/serialization" + "github.com/hazelcast/hazelcast-go-client/types" + "golang.org/x/exp/slices" + + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" + "github.com/hazelcast/hazelcast-commandline-client/internal/plug" +) + +type Stages struct { + migrationID string + configDir string + ci *hazelcast.ClientInternal + startQueue *hazelcast.Queue + statusMap *hazelcast.Map + updateTopic *hazelcast.Topic + topicListenerID types.UUID + updateMessageChan chan UpdateMessage +} + +var timeoutErr = 
fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ + "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", + context.DeadlineExceeded) + +func NewStages(migrationID, configDir string) *Stages { + if migrationID == "" { + panic("migrationID is required") + } + return &Stages{ + migrationID: migrationID, + configDir: configDir, + } +} + +func (st *Stages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { + return []stage.Stage{ + { + ProgressMsg: "Connecting to the migration cluster", + SuccessMsg: "Connected to the migration cluster", + FailureMsg: "Could not connect to the migration cluster", + Func: st.connectStage(ctx, ec), + }, + { + ProgressMsg: "Starting the migration", + SuccessMsg: "Started the migration", + FailureMsg: "Could not start the migration", + Func: st.startStage(ctx, ec), + }, + { + ProgressMsg: "Migrating the cluster", + SuccessMsg: "Migrated the cluster", + FailureMsg: "Could not migrate the cluster", + Func: st.migrateStage(ctx, ec), + }, + } +} + +func (st *Stages) connectStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { + return func(status stage.Statuser) error { + var err error + st.ci, err = ec.ClientInternal(ctx) + if err != nil { + return err + } + st.startQueue, err = st.ci.Client().GetQueue(ctx, StartQueueName) + if err != nil { + return err + } + st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) + if err != nil { + return err + } + st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) + if err != nil { + return err + } + st.updateMessageChan = make(chan UpdateMessage) + _, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) + return err + } +} + +func (st *Stages) topicListener(event *hazelcast.MessagePublished) { + st.updateMessageChan <- event.Value.(UpdateMessage) +} + +func (st *Stages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { + return func(stage.Statuser) error { + if err := st.statusMap.Delete(ctx, StatusMapEntryName); err != nil { + return err + } + var cb ConfigBundle + cb.MigrationID = st.migrationID + if err := cb.Walk(st.configDir); err != nil { + return err + } + b, err := json.Marshal(cb) + if err != nil { + return err + } + if err = st.startQueue.Put(ctx, serialization.JSON(b)); err != nil { + return err + } + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + msg := <-st.updateMessageChan // read the first message + if slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { + ms, err := st.readMigrationStatus(ctx) + if ctx.Err() != nil { + if errors.Is(err, context.DeadlineExceeded) { + return timeoutErr + } + return fmt.Errorf("migration failed: %w", err) + } + if err != nil { + return fmt.Errorf("reading status: %w", err) + } + ec.PrintlnUnnecessary(msg.Message) + ec.PrintlnUnnecessary(ms.Report) + switch ms.Status { + case StatusComplete: + return nil + case statusCanceled: + return clcerrors.ErrUserCancelled + case StatusFailed: + return errors.New("migration failed") + } + } else { + ec.PrintlnUnnecessary(msg.Message) + } + return nil + } +} + +func (st *Stages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { + return func(stage.Statuser) error { + defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) + for { + select { + case msg := <-st.updateMessageChan: + if 
slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { + ms, err := st.readMigrationStatus(ctx) + if err != nil { + return fmt.Errorf("reading status: %w", err) + } + ec.PrintlnUnnecessary(msg.Message) + ec.PrintlnUnnecessary(ms.Report) + switch ms.Status { + case StatusComplete: + return nil + case statusCanceled: + return clcerrors.ErrUserCancelled + case StatusFailed: + return errors.New("migration failed") + } + } else { + ec.PrintlnUnnecessary(msg.Message) + } + case <-ctx.Done(): + if err := ctx.Err(); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return timeoutErr + } + return fmt.Errorf("migration failed: %w", err) + } + } + } + } +} + +func (st *Stages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { + v, err := st.statusMap.Get(ctx, StatusMapEntryName) + if err != nil { + return migrationStatusNone, err + } + if v == nil { + return migrationStatusNone, nil + } + var b []byte + if vv, ok := v.(string); ok { + b = []byte(vv) + } else if vv, ok := v.(serialization.JSON); ok { + b = vv + } else { + return migrationStatusNone, fmt.Errorf("invalid status value") + } + var ms MigrationStatus + if err := json.Unmarshal(b, &ms); err != nil { + return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) + } + return ms, nil +} + +func MakeStatusMapName(migrationID string) string { + return "__datamigration_" + migrationID +} + +func MakeUpdateTopicName(migrationID string) string { + return updateTopic + migrationID +} + +type status string + +const ( + statusNone status = "" + StatusComplete status = "COMPLETED" + statusCanceled status = "CANCELED" + StatusFailed status = "FAILED" + StatusInProgress status = "IN_PROGRESS" +) + +type MigrationStatus struct { + Status status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` +} + +var migrationStatusNone = MigrationStatus{ + Status: statusNone, + Logs: nil, + Errors: nil, + Report: "", +} + +type UpdateMessage struct { + Status status `json:"status"` + CompletionPercentage float32 `json:"completionPercentage"` + Message string `json:"message"` +} diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go new file mode 100644 index 000000000..e071fbc1b --- /dev/null +++ b/base/commands/migration/start_stages_it_test.go @@ -0,0 +1,102 @@ +//go:build migration + +package migration_test + +import ( + "context" + "encoding/json" + "testing" + "time" + + _ "github.com/hazelcast/hazelcast-commandline-client/base" + _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" + "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" + "github.com/hazelcast/hazelcast-commandline-client/internal/it" + "github.com/hazelcast/hazelcast-go-client/serialization" +) + +func TestMigration(t *testing.T) { + testCases := []struct { + name string + f func(t *testing.T) + }{ + {name: "start_Successful", f: startTest_Successful}, + {name: "start_Failure", f: startTest_Failure}, + } + for _, tc := range testCases { + t.Run(tc.name, tc.f) + } +} + +func startTest_Successful(t *testing.T) { + tcx := it.TestContext{T: t} + ctx := context.Background() + tcx.Tester(func(tcx it.TestContext) { + go tcx.WithReset(func() { + Must(tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes")) + }) + successfulRunner(tcx, ctx) + for _, m := range []string{"first message", "second message", "last message", "status report"} { + tcx.AssertStdoutContains(m) + } + }) +} + +func startTest_Failure(t *testing.T) { + tcx := it.TestContext{T: t} + ctx := context.Background() + tcx.Tester(func(tcx it.TestContext) { + go tcx.WithReset(func() { + tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") + }) + failureRunner(tcx, ctx) + for _, m := range []string{"first message", "second message", "fail status report"} { + tcx.AssertStdoutContains(m) + } + }) +} + +func successfulRunner(tcx it.TestContext, ctx context.Context) { + c := make(chan string, 1) + go findMigrationID(ctx, tcx, c) + migrationID := <-c + topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "second message"})) + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) + b := MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusComplete, + Report: "status report", + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message"})) +} + +func failureRunner(tcx it.TestContext, ctx context.Context) { + c := make(chan string, 1) + go findMigrationID(ctx, tcx, c) + migrationID := <-c + topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) + b := MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusFailed, + Report: "fail status report", + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message"})) +} + +func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { + q := MustValue(tcx.Client.GetQueue(ctx, migration.StartQueueName)) + var b migration.ConfigBundle + for { + v := MustValue(q.PollWithTimeout(ctx, 2*time.Second)) + if v != nil { + Must(json.Unmarshal(v.(serialization.JSON), &b)) + c <- b.MigrationID + break + } + } +} diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index 61b364cc8..699d47968 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -21,7 +21,7 @@ type bundleFile struct { Content string `json:"content"` } -type 
configBundle struct { +type ConfigBundle struct { MigrationID string `json:"migrationId"` ConfigPath string `json:"configPath"` Source []bundleFile `json:"source"` @@ -30,7 +30,7 @@ type configBundle struct { ReplicatedMaps []string `json:"replicatedMaps"` } -func (cb *configBundle) Walk(root string) error { +func (cb *ConfigBundle) Walk(root string) error { var err error cb.IMaps, err = readItems(filepath.Join(root, "data", "imap_names.txt")) if err != nil { From f79befd40de4263efa9259a049395f5b0cd9589b Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 6 Sep 2023 12:55:09 +0300 Subject: [PATCH 02/53] add errors and logs --- base/commands/migration/start_stages.go | 11 +++++++++-- base/commands/migration/start_stages_it_test.go | 4 +++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 2c6b8a55a..4d666919e 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "strings" "time" clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -127,13 +128,16 @@ func (st *Stages) startStage(ctx context.Context, ec plug.ExecContext) func(stag } ec.PrintlnUnnecessary(msg.Message) ec.PrintlnUnnecessary(ms.Report) + for _, l := range ms.Logs { + ec.Logger().Info(l) + } switch ms.Status { case StatusComplete: return nil case statusCanceled: return clcerrors.ErrUserCancelled case StatusFailed: - return errors.New("migration failed") + return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) } } else { ec.PrintlnUnnecessary(msg.Message) @@ -155,13 +159,16 @@ func (st *Stages) migrateStage(ctx context.Context, ec plug.ExecContext) func(st } ec.PrintlnUnnecessary(msg.Message) ec.PrintlnUnnecessary(ms.Report) + for _, l := range ms.Logs { + ec.Logger().Info(l) + } switch ms.Status { case StatusComplete: return nil case statusCanceled: return clcerrors.ErrUserCancelled case StatusFailed: - return errors.New("migration failed") + return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) } } else { ec.PrintlnUnnecessary(msg.Message) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index e071fbc1b..04dd947b9 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -51,7 +51,7 @@ func startTest_Failure(t *testing.T) { tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) failureRunner(tcx, ctx) - for _, m := range []string{"first message", "second message", "fail status report"} { + for _, m := range []string{"first message", "second message", "error1", "error2", "fail status report"} { tcx.AssertStdoutContains(m) } }) @@ -68,6 +68,7 @@ func successfulRunner(tcx it.TestContext, ctx context.Context) { b := MustValue(json.Marshal(migration.MigrationStatus{ Status: migration.StatusComplete, Report: "status report", + Logs: []string{"log1", "log2"}, })) Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message"})) @@ -83,6 +84,7 @@ func failureRunner(tcx it.TestContext, ctx context.Context) { b := MustValue(json.Marshal(migration.MigrationStatus{ Status: migration.StatusFailed, Report: "fail status report", + Errors: []string{"error1", "error2"}, })) Must(statusMap.Set(ctx, 
migration.StatusMapEntryName, serialization.JSON(b))) Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message"})) From 8b68b51ebf012eca8225570b29bf128d276fccd6 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 10:08:42 +0300 Subject: [PATCH 03/53] [CLC-311]: Add status command to DMT --- base/commands/migration/const.go | 7 +- base/commands/migration/migration_start.go | 2 +- base/commands/migration/migration_status.go | 41 ++++ base/commands/migration/start_stages.go | 37 ++-- base/commands/migration/status_stages.go | 178 ++++++++++++++++++ .../migration/status_stages_it_test.go | 110 +++++++++++ base/commands/migration/utils.go | 2 +- 7 files changed, 359 insertions(+), 18 deletions(-) create mode 100644 base/commands/migration/migration_status.go create mode 100644 base/commands/migration/status_stages.go create mode 100644 base/commands/migration/status_stages_it_test.go diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index ec63bd481..3f832219e 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -1,7 +1,8 @@ package migration const ( - StartQueueName = "__datamigration_start_queue" - StatusMapEntryName = "status" - updateTopic = "__datamigration_updates_" + StartQueueName = "__datamigration_start_queue" + StatusMapEntryName = "status" + updateTopic = "__datamigration_updates_" + MigrationsInProgressList = "__datamigrations_in_progress" ) diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 95136cb92..988991d93 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -45,7 +45,7 @@ Selected data structures in the source cluster will be migrated to the target cl } } ec.PrintlnUnnecessary("") - sts := NewStages(makeMigrationID(), ec.Args()[0]) + sts := NewStartStages(MakeMigrationID(), ec.Args()[0]) sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) if err := stage.Execute(ctx, ec, sp); err != nil { return err diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go new file mode 100644 index 000000000..964f11d77 --- /dev/null +++ b/base/commands/migration/migration_status.go @@ -0,0 +1,41 @@ +package migration + +import ( + "context" + + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" + "github.com/hazelcast/hazelcast-commandline-client/internal/check" + "github.com/hazelcast/hazelcast-commandline-client/internal/plug" +) + +type StatusCmd struct{} + +func (s StatusCmd) Unwrappable() {} + +func (s StatusCmd) Init(cc plug.InitContext) error { + cc.SetCommandUsage("status [flags]") + cc.SetCommandGroup("migration") + help := "Get status of the data migration" + cc.SetCommandHelp(help, help) + cc.SetPositionalArgCount(0, 0) + return nil +} + +func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { + ec.PrintlnUnnecessary("") + ec.PrintlnUnnecessary(`Hazelcast Data Migration Tool v5.3.0 +(c) 2023 Hazelcast, Inc. +`) + sts := NewStatusStages() + sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) 
+ if err := stage.Execute(ctx, ec, sp); err != nil { + return err + } + ec.PrintlnUnnecessary("") + ec.PrintlnUnnecessary("OK") + return nil +} + +func init() { + check.Must(plug.Registry.RegisterCommand("status", &StatusCmd{})) +} diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 4d666919e..cab070465 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -18,7 +18,7 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/plug" ) -type Stages struct { +type StartStages struct { migrationID string configDir string ci *hazelcast.ClientInternal @@ -33,17 +33,17 @@ var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout w "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func NewStages(migrationID, configDir string) *Stages { +func NewStartStages(migrationID, configDir string) *StartStages { if migrationID == "" { panic("migrationID is required") } - return &Stages{ + return &StartStages{ migrationID: migrationID, configDir: configDir, } } -func (st *Stages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { +func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { return []stage.Stage{ { ProgressMsg: "Connecting to the migration cluster", @@ -66,7 +66,7 @@ func (st *Stages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage } } -func (st *Stages) connectStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { +func (st *StartStages) connectStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { return func(status stage.Statuser) error { var err error st.ci, err = ec.ClientInternal(ctx) @@ -91,11 +91,11 @@ func (st *Stages) connectStage(ctx context.Context, ec plug.ExecContext) func(st } } -func (st *Stages) topicListener(event *hazelcast.MessagePublished) { +func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { st.updateMessageChan <- event.Value.(UpdateMessage) } -func (st *Stages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { +func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { return func(stage.Statuser) error { if err := st.statusMap.Delete(ctx, StatusMapEntryName); err != nil { return err @@ -146,7 +146,7 @@ func (st *Stages) startStage(ctx context.Context, ec plug.ExecContext) func(stag } } -func (st *Stages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { +func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { return func(stage.Statuser) error { defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { @@ -185,7 +185,7 @@ func (st *Stages) migrateStage(ctx context.Context, ec plug.ExecContext) func(st } } -func (st *Stages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { +func (st *StartStages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { v, err := st.statusMap.Get(ctx, StatusMapEntryName) if err != nil { return migrationStatusNone, err @@ -227,10 +227,21 @@ const ( ) type MigrationStatus struct { - Status status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` + Status status `json:"status"` + Logs []string `json:"logs"` + Errors 
[]string `json:"errors"` + Report string `json:"report"` + Migrations []Migration `json:"migrations"` +} + +type Migration struct { + Name string `json:"name"` + Type string `json:"type"` + Status status `json:"status"` + StartTimestamp time.Time `json:"startTimestamp"` + EntriesMigrated int `json:"entriesMigrated"` + TotalEntries int `json:"totalEntries"` + CompletionPercentage float64 `json:"completionPercentage"` } var migrationStatusNone = MigrationStatus{ diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go new file mode 100644 index 000000000..5f158432a --- /dev/null +++ b/base/commands/migration/status_stages.go @@ -0,0 +1,178 @@ +package migration + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" + "github.com/hazelcast/hazelcast-commandline-client/internal/output" + "github.com/hazelcast/hazelcast-commandline-client/internal/plug" + serialization2 "github.com/hazelcast/hazelcast-commandline-client/internal/serialization" + "github.com/hazelcast/hazelcast-go-client" + "github.com/hazelcast/hazelcast-go-client/serialization" + "github.com/hazelcast/hazelcast-go-client/types" +) + +type StatusStages struct { + migrationID string + ci *hazelcast.ClientInternal + migrationsInProgressList *hazelcast.List + statusMap *hazelcast.Map + updateTopic *hazelcast.Topic + topicListenerID types.UUID + updateMessageChan chan UpdateMessage +} + +func NewStatusStages() *StatusStages { + return &StatusStages{} +} + +func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { + return []stage.Stage{ + { + ProgressMsg: "Connecting to the migration cluster", + SuccessMsg: "Connected to the migration cluster", + FailureMsg: "Could not connect to the migration cluster", + Func: st.connectStage(ctx, ec), + }, + { + ProgressMsg: "Fetching migration status", + SuccessMsg: "Fetched migration status", + FailureMsg: "Could not fetch migration status", + Func: st.fetchStage(ctx, ec), + }, + } +} + +type MigrationInProgress struct { + MigrationID string `json:"migrationId"` +} + +func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { + return func(status stage.Statuser) error { + var err error + st.ci, err = ec.ClientInternal(ctx) + if err != nil { + return err + } + st.migrationsInProgressList, err = st.ci.Client().GetList(ctx, MigrationsInProgressList) + if err != nil { + return err + } + all, err := st.migrationsInProgressList.GetAll(ctx) + if err != nil { + return err + } + m := all[0].(MigrationInProgress) + st.migrationID = m.MigrationID + st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) + if err != nil { + return err + } + st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) + if err != nil { + return err + } + st.updateMessageChan = make(chan UpdateMessage) + _, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) + return err + } +} + +func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { + return func(stage.Statuser) error { + defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) + for { + select { + case msg := <-st.updateMessageChan: + ms, err := st.readMigrationStatus(ctx) + if err != nil { + return fmt.Errorf("reading status: %w", err) + } + if slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { + 
ec.PrintlnUnnecessary(msg.Message) + ec.PrintlnUnnecessary(ms.Report) + if len(ms.Errors) > 0 { + ec.PrintlnUnnecessary(fmt.Sprintf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n"))) + } + if len(ms.Migrations) > 0 { + var rows []output.Row + for _, m := range ms.Migrations { + rows = append(rows, output.Row{ + output.Column{ + Name: "Name", + Type: serialization2.TypeString, + Value: m.Name, + }, + output.Column{ + Name: "Type", + Type: serialization2.TypeString, + Value: m.Type, + }, + output.Column{ + Name: "Status", + Type: serialization2.TypeString, + Value: string(m.Status), + }, + output.Column{ + Name: "Start Timestamp", + Type: serialization2.TypeJavaLocalDateTime, + Value: types.LocalDateTime(m.StartTimestamp), + }, + output.Column{ + Name: "Entries Migrated", + Type: serialization2.TypeInt32, + Value: int32(m.EntriesMigrated), + }, + output.Column{ + Name: "Total Entries", + Type: serialization2.TypeInt32, + Value: int32(m.TotalEntries), + }, + output.Column{ + Name: "Completion Percentage", + Type: serialization2.TypeFloat32, + Value: float32(m.CompletionPercentage), + }, + }) + } + return ec.AddOutputRows(ctx, rows...) + } + return nil + } else { + ec.PrintlnUnnecessary(msg.Message) + } + } + } + } +} + +func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) { + st.updateMessageChan <- event.Value.(UpdateMessage) +} + +func (st *StatusStages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { + v, err := st.statusMap.Get(ctx, StatusMapEntryName) + if err != nil { + return migrationStatusNone, err + } + if v == nil { + return migrationStatusNone, nil + } + var b []byte + if vv, ok := v.(string); ok { + b = []byte(vv) + } else if vv, ok := v.(serialization.JSON); ok { + b = vv + } else { + return migrationStatusNone, fmt.Errorf("invalid status value") + } + var ms MigrationStatus + if err := json.Unmarshal(b, &ms); err != nil { + return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) + } + return ms, nil +} diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go new file mode 100644 index 000000000..67f6b8b5e --- /dev/null +++ b/base/commands/migration/status_stages_it_test.go @@ -0,0 +1,110 @@ +//go:build migration + +package migration_test + +import ( + "context" + "encoding/json" + "sync" + "testing" + "time" + + _ "github.com/hazelcast/hazelcast-commandline-client/base" + _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" + "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" + "github.com/hazelcast/hazelcast-commandline-client/internal/it" + "github.com/hazelcast/hazelcast-go-client/serialization" + "github.com/stretchr/testify/require" +) + +func TestStatus(t *testing.T) { + testCases := []struct { + name string + f func(t *testing.T) + }{ + {name: "status", f: statusTest}, + } + for _, tc := range testCases { + t.Run(tc.name, tc.f) + } +} + +func statusTest(t *testing.T) { + tcx := it.TestContext{T: t} + ctx := context.Background() + tcx.Tester(func(tcx it.TestContext) { + mID := preStatusRunner(t, tcx, ctx) + var wg sync.WaitGroup + wg.Add(1) + go tcx.WithReset(func() { + defer wg.Done() + Must(tcx.CLC().Execute(ctx, "status")) + }) + time.Sleep(1 * time.Second) // give time to status command to register its topic listener + statusRunner(mID, tcx, ctx) + wg.Wait() + tcx.AssertStdoutContains(` +Hazelcast Data Migration Tool v5.3.0 +(c) 2023 Hazelcast, Inc. + + OK [1/2] Connected to the migration cluster. +first message +last message +status report +imap5 IMap FAILED 2023-01-01 00:00:00 141 1000 14.1 + OK [2/2] Fetched migration status. + +OK`) + }) +} + +func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { + mID := migration.MakeMigrationID() + l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) + ok := MustValue(l.Add(ctx, migration.MigrationInProgress{ + MigrationID: mID, + })) + require.Equal(t, true, ok) + return mID +} + +func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { + startTime := MustValue(time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")) + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) + b := MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusInProgress, + Report: "status report", + Migrations: []migration.Migration{ + { + Name: "imap5", + Type: "IMap", + Status: migration.StatusInProgress, + StartTimestamp: startTime, + EntriesMigrated: 121, + TotalEntries: 1000, + CompletionPercentage: 12.1, + }, + }, + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) + topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) + b = MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusFailed, + Report: "status report", + Migrations: []migration.Migration{ + { + Name: "imap5", + Type: "IMap", + Status: migration.StatusFailed, + StartTimestamp: startTime, + EntriesMigrated: 141, + TotalEntries: 1000, + CompletionPercentage: 14.1, + }, + }, + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) // update status map + Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusFailed, Message: "last message"})) +} diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index 699d47968..7b9df4235 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -119,6 +119,6 @@ func readPathAsString(path string) (string, error) { return string(b), nil } -func makeMigrationID() string { +func MakeMigrationID() string { return types.NewUUID().String() } From 3f76b0fdb958f019233e7ec7b0e81d818ca994f9 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 11:08:30 +0300 Subject: [PATCH 04/53] refactor --- base/commands/migration/common.go | 65 +++++++++++ 
base/commands/migration/const.go | 12 +- base/commands/migration/dummy.go | 2 + base/commands/migration/start_stages.go | 109 ++++-------------- base/commands/migration/status_stages.go | 29 +---- .../migration/status_stages_it_test.go | 36 +++--- base/commands/migration/utils.go | 8 ++ 7 files changed, 122 insertions(+), 139 deletions(-) create mode 100644 base/commands/migration/common.go diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go new file mode 100644 index 000000000..295ae357a --- /dev/null +++ b/base/commands/migration/common.go @@ -0,0 +1,65 @@ +package migration + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/hazelcast/hazelcast-go-client" + "github.com/hazelcast/hazelcast-go-client/serialization" +) + +type MigrationStatus struct { + Status Status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` + Migrations []Migration `json:"migrations"` +} + +type Migration struct { + Name string `json:"name"` + Type string `json:"type"` + Status Status `json:"status"` + StartTimestamp time.Time `json:"startTimestamp"` + EntriesMigrated int `json:"entriesMigrated"` + TotalEntries int `json:"totalEntries"` + CompletionPercentage float64 `json:"completionPercentage"` +} + +var migrationStatusNone = MigrationStatus{ + Status: StatusNone, + Logs: nil, + Errors: nil, + Report: "", +} + +type UpdateMessage struct { + Status Status `json:"status"` + CompletionPercentage float32 `json:"completionPercentage"` + Message string `json:"message"` +} + +func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (MigrationStatus, error) { + v, err := statusMap.Get(ctx, StatusMapEntryName) + if err != nil { + return migrationStatusNone, err + } + if v == nil { + return migrationStatusNone, nil + } + var b []byte + if vv, ok := v.(string); ok { + b = []byte(vv) + } else if vv, ok := v.(serialization.JSON); ok { + b = vv + } else { + return migrationStatusNone, fmt.Errorf("invalid status value") + } + var ms MigrationStatus + if err := json.Unmarshal(b, &ms); err != nil { + return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) + } + return ms, nil +} diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 3f832219e..64b1416b0 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -3,6 +3,16 @@ package migration const ( StartQueueName = "__datamigration_start_queue" StatusMapEntryName = "status" - updateTopic = "__datamigration_updates_" + UpdateTopic = "__datamigration_updates_" MigrationsInProgressList = "__datamigrations_in_progress" ) + +type Status string + +const ( + StatusNone Status = "" + StatusComplete Status = "COMPLETED" + StatusCanceled Status = "CANCELED" + StatusFailed Status = "FAILED" + StatusInProgress Status = "IN_PROGRESS" +) diff --git a/base/commands/migration/dummy.go b/base/commands/migration/dummy.go index 753000bce..b17283ab8 100644 --- a/base/commands/migration/dummy.go +++ b/base/commands/migration/dummy.go @@ -1 +1,3 @@ package migration + +// This file exists only for compilation diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index cab070465..8ac1fb7df 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -97,26 +97,18 @@ func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) 
func(stage.Statuser) error { return func(stage.Statuser) error { - if err := st.statusMap.Delete(ctx, StatusMapEntryName); err != nil { - return err - } - var cb ConfigBundle - cb.MigrationID = st.migrationID - if err := cb.Walk(st.configDir); err != nil { - return err - } - b, err := json.Marshal(cb) + cb, err := makeConfigBundle(st.configDir, st.migrationID) if err != nil { return err } - if err = st.startQueue.Put(ctx, serialization.JSON(b)); err != nil { + if err = st.startQueue.Put(ctx, cb); err != nil { return err } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() msg := <-st.updateMessageChan // read the first message - if slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { - ms, err := st.readMigrationStatus(ctx) + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { + ms, err := readMigrationStatus(ctx, st.statusMap) if ctx.Err() != nil { if errors.Is(err, context.DeadlineExceeded) { return timeoutErr @@ -134,7 +126,7 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func switch ms.Status { case StatusComplete: return nil - case statusCanceled: + case StatusCanceled: return clcerrors.ErrUserCancelled case StatusFailed: return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) @@ -146,14 +138,27 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func } } +func makeConfigBundle(configDir, migrationID string) (serialization.JSON, error) { + var cb ConfigBundle + cb.MigrationID = migrationID + if err := cb.Walk(configDir); err != nil { + return nil, err + } + b, err := json.Marshal(cb) + if err != nil { + return nil, err + } + return b, nil +} + func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { return func(stage.Statuser) error { defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { select { case msg := <-st.updateMessageChan: - if slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { - ms, err := st.readMigrationStatus(ctx) + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { + ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { return fmt.Errorf("reading status: %w", err) } @@ -165,7 +170,7 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu switch ms.Status { case StatusComplete: return nil - case statusCanceled: + case StatusCanceled: return clcerrors.ErrUserCancelled case StatusFailed: return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) @@ -184,75 +189,3 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu } } } - -func (st *StartStages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { - v, err := st.statusMap.Get(ctx, StatusMapEntryName) - if err != nil { - return migrationStatusNone, err - } - if v == nil { - return migrationStatusNone, nil - } - var b []byte - if vv, ok := v.(string); ok { - b = []byte(vv) - } else if vv, ok := v.(serialization.JSON); ok { - b = vv - } else { - return migrationStatusNone, fmt.Errorf("invalid status value") - } - var ms MigrationStatus - if err := json.Unmarshal(b, &ms); err != nil { - return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) - } - return ms, nil -} - -func MakeStatusMapName(migrationID string) string { - return "__datamigration_" + migrationID 
-} - -func MakeUpdateTopicName(migrationID string) string { - return updateTopic + migrationID -} - -type status string - -const ( - statusNone status = "" - StatusComplete status = "COMPLETED" - statusCanceled status = "CANCELED" - StatusFailed status = "FAILED" - StatusInProgress status = "IN_PROGRESS" -) - -type MigrationStatus struct { - Status status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` - Migrations []Migration `json:"migrations"` -} - -type Migration struct { - Name string `json:"name"` - Type string `json:"type"` - Status status `json:"status"` - StartTimestamp time.Time `json:"startTimestamp"` - EntriesMigrated int `json:"entriesMigrated"` - TotalEntries int `json:"totalEntries"` - CompletionPercentage float64 `json:"completionPercentage"` -} - -var migrationStatusNone = MigrationStatus{ - Status: statusNone, - Logs: nil, - Errors: nil, - Report: "", -} - -type UpdateMessage struct { - Status status `json:"status"` - CompletionPercentage float32 `json:"completionPercentage"` - Message string `json:"message"` -} diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 5f158432a..e94afaebf 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -2,7 +2,6 @@ package migration import ( "context" - "encoding/json" "fmt" "slices" "strings" @@ -12,7 +11,6 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/plug" serialization2 "github.com/hazelcast/hazelcast-commandline-client/internal/serialization" "github.com/hazelcast/hazelcast-go-client" - "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/hazelcast/hazelcast-go-client/types" ) @@ -88,11 +86,11 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun for { select { case msg := <-st.updateMessageChan: - ms, err := st.readMigrationStatus(ctx) + ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { return fmt.Errorf("reading status: %w", err) } - if slices.Contains([]status{StatusComplete, StatusFailed, statusCanceled}, msg.Status) { + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { ec.PrintlnUnnecessary(msg.Message) ec.PrintlnUnnecessary(ms.Report) if len(ms.Errors) > 0 { @@ -153,26 +151,3 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) { st.updateMessageChan <- event.Value.(UpdateMessage) } - -func (st *StatusStages) readMigrationStatus(ctx context.Context) (MigrationStatus, error) { - v, err := st.statusMap.Get(ctx, StatusMapEntryName) - if err != nil { - return migrationStatusNone, err - } - if v == nil { - return migrationStatusNone, nil - } - var b []byte - if vv, ok := v.(string); ok { - b = []byte(vv) - } else if vv, ok := v.(serialization.JSON); ok { - b = vv - } else { - return migrationStatusNone, fmt.Errorf("invalid status value") - } - var ms MigrationStatus - if err := json.Unmarshal(b, &ms); err != nil { - return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) - } - return ms, nil -} diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 67f6b8b5e..3f5ef0d5d 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -14,6 +14,7 @@ import ( 
"github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" + "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/stretchr/testify/require" ) @@ -70,34 +71,23 @@ func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) stri } func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { + m := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) + t := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + setState(ctx, t, m, migration.StatusInProgress, "first message") + setState(ctx, t, m, migration.StatusFailed, "last message") + +} + +func setState(ctx context.Context, updateTopic *hazelcast.Topic, statusMap *hazelcast.Map, status migration.Status, msg string) { startTime := MustValue(time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")) - statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) b := MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusInProgress, + Status: status, Report: "status report", Migrations: []migration.Migration{ { Name: "imap5", Type: "IMap", - Status: migration.StatusInProgress, - StartTimestamp: startTime, - EntriesMigrated: 121, - TotalEntries: 1000, - CompletionPercentage: 12.1, - }, - }, - })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) - b = MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusFailed, - Report: "status report", - Migrations: []migration.Migration{ - { - Name: "imap5", - Type: "IMap", - Status: migration.StatusFailed, + Status: status, StartTimestamp: startTime, EntriesMigrated: 141, TotalEntries: 1000, @@ -105,6 +95,6 @@ func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { }, }, })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) // update status map - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusFailed, Message: "last message"})) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) + Must(updateTopic.Publish(ctx, migration.UpdateMessage{Status: status, Message: msg})) } diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index 7b9df4235..a56bfc50f 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -122,3 +122,11 @@ func readPathAsString(path string) (string, error) { func MakeMigrationID() string { return types.NewUUID().String() } + +func MakeStatusMapName(migrationID string) string { + return "__datamigration_" + migrationID +} + +func MakeUpdateTopicName(migrationID string) string { + return UpdateTopic + migrationID +} From 98ea33f3a65f52cdee294f28df592be0f5d4504a Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 11:15:51 +0300 Subject: [PATCH 05/53] add overall completion percentage to status output --- base/commands/migration/common.go | 11 ++++++----- base/commands/migration/status_stages.go | 13 ++++++------- base/commands/migration/status_stages_it_test.go | 6 ++++-- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git 
a/base/commands/migration/common.go b/base/commands/migration/common.go index 295ae357a..09db7b54c 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -11,11 +11,12 @@ import ( ) type MigrationStatus struct { - Status Status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` - Migrations []Migration `json:"migrations"` + Status Status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` + Migrations []Migration `json:"migrations"` + CompletionPercentage float32 `json:"completionPercentage"` } type Migration struct { diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index e94afaebf..9702b1ea8 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -21,7 +21,7 @@ type StatusStages struct { statusMap *hazelcast.Map updateTopic *hazelcast.Topic topicListenerID types.UUID - updateMessageChan chan UpdateMessage + updateMsgChan chan UpdateMessage } func NewStatusStages() *StatusStages { @@ -74,7 +74,7 @@ func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) f if err != nil { return err } - st.updateMessageChan = make(chan UpdateMessage) + st.updateMsgChan = make(chan UpdateMessage) _, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) return err } @@ -85,13 +85,14 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { select { - case msg := <-st.updateMessageChan: + case msg := <-st.updateMsgChan: ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { return fmt.Errorf("reading status: %w", err) } + ec.PrintlnUnnecessary(msg.Message) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - ec.PrintlnUnnecessary(msg.Message) + ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", ms.CompletionPercentage)) ec.PrintlnUnnecessary(ms.Report) if len(ms.Errors) > 0 { ec.PrintlnUnnecessary(fmt.Sprintf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n"))) @@ -140,8 +141,6 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun return ec.AddOutputRows(ctx, rows...) } return nil - } else { - ec.PrintlnUnnecessary(msg.Message) } } } @@ -149,5 +148,5 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun } func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) { - st.updateMessageChan <- event.Value.(UpdateMessage) + st.updateMsgChan <- event.Value.(UpdateMessage) } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 3f5ef0d5d..03668f9cb 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -52,6 +52,7 @@ Hazelcast Data Migration Tool v5.3.0 OK [1/2] Connected to the migration cluster. first message last message +Completion Percentage: 12.123000 status report imap5 IMap FAILED 2023-01-01 00:00:00 141 1000 14.1 OK [2/2] Fetched migration status. 
@@ -81,8 +82,9 @@ func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { func setState(ctx context.Context, updateTopic *hazelcast.Topic, statusMap *hazelcast.Map, status migration.Status, msg string) { startTime := MustValue(time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")) b := MustValue(json.Marshal(migration.MigrationStatus{ - Status: status, - Report: "status report", + Status: status, + Report: "status report", + CompletionPercentage: 12.123, Migrations: []migration.Migration{ { Name: "imap5", From ad4bc1774c17f69ab99b65525524d32eeda29bab Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 11:35:26 +0300 Subject: [PATCH 06/53] refactor --- base/commands/migration/start_stages.go | 74 +++++++------------ .../migration/start_stages_it_test.go | 41 ++++++++-- base/commands/migration/status_stages.go | 2 +- 3 files changed, 63 insertions(+), 54 deletions(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 8ac1fb7df..9b94b14fe 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -106,33 +106,8 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - msg := <-st.updateMessageChan // read the first message - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - ms, err := readMigrationStatus(ctx, st.statusMap) - if ctx.Err() != nil { - if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr - } - return fmt.Errorf("migration failed: %w", err) - } - if err != nil { - return fmt.Errorf("reading status: %w", err) - } - ec.PrintlnUnnecessary(msg.Message) - ec.PrintlnUnnecessary(ms.Report) - for _, l := range ms.Logs { - ec.Logger().Info(l) - } - switch ms.Status { - case StatusComplete: - return nil - case StatusCanceled: - return clcerrors.ErrUserCancelled - case StatusFailed: - return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) - } - } else { - ec.PrintlnUnnecessary(msg.Message) + if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMessageChan); isTerminal { + return err } return nil } @@ -157,26 +132,8 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu for { select { case msg := <-st.updateMessageChan: - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return fmt.Errorf("reading status: %w", err) - } - ec.PrintlnUnnecessary(msg.Message) - ec.PrintlnUnnecessary(ms.Report) - for _, l := range ms.Logs { - ec.Logger().Info(l) - } - switch ms.Status { - case StatusComplete: - return nil - case StatusCanceled: - return clcerrors.ErrUserCancelled - case StatusFailed: - return fmt.Errorf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n")) - } - } else { - ec.PrintlnUnnecessary(msg.Message) + if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg); isTerminal { + return err } case <-ctx.Done(): if err := ctx.Err(); err != nil { @@ -189,3 +146,26 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu } } } + +func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage) (bool, error) { + ec.PrintlnUnnecessary(msg.Message) + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { + ms, err := 
readMigrationStatus(ctx, st.statusMap) + if err != nil { + return true, fmt.Errorf("reading status: %w", err) + } + ec.PrintlnUnnecessary(ms.Report) + for _, l := range ms.Logs { + ec.Logger().Info(l) + } + switch ms.Status { + case StatusComplete: + return true, nil + case StatusCanceled: + return true, clcerrors.ErrUserCancelled + case StatusFailed: + return true, fmt.Errorf("migration failed with following error(s):\n%s", strings.Join(ms.Errors, "\n")) + } + } + return false, nil +} diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 04dd947b9..35c54ee56 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -5,6 +5,7 @@ package migration_test import ( "context" "encoding/json" + "sync" "testing" "time" @@ -33,13 +34,29 @@ func startTest_Successful(t *testing.T) { tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { + var wg sync.WaitGroup + wg.Add(1) go tcx.WithReset(func() { + defer wg.Done() Must(tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes")) }) successfulRunner(tcx, ctx) - for _, m := range []string{"first message", "second message", "last message", "status report"} { - tcx.AssertStdoutContains(m) - } + tcx.AssertStdoutContains(` +Hazelcast Data Migration Tool v5.3.0 +(c) 2023 Hazelcast, Inc. + +Selected data structures in the source cluster will be migrated to the target cluster. + + + OK [1/3] Connected to the migration cluster. +first message + OK [2/3] Started the migration. +second message +last message +status report + OK [3/3] Migrated the cluster. + + OK Migration completed successfully.`) }) } @@ -51,9 +68,21 @@ func startTest_Failure(t *testing.T) { tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) failureRunner(tcx, ctx) - for _, m := range []string{"first message", "second message", "error1", "error2", "fail status report"} { - tcx.AssertStdoutContains(m) - } + tcx.AssertStdoutContains(` +Hazelcast Data Migration Tool v5.3.0 +(c) 2023 Hazelcast, Inc. + +Selected data structures in the source cluster will be migrated to the target cluster. + + + OK [1/3] Connected to the migration cluster. +first message + OK [2/3] Started the migration. 
+second message +fail status report + FAIL Could not migrate the cluster: migration failed with following error(s): +error1 +error2`) }) } diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 9702b1ea8..5af1a7368 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -95,7 +95,7 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", ms.CompletionPercentage)) ec.PrintlnUnnecessary(ms.Report) if len(ms.Errors) > 0 { - ec.PrintlnUnnecessary(fmt.Sprintf("migration failed with following error(s): %s", strings.Join(ms.Errors, "\n"))) + ec.PrintlnUnnecessary(fmt.Sprintf("migration failed with following error(s):\n%s", strings.Join(ms.Errors, "\n"))) } if len(ms.Migrations) > 0 { var rows []output.Row From d0dfafa7c167f535e3ba6703862c0d9ea14dc7b6 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 15:22:28 +0300 Subject: [PATCH 07/53] fix PR comments --- base/commands/migration/common.go | 19 +++----- base/commands/migration/const.go | 6 ++- base/commands/migration/migration_status.go | 4 +- base/commands/migration/start_stages.go | 34 +++++++------- .../migration/start_stages_it_test.go | 20 +++++---- base/commands/migration/status_stages.go | 21 +++++++-- .../migration/status_stages_it_test.go | 45 ++++++++++++++----- base/commands/migration/utils.go | 4 +- 8 files changed, 96 insertions(+), 57 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index 09db7b54c..ca38e6819 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -29,26 +29,19 @@ type Migration struct { CompletionPercentage float64 `json:"completionPercentage"` } -var migrationStatusNone = MigrationStatus{ - Status: StatusNone, - Logs: nil, - Errors: nil, - Report: "", -} - type UpdateMessage struct { Status Status `json:"status"` CompletionPercentage float32 `json:"completionPercentage"` Message string `json:"message"` } -func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (MigrationStatus, error) { +func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*MigrationStatus, error) { v, err := statusMap.Get(ctx, StatusMapEntryName) if err != nil { - return migrationStatusNone, err + return nil, err } if v == nil { - return migrationStatusNone, nil + return nil, nil } var b []byte if vv, ok := v.(string); ok { @@ -56,11 +49,11 @@ func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (Migrati } else if vv, ok := v.(serialization.JSON); ok { b = vv } else { - return migrationStatusNone, fmt.Errorf("invalid status value") + return nil, fmt.Errorf("invalid status value") } var ms MigrationStatus if err := json.Unmarshal(b, &ms); err != nil { - return migrationStatusNone, fmt.Errorf("unmarshaling status: %w", err) + return nil, fmt.Errorf("unmarshaling status: %w", err) } - return ms, nil + return &ms, nil } diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 64b1416b0..2217aa0aa 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -3,14 +3,16 @@ package migration const ( StartQueueName = "__datamigration_start_queue" StatusMapEntryName = "status" - UpdateTopic = "__datamigration_updates_" + StatusMapPrefix = "__datamigration_" + UpdateTopicPrefix = "__datamigration_updates_" MigrationsInProgressList = "__datamigrations_in_progress" ) type 
Status string const ( - StatusNone Status = "" + StatusStarted Status = "STARTED" + Canceling Status = "CANCELING" StatusComplete Status = "COMPLETED" StatusCanceled Status = "CANCELED" StatusFailed Status = "FAILED" diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 964f11d77..026187e1a 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -13,9 +13,9 @@ type StatusCmd struct{} func (s StatusCmd) Unwrappable() {} func (s StatusCmd) Init(cc plug.InitContext) error { - cc.SetCommandUsage("status [flags]") + cc.SetCommandUsage("status") cc.SetCommandGroup("migration") - help := "Get status of the data migration" + help := "Get status of the data migration in progress" cc.SetCommandHelp(help, help) cc.SetPositionalArgCount(0, 0) return nil diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 9b94b14fe..0b7e8e273 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "strings" "time" clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -19,14 +18,14 @@ import ( ) type StartStages struct { - migrationID string - configDir string - ci *hazelcast.ClientInternal - startQueue *hazelcast.Queue - statusMap *hazelcast.Map - updateTopic *hazelcast.Topic - topicListenerID types.UUID - updateMessageChan chan UpdateMessage + migrationID string + configDir string + ci *hazelcast.ClientInternal + startQueue *hazelcast.Queue + statusMap *hazelcast.Map + updateTopic *hazelcast.Topic + topicListenerID types.UUID + updateMsgChan chan UpdateMessage } var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ @@ -85,14 +84,19 @@ func (st *StartStages) connectStage(ctx context.Context, ec plug.ExecContext) fu if err != nil { return err } - st.updateMessageChan = make(chan UpdateMessage) - _, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) + st.updateMsgChan = make(chan UpdateMessage) + st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) return err } } func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { - st.updateMessageChan <- event.Value.(UpdateMessage) + var u UpdateMessage + err := json.Unmarshal(event.Value.(serialization.JSON), &u) + if err != nil { + panic(fmt.Errorf("receiving update from migration cluster: %w", err)) + } + st.updateMsgChan <- u } func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { @@ -106,7 +110,7 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMessageChan); isTerminal { + if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMsgChan); isTerminal { return err } return nil @@ -131,7 +135,7 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { select { - case msg := <-st.updateMessageChan: + case msg := <-st.updateMsgChan: if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg); isTerminal { return err } @@ -164,7 +168,7 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont case StatusCanceled: return true, clcerrors.ErrUserCancelled case 
StatusFailed: - return true, fmt.Errorf("migration failed with following error(s):\n%s", strings.Join(ms.Errors, "\n")) + return true, fmt.Errorf("migration failed") } } return false, nil diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 35c54ee56..ae02194d6 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -79,10 +79,7 @@ Selected data structures in the source cluster will be migrated to the target cl first message OK [2/3] Started the migration. second message -fail status report - FAIL Could not migrate the cluster: migration failed with following error(s): -error1 -error2`) +fail status report`) }) } @@ -91,8 +88,10 @@ func successfulRunner(tcx it.TestContext, ctx context.Context) { go findMigrationID(ctx, tcx, c) migrationID := <-c topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "second message"})) + msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 10})) + Must(topic.Publish(ctx, serialization.JSON(msg))) + msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "second message", CompletionPercentage: 20})) + Must(topic.Publish(ctx, serialization.JSON(msg))) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) b := MustValue(json.Marshal(migration.MigrationStatus{ Status: migration.StatusComplete, @@ -100,7 +99,8 @@ func successfulRunner(tcx it.TestContext, ctx context.Context) { Logs: []string{"log1", "log2"}, })) Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message"})) + msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) + Must(topic.Publish(ctx, serialization.JSON(msg))) } func failureRunner(tcx it.TestContext, ctx context.Context) { @@ -108,7 +108,8 @@ func failureRunner(tcx it.TestContext, ctx context.Context) { go findMigrationID(ctx, tcx, c) migrationID := <-c topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message"})) + msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 20})) + Must(topic.Publish(ctx, serialization.JSON(msg))) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) b := MustValue(json.Marshal(migration.MigrationStatus{ Status: migration.StatusFailed, @@ -116,7 +117,8 @@ func failureRunner(tcx it.TestContext, ctx context.Context) { Errors: []string{"error1", "error2"}, })) Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - Must(topic.Publish(ctx, migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message"})) + msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message", CompletionPercentage: 60})) + Must(topic.Publish(ctx, 
serialization.JSON(msg)))
}

func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) {
diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go
index 5af1a7368..0ea9fc1c5 100644
--- a/base/commands/migration/status_stages.go
+++ b/base/commands/migration/status_stages.go
@@ -2,6 +2,7 @@ package migration
 import (
 "context"
+ "encoding/json"
 "fmt"
 "slices"
 "strings"
@@ -11,6 +12,7 @@ import (
 "github.com/hazelcast/hazelcast-commandline-client/internal/plug"
 serialization2 "github.com/hazelcast/hazelcast-commandline-client/internal/serialization"
 "github.com/hazelcast/hazelcast-go-client"
+ "github.com/hazelcast/hazelcast-go-client/serialization"
 "github.com/hazelcast/hazelcast-go-client/types"
 )
@@ -64,8 +66,16 @@ func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) f
 if err != nil {
 return err
 }
- m := all[0].(MigrationInProgress)
- st.migrationID = m.MigrationID
+ if len(all) == 0 {
+ return fmt.Errorf("there are no migrations in progress on the migration cluster")
+ }
+ var mip MigrationInProgress
+ m := all[0].(serialization.JSON)
+ err = json.Unmarshal(m, &mip)
+ if err != nil {
+ return fmt.Errorf("parsing migration in progress: %w", err)
+ }
+ st.migrationID = mip.MigrationID
 st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID))
 if err != nil {
 return err
@@ -148,5 +158,10 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun
 }

 func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) {
- st.updateMsgChan <- event.Value.(UpdateMessage)
+ var u UpdateMessage
+ err := json.Unmarshal(event.Value.(serialization.JSON), &u)
+ if err != nil {
+ panic(fmt.Errorf("receiving update from migration cluster: %w", err))
+ }
+ st.updateMsgChan <- u
 }
diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go
index 03668f9cb..7fae20f6b 100644
--- a/base/commands/migration/status_stages_it_test.go
+++ b/base/commands/migration/status_stages_it_test.go
@@ -25,12 +25,28 @@ func TestStatus(t *testing.T) {
 f func(t *testing.T)
 }{
 {name: "status", f: statusTest},
+ {name: "noMigrationsStatus", f: noMigrationsStatusTest},
 }
 for _, tc := range testCases {
 t.Run(tc.name, tc.f)
 }
 }

+func noMigrationsStatusTest(t *testing.T) {
+ tcx := it.TestContext{T: t}
+ ctx := context.Background()
+ tcx.Tester(func(tcx it.TestContext) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go tcx.WithReset(func() {
+ defer wg.Done()
+ tcx.CLC().Execute(ctx, "status")
+ })
+ wg.Wait()
+ tcx.AssertStdoutContains("there are no migrations in progress on the migration cluster")
+ })
+}
+
 func statusTest(t *testing.T) {
 tcx := it.TestContext{T: t}
 ctx := context.Background()
@@ -43,7 +59,7 @@ func statusTest(t *testing.T) {
 Must(tcx.CLC().Execute(ctx, "status"))
 })
 time.Sleep(1 * time.Second) // give time to status command to register its topic listener
- statusRunner(mID, tcx, ctx)
+ statusRunner(t, mID, tcx, ctx)
 wg.Wait()
 tcx.AssertStdoutContains(`
Hazelcast Data Migration Tool v5.3.0
@@ -64,24 +80,30 @@ OK`)

 func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string {
 mID := migration.MakeMigrationID()
 l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList))
- ok := MustValue(l.Add(ctx, migration.MigrationInProgress{
+ m := MustValue(json.Marshal(migration.MigrationInProgress{
 MigrationID: mID,
 }))
+ ok := MustValue(l.Add(ctx, serialization.JSON(m)))
 require.Equal(t, true, ok)
 return mID 
} -func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { - m := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) - t := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - setState(ctx, t, m, migration.StatusInProgress, "first message") - setState(ctx, t, m, migration.StatusFailed, "last message") - +func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) + topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + setState(ctx, topic, statusMap, migration.StatusInProgress, "first message") + setState(ctx, topic, statusMap, migration.StatusFailed, "last message") + l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) + m := MustValue(json.Marshal(migration.MigrationInProgress{ + MigrationID: migrationID, + })) + ok := MustValue(l.Remove(ctx, serialization.JSON(m))) + require.Equal(t, true, ok) } func setState(ctx context.Context, updateTopic *hazelcast.Topic, statusMap *hazelcast.Map, status migration.Status, msg string) { startTime := MustValue(time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")) - b := MustValue(json.Marshal(migration.MigrationStatus{ + st := MustValue(json.Marshal(migration.MigrationStatus{ Status: status, Report: "status report", CompletionPercentage: 12.123, @@ -97,6 +119,7 @@ func setState(ctx context.Context, updateTopic *hazelcast.Topic, statusMap *haze }, }, })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - Must(updateTopic.Publish(ctx, migration.UpdateMessage{Status: status, Message: msg})) + message := MustValue(json.Marshal(migration.UpdateMessage{Status: status, Message: msg, CompletionPercentage: 80})) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) + Must(updateTopic.Publish(ctx, serialization.JSON(message))) } diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index a56bfc50f..98a084b16 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -124,9 +124,9 @@ func MakeMigrationID() string { } func MakeStatusMapName(migrationID string) string { - return "__datamigration_" + migrationID + return StatusMapPrefix + migrationID } func MakeUpdateTopicName(migrationID string) string { - return UpdateTopic + migrationID + return UpdateTopicPrefix + migrationID } From 318fff5a68b3560f34cb10c21312e35cdc2bd717 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 15:36:17 +0300 Subject: [PATCH 08/53] fix dependency --- base/commands/migration/status_stages.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 0ea9fc1c5..3f679e307 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" "fmt" - "slices" "strings" + "golang.org/x/exp/slices" + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" "github.com/hazelcast/hazelcast-commandline-client/internal/output" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" From b529b9a28c7def9f4fd032d0cff1b32d1b2caeda Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 20:47:28 +0300 Subject: [PATCH 09/53] fix PR comment: change error msg --- base/commands/migration/common.go | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index ca38e6819..79a13ca3f 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -53,7 +53,7 @@ func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*Migrat } var ms MigrationStatus if err := json.Unmarshal(b, &ms); err != nil { - return nil, fmt.Errorf("unmarshaling status: %w", err) + return nil, fmt.Errorf("parsing migration status: %w", err) } return &ms, nil } From d5591a030703633a6e871e9362197e17f2a20a79 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 20:54:44 +0300 Subject: [PATCH 10/53] fix PR comments --- base/commands/migration/status_stages.go | 53 +------------------ .../migration/status_stages_it_test.go | 4 +- 2 files changed, 4 insertions(+), 53 deletions(-) diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 3f679e307..3e9c60daf 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -4,14 +4,11 @@ import ( "context" "encoding/json" "fmt" - "strings" "golang.org/x/exp/slices" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" - "github.com/hazelcast/hazelcast-commandline-client/internal/output" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" - serialization2 "github.com/hazelcast/hazelcast-commandline-client/internal/serialization" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/hazelcast/hazelcast-go-client/types" @@ -86,7 +83,7 @@ func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) f return err } st.updateMsgChan = make(chan UpdateMessage) - _, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) + st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) return err } } @@ -102,55 +99,9 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun return fmt.Errorf("reading status: %w", err) } ec.PrintlnUnnecessary(msg.Message) + ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", msg.CompletionPercentage)) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", ms.CompletionPercentage)) ec.PrintlnUnnecessary(ms.Report) - if len(ms.Errors) > 0 { - ec.PrintlnUnnecessary(fmt.Sprintf("migration failed with following error(s):\n%s", strings.Join(ms.Errors, "\n"))) - } - if len(ms.Migrations) > 0 { - var rows []output.Row - for _, m := range ms.Migrations { - rows = append(rows, output.Row{ - output.Column{ - Name: "Name", - Type: serialization2.TypeString, - Value: m.Name, - }, - output.Column{ - Name: "Type", - Type: serialization2.TypeString, - Value: m.Type, - }, - output.Column{ - Name: "Status", - Type: serialization2.TypeString, - Value: string(m.Status), - }, - output.Column{ - Name: "Start Timestamp", - Type: serialization2.TypeJavaLocalDateTime, - Value: types.LocalDateTime(m.StartTimestamp), - }, - output.Column{ - Name: "Entries Migrated", - Type: serialization2.TypeInt32, - Value: int32(m.EntriesMigrated), - }, - output.Column{ - Name: "Total Entries", - Type: serialization2.TypeInt32, - Value: int32(m.TotalEntries), - }, - output.Column{ - Name: "Completion Percentage", - Type: serialization2.TypeFloat32, - Value: float32(m.CompletionPercentage), - }, - }) - } - return ec.AddOutputRows(ctx, rows...) 
- } return nil } } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 7fae20f6b..8584c00bc 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -67,10 +67,10 @@ Hazelcast Data Migration Tool v5.3.0 OK [1/2] Connected to the migration cluster. first message +Completion Percentage: 80.000000 last message -Completion Percentage: 12.123000 +Completion Percentage: 80.000000 status report -imap5 IMap FAILED 2023-01-01 00:00:00 141 1000 14.1 OK [2/2] Fetched migration status. OK`) From 48a42ce85eb11a666198b6b903ac772cb1609562 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 21:17:56 +0300 Subject: [PATCH 11/53] subscribe to topic if status is not terminal --- base/commands/migration/common.go | 11 ++-- base/commands/migration/status_stages.go | 15 +++-- .../migration/status_stages_it_test.go | 55 +++++++++---------- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index 79a13ca3f..218b076e3 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -11,12 +11,11 @@ import ( ) type MigrationStatus struct { - Status Status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` - Migrations []Migration `json:"migrations"` - CompletionPercentage float32 `json:"completionPercentage"` + Status Status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` + CompletionPercentage float32 `json:"completionPercentage"` } type Migration struct { diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 3e9c60daf..c9ecad158 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -79,17 +79,22 @@ func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) f return err } st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) - if err != nil { - return err - } - st.updateMsgChan = make(chan UpdateMessage) - st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) return err } } func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { return func(stage.Statuser) error { + ms, err := readMigrationStatus(ctx, st.statusMap) + if err != nil { + return fmt.Errorf("reading status: %w", err) + } + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, ms.Status) { + ec.PrintlnUnnecessary(ms.Report) + return nil + } + st.updateMsgChan = make(chan UpdateMessage) + st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { select { diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 8584c00bc..c49de8139 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -14,7 +14,6 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" - "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/stretchr/testify/require" ) @@ -67,9 +66,9 @@ Hazelcast Data Migration Tool v5.3.0 OK [1/2] Connected to the migration cluster. first message -Completion Percentage: 80.000000 +Completion Percentage: 60.000000 last message -Completion Percentage: 80.000000 +Completion Percentage: 100.000000 status report OK [2/2] Fetched migration status. @@ -78,6 +77,7 @@ OK`) } func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { + // create a migration in the __datamigrations_in_progress list mID := migration.MakeMigrationID() l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) m := MustValue(json.Marshal(migration.MigrationInProgress{ @@ -85,14 +85,34 @@ func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) stri })) ok := MustValue(l.Add(ctx, serialization.JSON(m))) require.Equal(t, true, ok) + // create a record in the status map + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(mID))) + st := MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusInProgress, + Report: "status report", + CompletionPercentage: 60, + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) return mID } func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { + // publish the first message in the update topic + updateTopic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) + msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 60})) + Must(updateTopic.Publish(ctx, serialization.JSON(msg))) + // create a terminal record in status map statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) - topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - setState(ctx, topic, statusMap, migration.StatusInProgress, "first message") - setState(ctx, topic, statusMap, migration.StatusFailed, "last message") + st := MustValue(json.Marshal(migration.MigrationStatus{ + Status: migration.StatusComplete, + Report: "status report", + CompletionPercentage: 100, + })) + Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) + // publish the second message in the update topic + msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) + Must(updateTopic.Publish(ctx, serialization.JSON(msg))) + // remove the migration from the __datamigrations_in_progress list l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) m := MustValue(json.Marshal(migration.MigrationInProgress{ MigrationID: migrationID, @@ -100,26 +120,3 @@ func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx cont ok := MustValue(l.Remove(ctx, serialization.JSON(m))) require.Equal(t, true, ok) } - -func setState(ctx context.Context, updateTopic *hazelcast.Topic, statusMap *hazelcast.Map, status migration.Status, msg string) { - startTime := MustValue(time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")) - st := MustValue(json.Marshal(migration.MigrationStatus{ - Status: status, - Report: "status report", - CompletionPercentage: 
12.123, - Migrations: []migration.Migration{ - { - Name: "imap5", - Type: "IMap", - Status: status, - StartTimestamp: startTime, - EntriesMigrated: 141, - TotalEntries: 1000, - CompletionPercentage: 14.1, - }, - }, - })) - message := MustValue(json.Marshal(migration.UpdateMessage{Status: status, Message: msg, CompletionPercentage: 80})) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) - Must(updateTopic.Publish(ctx, serialization.JSON(message))) -} From 999a3bfc98a13046d24672251010b903dbff6604 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 7 Sep 2023 21:33:11 +0300 Subject: [PATCH 12/53] write migration report to file --- base/commands/migration/start_stages.go | 17 +++++++++++++++++ base/commands/migration/start_stages_it_test.go | 16 +++++++++------- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 0b7e8e273..b26fc7922 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "os" "time" clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -159,6 +160,12 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont return true, fmt.Errorf("reading status: %w", err) } ec.PrintlnUnnecessary(ms.Report) + name := fmt.Sprintf("migration_report_%s", st.migrationID) + err = saveReportToFile(name, ms.Report) + if err != nil { + return true, fmt.Errorf("writing report to file: %w", err) + } + ec.PrintlnUnnecessary(fmt.Sprintf("migration report saved to file: %s", name)) for _, l := range ms.Logs { ec.Logger().Info(l) } @@ -173,3 +180,13 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont } return false, nil } + +func saveReportToFile(fileName, report string) error { + f, err := os.Create(fmt.Sprintf(fileName)) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(report) + return err +} diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index ae02194d6..41dd1c359 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -5,6 +5,7 @@ package migration_test import ( "context" "encoding/json" + "fmt" "sync" "testing" "time" @@ -40,8 +41,11 @@ func startTest_Successful(t *testing.T) { defer wg.Done() Must(tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes")) }) - successfulRunner(tcx, ctx) - tcx.AssertStdoutContains(` + c := make(chan string, 1) + go findMigrationID(ctx, tcx, c) + migrationID := <-c + successfulRunner(migrationID, tcx, ctx) + tcx.AssertStdoutContains(fmt.Sprintf(` Hazelcast Data Migration Tool v5.3.0 (c) 2023 Hazelcast, Inc. @@ -54,9 +58,10 @@ first message second message last message status report +migration report saved to file: migration_report_%s OK [3/3] Migrated the cluster. 
- OK Migration completed successfully.`) + OK Migration completed successfully.`, migrationID)) }) } @@ -83,10 +88,7 @@ fail status report`) }) } -func successfulRunner(tcx it.TestContext, ctx context.Context) { - c := make(chan string, 1) - go findMigrationID(ctx, tcx, c) - migrationID := <-c +func successfulRunner(migrationID string, tcx it.TestContext, ctx context.Context) { topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 10})) Must(topic.Publish(ctx, serialization.JSON(msg))) From aed1d0178b5e590730c0dc900b2cada378ad495a Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 8 Sep 2023 09:32:39 +0300 Subject: [PATCH 13/53] refactor remove listener --- base/commands/migration/migration_start.go | 9 ++++++++- base/commands/migration/start_stages.go | 4 ++-- base/commands/migration/status_stages.go | 6 ++---- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 988991d93..01c642ee6 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -11,6 +11,7 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-commandline-client/internal/prompt" + "github.com/hazelcast/hazelcast-go-client" ) type StartCmd struct{} @@ -45,7 +46,13 @@ Selected data structures in the source cluster will be migrated to the target cl } } ec.PrintlnUnnecessary("") - sts := NewStartStages(MakeMigrationID(), ec.Args()[0]) + var updateTopic *hazelcast.Topic + sts := NewStartStages(updateTopic, MakeMigrationID(), ec.Args()[0]) + if !sts.topicListenerID.Default() && sts.updateTopic != nil { + if err := sts.updateTopic.RemoveListener(ctx, sts.topicListenerID); err != nil { + return err + } + } sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) 
if err := stage.Execute(ctx, ec, sp); err != nil { return err diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index b26fc7922..770a52b8d 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -33,11 +33,12 @@ var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout w "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func NewStartStages(migrationID, configDir string) *StartStages { +func NewStartStages(updateTopic *hazelcast.Topic, migrationID, configDir string) *StartStages { if migrationID == "" { panic("migrationID is required") } return &StartStages{ + updateTopic: updateTopic, migrationID: migrationID, configDir: configDir, } @@ -133,7 +134,6 @@ func makeConfigBundle(configDir, migrationID string) (serialization.JSON, error) func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { return func(stage.Statuser) error { - defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) for { select { case msg := <-st.updateMsgChan: diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index c9ecad158..cdf7f279f 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -11,7 +11,6 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" - "github.com/hazelcast/hazelcast-go-client/types" ) type StatusStages struct { @@ -20,7 +19,6 @@ type StatusStages struct { migrationsInProgressList *hazelcast.List statusMap *hazelcast.Map updateTopic *hazelcast.Topic - topicListenerID types.UUID updateMsgChan chan UpdateMessage } @@ -94,8 +92,8 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun return nil } st.updateMsgChan = make(chan UpdateMessage) - st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) - defer st.updateTopic.RemoveListener(ctx, st.topicListenerID) + id, err := st.updateTopic.AddMessageListener(ctx, st.topicListener) + defer st.updateTopic.RemoveListener(ctx, id) for { select { case msg := <-st.updateMsgChan: From 75654cdef86331b138e2986c3ddb4b3c262bb227 Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 8 Sep 2023 15:17:41 +0300 Subject: [PATCH 14/53] add debug log files --- base/commands/migration/const.go | 1 + base/commands/migration/start_stages.go | 31 +++++++++++++++-- .../migration/start_stages_it_test.go | 33 +++++++++++++++++-- internal/it/test_context.go | 2 +- internal/it/util.go | 2 +- 5 files changed, 63 insertions(+), 6 deletions(-) diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 2217aa0aa..b1f320b4b 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -5,6 +5,7 @@ const ( StatusMapEntryName = "status" StatusMapPrefix = "__datamigration_" UpdateTopicPrefix = "__datamigration_updates_" + DebugLogsListPrefix = "__datamigration_debug_logs_" MigrationsInProgressList = "__datamigrations_in_progress" ) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 770a52b8d..2ed4e37d3 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -10,6 +10,7 @@ import ( clcerrors 
"github.com/hazelcast/hazelcast-commandline-client/errors" "github.com/hazelcast/hazelcast-go-client" + "github.com/hazelcast/hazelcast-go-client/cluster" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/hazelcast/hazelcast-go-client/types" "golang.org/x/exp/slices" @@ -161,10 +162,12 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont } ec.PrintlnUnnecessary(ms.Report) name := fmt.Sprintf("migration_report_%s", st.migrationID) - err = saveReportToFile(name, ms.Report) - if err != nil { + if err = saveReportToFile(name, ms.Report); err != nil { return true, fmt.Errorf("writing report to file: %w", err) } + if err = st.saveDebugLogs(ctx, st.ci.OrderedMembers()); err != nil { + return true, fmt.Errorf("writing debug logs to file: %w", err) + } ec.PrintlnUnnecessary(fmt.Sprintf("migration report saved to file: %s", name)) for _, l := range ms.Logs { ec.Logger().Info(l) @@ -190,3 +193,27 @@ func saveReportToFile(fileName, report string) error { _, err = f.WriteString(report) return err } + +func (st *StartStages) saveDebugLogs(ctx context.Context, members []cluster.MemberInfo) error { + for _, m := range members { + f, err := os.Create(fmt.Sprintf("%s%s.log", DebugLogsListPrefix, m.UUID.String())) + if err != nil { + return err + } + defer f.Close() + l, err := st.ci.Client().GetList(ctx, DebugLogsListPrefix+m.UUID.String()) + if err != nil { + return err + } + logs, err := l.GetAll(ctx) + if err != nil { + return err + } + for _, log := range logs { + if _, err = fmt.Fprintf(f, log.(string)); err != nil { + return err + } + } + } + return nil +} diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 41dd1c359..03ea2f8cb 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "os" "sync" "testing" "time" @@ -15,7 +16,9 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" + hz "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" + "github.com/stretchr/testify/require" ) func TestMigration(t *testing.T) { @@ -41,10 +44,12 @@ func startTest_Successful(t *testing.T) { defer wg.Done() Must(tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes")) }) + c := make(chan string, 1) go findMigrationID(ctx, tcx, c) migrationID := <-c - successfulRunner(migrationID, tcx, ctx) + fileNames := successfulRunner(t, migrationID, tcx, ctx) + wg.Wait() tcx.AssertStdoutContains(fmt.Sprintf(` Hazelcast Data Migration Tool v5.3.0 (c) 2023 Hazelcast, Inc. @@ -62,6 +67,11 @@ migration report saved to file: migration_report_%s OK [3/3] Migrated the cluster. 
OK Migration completed successfully.`, migrationID)) + tcx.WithReset(func() { + for _, n := range fileNames { + require.Equal(t, true, fileExists(n)) + } + }) }) } @@ -88,7 +98,25 @@ fail status report`) }) } -func successfulRunner(migrationID string, tcx it.TestContext, ctx context.Context) { +func fileExists(filename string) bool { + a := MustValue(os.Getwd()) + fmt.Println(a) + _, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + Must(os.Remove(filename)) + return true +} + +func successfulRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) []string { + var fileNames []string + for _, m := range hz.NewClientInternal(tcx.Client).OrderedMembers() { + l := MustValue(tcx.Client.GetList(ctx, migration.DebugLogsListPrefix+m.UUID.String())) + require.Equal(t, true, MustValue(l.AddAll(ctx, "log1\n", "log2\n", "log3\n"))) + fileNames = append(fileNames, fmt.Sprintf("%s%s.log", migration.DebugLogsListPrefix, m.UUID.String())) + } + fileNames = append(fileNames, fmt.Sprintf("migration_report_%s", migrationID)) topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 10})) Must(topic.Publish(ctx, serialization.JSON(msg))) @@ -103,6 +131,7 @@ func successfulRunner(migrationID string, tcx it.TestContext, ctx context.Contex Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) Must(topic.Publish(ctx, serialization.JSON(msg))) + return fileNames } func failureRunner(tcx it.TestContext, ctx context.Context) { diff --git a/internal/it/test_context.go b/internal/it/test_context.go index 6acc46406..4ac68ead9 100644 --- a/internal/it/test_context.go +++ b/internal/it/test_context.go @@ -120,7 +120,7 @@ func (tcx TestContext) Tester(f func(tcx TestContext)) { tcx.Cluster = defaultViridianTestCluster.Launch(tcx.T) tcx.Viridian = defaultViridianTestCluster.cls.(*viridianTestCluster).api } else { - tcx.Cluster = defaultDedicatedTestCluster.Launch(tcx.T) + tcx.Cluster = DefaultDedicatedTestCluster.Launch(tcx.T) } } if tcx.ClientConfig == nil { diff --git a/internal/it/util.go b/internal/it/util.go index 14746c49b..b1aa4b55d 100644 --- a/internal/it/util.go +++ b/internal/it/util.go @@ -57,7 +57,7 @@ func UniqueClusterName() string { var defaultDedicatedClusterName = UniqueClusterName() var rc *RemoteControllerClientWrapper var rcMu = &sync.RWMutex{} -var defaultDedicatedTestCluster = NewSingletonTestCluster(defaultDedicatedClusterName, func() TestCluster { +var DefaultDedicatedTestCluster = NewSingletonTestCluster(defaultDedicatedClusterName, func() TestCluster { port := NextPort() return rc.startNewCluster(MemberCount(), XMLConfig(defaultDedicatedClusterName, port), port) }) From dc45c7ab26cd55a6dc92f3ee424aefff7d1de0e8 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 11:23:17 +0300 Subject: [PATCH 15/53] add progress --- base/commands/migration/start_stages.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 2ed4e37d3..9f6697d52 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -103,7 +103,7 @@ func (st *StartStages) topicListener(event 
*hazelcast.MessagePublished) { } func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { - return func(stage.Statuser) error { + return func(status stage.Statuser) error { cb, err := makeConfigBundle(st.configDir, st.migrationID) if err != nil { return err @@ -113,7 +113,7 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMsgChan); isTerminal { + if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMsgChan, status); isTerminal { return err } return nil @@ -134,11 +134,11 @@ func makeConfigBundle(configDir, migrationID string) (serialization.JSON, error) } func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { - return func(stage.Statuser) error { + return func(status stage.Statuser) error { for { select { case msg := <-st.updateMsgChan: - if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg); isTerminal { + if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg, status); isTerminal { return err } case <-ctx.Done(): @@ -153,7 +153,8 @@ func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) fu } } -func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage) (bool, error) { +func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage, status stage.Statuser) (bool, error) { + status.SetProgress(msg.CompletionPercentage) ec.PrintlnUnnecessary(msg.Message) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { ms, err := readMigrationStatus(ctx, st.statusMap) From 243f7b1aa770bb101bda7d82b2904d301496a824 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 11:39:29 +0300 Subject: [PATCH 16/53] add flag for output dir of report file --- base/commands/migration/const.go | 2 ++ base/commands/migration/migration_start.go | 3 ++- base/commands/migration/start_stages.go | 15 ++++++++++----- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index b1f320b4b..6c584a414 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -19,3 +19,5 @@ const ( StatusFailed Status = "FAILED" StatusInProgress Status = "IN_PROGRESS" ) + +const flagOutputDir = "output-dir" diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 01c642ee6..7beea3a3d 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -25,6 +25,7 @@ func (StartCmd) Init(cc plug.InitContext) error { cc.SetCommandHelp(help, help) cc.SetPositionalArgCount(1, 1) cc.AddBoolFlag(clc.FlagAutoYes, "", false, false, "start the migration without confirmation") + cc.AddStringFlag(flagOutputDir, "o", "", false, "output directory for the migration report, if not given current directory is used") return nil } @@ -47,7 +48,7 @@ Selected data structures in the source cluster will be migrated to the target cl } ec.PrintlnUnnecessary("") var updateTopic *hazelcast.Topic - sts := NewStartStages(updateTopic, MakeMigrationID(), ec.Args()[0]) + sts := NewStartStages(updateTopic, MakeMigrationID(), ec.Args()[0], ec.Props().GetString(flagOutputDir)) if !sts.topicListenerID.Default() && sts.updateTopic != nil { if err 
:= sts.updateTopic.RemoveListener(ctx, sts.topicListenerID); err != nil { return err diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 9f6697d52..fc913ee05 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -28,20 +28,22 @@ type StartStages struct { updateTopic *hazelcast.Topic topicListenerID types.UUID updateMsgChan chan UpdateMessage + reportOutputDir string } var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func NewStartStages(updateTopic *hazelcast.Topic, migrationID, configDir string) *StartStages { +func NewStartStages(updateTopic *hazelcast.Topic, migrationID, configDir, reportOutputDir string) *StartStages { if migrationID == "" { panic("migrationID is required") } return &StartStages{ - updateTopic: updateTopic, - migrationID: migrationID, - configDir: configDir, + updateTopic: updateTopic, + migrationID: migrationID, + configDir: configDir, + reportOutputDir: reportOutputDir, } } @@ -162,7 +164,10 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont return true, fmt.Errorf("reading status: %w", err) } ec.PrintlnUnnecessary(ms.Report) - name := fmt.Sprintf("migration_report_%s", st.migrationID) + var name string + if st.reportOutputDir == "" { + name = fmt.Sprintf("migration_report_%s", st.migrationID) + } if err = saveReportToFile(name, ms.Report); err != nil { return true, fmt.Errorf("writing report to file: %w", err) } From db210e3b11b2d33a69f38630c9600aa2ea9fcff6 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 11:40:12 +0300 Subject: [PATCH 17/53] add flag for output dir of report file --- base/commands/migration/start_stages.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index fc913ee05..4e7f6346d 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -166,7 +166,7 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont ec.PrintlnUnnecessary(ms.Report) var name string if st.reportOutputDir == "" { - name = fmt.Sprintf("migration_report_%s", st.migrationID) + name = fmt.Sprintf("migration_report_%s.txt", st.migrationID) } if err = saveReportToFile(name, ms.Report); err != nil { return true, fmt.Errorf("writing report to file: %w", err) From e42e56cb6a5f33f2f545d6806ff442c4e0b45e81 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 11:50:11 +0300 Subject: [PATCH 18/53] save debug logs to clc log file --- base/commands/migration/start_stages.go | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 4e7f6346d..f885c5a0a 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -171,7 +171,7 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont if err = saveReportToFile(name, ms.Report); err != nil { return true, fmt.Errorf("writing report to file: %w", err) } - if err = st.saveDebugLogs(ctx, st.ci.OrderedMembers()); err != nil { + if err = st.saveDebugLogs(ctx, ec, st.ci.OrderedMembers()); err != nil { return true, fmt.Errorf("writing debug logs to file: %w", 
err) } ec.PrintlnUnnecessary(fmt.Sprintf("migration report saved to file: %s", name)) @@ -200,13 +200,8 @@ func saveReportToFile(fileName, report string) error { return err } -func (st *StartStages) saveDebugLogs(ctx context.Context, members []cluster.MemberInfo) error { +func (st *StartStages) saveDebugLogs(ctx context.Context, ec plug.ExecContext, members []cluster.MemberInfo) error { for _, m := range members { - f, err := os.Create(fmt.Sprintf("%s%s.log", DebugLogsListPrefix, m.UUID.String())) - if err != nil { - return err - } - defer f.Close() l, err := st.ci.Client().GetList(ctx, DebugLogsListPrefix+m.UUID.String()) if err != nil { return err @@ -215,10 +210,8 @@ func (st *StartStages) saveDebugLogs(ctx context.Context, members []cluster.Memb if err != nil { return err } - for _, log := range logs { - if _, err = fmt.Fprintf(f, log.(string)); err != nil { - return err - } + for _, l := range logs { + ec.Logger().Debugf(l.(string)) } } return nil From 45c4933728a25305a7da9b119c8a35e6015eba3e Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 11:57:01 +0300 Subject: [PATCH 19/53] fix tests --- .../migration/start_stages_it_test.go | 28 ++++++++----------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 03ea2f8cb..e0f842e3d 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -16,7 +16,6 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" - hz "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/stretchr/testify/require" ) @@ -48,7 +47,7 @@ func startTest_Successful(t *testing.T) { c := make(chan string, 1) go findMigrationID(ctx, tcx, c) migrationID := <-c - fileNames := successfulRunner(t, migrationID, tcx, ctx) + migrationReport := successfulRunner(migrationID, tcx, ctx) wg.Wait() tcx.AssertStdoutContains(fmt.Sprintf(` Hazelcast Data Migration Tool v5.3.0 @@ -63,14 +62,12 @@ first message second message last message status report -migration report saved to file: migration_report_%s +migration report saved to file: migration_report_%s.txt OK [3/3] Migrated the cluster. OK Migration completed successfully.`, migrationID)) tcx.WithReset(func() { - for _, n := range fileNames { - require.Equal(t, true, fileExists(n)) - } + require.Equal(t, true, fileExists(migrationReport)) }) }) } @@ -82,7 +79,7 @@ func startTest_Failure(t *testing.T) { go tcx.WithReset(func() { tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) - failureRunner(tcx, ctx) + migrationReport := failureRunner(tcx, ctx) tcx.AssertStdoutContains(` Hazelcast Data Migration Tool v5.3.0 (c) 2023 Hazelcast, Inc. @@ -95,6 +92,9 @@ first message OK [2/3] Started the migration. 
second message fail status report`) + tcx.WithReset(func() { + require.Equal(t, true, fileExists(migrationReport)) + }) }) } @@ -109,14 +109,7 @@ func fileExists(filename string) bool { return true } -func successfulRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) []string { - var fileNames []string - for _, m := range hz.NewClientInternal(tcx.Client).OrderedMembers() { - l := MustValue(tcx.Client.GetList(ctx, migration.DebugLogsListPrefix+m.UUID.String())) - require.Equal(t, true, MustValue(l.AddAll(ctx, "log1\n", "log2\n", "log3\n"))) - fileNames = append(fileNames, fmt.Sprintf("%s%s.log", migration.DebugLogsListPrefix, m.UUID.String())) - } - fileNames = append(fileNames, fmt.Sprintf("migration_report_%s", migrationID)) +func successfulRunner(migrationID string, tcx it.TestContext, ctx context.Context) string { topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 10})) Must(topic.Publish(ctx, serialization.JSON(msg))) @@ -131,10 +124,10 @@ func successfulRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) Must(topic.Publish(ctx, serialization.JSON(msg))) - return fileNames + return fmt.Sprintf("migration_report_%s.txt", migrationID) } -func failureRunner(tcx it.TestContext, ctx context.Context) { +func failureRunner(tcx it.TestContext, ctx context.Context) string { c := make(chan string, 1) go findMigrationID(ctx, tcx, c) migrationID := <-c @@ -150,6 +143,7 @@ func failureRunner(tcx it.TestContext, ctx context.Context) { Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message", CompletionPercentage: 60})) Must(topic.Publish(ctx, serialization.JSON(msg))) + return fmt.Sprintf("migration_report_%s.txt", migrationID) } func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { From dfaddaf6a766f7481cc44f42d5f794e5da19bdab Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 16:57:15 +0300 Subject: [PATCH 20/53] fix PR comment --- base/commands/migration/status_stages.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index cdf7f279f..158c0c647 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -97,13 +97,13 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun for { select { case msg := <-st.updateMsgChan: - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return fmt.Errorf("reading status: %w", err) - } ec.PrintlnUnnecessary(msg.Message) ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", msg.CompletionPercentage)) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { + ms, err := readMigrationStatus(ctx, st.statusMap) + if err != nil { + return fmt.Errorf("reading status: %w", err) + } ec.PrintlnUnnecessary(ms.Report) return nil } From f8742938d1a6d77699f9da5292c57fcde2a0d713 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 17:03:37 
+0300 Subject: [PATCH 21/53] delete unnecessary struct --- base/commands/migration/common.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index 218b076e3..7a7661a02 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "time" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" @@ -18,16 +17,6 @@ type MigrationStatus struct { CompletionPercentage float32 `json:"completionPercentage"` } -type Migration struct { - Name string `json:"name"` - Type string `json:"type"` - Status Status `json:"status"` - StartTimestamp time.Time `json:"startTimestamp"` - EntriesMigrated int `json:"entriesMigrated"` - TotalEntries int `json:"totalEntries"` - CompletionPercentage float64 `json:"completionPercentage"` -} - type UpdateMessage struct { Status Status `json:"status"` CompletionPercentage float32 `json:"completionPercentage"` From b5554a94e5a7009e43c09709ab8054e6342a5b02 Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 18 Sep 2023 17:13:05 +0300 Subject: [PATCH 22/53] fix PR comments --- base/commands/migration/common.go | 11 ++++++++--- base/commands/migration/const.go | 2 ++ base/commands/migration/migration.go | 2 +- base/commands/migration/migration_start.go | 2 +- base/commands/migration/migration_status.go | 2 ++ base/commands/migration/start_stages.go | 2 ++ base/commands/migration/start_stages_it_test.go | 2 +- base/commands/migration/status_stages.go | 2 ++ base/commands/migration/status_stages_it_test.go | 2 +- base/commands/migration/utils.go | 2 ++ 10 files changed, 22 insertions(+), 7 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index 7a7661a02..a29cec7e7 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -1,8 +1,11 @@ +//go:build std || migration + package migration import ( "context" "encoding/json" + "errors" "fmt" "github.com/hazelcast/hazelcast-go-client" @@ -23,13 +26,15 @@ type UpdateMessage struct { Message string `json:"message"` } +var ErrInvalidStatus = errors.New("invalid status value") + func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*MigrationStatus, error) { v, err := statusMap.Get(ctx, StatusMapEntryName) if err != nil { - return nil, err + return nil, fmt.Errorf("getting status: %w", err) } if v == nil { - return nil, nil + return nil, ErrInvalidStatus } var b []byte if vv, ok := v.(string); ok { @@ -37,7 +42,7 @@ func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*Migrat } else if vv, ok := v.(serialization.JSON); ok { b = vv } else { - return nil, fmt.Errorf("invalid status value") + return nil, ErrInvalidStatus } var ms MigrationStatus if err := json.Unmarshal(b, &ms); err != nil { diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 6c584a414..44e452fed 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -1,3 +1,5 @@ +//go:build std || migration + package migration const ( diff --git a/base/commands/migration/migration.go b/base/commands/migration/migration.go index ad87d21a5..27df52448 100644 --- a/base/commands/migration/migration.go +++ b/base/commands/migration/migration.go @@ -1,4 +1,4 @@ -//go:build migration +//go:build std || migration package migration diff --git a/base/commands/migration/migration_start.go 
b/base/commands/migration/migration_start.go index 7beea3a3d..f1acf6e68 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -1,4 +1,4 @@ -//go:build migration +//go:build std || migration package migration diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 026187e1a..6a29c4488 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -1,3 +1,5 @@ +//go:build std || migration + package migration import ( diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index f885c5a0a..e50e45a5e 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -1,3 +1,5 @@ +//go:build std || migration + package migration import ( diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index e0f842e3d..b4c1938a1 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -1,4 +1,4 @@ -//go:build migration +//go:build std || migration package migration_test diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 158c0c647..2a94a813d 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -1,3 +1,5 @@ +//go:build std || migration + package migration import ( diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index c49de8139..0e9317012 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -1,4 +1,4 @@ -//go:build migration +//go:build std || migration package migration_test diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index 98a084b16..e3a899d25 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -1,3 +1,5 @@ +//go:build std || migration + package migration import ( From e35e7b536c802111aa1b649c567e03f96540e2ef Mon Sep 17 00:00:00 2001 From: kmetin Date: Tue, 19 Sep 2023 09:39:55 +0300 Subject: [PATCH 23/53] fix PR comments --- base/commands/migration/const.go | 4 +++ base/commands/migration/migration_start.go | 2 +- base/commands/migration/migration_status.go | 6 ++-- base/commands/migration/start_stages.go | 34 ++++++++++++--------- base/commands/migration/status_stages.go | 15 ++++++--- 5 files changed, 38 insertions(+), 23 deletions(-) diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index 44e452fed..89b64881c 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -23,3 +23,7 @@ const ( ) const flagOutputDir = "output-dir" + +const banner = `Hazelcast Data Migration Tool v5.3.0 +(c) 2023 Hazelcast, Inc. 
+` diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index f1acf6e68..6b41cb44c 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -48,7 +48,7 @@ Selected data structures in the source cluster will be migrated to the target cl } ec.PrintlnUnnecessary("") var updateTopic *hazelcast.Topic - sts := NewStartStages(updateTopic, MakeMigrationID(), ec.Args()[0], ec.Props().GetString(flagOutputDir)) + sts := NewStartStages(ec.Logger(), updateTopic, MakeMigrationID(), ec.Args()[0], ec.Props().GetString(flagOutputDir)) if !sts.topicListenerID.Default() && sts.updateTopic != nil { if err := sts.updateTopic.RemoveListener(ctx, sts.topicListenerID); err != nil { return err diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 6a29c4488..1533dfd77 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -25,10 +25,8 @@ func (s StatusCmd) Init(cc plug.InitContext) error { func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { ec.PrintlnUnnecessary("") - ec.PrintlnUnnecessary(`Hazelcast Data Migration Tool v5.3.0 -(c) 2023 Hazelcast, Inc. -`) - sts := NewStatusStages() + ec.PrintlnUnnecessary(banner) + sts := NewStatusStages(ec.Logger()) sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) if err := stage.Execute(ctx, ec, sp); err != nil { return err diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index e50e45a5e..0faee6f61 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -11,6 +11,7 @@ import ( "time" clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" + "github.com/hazelcast/hazelcast-commandline-client/internal/log" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/cluster" "github.com/hazelcast/hazelcast-go-client/serialization" @@ -31,13 +32,14 @@ type StartStages struct { topicListenerID types.UUID updateMsgChan chan UpdateMessage reportOutputDir string + logger log.Logger } var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func NewStartStages(updateTopic *hazelcast.Topic, migrationID, configDir, reportOutputDir string) *StartStages { +func NewStartStages(logger log.Logger, updateTopic *hazelcast.Topic, migrationID, configDir, reportOutputDir string) *StartStages { if migrationID == "" { panic("migrationID is required") } @@ -46,6 +48,7 @@ func NewStartStages(updateTopic *hazelcast.Topic, migrationID, configDir, report migrationID: migrationID, configDir: configDir, reportOutputDir: reportOutputDir, + logger: logger, } } @@ -81,19 +84,23 @@ func (st *StartStages) connectStage(ctx context.Context, ec plug.ExecContext) fu } st.startQueue, err = st.ci.Client().GetQueue(ctx, StartQueueName) if err != nil { - return err + return fmt.Errorf("retrieving the start Queue: %w", err) } st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) if err != nil { - return err + return fmt.Errorf("retrieving the status Map: %w", err) } st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) if err != nil { - return err + return fmt.Errorf("retrieving the update Topic: %w", err) } 
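Retrieving a per-migration update topic here is the heart of the new event-driven flow: the migration cluster publishes progress updates as JSON messages, and the CLC decodes each one into an UpdateMessage and forwards it over a channel. As a rough sketch only (the topic, ctx and updates variables are illustrative placeholders, not names from the patch), the round trip inside this package looks like:

    // Publisher side (migration cluster, or a test seeding updates):
    // marshal the UpdateMessage and publish it as serialization.JSON.
    b, err := json.Marshal(UpdateMessage{Status: StatusInProgress, Message: "copying map entries", CompletionPercentage: 10})
    if err == nil {
        err = topic.Publish(ctx, serialization.JSON(b))
    }

    // Consumer side (the CLC): decode the payload and hand it to the stage loop.
    id, err := topic.AddMessageListener(ctx, func(e *hazelcast.MessagePublished) {
        var u UpdateMessage
        if err := json.Unmarshal(e.Value.(serialization.JSON), &u); err != nil {
            return // malformed update; the patch logs a warning here instead of panicking
        }
        updates <- u // updates is a chan UpdateMessage owned by the caller
    })
    if err == nil {
        defer topic.RemoveListener(ctx, id)
    }
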
st.updateMsgChan = make(chan UpdateMessage) st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) - return err + if err != nil { + return fmt.Errorf("adding message listener to update Topic: %w", err) + + } + return nil } } @@ -101,7 +108,7 @@ func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { var u UpdateMessage err := json.Unmarshal(event.Value.(serialization.JSON), &u) if err != nil { - panic(fmt.Errorf("receiving update from migration cluster: %w", err)) + st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) } st.updateMsgChan <- u } @@ -110,10 +117,10 @@ func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func return func(status stage.Statuser) error { cb, err := makeConfigBundle(st.configDir, st.migrationID) if err != nil { - return err + return fmt.Errorf("making configuration bundle: %w", err) } if err = st.startQueue.Put(ctx, cb); err != nil { - return err + return fmt.Errorf("updating start Queue: %w", err) } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() @@ -173,7 +180,7 @@ func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecCont if err = saveReportToFile(name, ms.Report); err != nil { return true, fmt.Errorf("writing report to file: %w", err) } - if err = st.saveDebugLogs(ctx, ec, st.ci.OrderedMembers()); err != nil { + if err = st.saveDebugLogs(ctx, ec, st.migrationID, st.ci.OrderedMembers()); err != nil { return true, fmt.Errorf("writing debug logs to file: %w", err) } ec.PrintlnUnnecessary(fmt.Sprintf("migration report saved to file: %s", name)) @@ -198,11 +205,10 @@ func saveReportToFile(fileName, report string) error { return err } defer f.Close() - _, err = f.WriteString(report) - return err + return os.WriteFile(fileName, []byte(report), 0600) } -func (st *StartStages) saveDebugLogs(ctx context.Context, ec plug.ExecContext, members []cluster.MemberInfo) error { +func (st *StartStages) saveDebugLogs(ctx context.Context, ec plug.ExecContext, migrationID string, members []cluster.MemberInfo) error { for _, m := range members { l, err := st.ci.Client().GetList(ctx, DebugLogsListPrefix+m.UUID.String()) if err != nil { @@ -212,8 +218,8 @@ func (st *StartStages) saveDebugLogs(ctx context.Context, ec plug.ExecContext, m if err != nil { return err } - for _, l := range logs { - ec.Logger().Debugf(l.(string)) + for _, line := range logs { + ec.Logger().Debugf(fmt.Sprintf("[%s_%s] %s", migrationID, m.UUID.String(), line.(string))) } } return nil diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 2a94a813d..64e8eeea0 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" + "github.com/hazelcast/hazelcast-commandline-client/internal/log" "golang.org/x/exp/slices" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" @@ -22,10 +23,11 @@ type StatusStages struct { statusMap *hazelcast.Map updateTopic *hazelcast.Topic updateMsgChan chan UpdateMessage + logger log.Logger } -func NewStatusStages() *StatusStages { - return &StatusStages{} +func NewStatusStages(logger log.Logger) *StatusStages { + return &StatusStages{logger: logger} } func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { @@ -116,9 +118,14 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun func (st *StatusStages) topicListener(event 
*hazelcast.MessagePublished) { var u UpdateMessage - err := json.Unmarshal(event.Value.(serialization.JSON), &u) + v, ok := event.Value.(serialization.JSON) + if !ok { + st.logger.Warn(fmt.Sprintf("update message type is unexpected")) + return + } + err := json.Unmarshal(v, &u) if err != nil { - panic(fmt.Errorf("receiving update from migration cluster: %w", err)) + st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) } st.updateMsgChan <- u } From 44260f5899c53403e0c8daac3a28fccd274f001b Mon Sep 17 00:00:00 2001 From: kmetin Date: Tue, 19 Sep 2023 14:19:53 +0300 Subject: [PATCH 24/53] merge --- base/commands/migration/migration_status.go | 3 +- base/commands/migration/stages.go | 184 -------------------- base/commands/migration/start_stages.go | 50 +++--- base/commands/migration/status_stages.go | 36 ++-- 4 files changed, 44 insertions(+), 229 deletions(-) delete mode 100644 base/commands/migration/stages.go diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 1533dfd77..88444a89d 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -19,7 +19,6 @@ func (s StatusCmd) Init(cc plug.InitContext) error { cc.SetCommandGroup("migration") help := "Get status of the data migration in progress" cc.SetCommandHelp(help, help) - cc.SetPositionalArgCount(0, 0) return nil } @@ -28,7 +27,7 @@ func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { ec.PrintlnUnnecessary(banner) sts := NewStatusStages(ec.Logger()) sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) - if err := stage.Execute(ctx, ec, sp); err != nil { + if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { return err } ec.PrintlnUnnecessary("") diff --git a/base/commands/migration/stages.go b/base/commands/migration/stages.go deleted file mode 100644 index e5f196553..000000000 --- a/base/commands/migration/stages.go +++ /dev/null @@ -1,184 +0,0 @@ -package migration - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/hazelcast/hazelcast-go-client" - "github.com/hazelcast/hazelcast-go-client/serialization" - "golang.org/x/exp/slices" - - clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" - - "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" - "github.com/hazelcast/hazelcast-commandline-client/internal/plug" -) - -type Stages struct { - migrationID string - configDir string - ci *hazelcast.ClientInternal - startQueue *hazelcast.Queue - statusMap *hazelcast.Map -} - -func NewStages(migrationID, configDir string) *Stages { - if migrationID == "" { - panic("migrationID is required") - } - return &Stages{ - migrationID: migrationID, - configDir: configDir, - } -} - -func (st *Stages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage[any] { - return []stage.Stage[any]{ - { - ProgressMsg: "Connecting to the migration cluster", - SuccessMsg: "Connected to the migration cluster", - FailureMsg: "Could not connect to the migration cluster", - Func: st.connectStage(ec), - }, - { - ProgressMsg: "Starting the migration", - SuccessMsg: "Started the migration", - FailureMsg: "Could not start the migration", - Func: st.startStage(), - }, - { - ProgressMsg: "Migrating the cluster", - SuccessMsg: "Migrated the cluster", - FailureMsg: "Could not migrate the cluster", - Func: st.migrateStage(), - }, - } -} - -func (st *Stages) connectStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, 
error) { - return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - var err error - st.ci, err = ec.ClientInternal(ctx) - if err != nil { - return nil, err - } - st.startQueue, err = st.ci.Client().GetQueue(ctx, startQueueName) - if err != nil { - return nil, err - } - st.statusMap, err = st.ci.Client().GetMap(ctx, makeStatusMapName(st.migrationID)) - if err != nil { - return nil, err - } - return nil, nil - } -} - -func (st *Stages) startStage() func(context.Context, stage.Statuser[any]) (any, error) { - return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - if err := st.statusMap.Delete(ctx, statusMapEntryName); err != nil { - return nil, err - } - var cb configBundle - cb.MigrationID = st.migrationID - if err := cb.Walk(st.configDir); err != nil { - return nil, err - } - b, err := json.Marshal(cb) - if err != nil { - return nil, err - } - if err = st.startQueue.Put(ctx, serialization.JSON(b)); err != nil { - return nil, err - } - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if err = st.waitForStatus(ctx, time.Second, statusInProgress, statusComplete); err != nil { - return nil, err - } - return nil, nil - } -} - -func (st *Stages) migrateStage() func(context.Context, stage.Statuser[any]) (any, error) { - return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - return st.waitForStatus(ctx, 5*time.Second, statusComplete), nil - } -} - -func (st *Stages) waitForStatus(ctx context.Context, waitInterval time.Duration, targetStatuses ...status) error { - timeoutErr := fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ - "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", - context.DeadlineExceeded) - for { - if err := ctx.Err(); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr - } - return fmt.Errorf("migration failed: %w", err) - } - s, err := st.readStatus(ctx) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr - } - return fmt.Errorf("reading status: %w", err) - } - switch s { - case statusComplete: - return nil - case statusCanceled: - return clcerrors.ErrUserCancelled - case statusFailed: - return errors.New("migration failed") - } - if slices.Contains(targetStatuses, s) { - return nil - } - time.Sleep(waitInterval) - } -} - -func (st *Stages) readStatus(ctx context.Context) (status, error) { - v, err := st.statusMap.Get(ctx, statusMapEntryName) - if err != nil { - return statusNone, err - } - if v == nil { - return statusNone, nil - } - var b []byte - if vv, ok := v.(string); ok { - b = []byte(vv) - } else if vv, ok := v.(serialization.JSON); ok { - b = vv - } else { - return statusNone, fmt.Errorf("invalid status value") - } - var ms migrationStatus - if err := json.Unmarshal(b, &ms); err != nil { - return statusNone, fmt.Errorf("unmarshaling status: %w", err) - } - return ms.Status, nil -} - -func makeStatusMapName(migrationID string) string { - return "__datamigration_" + migrationID -} - -type status string - -const ( - statusNone status = "" - statusComplete status = "COMPLETED" - statusCanceled status = "CANCELED" - statusFailed status = "FAILED" - statusInProgress status = "IN_PROGRESS" -) - -type migrationStatus struct { - Status status `json:"status"` -} diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 0faee6f61..ac9ee82cd 100644 --- 
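With the old stages.go removed, this merge commit moves the remaining stages onto the generic stage API: a stage's Func now takes the context plus a typed Statuser and returns a value alongside the error, and execution goes through stage.Execute with an initial value, as the migration_status.go hunk above already shows. A minimal sketch of the new shape (the messages and progress value are placeholders, not taken from the patch):

    stages := []stage.Stage[any]{
        {
            ProgressMsg: "Working",
            SuccessMsg:  "Worked",
            FailureMsg:  "Could not work",
            Func: func(ctx context.Context, status stage.Statuser[any]) (any, error) {
                status.SetProgress(50) // report completion percentage to the progress display
                return nil, nil
            },
        },
    }
    _, err := stage.Execute(ctx, ec, any(nil), stage.NewFixedProvider(stages...))
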
a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -52,55 +52,55 @@ func NewStartStages(logger log.Logger, updateTopic *hazelcast.Topic, migrationID } } -func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { - return []stage.Stage{ +func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage[any] { + return []stage.Stage[any]{ { ProgressMsg: "Connecting to the migration cluster", SuccessMsg: "Connected to the migration cluster", FailureMsg: "Could not connect to the migration cluster", - Func: st.connectStage(ctx, ec), + Func: st.connectStage(ec), }, { ProgressMsg: "Starting the migration", SuccessMsg: "Started the migration", FailureMsg: "Could not start the migration", - Func: st.startStage(ctx, ec), + Func: st.startStage(ec), }, { ProgressMsg: "Migrating the cluster", SuccessMsg: "Migrated the cluster", FailureMsg: "Could not migrate the cluster", - Func: st.migrateStage(ctx, ec), + Func: st.migrateStage(ec), }, } } -func (st *StartStages) connectStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { - return func(status stage.Statuser) error { +func (st *StartStages) connectStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(ctx context.Context, status stage.Statuser[any]) (any, error) { var err error st.ci, err = ec.ClientInternal(ctx) if err != nil { - return err + return nil, err } st.startQueue, err = st.ci.Client().GetQueue(ctx, StartQueueName) if err != nil { - return fmt.Errorf("retrieving the start Queue: %w", err) + return nil, fmt.Errorf("retrieving the start Queue: %w", err) } st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) if err != nil { - return fmt.Errorf("retrieving the status Map: %w", err) + return nil, fmt.Errorf("retrieving the status Map: %w", err) } st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) if err != nil { - return fmt.Errorf("retrieving the update Topic: %w", err) + return nil, fmt.Errorf("retrieving the update Topic: %w", err) } st.updateMsgChan = make(chan UpdateMessage) st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) if err != nil { - return fmt.Errorf("adding message listener to update Topic: %w", err) + return nil, fmt.Errorf("adding message listener to update Topic: %w", err) } - return nil + return nil, nil } } @@ -113,21 +113,21 @@ func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { st.updateMsgChan <- u } -func (st *StartStages) startStage(ctx context.Context, ec plug.ExecContext) func(stage.Statuser) error { - return func(status stage.Statuser) error { +func (st *StartStages) startStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(ctx context.Context, status stage.Statuser[any]) (any, error) { cb, err := makeConfigBundle(st.configDir, st.migrationID) if err != nil { - return fmt.Errorf("making configuration bundle: %w", err) + return nil, fmt.Errorf("making configuration bundle: %w", err) } if err = st.startQueue.Put(ctx, cb); err != nil { - return fmt.Errorf("updating start Queue: %w", err) + return nil, fmt.Errorf("updating start Queue: %w", err) } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMsgChan, status); isTerminal { - return err + return nil, err } - return nil + return nil, nil } } @@ -144,27 +144,27 @@ func 
makeConfigBundle(configDir, migrationID string) (serialization.JSON, error) return b, nil } -func (st *StartStages) migrateStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { - return func(status stage.Statuser) error { +func (st *StartStages) migrateStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(ctx context.Context, status stage.Statuser[any]) (any, error) { for { select { case msg := <-st.updateMsgChan: if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg, status); isTerminal { - return err + return nil, err } case <-ctx.Done(): if err := ctx.Err(); err != nil { if errors.Is(err, context.DeadlineExceeded) { - return timeoutErr + return nil, timeoutErr } - return fmt.Errorf("migration failed: %w", err) + return nil, fmt.Errorf("migration failed: %w", err) } } } } } -func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage, status stage.Statuser) (bool, error) { +func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage, status stage.Statuser[any]) (bool, error) { status.SetProgress(msg.CompletionPercentage) ec.PrintlnUnnecessary(msg.Message) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 64e8eeea0..48fe737c0 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -30,13 +30,13 @@ func NewStatusStages(logger log.Logger) *StatusStages { return &StatusStages{logger: logger} } -func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage { - return []stage.Stage{ +func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage[any] { + return []stage.Stage[any]{ { ProgressMsg: "Connecting to the migration cluster", SuccessMsg: "Connected to the migration cluster", FailureMsg: "Could not connect to the migration cluster", - Func: st.connectStage(ctx, ec), + Func: st.connectStage(ec), }, { ProgressMsg: "Fetching migration status", @@ -51,49 +51,49 @@ type MigrationInProgress struct { MigrationID string `json:"migrationId"` } -func (st *StatusStages) connectStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { - return func(status stage.Statuser) error { +func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(ctx context.Context, status stage.Statuser[any]) (any, error) { var err error st.ci, err = ec.ClientInternal(ctx) if err != nil { - return err + return nil, err } st.migrationsInProgressList, err = st.ci.Client().GetList(ctx, MigrationsInProgressList) if err != nil { - return err + return nil, err } all, err := st.migrationsInProgressList.GetAll(ctx) if err != nil { - return err + return nil, err } if len(all) == 0 { - return fmt.Errorf("there are no migrations are in progress on migration cluster") + return nil, fmt.Errorf("there are no migrations are in progress on migration cluster") } var mip MigrationInProgress m := all[0].(serialization.JSON) err = json.Unmarshal(m, &mip) if err != nil { - return fmt.Errorf("parsing migration in progress: %w", err) + return nil, fmt.Errorf("parsing migration in progress: %w", err) } st.migrationID = mip.MigrationID st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) if err != nil { - return err + return 
nil, err } st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) - return err + return nil, err } } -func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) func(statuser stage.Statuser) error { - return func(stage.Statuser) error { +func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(context.Context, stage.Statuser[any]) (any, error) { ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { - return fmt.Errorf("reading status: %w", err) + return nil, fmt.Errorf("reading status: %w", err) } if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, ms.Status) { ec.PrintlnUnnecessary(ms.Report) - return nil + return nil, nil } st.updateMsgChan = make(chan UpdateMessage) id, err := st.updateTopic.AddMessageListener(ctx, st.topicListener) @@ -106,10 +106,10 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { - return fmt.Errorf("reading status: %w", err) + return nil, fmt.Errorf("reading status: %w", err) } ec.PrintlnUnnecessary(ms.Report) - return nil + return nil, nil } } } From ae6c008b1ab07278effa9e7196a068c429a04065 Mon Sep 17 00:00:00 2001 From: kmetin Date: Tue, 19 Sep 2023 17:05:09 +0300 Subject: [PATCH 25/53] set status --- base/commands/migration/status_stages.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index 48fe737c0..b6d5b1f74 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -42,7 +42,7 @@ func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage. 
ProgressMsg: "Fetching migration status", SuccessMsg: "Fetched migration status", FailureMsg: "Could not fetch migration status", - Func: st.fetchStage(ctx, ec), + Func: st.fetchStage(ec), }, } } @@ -85,8 +85,8 @@ func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, } } -func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { - return func(context.Context, stage.Statuser[any]) (any, error) { +func (st *StatusStages) fetchStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { + return func(ctx context.Context, status stage.Statuser[any]) (any, error) { ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { return nil, fmt.Errorf("reading status: %w", err) @@ -102,7 +102,7 @@ func (st *StatusStages) fetchStage(ctx context.Context, ec plug.ExecContext) fun select { case msg := <-st.updateMsgChan: ec.PrintlnUnnecessary(msg.Message) - ec.PrintlnUnnecessary(fmt.Sprintf("Completion Percentage: %f", msg.CompletionPercentage)) + status.SetProgress(msg.CompletionPercentage) if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { ms, err := readMigrationStatus(ctx, st.statusMap) if err != nil { From 476be4e18f6ba53a59214b8b0cc6be359b58dd27 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 21 Sep 2023 14:25:03 +0300 Subject: [PATCH 26/53] query with sql --- base/commands/migration/common.go | 32 +-- base/commands/migration/const.go | 12 +- base/commands/migration/migration_stages.go | 184 ++++++++++++++++++ base/commands/migration/migration_start.go | 19 +- base/commands/migration/start_stages.go | 155 ++------------- .../migration/start_stages_it_test.go | 139 +++++-------- base/commands/migration/status_stages.go | 84 ++++---- .../migration/status_stages_it_test.go | 114 ----------- .../start/migration_success_completed.json | 69 +++++++ .../start/migration_success_failure.json | 70 +++++++ .../start/migration_success_initial.json | 70 +++++++ base/commands/migration/utils.go | 4 - 12 files changed, 533 insertions(+), 419 deletions(-) create mode 100644 base/commands/migration/migration_stages.go create mode 100644 base/commands/migration/testdata/start/migration_success_completed.json create mode 100644 base/commands/migration/testdata/start/migration_success_failure.json create mode 100644 base/commands/migration/testdata/start/migration_success_initial.json diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index a29cec7e7..2c564a301 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -12,24 +12,32 @@ import ( "github.com/hazelcast/hazelcast-go-client/serialization" ) -type MigrationStatus struct { - Status Status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` - CompletionPercentage float32 `json:"completionPercentage"` +type MigrationStatusTotal struct { + Status Status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` + CompletionPercentage float32 `json:"completionPercentage"` + Migrations []MigrationStatusRow `json:"migrations"` } -type UpdateMessage struct { +type DataStructureInfo struct { + Name string + Type string +} + +type MigrationStatusRow struct { + Name string `json:"name"` + Type string `json:"type"` Status Status `json:"status"` - CompletionPercentage float32 `json:"completionPercentage"` - Message string 
`json:"message"` + CompletionPercentage float32 `json:"completion_percentage"` + Error string `json:"error"` } var ErrInvalidStatus = errors.New("invalid status value") -func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*MigrationStatus, error) { - v, err := statusMap.Get(ctx, StatusMapEntryName) +func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map, migrationID string) (*MigrationStatusTotal, error) { + v, err := statusMap.Get(ctx, migrationID) //TODO: read only status with sql if err != nil { return nil, fmt.Errorf("getting status: %w", err) } @@ -44,7 +52,7 @@ func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map) (*Migrat } else { return nil, ErrInvalidStatus } - var ms MigrationStatus + var ms MigrationStatusTotal if err := json.Unmarshal(b, &ms); err != nil { return nil, fmt.Errorf("parsing migration status: %w", err) } diff --git a/base/commands/migration/const.go b/base/commands/migration/const.go index cf07e60ae..5d9f684df 100644 --- a/base/commands/migration/const.go +++ b/base/commands/migration/const.go @@ -5,21 +5,21 @@ package migration const ( StartQueueName = "__datamigration_start_queue" StatusMapEntryName = "status" - StatusMapPrefix = "__datamigration_" + StatusMapName = "__datamigration_migrations" UpdateTopicPrefix = "__datamigration_updates_" DebugLogsListPrefix = "__datamigration_debug_logs_" MigrationsInProgressList = "__datamigrations_in_progress" - startQueueName = "__datamigration_start_queue" - statusMapEntryName = "status" - argDMTConfig = "dmtConfig" - argTitleDMTConfig = "DMT configuration" + startQueueName = "__datamigration_start_queue" + statusMapEntryName = "status" + argDMTConfig = "dmtConfig" + argTitleDMTConfig = "DMT configuration" ) type Status string const ( StatusStarted Status = "STARTED" - Canceling Status = "CANCELING" + StatusCanceling Status = "CANCELING" StatusComplete Status = "COMPLETED" StatusCanceled Status = "CANCELED" StatusFailed Status = "FAILED" diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go new file mode 100644 index 000000000..ee7633bfe --- /dev/null +++ b/base/commands/migration/migration_stages.go @@ -0,0 +1,184 @@ +//go:build std || migration + +package migration + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" + errors2 "github.com/hazelcast/hazelcast-commandline-client/errors" + "github.com/hazelcast/hazelcast-commandline-client/internal/plug" + "github.com/hazelcast/hazelcast-go-client" + "github.com/hazelcast/hazelcast-go-client/serialization" + "golang.org/x/exp/slices" +) + +func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, reportOutputDir string, statusMap *hazelcast.Map) ([]stage.Stage[any], error) { + ci, err := ec.ClientInternal(ctx) + if err != nil { + return nil, err + } + if err = waitForMigrationToBeCreated(ctx, ci, migrationID); err != nil { + return nil, err + } + var stages []stage.Stage[any] + dss, err := dataStructuresToBeMigrated(ctx, ec, migrationID) + if err != nil { + return nil, err + } + for i, d := range dss { + i := i + stages = append(stages, stage.Stage[any]{ + ProgressMsg: fmt.Sprintf("Migrating %s: %s", d.Type, d.Name), + SuccessMsg: fmt.Sprintf("Migrated %s: %s ...", d.Type, d.Name), + FailureMsg: fmt.Sprintf("Failed migrating %s: %s ...", d.Type, d.Name), + Func: func(ct context.Context, status stage.Statuser[any]) (any, error) { + for { + if ctx.Err() != nil { + return 
nil, err + } + generalStatus, err := readMigrationStatus(ctx, statusMap, migrationID) + if err != nil { + return nil, err + } + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, generalStatus.Status) { + err = saveMemberLogs(ctx, ec, ci, migrationID) + if err != nil { + return nil, err + } + var name string + if reportOutputDir == "" { + name = fmt.Sprintf("migration_report_%s.txt", migrationID) + } + err = saveReportToFile(name, generalStatus.Report) + if err != nil { + return nil, err + } + } + switch generalStatus.Status { + case StatusComplete: + return nil, nil + case StatusFailed: + return nil, errors.New(generalStatus.Errors[0]) //TODO + case StatusCanceled, StatusCanceling: + return nil, errors2.ErrUserCancelled + } + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.migrations[%d]') FROM %s WHERE __key= '%s'`, i, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return nil, err + } + iter, err := res.Iterator() + if err != nil { + return nil, err + } + if iter.HasNext() { + row, err := iter.Next() + if err != nil { + return nil, err + } + rowStr, err := row.Get(0) + if err != nil { + return nil, err + } + var m MigrationStatusRow + if err = json.Unmarshal(rowStr.(serialization.JSON), &m); err != nil { + return nil, err + } + status.SetProgress(m.CompletionPercentage) + switch m.Status { + case StatusComplete: + return nil, nil + case StatusFailed: + return nil, stage.IgnoreError(errors.New(m.Error)) + case StatusCanceled: + return nil, errors2.ErrUserCancelled + } + } + } + }, + }) + } + return stages, nil +} + +func dataStructuresToBeMigrated(ctx context.Context, ec plug.ExecContext, migrationID string) ([]DataStructureInfo, error) { + var dss []DataStructureInfo + ci, err := ec.ClientInternal(ctx) + if err != nil { + return nil, err + } + q := fmt.Sprintf(`SELECT this FROM %s WHERE __key= '%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return nil, err + } + it, err := res.Iterator() + if err != nil { + return nil, err + } + if it.HasNext() { + row, err := it.Next() + if err != nil { + return nil, err + } + r, err := row.Get(0) + var status MigrationStatusTotal + if err = json.Unmarshal(r.(serialization.JSON), &status); err != nil { + return nil, err + } + for _, m := range status.Migrations { + dss = append(dss, DataStructureInfo{ + Name: m.Name, + Type: m.Type, + }) + } + } + return dss, nil +} + +func saveMemberLogs(ctx context.Context, ec plug.ExecContext, ci *hazelcast.ClientInternal, migrationID string) error { + for _, m := range ci.OrderedMembers() { + l, err := ci.Client().GetList(ctx, DebugLogsListPrefix+m.UUID.String()) + if err != nil { + return err + } + logs, err := l.GetAll(ctx) + if err != nil { + return err + } + for _, line := range logs { + ec.Logger().Debugf(fmt.Sprintf("[%s_%s] %s", migrationID, m.UUID.String(), line.(string))) + } + } + return nil +} + +func saveReportToFile(fileName, report string) error { + f, err := os.Create(fmt.Sprintf(fileName)) + if err != nil { + return err + } + defer f.Close() + return os.WriteFile(fileName, []byte(report), 0600) +} + +func waitForMigrationToBeCreated(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) error { + for { + statusMap, err := ci.Client().GetMap(ctx, StatusMapName) + if err != nil { + return err + } + ok, err := statusMap.ContainsKey(ctx, migrationID) + if err != nil { + return err + } + if ok { + return nil + } + } +} diff --git 
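The new migration_stages.go polls the status map through SQL, so it assumes the map holds one JSON document per migration ID: a top-level status, report and completion percentage plus a migrations array that JSON_QUERY('$.migrations[i]') indexes into. The testdata files referenced by the tests are not reproduced here; the snippet below is only an assumed minimal document consistent with the MigrationStatusTotal and MigrationStatusRow field tags, seeded the same way the integration tests do (ci and migrationID are placeholders):

    // Create a SQL mapping over the status map so JSON_QUERY can run against it
    // (same statement the integration tests execute).
    mapping := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, StatusMapName)
    if _, err := ci.Client().SQL().Execute(ctx, mapping); err != nil {
        return err
    }
    // An assumed minimal status document; field names follow the struct tags in common.go.
    doc := serialization.JSON(`{
        "status": "IN_PROGRESS",
        "report": "",
        "completionPercentage": 10,
        "migrations": [
            {"name": "imap1", "type": "IMAP", "status": "IN_PROGRESS", "completion_percentage": 10, "error": ""}
        ]
    }`)
    statusMap, err := ci.Client().GetMap(ctx, StatusMapName)
    if err != nil {
        return err
    }
    return statusMap.Set(ctx, migrationID, doc)
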
a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 367d4632d..b0e22f256 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -11,7 +11,6 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-commandline-client/internal/prompt" - "github.com/hazelcast/hazelcast-go-client" ) type StartCmd struct{} @@ -47,17 +46,21 @@ Selected data structures in the source cluster will be migrated to the target cl } } ec.PrintlnUnnecessary("") - var updateTopic *hazelcast.Topic - sts := NewStartStages(ec.Logger(), updateTopic, MakeMigrationID(), ec.GetStringArg(argDMTConfig), ec.Props().GetString(flagOutputDir)) - if !sts.topicListenerID.Default() && sts.updateTopic != nil { - if err := sts.updateTopic.RemoveListener(ctx, sts.topicListenerID); err != nil { - return err - } - } + mID := MakeMigrationID() + sts := NewStartStages(ec.Logger(), mID, ec.GetStringArg(argDMTConfig)) sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { return err } + mStages, err := migrationStages(ctx, ec, mID, ec.Props().GetString(flagOutputDir), sts.statusMap) + if err != nil { + return err + } + mp := stage.NewFixedProvider(mStages...) + if _, err := stage.Execute(ctx, ec, any(nil), mp); err != nil { + return err + } + ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary("OK Migration completed successfully.") return nil diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index ac9ee82cd..13133edde 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -5,50 +5,36 @@ package migration import ( "context" "encoding/json" - "errors" "fmt" - "os" - "time" - clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" + "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" "github.com/hazelcast/hazelcast-commandline-client/internal/log" + "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-go-client" - "github.com/hazelcast/hazelcast-go-client/cluster" "github.com/hazelcast/hazelcast-go-client/serialization" - "github.com/hazelcast/hazelcast-go-client/types" - "golang.org/x/exp/slices" - - "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" - "github.com/hazelcast/hazelcast-commandline-client/internal/plug" ) type StartStages struct { - migrationID string - configDir string - ci *hazelcast.ClientInternal - startQueue *hazelcast.Queue - statusMap *hazelcast.Map - updateTopic *hazelcast.Topic - topicListenerID types.UUID - updateMsgChan chan UpdateMessage - reportOutputDir string - logger log.Logger + migrationID string + configDir string + ci *hazelcast.ClientInternal + startQueue *hazelcast.Queue + statusMap *hazelcast.Map + logger log.Logger } var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func NewStartStages(logger log.Logger, updateTopic *hazelcast.Topic, migrationID, configDir, reportOutputDir string) *StartStages { +func NewStartStages(logger log.Logger, migrationID, configDir string) *StartStages { if migrationID == "" { panic("migrationID is required") } return &StartStages{ - 
updateTopic: updateTopic, - migrationID: migrationID, - configDir: configDir, - reportOutputDir: reportOutputDir, - logger: logger, + migrationID: migrationID, + configDir: configDir, + logger: logger, } } @@ -64,13 +50,7 @@ func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.S ProgressMsg: "Starting the migration", SuccessMsg: "Started the migration", FailureMsg: "Could not start the migration", - Func: st.startStage(ec), - }, - { - ProgressMsg: "Migrating the cluster", - SuccessMsg: "Migrated the cluster", - FailureMsg: "Could not migrate the cluster", - Func: st.migrateStage(ec), + Func: st.startStage(), }, } } @@ -86,34 +66,15 @@ func (st *StartStages) connectStage(ec plug.ExecContext) func(context.Context, s if err != nil { return nil, fmt.Errorf("retrieving the start Queue: %w", err) } - st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) + st.statusMap, err = st.ci.Client().GetMap(ctx, StatusMapName) if err != nil { return nil, fmt.Errorf("retrieving the status Map: %w", err) } - st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) - if err != nil { - return nil, fmt.Errorf("retrieving the update Topic: %w", err) - } - st.updateMsgChan = make(chan UpdateMessage) - st.topicListenerID, err = st.updateTopic.AddMessageListener(ctx, st.topicListener) - if err != nil { - return nil, fmt.Errorf("adding message listener to update Topic: %w", err) - - } return nil, nil } } -func (st *StartStages) topicListener(event *hazelcast.MessagePublished) { - var u UpdateMessage - err := json.Unmarshal(event.Value.(serialization.JSON), &u) - if err != nil { - st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) - } - st.updateMsgChan <- u -} - -func (st *StartStages) startStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { +func (st *StartStages) startStage() func(context.Context, stage.Statuser[any]) (any, error) { return func(ctx context.Context, status stage.Statuser[any]) (any, error) { cb, err := makeConfigBundle(st.configDir, st.migrationID) if err != nil { @@ -122,11 +83,6 @@ func (st *StartStages) startStage(ec plug.ExecContext) func(context.Context, sta if err = st.startQueue.Put(ctx, cb); err != nil { return nil, fmt.Errorf("updating start Queue: %w", err) } - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if isTerminal, err := st.handleUpdateMessage(ctx, ec, <-st.updateMsgChan, status); isTerminal { - return nil, err - } return nil, nil } } @@ -143,84 +99,3 @@ func makeConfigBundle(configDir, migrationID string) (serialization.JSON, error) } return b, nil } - -func (st *StartStages) migrateStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { - return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - for { - select { - case msg := <-st.updateMsgChan: - if isTerminal, err := st.handleUpdateMessage(ctx, ec, msg, status); isTerminal { - return nil, err - } - case <-ctx.Done(): - if err := ctx.Err(); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return nil, timeoutErr - } - return nil, fmt.Errorf("migration failed: %w", err) - } - } - } - } -} - -func (st *StartStages) handleUpdateMessage(ctx context.Context, ec plug.ExecContext, msg UpdateMessage, status stage.Statuser[any]) (bool, error) { - status.SetProgress(msg.CompletionPercentage) - ec.PrintlnUnnecessary(msg.Message) - if slices.Contains([]Status{StatusComplete, StatusFailed, 
StatusCanceled}, msg.Status) { - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return true, fmt.Errorf("reading status: %w", err) - } - ec.PrintlnUnnecessary(ms.Report) - var name string - if st.reportOutputDir == "" { - name = fmt.Sprintf("migration_report_%s.txt", st.migrationID) - } - if err = saveReportToFile(name, ms.Report); err != nil { - return true, fmt.Errorf("writing report to file: %w", err) - } - if err = st.saveDebugLogs(ctx, ec, st.migrationID, st.ci.OrderedMembers()); err != nil { - return true, fmt.Errorf("writing debug logs to file: %w", err) - } - ec.PrintlnUnnecessary(fmt.Sprintf("migration report saved to file: %s", name)) - for _, l := range ms.Logs { - ec.Logger().Info(l) - } - switch ms.Status { - case StatusComplete: - return true, nil - case StatusCanceled: - return true, clcerrors.ErrUserCancelled - case StatusFailed: - return true, fmt.Errorf("migration failed") - } - } - return false, nil -} - -func saveReportToFile(fileName, report string) error { - f, err := os.Create(fmt.Sprintf(fileName)) - if err != nil { - return err - } - defer f.Close() - return os.WriteFile(fileName, []byte(report), 0600) -} - -func (st *StartStages) saveDebugLogs(ctx context.Context, ec plug.ExecContext, migrationID string, members []cluster.MemberInfo) error { - for _, m := range members { - l, err := st.ci.Client().GetList(ctx, DebugLogsListPrefix+m.UUID.String()) - if err != nil { - return err - } - logs, err := l.GetAll(ctx) - if err != nil { - return err - } - for _, line := range logs { - ec.Logger().Debugf(fmt.Sprintf("[%s_%s] %s", migrationID, m.UUID.String(), line.(string))) - } - } - return nil -} diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index b4c1938a1..4514dddfa 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMigration(t *testing.T) { +func TestMigrationStages(t *testing.T) { testCases := []struct { name string f func(t *testing.T) @@ -34,6 +34,14 @@ func TestMigration(t *testing.T) { } func startTest_Successful(t *testing.T) { + startTest(t, successfulRunner, "OK Migration completed successfully.") +} + +func startTest_Failure(t *testing.T) { + startTest(t, failureRunner, "ERROR Failed migrating IMAP: imap5 ...: some error") +} + +func startTest(t *testing.T, runnerFunc func(context.Context, it.TestContext, string, *sync.WaitGroup), expectedOutput string) { tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { @@ -41,109 +49,45 @@ func startTest_Successful(t *testing.T) { wg.Add(1) go tcx.WithReset(func() { defer wg.Done() - Must(tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes")) + tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) - c := make(chan string, 1) + wg.Add(1) go findMigrationID(ctx, tcx, c) - migrationID := <-c - migrationReport := successfulRunner(migrationID, tcx, ctx) + mID := <-c + wg.Done() + wg.Add(1) + go runnerFunc(ctx, tcx, mID, &wg) wg.Wait() - tcx.AssertStdoutContains(fmt.Sprintf(` -Hazelcast Data Migration Tool v5.3.0 -(c) 2023 Hazelcast, Inc. - -Selected data structures in the source cluster will be migrated to the target cluster. - - - OK [1/3] Connected to the migration cluster. -first message - OK [2/3] Started the migration. -second message -last message -status report -migration report saved to file: migration_report_%s.txt - OK [3/3] Migrated the cluster. 
- - OK Migration completed successfully.`, migrationID)) - tcx.WithReset(func() { - require.Equal(t, true, fileExists(migrationReport)) - }) - }) -} - -func startTest_Failure(t *testing.T) { - tcx := it.TestContext{T: t} - ctx := context.Background() - tcx.Tester(func(tcx it.TestContext) { - go tcx.WithReset(func() { - tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") - }) - migrationReport := failureRunner(tcx, ctx) - tcx.AssertStdoutContains(` -Hazelcast Data Migration Tool v5.3.0 -(c) 2023 Hazelcast, Inc. - -Selected data structures in the source cluster will be migrated to the target cluster. - - - OK [1/3] Connected to the migration cluster. -first message - OK [2/3] Started the migration. -second message -fail status report`) + tcx.AssertStdoutContains(expectedOutput) tcx.WithReset(func() { - require.Equal(t, true, fileExists(migrationReport)) + f := fmt.Sprintf("migration_report_%s.txt", mID) + require.Equal(t, true, fileExists(f)) + Must(os.Remove(f)) }) }) } -func fileExists(filename string) bool { - a := MustValue(os.Getwd()) - fmt.Println(a) - _, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - Must(os.Remove(filename)) - return true +func successfulRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup) { + mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) + MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) + b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) + MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + b = MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) + MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + wg.Done() } -func successfulRunner(migrationID string, tcx it.TestContext, ctx context.Context) string { - topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 10})) - Must(topic.Publish(ctx, serialization.JSON(msg))) - msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "second message", CompletionPercentage: 20})) - Must(topic.Publish(ctx, serialization.JSON(msg))) - statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) - b := MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusComplete, - Report: "status report", - Logs: []string{"log1", "log2"}, - })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) - Must(topic.Publish(ctx, serialization.JSON(msg))) - return fmt.Sprintf("migration_report_%s.txt", migrationID) -} - -func failureRunner(tcx it.TestContext, ctx context.Context) string { - c := make(chan string, 1) - go findMigrationID(ctx, tcx, c) - migrationID := <-c - topic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 20})) - Must(topic.Publish(ctx, serialization.JSON(msg))) - statusMap := MustValue(tcx.Client.GetMap(ctx, 
migration.MakeStatusMapName(migrationID))) - b := MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusFailed, - Report: "fail status report", - Errors: []string{"error1", "error2"}, - })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(b))) - msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusFailed, Message: "second message", CompletionPercentage: 60})) - Must(topic.Publish(ctx, serialization.JSON(msg))) - return fmt.Sprintf("migration_report_%s.txt", migrationID) +func failureRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup) { + mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) + MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) + b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) + MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + b = MustValue(os.ReadFile("testdata/start/migration_success_failure.json")) + MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + wg.Done() } func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { @@ -158,3 +102,12 @@ func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { } } } + +func fileExists(filename string) bool { + MustValue(os.Getwd()) + _, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index b6d5b1f74..a4f1a41fa 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -7,10 +7,8 @@ import ( "encoding/json" "fmt" - "github.com/hazelcast/hazelcast-commandline-client/internal/log" - "golang.org/x/exp/slices" - "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" + "github.com/hazelcast/hazelcast-commandline-client/internal/log" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" @@ -22,8 +20,8 @@ type StatusStages struct { migrationsInProgressList *hazelcast.List statusMap *hazelcast.Map updateTopic *hazelcast.Topic - updateMsgChan chan UpdateMessage - logger log.Logger + // updateMsgChan chan UpdateMessage + logger log.Logger } func NewStatusStages(logger log.Logger) *StatusStages { @@ -76,7 +74,7 @@ func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, return nil, fmt.Errorf("parsing migration in progress: %w", err) } st.migrationID = mip.MigrationID - st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) + // st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) if err != nil { return nil, err } @@ -87,45 +85,47 @@ func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, func (st *StatusStages) fetchStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return nil, fmt.Errorf("reading status: %w", err) - } - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, ms.Status) { - ec.PrintlnUnnecessary(ms.Report) - return nil, nil - } - st.updateMsgChan = make(chan 
UpdateMessage) - id, err := st.updateTopic.AddMessageListener(ctx, st.topicListener) - defer st.updateTopic.RemoveListener(ctx, id) - for { - select { - case msg := <-st.updateMsgChan: - ec.PrintlnUnnecessary(msg.Message) - status.SetProgress(msg.CompletionPercentage) - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return nil, fmt.Errorf("reading status: %w", err) - } - ec.PrintlnUnnecessary(ms.Report) - return nil, nil - } + return nil, nil + /* + ms, err := readMigrationStatus(ctx, st.statusMap) + if err != nil { + return nil, fmt.Errorf("reading status: %w", err) } - } + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, ms.Status) { + ec.PrintlnUnnecessary(ms.Report) + return nil, nil + } + // st.updateMsgChan = make(chan UpdateMessage) + id, err := st.updateTopic.AddMessageListener(ctx, st.topicListener) + defer st.updateTopic.RemoveListener(ctx, id) + for { + select { + //case msg := <-st.updateMsgChan: + // ec.PrintlnUnnecessary(msg.Message) + // status.SetProgress(msg.CompletionPercentage) + // if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { + // ms, err := readMigrationStatus(ctx, st.statusMap) + // if err != nil { + // return nil, fmt.Errorf("reading status: %w", err) + } + // ec.PrintlnUnnecessary(ms.Report) + // return nil, nil + // } + // } + }*/ } } func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) { - var u UpdateMessage - v, ok := event.Value.(serialization.JSON) - if !ok { - st.logger.Warn(fmt.Sprintf("update message type is unexpected")) - return - } - err := json.Unmarshal(v, &u) - if err != nil { - st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) - } - st.updateMsgChan <- u + // var u UpdateMessage + // v, ok := event.Value.(serialization.JSON) + // if !ok { + // st.logger.Warn(fmt.Sprintf("update message type is unexpected")) + // return + // } + // err := json.Unmarshal(v, &u) + // if err != nil { + // st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) + // } + // st.updateMsgChan <- u } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 0e9317012..68c462bde 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -3,120 +3,6 @@ package migration_test import ( - "context" - "encoding/json" - "sync" - "testing" - "time" - _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" - "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" - . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" - "github.com/hazelcast/hazelcast-commandline-client/internal/it" - "github.com/hazelcast/hazelcast-go-client/serialization" - "github.com/stretchr/testify/require" ) - -func TestStatus(t *testing.T) { - testCases := []struct { - name string - f func(t *testing.T) - }{ - {name: "status", f: statusTest}, - {name: "noMigrationsStatus", f: noMigrationsStatusTest}, - } - for _, tc := range testCases { - t.Run(tc.name, tc.f) - } -} - -func noMigrationsStatusTest(t *testing.T) { - tcx := it.TestContext{T: t} - ctx := context.Background() - tcx.Tester(func(tcx it.TestContext) { - var wg sync.WaitGroup - wg.Add(1) - go tcx.WithReset(func() { - defer wg.Done() - tcx.CLC().Execute(ctx, "status") - }) - wg.Wait() - tcx.AssertStdoutContains("there are no migrations are in progress on migration cluster") - }) -} - -func statusTest(t *testing.T) { - tcx := it.TestContext{T: t} - ctx := context.Background() - tcx.Tester(func(tcx it.TestContext) { - mID := preStatusRunner(t, tcx, ctx) - var wg sync.WaitGroup - wg.Add(1) - go tcx.WithReset(func() { - defer wg.Done() - Must(tcx.CLC().Execute(ctx, "status")) - }) - time.Sleep(1 * time.Second) // give time to status command to register its topic listener - statusRunner(t, mID, tcx, ctx) - wg.Wait() - tcx.AssertStdoutContains(` -Hazelcast Data Migration Tool v5.3.0 -(c) 2023 Hazelcast, Inc. - - OK [1/2] Connected to the migration cluster. -first message -Completion Percentage: 60.000000 -last message -Completion Percentage: 100.000000 -status report - OK [2/2] Fetched migration status. - -OK`) - }) -} - -func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { - // create a migration in the __datamigrations_in_progress list - mID := migration.MakeMigrationID() - l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) - m := MustValue(json.Marshal(migration.MigrationInProgress{ - MigrationID: mID, - })) - ok := MustValue(l.Add(ctx, serialization.JSON(m))) - require.Equal(t, true, ok) - // create a record in the status map - statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(mID))) - st := MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusInProgress, - Report: "status report", - CompletionPercentage: 60, - })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) - return mID -} - -func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { - // publish the first message in the update topic - updateTopic := MustValue(tcx.Client.GetTopic(ctx, migration.MakeUpdateTopicName(migrationID))) - msg := MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusInProgress, Message: "first message", CompletionPercentage: 60})) - Must(updateTopic.Publish(ctx, serialization.JSON(msg))) - // create a terminal record in status map - statusMap := MustValue(tcx.Client.GetMap(ctx, migration.MakeStatusMapName(migrationID))) - st := MustValue(json.Marshal(migration.MigrationStatus{ - Status: migration.StatusComplete, - Report: "status report", - CompletionPercentage: 100, - })) - Must(statusMap.Set(ctx, migration.StatusMapEntryName, serialization.JSON(st))) - // publish the second message in the update topic - msg = MustValue(json.Marshal(migration.UpdateMessage{Status: migration.StatusComplete, Message: "last message", CompletionPercentage: 100})) - Must(updateTopic.Publish(ctx, serialization.JSON(msg))) - // remove the migration from the 
__datamigrations_in_progress list - l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) - m := MustValue(json.Marshal(migration.MigrationInProgress{ - MigrationID: migrationID, - })) - ok := MustValue(l.Remove(ctx, serialization.JSON(m))) - require.Equal(t, true, ok) -} diff --git a/base/commands/migration/testdata/start/migration_success_completed.json b/base/commands/migration/testdata/start/migration_success_completed.json new file mode 100644 index 000000000..41d0d8ff3 --- /dev/null +++ b/base/commands/migration/testdata/start/migration_success_completed.json @@ -0,0 +1,69 @@ +{ + "id": "e6e928d3-63af-4e72-8c42-0bfcf0ab6cf7", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "type": "MIGRATION", + "completionPercentage": 12.123, + "migrations": [ + { + "name": "imap5", + "type": "IMAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "entriesMigrated": 1000, + "totalEntries": 1000, + "completionPercentage": 100 + }, + { + "name": "rmap4", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "rmap3", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "rmap2", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "imap1", + "type": "IMAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "imap12", + "type": "IMAP", + "status": "COMPLETED", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + } + ], + "logs": ["some user friendly log message", "another user friendly log message"], + "errors": [] +} \ No newline at end of file diff --git a/base/commands/migration/testdata/start/migration_success_failure.json b/base/commands/migration/testdata/start/migration_success_failure.json new file mode 100644 index 000000000..30b14cbfd --- /dev/null +++ b/base/commands/migration/testdata/start/migration_success_failure.json @@ -0,0 +1,70 @@ +{ + "id": "e6e928d3-63af-4e72-8c42-0bfcf0ab6cf7", + "status": "FAILED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "type": "MIGRATION", + "completionPercentage": 12.123, + "migrations": [ + { + "name": "imap5", + "type": "IMAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "entriesMigrated": 1000, + "totalEntries": 1000, + "completionPercentage": 100 + }, + { + "name": "rmap4", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "rmap3", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + 
"totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "rmap2", + "type": "REPLICATED_MAP", + "status": "COMPLETED", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "imap1", + "type": "IMAP", + "status": "COMPLETED", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100 + }, + { + "name": "imap12", + "type": "IMAP", + "status": "FAILED", + "entriesMigrated": 10000, + "totalEntries": 10000, + "completionPercentage": 100, + "error": "some error" + } + ], + "logs": ["some user friendly log message", "another user friendly log message"], + "errors": ["some error"] +} \ No newline at end of file diff --git a/base/commands/migration/testdata/start/migration_success_initial.json b/base/commands/migration/testdata/start/migration_success_initial.json new file mode 100644 index 000000000..5f90ba748 --- /dev/null +++ b/base/commands/migration/testdata/start/migration_success_initial.json @@ -0,0 +1,70 @@ +{ + "id": "e6e928d3-63af-4e72-8c42-0bfcf0ab6cf7", + "status": "IN_PROGRESS", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "type": "MIGRATION", + "completionPercentage": 12.123, + "migrations": [ + { + "name": "imap5", + "type": "IMAP", + "status": "IN_PROGRESS", + "startTimestamp": "2023-01-01T00:00:00Z", + "entriesMigrated": 121, + "totalEntries": 1000, + "completionPercentage": 12.1 + }, + { + "name": "rmap4", + "type": "REPLICATED_MAP", + "status": "IN_PROGRESS", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 1212, + "totalEntries": 10000, + "completionPercentage": 12.12 + }, + { + "name": "rmap3", + "type": "REPLICATED_MAP", + "status": "IN_PROGRESS", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 1212, + "totalEntries": 10000, + "completionPercentage": 12.12 + }, + { + "name": "rmap2", + "type": "REPLICATED_MAP", + "status": "IN_PROGRESS", + "startTimestamp": "2023-09-01T12:11:48+0200", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 1212, + "totalEntries": 10000, + "completionPercentage": 12.12 + }, + { + "name": "imap1", + "type": "IMAP", + "status": "IN_PROGRESS", + "startTimestamp": "2023-01-01T00:00:00Z", + "finishTimestamp": "2023-01-01T00:01:00Z", + "entriesMigrated": 1212, + "totalEntries": 10000, + "completionPercentage": 12.12, + "error": "some error" + }, + { + "name": "imap12", + "type": "IMAP", + "status": "NOT_STARTED", + "entriesMigrated": 0, + "totalEntries": 10000, + "completionPercentage": 0 + } + ], + "logs": ["some user friendly log message", "another user friendly log message"], + "errors": [] +} \ No newline at end of file diff --git a/base/commands/migration/utils.go b/base/commands/migration/utils.go index e3a899d25..5af0aa0ed 100644 --- a/base/commands/migration/utils.go +++ b/base/commands/migration/utils.go @@ -125,10 +125,6 @@ func MakeMigrationID() string { return types.NewUUID().String() } -func MakeStatusMapName(migrationID string) string { - return StatusMapPrefix + migrationID -} - func MakeUpdateTopicName(migrationID string) string { return UpdateTopicPrefix + migrationID } From f48638557afb2187d1bc2661f0949d00bc928786 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 21 Sep 2023 16:32:43 +0300 
Subject: [PATCH 27/53] fix status command --- base/commands/migration/common.go | 2 +- base/commands/migration/migration_stages.go | 3 +- base/commands/migration/migration_start.go | 1 - base/commands/migration/migration_status.go | 14 ++- .../migration/start_stages_it_test.go | 18 ++-- base/commands/migration/status_stages.go | 68 +------------- .../migration/status_stages_it_test.go | 94 +++++++++++++++++++ 7 files changed, 124 insertions(+), 76 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index 2c564a301..ad7417e53 100644 --- a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -30,7 +30,7 @@ type MigrationStatusRow struct { Name string `json:"name"` Type string `json:"type"` Status Status `json:"status"` - CompletionPercentage float32 `json:"completion_percentage"` + CompletionPercentage float32 `json:"completionPercentage"` Error string `json:"error"` } diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index ee7633bfe..a7bea01aa 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "strings" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" errors2 "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -63,7 +64,7 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo case StatusComplete: return nil, nil case StatusFailed: - return nil, errors.New(generalStatus.Errors[0]) //TODO + return nil, errors.New(strings.Join(generalStatus.Errors, "\n")) case StatusCanceled, StatusCanceling: return nil, errors2.ErrUserCancelled } diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index b0e22f256..9bc0cb33c 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -60,7 +60,6 @@ Selected data structures in the source cluster will be migrated to the target cl if _, err := stage.Execute(ctx, ec, any(nil), mp); err != nil { return err } - ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary("OK Migration completed successfully.") return nil diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 88444a89d..8b8627b00 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -18,6 +18,7 @@ func (s StatusCmd) Init(cc plug.InitContext) error { cc.SetCommandUsage("status") cc.SetCommandGroup("migration") help := "Get status of the data migration in progress" + cc.AddStringFlag(flagOutputDir, "o", "", false, "output directory for the migration report, if not given current directory is used") cc.SetCommandHelp(help, help) return nil } @@ -25,9 +26,18 @@ func (s StatusCmd) Init(cc plug.InitContext) error { func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary(banner) - sts := NewStatusStages(ec.Logger()) + sts := NewStatusStages() sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) - if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { + mID, err := stage.Execute(ctx, ec, any(nil), sp) + if err != nil { + return err + } + mStages, err := migrationStages(ctx, ec, mID.(string), ec.Props().GetString(flagOutputDir), sts.statusMap) + if err != nil { + return err + } + mp := stage.NewFixedProvider(mStages...) 
+ if _, err := stage.Execute(ctx, ec, any(nil), mp); err != nil { return err } ec.PrintlnUnnecessary("") diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 4514dddfa..0120de071 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -73,28 +73,32 @@ func successfulRunner(ctx context.Context, tcx it.TestContext, migrationID strin MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) - MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) b = MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) - MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) wg.Done() } func failureRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup) { - mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) - MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) + createMapping(ctx, tcx) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) - MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) b = MustValue(os.ReadFile("testdata/start/migration_success_failure.json")) - MustValue(statusMap.Put(ctx, migrationID, serialization.JSON(b))) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) wg.Done() } +func createMapping(ctx context.Context, tcx it.TestContext) { + mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) + MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) +} + func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { q := MustValue(tcx.Client.GetQueue(ctx, migration.StartQueueName)) var b migration.ConfigBundle for { - v := MustValue(q.PollWithTimeout(ctx, 2*time.Second)) + v := MustValue(q.PollWithTimeout(ctx, time.Second)) if v != nil { Must(json.Unmarshal(v.(serialization.JSON), &b)) c <- b.MigrationID diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index a4f1a41fa..e6d87fc5c 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -8,24 +8,19 @@ import ( "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" - "github.com/hazelcast/hazelcast-commandline-client/internal/log" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" ) type StatusStages struct { - migrationID string ci *hazelcast.ClientInternal migrationsInProgressList *hazelcast.List statusMap *hazelcast.Map - updateTopic *hazelcast.Topic - // updateMsgChan chan UpdateMessage - logger log.Logger } -func NewStatusStages(logger log.Logger) *StatusStages { - return &StatusStages{logger: logger} +func NewStatusStages() *StatusStages { + return &StatusStages{} } func (st *StatusStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage[any] { @@ -36,12 +31,6 @@ func (st *StatusStages) 
Build(ctx context.Context, ec plug.ExecContext) []stage. FailureMsg: "Could not connect to the migration cluster", Func: st.connectStage(ec), }, - { - ProgressMsg: "Fetching migration status", - SuccessMsg: "Fetched migration status", - FailureMsg: "Could not fetch migration status", - Func: st.fetchStage(ec), - }, } } @@ -73,59 +62,10 @@ func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, if err != nil { return nil, fmt.Errorf("parsing migration in progress: %w", err) } - st.migrationID = mip.MigrationID - // st.statusMap, err = st.ci.Client().GetMap(ctx, MakeStatusMapName(st.migrationID)) + st.statusMap, err = st.ci.Client().GetMap(ctx, StatusMapName) if err != nil { return nil, err } - st.updateTopic, err = st.ci.Client().GetTopic(ctx, MakeUpdateTopicName(st.migrationID)) - return nil, err - } -} - -func (st *StatusStages) fetchStage(ec plug.ExecContext) func(context.Context, stage.Statuser[any]) (any, error) { - return func(ctx context.Context, status stage.Statuser[any]) (any, error) { - return nil, nil - /* - ms, err := readMigrationStatus(ctx, st.statusMap) - if err != nil { - return nil, fmt.Errorf("reading status: %w", err) - } - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, ms.Status) { - ec.PrintlnUnnecessary(ms.Report) - return nil, nil - } - // st.updateMsgChan = make(chan UpdateMessage) - id, err := st.updateTopic.AddMessageListener(ctx, st.topicListener) - defer st.updateTopic.RemoveListener(ctx, id) - for { - select { - //case msg := <-st.updateMsgChan: - // ec.PrintlnUnnecessary(msg.Message) - // status.SetProgress(msg.CompletionPercentage) - // if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, msg.Status) { - // ms, err := readMigrationStatus(ctx, st.statusMap) - // if err != nil { - // return nil, fmt.Errorf("reading status: %w", err) - } - // ec.PrintlnUnnecessary(ms.Report) - // return nil, nil - // } - // } - }*/ + return mip.MigrationID, err } } - -func (st *StatusStages) topicListener(event *hazelcast.MessagePublished) { - // var u UpdateMessage - // v, ok := event.Value.(serialization.JSON) - // if !ok { - // st.logger.Warn(fmt.Sprintf("update message type is unexpected")) - // return - // } - // err := json.Unmarshal(v, &u) - // if err != nil { - // st.logger.Warn(fmt.Sprintf("receiving update from migration cluster: %s", err.Error())) - // } - // st.updateMsgChan <- u -} diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 68c462bde..14db8b36b 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -3,6 +3,100 @@ package migration_test import ( + "context" + "encoding/json" + "fmt" + "os" + "sync" + "testing" + "time" + _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" + "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" + "github.com/hazelcast/hazelcast-commandline-client/internal/it" + "github.com/hazelcast/hazelcast-go-client/serialization" + "github.com/stretchr/testify/require" ) + +func TestStatus(t *testing.T) { + testCases := []struct { + name string + f func(t *testing.T) + }{ + {name: "status", f: statusTest}, + {name: "noMigrationsStatus", f: noMigrationsStatusTest}, + } + for _, tc := range testCases { + t.Run(tc.name, tc.f) + } +} + +func noMigrationsStatusTest(t *testing.T) { + tcx := it.TestContext{T: t} + ctx := context.Background() + tcx.Tester(func(tcx it.TestContext) { + var wg sync.WaitGroup + wg.Add(1) + go tcx.WithReset(func() { + defer wg.Done() + tcx.CLC().Execute(ctx, "status") + }) + wg.Wait() + tcx.AssertStdoutContains("there are no migrations are in progress on migration cluster") + }) +} + +func statusTest(t *testing.T) { + tcx := it.TestContext{T: t} + ctx := context.Background() + tcx.Tester(func(tcx it.TestContext) { + mID := preStatusRunner(t, tcx, ctx) + var wg sync.WaitGroup + wg.Add(1) + go tcx.WithReset(func() { + defer wg.Done() + Must(tcx.CLC().Execute(ctx, "status")) + }) + time.Sleep(1 * time.Second) + statusRunner(t, mID, tcx, ctx) + wg.Wait() + tcx.AssertStdoutContains("OK Connected to the migration cluster.") + tcx.WithReset(func() { + f := fmt.Sprintf("migration_report_%s.txt", mID) + require.Equal(t, true, fileExists(f)) + Must(os.Remove(f)) + }) + }) +} + +func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { + createMapping(ctx, tcx) + // create a migration in the __datamigrations_in_progress list + mID := migration.MakeMigrationID() + l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) + m := MustValue(json.Marshal(migration.MigrationInProgress{ + MigrationID: mID, + })) + ok := MustValue(l.Add(ctx, serialization.JSON(m))) + require.Equal(t, true, ok) + // create a record in the status map + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) + b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) + Must(statusMap.Set(ctx, mID, serialization.JSON(b))) + return mID +} + +func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { + // create a terminal record in status map + statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) + b := MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) + // remove the migration from the __datamigrations_in_progress list + l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) + m := MustValue(json.Marshal(migration.MigrationInProgress{ + MigrationID: migrationID, + })) + require.Equal(t, true, MustValue(l.Remove(ctx, serialization.JSON(m)))) +} From 547cf14ee2f42cabf7205ccf1c1bd4d5e271adb1 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 21 Sep 2023 17:32:27 +0300 Subject: [PATCH 28/53] fix status command --- base/commands/migration/common.go | 96 +++++++++++++++---- base/commands/migration/migration_stages.go | 38 +++++--- base/commands/migration/start_stages.go | 4 - .../migration/status_stages_it_test.go | 4 +- .../start/migration_success_completed.json | 3 +- .../start/migration_success_failure.json | 3 +- 6 files changed, 110 insertions(+), 38 deletions(-) diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go index ad7417e53..60dc61a90 100644 --- 
a/base/commands/migration/common.go +++ b/base/commands/migration/common.go @@ -5,8 +5,8 @@ package migration import ( "context" "encoding/json" - "errors" "fmt" + "strings" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" @@ -34,27 +34,87 @@ type MigrationStatusRow struct { Error string `json:"error"` } -var ErrInvalidStatus = errors.New("invalid status value") +func readMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.status') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return "", err + } + if err != nil { + return "", err + } + it, err := res.Iterator() + if err != nil { + return "", err + } + if it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + return m, nil + } + return "", nil +} -func readMigrationStatus(ctx context.Context, statusMap *hazelcast.Map, migrationID string) (*MigrationStatusTotal, error) { - v, err := statusMap.Get(ctx, migrationID) //TODO: read only status with sql +func readMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.report') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) if err != nil { - return nil, fmt.Errorf("getting status: %w", err) + return "", err } - if v == nil { - return nil, ErrInvalidStatus + if err != nil { + return "", err } - var b []byte - if vv, ok := v.(string); ok { - b = []byte(vv) - } else if vv, ok := v.(serialization.JSON); ok { - b = vv - } else { - return nil, ErrInvalidStatus + it, err := res.Iterator() + if err != nil { + return "", err + } + if it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + return m, nil + } + return "", nil +} + +func readMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.errors') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return "", err + } + if err != nil { + return "", err + } + it, err := res.Iterator() + if err != nil { + return "", err } - var ms MigrationStatusTotal - if err := json.Unmarshal(b, &ms); err != nil { - return nil, fmt.Errorf("parsing migration status: %w", err) + var errs []string + for it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + errs = append(errs, m) } - return &ms, nil + return strings.Join(errs, "\n"), nil } diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index a7bea01aa..f61a5cdce 100644 --- a/base/commands/migration/migration_stages.go +++ 
b/base/commands/migration/migration_stages.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "os" - "strings" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" errors2 "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -18,13 +17,17 @@ import ( "golang.org/x/exp/slices" ) +var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ + "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", + context.DeadlineExceeded) + func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, reportOutputDir string, statusMap *hazelcast.Map) ([]stage.Stage[any], error) { ci, err := ec.ClientInternal(ctx) if err != nil { return nil, err } if err = waitForMigrationToBeCreated(ctx, ci, migrationID); err != nil { - return nil, err + return nil, fmt.Errorf("waiting migration to be created: %w", err) } var stages []stage.Stage[any] dss, err := dataStructuresToBeMigrated(ctx, ec, migrationID) @@ -40,13 +43,16 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo Func: func(ct context.Context, status stage.Statuser[any]) (any, error) { for { if ctx.Err() != nil { - return nil, err + if errors.Is(err, context.DeadlineExceeded) { + return nil, timeoutErr + } + return nil, fmt.Errorf("migration failed: %w", err) } - generalStatus, err := readMigrationStatus(ctx, statusMap, migrationID) + generalStatus, err := readMigrationStatus(ctx, ci, migrationID) if err != nil { - return nil, err + return nil, fmt.Errorf("reading migration status: %w", err) } - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, generalStatus.Status) { + if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, Status(generalStatus)) { err = saveMemberLogs(ctx, ec, ci, migrationID) if err != nil { return nil, err @@ -55,16 +61,20 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo if reportOutputDir == "" { name = fmt.Sprintf("migration_report_%s.txt", migrationID) } - err = saveReportToFile(name, generalStatus.Report) + err = saveReportToFile(ctx, ci, migrationID, name) if err != nil { - return nil, err + return nil, fmt.Errorf("saving report to file: %w", err) } } - switch generalStatus.Status { + switch Status(generalStatus) { case StatusComplete: return nil, nil case StatusFailed: - return nil, errors.New(strings.Join(generalStatus.Errors, "\n")) + errs, err := readMigrationErrors(ctx, ci, migrationID) + if err != nil { + return nil, fmt.Errorf("saving report to file: %w", err) + } + return nil, errors.New(errs) case StatusCanceled, StatusCanceling: return nil, errors2.ErrUserCancelled } @@ -122,7 +132,7 @@ func dataStructuresToBeMigrated(ctx context.Context, ec plug.ExecContext, migrat if err != nil { return nil, err } - if it.HasNext() { + if it.HasNext() { // single iteration is enough that we are reading single result for a single migration row, err := it.Next() if err != nil { return nil, err @@ -159,7 +169,11 @@ func saveMemberLogs(ctx context.Context, ec plug.ExecContext, ci *hazelcast.Clie return nil } -func saveReportToFile(fileName, report string) error { +func saveReportToFile(ctx context.Context, ci *hazelcast.ClientInternal, migrationID, fileName string) error { + report, err := readMigrationReport(ctx, ci, migrationID) + if err != nil { + return err + } f, err := os.Create(fmt.Sprintf(fileName)) if err != nil { return err diff --git a/base/commands/migration/start_stages.go 
b/base/commands/migration/start_stages.go index 13133edde..5bf5b6f08 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -23,10 +23,6 @@ type StartStages struct { logger log.Logger } -var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ - "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", - context.DeadlineExceeded) - func NewStartStages(logger log.Logger, migrationID, configDir string) *StartStages { if migrationID == "" { panic("migrationID is required") diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 14db8b36b..141670c3a 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -55,6 +55,7 @@ func statusTest(t *testing.T) { mID := preStatusRunner(t, tcx, ctx) var wg sync.WaitGroup wg.Add(1) + time.Sleep(1 * time.Second) go tcx.WithReset(func() { defer wg.Done() Must(tcx.CLC().Execute(ctx, "status")) @@ -79,8 +80,7 @@ func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) stri m := MustValue(json.Marshal(migration.MigrationInProgress{ MigrationID: mID, })) - ok := MustValue(l.Add(ctx, serialization.JSON(m))) - require.Equal(t, true, ok) + require.Equal(t, true, MustValue(l.Add(ctx, serialization.JSON(m)))) // create a record in the status map statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) diff --git a/base/commands/migration/testdata/start/migration_success_completed.json b/base/commands/migration/testdata/start/migration_success_completed.json index 41d0d8ff3..132eb7460 100644 --- a/base/commands/migration/testdata/start/migration_success_completed.json +++ b/base/commands/migration/testdata/start/migration_success_completed.json @@ -65,5 +65,6 @@ } ], "logs": ["some user friendly log message", "another user friendly log message"], - "errors": [] + "errors": [], + "report" : "completed migration report" } \ No newline at end of file diff --git a/base/commands/migration/testdata/start/migration_success_failure.json b/base/commands/migration/testdata/start/migration_success_failure.json index 30b14cbfd..e4ae91f55 100644 --- a/base/commands/migration/testdata/start/migration_success_failure.json +++ b/base/commands/migration/testdata/start/migration_success_failure.json @@ -66,5 +66,6 @@ } ], "logs": ["some user friendly log message", "another user friendly log message"], - "errors": ["some error"] + "errors": ["some error"], + "report": "failed migration report" } \ No newline at end of file From 9c493b9c5c2cb65c9ae31950401e8a0ed203a9d1 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 21 Sep 2023 17:51:40 +0300 Subject: [PATCH 29/53] refactor --- base/commands/migration/common.go | 120 -------------------- base/commands/migration/migration_stages.go | 112 +++++++++++++++++- 2 files changed, 110 insertions(+), 122 deletions(-) delete mode 100644 base/commands/migration/common.go diff --git a/base/commands/migration/common.go b/base/commands/migration/common.go deleted file mode 100644 index 60dc61a90..000000000 --- a/base/commands/migration/common.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build std || migration - -package migration - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/hazelcast/hazelcast-go-client" - 
"github.com/hazelcast/hazelcast-go-client/serialization" -) - -type MigrationStatusTotal struct { - Status Status `json:"status"` - Logs []string `json:"logs"` - Errors []string `json:"errors"` - Report string `json:"report"` - CompletionPercentage float32 `json:"completionPercentage"` - Migrations []MigrationStatusRow `json:"migrations"` -} - -type DataStructureInfo struct { - Name string - Type string -} - -type MigrationStatusRow struct { - Name string `json:"name"` - Type string `json:"type"` - Status Status `json:"status"` - CompletionPercentage float32 `json:"completionPercentage"` - Error string `json:"error"` -} - -func readMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { - q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.status') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) - res, err := ci.Client().SQL().Execute(ctx, q) - if err != nil { - return "", err - } - if err != nil { - return "", err - } - it, err := res.Iterator() - if err != nil { - return "", err - } - if it.HasNext() { // single iteration is enough that we are reading single result for a single migration - row, err := it.Next() - if err != nil { - return "", err - } - r, err := row.Get(0) - var m string - if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { - return "", err - } - return m, nil - } - return "", nil -} - -func readMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { - q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.report') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) - res, err := ci.Client().SQL().Execute(ctx, q) - if err != nil { - return "", err - } - if err != nil { - return "", err - } - it, err := res.Iterator() - if err != nil { - return "", err - } - if it.HasNext() { // single iteration is enough that we are reading single result for a single migration - row, err := it.Next() - if err != nil { - return "", err - } - r, err := row.Get(0) - var m string - if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { - return "", err - } - return m, nil - } - return "", nil -} - -func readMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { - q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.errors') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) - res, err := ci.Client().SQL().Execute(ctx, q) - if err != nil { - return "", err - } - if err != nil { - return "", err - } - it, err := res.Iterator() - if err != nil { - return "", err - } - var errs []string - for it.HasNext() { // single iteration is enough that we are reading single result for a single migration - row, err := it.Next() - if err != nil { - return "", err - } - r, err := row.Get(0) - var m string - if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { - return "", err - } - errs = append(errs, m) - } - return strings.Join(errs, "\n"), nil -} diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index f61a5cdce..7196bbf1f 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "strings" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" errors2 "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -96,7 +97,7 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo if err != nil { return nil, err } - var m MigrationStatusRow + var m 
DSMigrationStatus if err = json.Unmarshal(rowStr.(serialization.JSON), &m); err != nil { return nil, err } @@ -138,7 +139,7 @@ func dataStructuresToBeMigrated(ctx context.Context, ec plug.ExecContext, migrat return nil, err } r, err := row.Get(0) - var status MigrationStatusTotal + var status OverallMigrationStatus if err = json.Unmarshal(r.(serialization.JSON), &status); err != nil { return nil, err } @@ -197,3 +198,110 @@ func waitForMigrationToBeCreated(ctx context.Context, ci *hazelcast.ClientIntern } } } + +type OverallMigrationStatus struct { + Status Status `json:"status"` + Logs []string `json:"logs"` + Errors []string `json:"errors"` + Report string `json:"report"` + CompletionPercentage float32 `json:"completionPercentage"` + Migrations []DSMigrationStatus `json:"migrations"` +} + +type DataStructureInfo struct { + Name string + Type string +} + +type DSMigrationStatus struct { + Name string `json:"name"` + Type string `json:"type"` + Status Status `json:"status"` + CompletionPercentage float32 `json:"completionPercentage"` + Error string `json:"error"` +} + +func readMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.status') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return "", err + } + if err != nil { + return "", err + } + it, err := res.Iterator() + if err != nil { + return "", err + } + if it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + return m, nil + } + return "", nil +} + +func readMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.report') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return "", err + } + if err != nil { + return "", err + } + it, err := res.Iterator() + if err != nil { + return "", err + } + if it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + return m, nil + } + return "", nil +} + +func readMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { + q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.errors') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) + res, err := ci.Client().SQL().Execute(ctx, q) + if err != nil { + return "", err + } + if err != nil { + return "", err + } + it, err := res.Iterator() + if err != nil { + return "", err + } + var errs []string + for it.HasNext() { // single iteration is enough that we are reading single result for a single migration + row, err := it.Next() + if err != nil { + return "", err + } + r, err := row.Get(0) + var m string + if err = json.Unmarshal(r.(serialization.JSON), &m); err != nil { + return "", err + } + errs = append(errs, m) + } + return strings.Join(errs, "\n"), nil +} From 1b95eb83108aff0bdfa7ac725bb79df7e767524c Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 09:54:25 +0300 Subject: [PATCH 30/53] 
add timeout --- base/commands/migration/migration_stages.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index 7196bbf1f..a1dcec261 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "strings" + "time" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" errors2 "github.com/hazelcast/hazelcast-commandline-client/errors" @@ -27,7 +28,9 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo if err != nil { return nil, err } - if err = waitForMigrationToBeCreated(ctx, ci, migrationID); err != nil { + childCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if err = waitForMigrationToBeCreated(childCtx, ci, migrationID); err != nil { return nil, fmt.Errorf("waiting migration to be created: %w", err) } var stages []stage.Stage[any] @@ -49,7 +52,7 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo } return nil, fmt.Errorf("migration failed: %w", err) } - generalStatus, err := readMigrationStatus(ctx, ci, migrationID) + generalStatus, err := fetchMigrationStatus(ctx, ci, migrationID) if err != nil { return nil, fmt.Errorf("reading migration status: %w", err) } @@ -71,9 +74,9 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo case StatusComplete: return nil, nil case StatusFailed: - errs, err := readMigrationErrors(ctx, ci, migrationID) + errs, err := fetchMigrationErrors(ctx, ci, migrationID) if err != nil { - return nil, fmt.Errorf("saving report to file: %w", err) + return nil, fmt.Errorf("fetching migration errors: %w", err) } return nil, errors.New(errs) case StatusCanceled, StatusCanceling: @@ -171,7 +174,7 @@ func saveMemberLogs(ctx context.Context, ec plug.ExecContext, ci *hazelcast.Clie } func saveReportToFile(ctx context.Context, ci *hazelcast.ClientInternal, migrationID, fileName string) error { - report, err := readMigrationReport(ctx, ci, migrationID) + report, err := fetchMigrationReport(ctx, ci, migrationID) if err != nil { return err } @@ -221,7 +224,7 @@ type DSMigrationStatus struct { Error string `json:"error"` } -func readMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { +func fetchMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.status') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) res, err := ci.Client().SQL().Execute(ctx, q) if err != nil { @@ -249,7 +252,7 @@ func readMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, migr return "", nil } -func readMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { +func fetchMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.report') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) res, err := ci.Client().SQL().Execute(ctx, q) if err != nil { @@ -277,7 +280,7 @@ func readMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, migr return "", nil } -func readMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) (string, error) { +func fetchMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, 
migrationID string) (string, error) { q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.errors') FROM %s WHERE __key='%s'`, StatusMapName, migrationID) res, err := ci.Client().SQL().Execute(ctx, q) if err != nil { From 752972f8f6ea7b9d675e4cfc2afd6cc345e2ef89 Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 10:05:15 +0300 Subject: [PATCH 31/53] refactor start test code --- .../migration/start_stages_it_test.go | 59 +++++++++---------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 0120de071..5e02c2fbf 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -22,26 +22,35 @@ import ( func TestMigrationStages(t *testing.T) { testCases := []struct { - name string - f func(t *testing.T) + name string + statusMapStateFiles []string + expectedOutput string }{ - {name: "start_Successful", f: startTest_Successful}, - {name: "start_Failure", f: startTest_Failure}, + { + name: "successful", + statusMapStateFiles: []string{ + "testdata/start/migration_success_initial.json", + "testdata/start/migration_success_completed.json", + }, + expectedOutput: "OK Migration completed successfully.", + }, + { + name: "failure", + statusMapStateFiles: []string{ + "testdata/start/migration_success_initial.json", + "testdata/start/migration_success_failure.json", + }, + expectedOutput: "ERROR Failed migrating IMAP: imap5 ...: some error", + }, } for _, tc := range testCases { - t.Run(tc.name, tc.f) + t.Run(tc.name, func(t *testing.T) { + startMigrationTest(t, tc.expectedOutput, tc.statusMapStateFiles) + }) } } -func startTest_Successful(t *testing.T) { - startTest(t, successfulRunner, "OK Migration completed successfully.") -} - -func startTest_Failure(t *testing.T) { - startTest(t, failureRunner, "ERROR Failed migrating IMAP: imap5 ...: some error") -} - -func startTest(t *testing.T, runnerFunc func(context.Context, it.TestContext, string, *sync.WaitGroup), expectedOutput string) { +func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles []string) { tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { @@ -57,7 +66,7 @@ func startTest(t *testing.T, runnerFunc func(context.Context, it.TestContext, st mID := <-c wg.Done() wg.Add(1) - go runnerFunc(ctx, tcx, mID, &wg) + go migrationRunner(ctx, tcx, mID, &wg, statusMapStateFiles) wg.Wait() tcx.AssertStdoutContains(expectedOutput) tcx.WithReset(func() { @@ -68,24 +77,14 @@ func startTest(t *testing.T, runnerFunc func(context.Context, it.TestContext, st }) } -func successfulRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup) { +func migrationRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup, statusMapStateFiles []string) { mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) - b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) - Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - b = MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) - Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - wg.Done() -} - -func failureRunner(ctx context.Context, tcx it.TestContext, migrationID 
string, wg *sync.WaitGroup) { - createMapping(ctx, tcx) - statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) - b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) - Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - b = MustValue(os.ReadFile("testdata/start/migration_success_failure.json")) - Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) + for _, f := range statusMapStateFiles { + b := MustValue(os.ReadFile(f)) + Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) + } wg.Done() } From 2f224bc17006f3f167ada69c66d65459b7ccf1dd Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 10:29:22 +0300 Subject: [PATCH 32/53] refactor status test code --- base/commands/migration/status_stages_it_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 141670c3a..43880689d 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -55,11 +55,11 @@ func statusTest(t *testing.T) { mID := preStatusRunner(t, tcx, ctx) var wg sync.WaitGroup wg.Add(1) - time.Sleep(1 * time.Second) go tcx.WithReset(func() { defer wg.Done() Must(tcx.CLC().Execute(ctx, "status")) }) + // statusRunner removes __datamigrations_in_progress list, so we should give some time to command to read it first time.Sleep(1 * time.Second) statusRunner(t, mID, tcx, ctx) wg.Wait() @@ -74,14 +74,12 @@ func statusTest(t *testing.T) { func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { createMapping(ctx, tcx) - // create a migration in the __datamigrations_in_progress list mID := migration.MakeMigrationID() l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) m := MustValue(json.Marshal(migration.MigrationInProgress{ MigrationID: mID, })) require.Equal(t, true, MustValue(l.Add(ctx, serialization.JSON(m)))) - // create a record in the status map statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) Must(statusMap.Set(ctx, mID, serialization.JSON(b))) @@ -89,11 +87,9 @@ func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) stri } func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { - // create a terminal record in status map statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - // remove the migration from the __datamigrations_in_progress list l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) m := MustValue(json.Marshal(migration.MigrationInProgress{ MigrationID: migrationID, From 7b2d93e9918165dfcb6e5839e8a03a1154c9f57a Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 11:55:47 +0300 Subject: [PATCH 33/53] remove member logs in test --- base/commands/migration/migration_stages.go | 50 +++++++------------ base/commands/migration/migration_start.go | 14 +++++- base/commands/migration/migration_status.go | 14 +++++- .../migration/start_stages_it_test.go | 11 ++++ .../migration/status_stages_it_test.go | 31 +++++++++++- 5 files changed, 83 insertions(+), 37 deletions(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go 
index a1dcec261..51f102b3c 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -16,21 +16,16 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" - "golang.org/x/exp/slices" ) var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", context.DeadlineExceeded) -func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, reportOutputDir string, statusMap *hazelcast.Map) ([]stage.Stage[any], error) { - ci, err := ec.ClientInternal(ctx) - if err != nil { - return nil, err - } +func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelcast.ClientInternal, migrationID string) ([]stage.Stage[any], error) { childCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - if err = waitForMigrationToBeCreated(childCtx, ci, migrationID); err != nil { + if err := waitForMigrationToBeCreated(childCtx, ci, migrationID); err != nil { return nil, fmt.Errorf("waiting migration to be created: %w", err) } var stages []stage.Stage[any] @@ -56,20 +51,6 @@ func migrationStages(ctx context.Context, ec plug.ExecContext, migrationID, repo if err != nil { return nil, fmt.Errorf("reading migration status: %w", err) } - if slices.Contains([]Status{StatusComplete, StatusFailed, StatusCanceled}, Status(generalStatus)) { - err = saveMemberLogs(ctx, ec, ci, migrationID) - if err != nil { - return nil, err - } - var name string - if reportOutputDir == "" { - name = fmt.Sprintf("migration_report_%s.txt", migrationID) - } - err = saveReportToFile(ctx, ci, migrationID, name) - if err != nil { - return nil, fmt.Errorf("saving report to file: %w", err) - } - } switch Status(generalStatus) { case StatusComplete: return nil, nil @@ -167,7 +148,7 @@ func saveMemberLogs(ctx context.Context, ec plug.ExecContext, ci *hazelcast.Clie return err } for _, line := range logs { - ec.Logger().Debugf(fmt.Sprintf("[%s_%s] %s", migrationID, m.UUID.String(), line.(string))) + ec.Logger().Info(fmt.Sprintf("[%s_%s] %s", migrationID, m.UUID.String(), line.(string))) } } return nil @@ -230,9 +211,6 @@ func fetchMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, mig if err != nil { return "", err } - if err != nil { - return "", err - } it, err := res.Iterator() if err != nil { return "", err @@ -258,9 +236,6 @@ func fetchMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, mig if err != nil { return "", err } - if err != nil { - return "", err - } it, err := res.Iterator() if err != nil { return "", err @@ -286,9 +261,6 @@ func fetchMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, mig if err != nil { return "", err } - if err != nil { - return "", err - } it, err := res.Iterator() if err != nil { return "", err @@ -308,3 +280,19 @@ func fetchMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, mig } return strings.Join(errs, "\n"), nil } + +func finalizeMigration(ctx context.Context, ec plug.ExecContext, ci *hazelcast.ClientInternal, migrationID, reportOutputDir string) error { + err := saveMemberLogs(ctx, ec, ci, migrationID) + if err != nil { + return err + } + var name string + if reportOutputDir == "" { + name = fmt.Sprintf("migration_report_%s.txt", migrationID) + } + err = 
saveReportToFile(ctx, ci, migrationID, name) + if err != nil { + return fmt.Errorf("saving report to file: %w", err) + } + return nil +} diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 9bc0cb33c..d509fd082 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -4,6 +4,7 @@ package migration import ( "context" + "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" @@ -29,6 +30,10 @@ func (StartCmd) Init(cc plug.InitContext) error { } func (StartCmd) Exec(ctx context.Context, ec plug.ExecContext) error { + ci, err := ec.ClientInternal(ctx) + if err != nil { + return err + } ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary(`Hazelcast Data Migration Tool v5.3.0 (c) 2023 Hazelcast, Inc. @@ -52,12 +57,17 @@ Selected data structures in the source cluster will be migrated to the target cl if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { return err } - mStages, err := migrationStages(ctx, ec, mID, ec.Props().GetString(flagOutputDir), sts.statusMap) + mStages, err := createMigrationStages(ctx, ec, ci, mID) if err != nil { return err } mp := stage.NewFixedProvider(mStages...) - if _, err := stage.Execute(ctx, ec, any(nil), mp); err != nil { + _, err = stage.Execute(ctx, ec, any(nil), mp) + err2 := finalizeMigration(ctx, ec, ci, mID, ec.Props().GetString(flagOutputDir)) + if err2 != nil { + return fmt.Errorf("finalizing migration: %w", err2) + } + if err != nil { return err } ec.PrintlnUnnecessary("") diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 8b8627b00..84f6782ef 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -4,6 +4,7 @@ package migration import ( "context" + "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" "github.com/hazelcast/hazelcast-commandline-client/internal/check" @@ -24,6 +25,10 @@ func (s StatusCmd) Init(cc plug.InitContext) error { } func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { + ci, err := ec.ClientInternal(ctx) + if err != nil { + return err + } ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary(banner) sts := NewStatusStages() @@ -32,12 +37,17 @@ func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { if err != nil { return err } - mStages, err := migrationStages(ctx, ec, mID.(string), ec.Props().GetString(flagOutputDir), sts.statusMap) + mStages, err := createMigrationStages(ctx, ec, ci, mID.(string)) if err != nil { return err } mp := stage.NewFixedProvider(mStages...) 
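
Both the start and status commands now build their work as a list of stages and hand it to a provider that executes them in order. The sketch below is a deliberately simplified stand-in for that flow, showing only the shape of the idea; the real clc/ux/stage package is generic (stage.Stage[any], stage.NewFixedProvider, stage.Execute) and also drives spinners and progress output, so treat every name here as illustrative.

package main

import (
	"context"
	"errors"
	"fmt"
)

// Stage carries the user-facing messages plus the work function, mirroring
// the ProgressMsg/SuccessMsg/FailureMsg fields used in the patches above.
type Stage struct {
	ProgressMsg string
	SuccessMsg  string
	FailureMsg  string
	Func        func(ctx context.Context) error
}

// Execute runs the stages in order and stops at the first failure.
func Execute(ctx context.Context, stages []Stage) error {
	for _, s := range stages {
		fmt.Println(s.ProgressMsg)
		if err := s.Func(ctx); err != nil {
			fmt.Println("FAIL", s.FailureMsg)
			return err
		}
		fmt.Println("OK", s.SuccessMsg)
	}
	return nil
}

func main() {
	stages := []Stage{
		{"Connecting to the migration cluster", "Connected to the migration cluster", "Could not connect to the migration cluster",
			func(context.Context) error { return nil }},
		{"Starting the migration", "Started the migration", "Could not start the migration",
			func(context.Context) error { return errors.New("start queue not found") }},
	}
	if err := Execute(context.Background(), stages); err != nil {
		fmt.Println("error:", err)
	}
}
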
- if _, err := stage.Execute(ctx, ec, any(nil), mp); err != nil { + _, err = stage.Execute(ctx, ec, any(nil), mp) + err2 := finalizeMigration(ctx, ec, ci, mID.(string), ec.Props().GetString(flagOutputDir)) + if err2 != nil { + return fmt.Errorf("finalizing migration: %w", err2) + } + if err != nil { return err } ec.PrintlnUnnecessary("") diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 5e02c2fbf..dd4d5b03c 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -14,8 +14,10 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" + hz "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/stretchr/testify/require" ) @@ -54,6 +56,9 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { + ci := hz.NewClientInternal(tcx.Client) + createMemberLogs(t, ctx, ci) + defer removeMembersLogs(ctx, ci) var wg sync.WaitGroup wg.Add(1) go tcx.WithReset(func() { @@ -73,6 +78,12 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) Must(os.Remove(f)) + b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } }) }) } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 43880689d..98a98b909 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -14,8 +14,10 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" + hz "github.com/hazelcast/hazelcast-go-client" "github.com/hazelcast/hazelcast-go-client/serialization" "github.com/stretchr/testify/require" ) @@ -52,7 +54,9 @@ func statusTest(t *testing.T) { tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { - mID := preStatusRunner(t, tcx, ctx) + ci := hz.NewClientInternal(tcx.Client) + mID := preStatusRunner(t, tcx, ctx, ci) + defer removeMembersLogs(ctx, ci) var wg sync.WaitGroup wg.Add(1) go tcx.WithReset(func() { @@ -68,12 +72,19 @@ func statusTest(t *testing.T) { f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) Must(os.Remove(f)) + b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } }) }) } -func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) string { +func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context, ci *hz.ClientInternal) string { createMapping(ctx, tcx) + createMemberLogs(t, ctx, ci) mID := migration.MakeMigrationID() l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) m := MustValue(json.Marshal(migration.MigrationInProgress{ @@ -86,6 +97,22 @@ func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context) stri return mID } +func createMemberLogs(t *testing.T, ctx context.Context, ci *hz.ClientInternal) { + for _, m := range ci.OrderedMembers() { + l := MustValue(ci.Client().GetList(ctx, migration.DebugLogsListPrefix+m.UUID.String())) + require.Equal(t, true, MustValue(l.Add(ctx, "log1"))) + require.Equal(t, true, MustValue(l.Add(ctx, "log2"))) + require.Equal(t, true, MustValue(l.Add(ctx, "log3"))) + } +} + +func removeMembersLogs(ctx context.Context, ci *hz.ClientInternal) { + for _, m := range ci.OrderedMembers() { + l := MustValue(ci.Client().GetList(ctx, migration.DebugLogsListPrefix+m.UUID.String())) + Must(l.Destroy(ctx)) + } +} + func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) From 6b7119b58b87fdcc37aee2615d022752cf40fb9d Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 12:52:38 +0300 Subject: [PATCH 34/53] comment log tests --- base/commands/migration/start_stages_it_test.go | 15 ++++++++------- base/commands/migration/status_stages_it_test.go | 15 ++++++++------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index dd4d5b03c..069ae2223 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -14,7 +14,6 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" - "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . 
"github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" hz "github.com/hazelcast/hazelcast-go-client" @@ -78,12 +77,14 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) Must(os.Remove(f)) - b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) - for _, m := range ci.OrderedMembers() { - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) - } + /* + b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } + */ }) }) } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 98a98b909..7e1917238 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -14,7 +14,6 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" - "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" hz "github.com/hazelcast/hazelcast-go-client" @@ -72,12 +71,14 @@ func statusTest(t *testing.T) { f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) Must(os.Remove(f)) - b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) - for _, m := range ci.OrderedMembers() { - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) - } + /* + b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } + */ }) }) } From eb122a1e281dca47be21561788bdf53a083514d2 Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 15:22:40 +0300 Subject: [PATCH 35/53] yuce's fix for default log path in tests --- base/commands/migration/start_stages_it_test.go | 15 +++++++-------- internal/it/test_context.go | 6 ++++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 069ae2223..17d112cc2 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -14,6 +14,7 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" 
"github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" hz "github.com/hazelcast/hazelcast-go-client" @@ -77,14 +78,12 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) Must(os.Remove(f)) - /* - b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) - for _, m := range ci.OrderedMembers() { - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) - } - */ + b := MustValue(os.ReadFile(paths.ResolveLogPath("test"))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } }) }) } diff --git a/internal/it/test_context.go b/internal/it/test_context.go index 16555c22b..e54130602 100644 --- a/internal/it/test_context.go +++ b/internal/it/test_context.go @@ -169,6 +169,12 @@ func (tcx TestContext) Tester(f func(tcx TestContext)) { d, _ := filepath.Split(p) check.Must(os.MkdirAll(d, 0700)) home.WithFile(p, bytesConfig, func(_ string) { + if tcx.LogPath == "" { + tcx.LogPath = paths.ResolveLogPath("test") + } + if tcx.LogLevel == "" { + tcx.LogLevel = "info" + } tcx.main = check.MustValue(tcx.createMain()) tcx.T.Logf("created CLC main") defer func() { From 01cd2f3b3e9d1d5fc819f2fd0923275a5eaae3cf Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 22 Sep 2023 16:03:58 +0300 Subject: [PATCH 36/53] add sleep --- base/commands/migration/migration_stages.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index 51f102b3c..e03d71c42 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -95,6 +95,7 @@ func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelca return nil, errors2.ErrUserCancelled } } + time.Sleep(1 * time.Second) } }, }) @@ -123,6 +124,9 @@ func dataStructuresToBeMigrated(ctx context.Context, ec plug.ExecContext, migrat return nil, err } r, err := row.Get(0) + if err != nil { + return nil, err + } var status OverallMigrationStatus if err = json.Unmarshal(r.(serialization.JSON), &status); err != nil { return nil, err From d6f2e4b1b98a42268d4c9d0e85603ae4d5181423 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 28 Sep 2023 11:21:45 +0300 Subject: [PATCH 37/53] fix tests --- internal/it/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/it/util.go b/internal/it/util.go index 8e6d1a2fa..5827ff30a 100644 --- a/internal/it/util.go +++ b/internal/it/util.go @@ -57,7 +57,7 @@ func UniqueClusterName() string { var defaultDedicatedClusterName = UniqueClusterName() var rc *RemoteControllerClientWrapper var rcMu = &sync.RWMutex{} -var DefaultDedicatedTestCluster = NewSingletonTestCluster(defaultDedicatedClusterName, func() TestCluster { +var defaultDedicatedTestCluster = NewSingletonTestCluster(defaultDedicatedClusterName, func() 
TestCluster { port := NextPort() return rc.startNewCluster(MemberCount(), XMLConfig(defaultDedicatedClusterName, port), port) }) From df939b2ae36dfc823712aa3ef94497fd1d98184c Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 28 Sep 2023 14:09:04 +0300 Subject: [PATCH 38/53] fix tests --- base/commands/migration/start_stages_it_test.go | 17 ++++++++++------- .../commands/migration/status_stages_it_test.go | 5 +++-- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 17d112cc2..db3d55154 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -5,6 +5,7 @@ package migration_test import ( "context" "encoding/json" + "errors" "fmt" "os" "sync" @@ -26,7 +27,7 @@ func TestMigrationStages(t *testing.T) { testCases := []struct { name string statusMapStateFiles []string - expectedOutput string + expectedErr error }{ { name: "successful", @@ -34,7 +35,6 @@ func TestMigrationStages(t *testing.T) { "testdata/start/migration_success_initial.json", "testdata/start/migration_success_completed.json", }, - expectedOutput: "OK Migration completed successfully.", }, { name: "failure", @@ -42,17 +42,17 @@ func TestMigrationStages(t *testing.T) { "testdata/start/migration_success_initial.json", "testdata/start/migration_success_failure.json", }, - expectedOutput: "ERROR Failed migrating IMAP: imap5 ...: some error", + expectedErr: errors.New("Failed migrating IMAP: imap5 ...: some error"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - startMigrationTest(t, tc.expectedOutput, tc.statusMapStateFiles) + startMigrationTest(t, tc.expectedErr, tc.statusMapStateFiles) }) } } -func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles []string) { +func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []string) { tcx := it.TestContext{T: t} ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { @@ -61,9 +61,10 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles defer removeMembersLogs(ctx, ci) var wg sync.WaitGroup wg.Add(1) + var execErr error go tcx.WithReset(func() { defer wg.Done() - tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") + execErr = tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) c := make(chan string, 1) wg.Add(1) @@ -73,7 +74,9 @@ func startMigrationTest(t *testing.T, expectedOutput string, statusMapStateFiles wg.Add(1) go migrationRunner(ctx, tcx, mID, &wg, statusMapStateFiles) wg.Wait() - tcx.AssertStdoutContains(expectedOutput) + if expectedErr != nil { + require.Contains(t, execErr.Error(), expectedErr.Error()) + } tcx.WithReset(func() { f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 7e1917238..044afd1b4 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -40,12 +40,13 @@ func noMigrationsStatusTest(t *testing.T) { tcx.Tester(func(tcx it.TestContext) { var wg sync.WaitGroup wg.Add(1) + var execErr error go tcx.WithReset(func() { defer wg.Done() - tcx.CLC().Execute(ctx, "status") + execErr = tcx.CLC().Execute(ctx, "status") }) wg.Wait() - tcx.AssertStdoutContains("there are no migrations are in progress on migration cluster") + require.Contains(t, execErr.Error(), "there are 
no migrations are in progress on migration cluster") }) } From cd61ae585a27f7823bd43d22d45271a9c0c89362 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 28 Sep 2023 14:31:56 +0300 Subject: [PATCH 39/53] set text for individual migration --- base/commands/migration/migration_stages.go | 42 +++++++++++++++------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index e03d71c42..901ba0f96 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -40,16 +40,21 @@ func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelca SuccessMsg: fmt.Sprintf("Migrated %s: %s ...", d.Type, d.Name), FailureMsg: fmt.Sprintf("Failed migrating %s: %s ...", d.Type, d.Name), Func: func(ct context.Context, status stage.Statuser[any]) (any, error) { + var execErr error + StatusReaderLoop: for { if ctx.Err() != nil { if errors.Is(err, context.DeadlineExceeded) { - return nil, timeoutErr + execErr = timeoutErr + break StatusReaderLoop } - return nil, fmt.Errorf("migration failed: %w", err) + execErr = fmt.Errorf("migration failed: %w", err) + break StatusReaderLoop } generalStatus, err := fetchMigrationStatus(ctx, ci, migrationID) if err != nil { - return nil, fmt.Errorf("reading migration status: %w", err) + execErr = fmt.Errorf("reading migration status: %w", err) + break StatusReaderLoop } switch Status(generalStatus) { case StatusComplete: @@ -57,33 +62,41 @@ func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelca case StatusFailed: errs, err := fetchMigrationErrors(ctx, ci, migrationID) if err != nil { - return nil, fmt.Errorf("fetching migration errors: %w", err) + execErr = fmt.Errorf("fetching migration errors: %w", err) + break StatusReaderLoop } - return nil, errors.New(errs) + execErr = errors.New(errs) + break StatusReaderLoop case StatusCanceled, StatusCanceling: - return nil, errors2.ErrUserCancelled + execErr = errors2.ErrUserCancelled + break StatusReaderLoop } q := fmt.Sprintf(`SELECT JSON_QUERY(this, '$.migrations[%d]') FROM %s WHERE __key= '%s'`, i, StatusMapName, migrationID) res, err := ci.Client().SQL().Execute(ctx, q) if err != nil { - return nil, err + execErr = err + break StatusReaderLoop } iter, err := res.Iterator() if err != nil { - return nil, err + execErr = err + break StatusReaderLoop } if iter.HasNext() { row, err := iter.Next() if err != nil { - return nil, err + execErr = err + break StatusReaderLoop } rowStr, err := row.Get(0) if err != nil { - return nil, err + execErr = err + break StatusReaderLoop } var m DSMigrationStatus if err = json.Unmarshal(rowStr.(serialization.JSON), &m); err != nil { - return nil, err + execErr = err + break StatusReaderLoop } status.SetProgress(m.CompletionPercentage) switch m.Status { @@ -92,11 +105,16 @@ func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelca case StatusFailed: return nil, stage.IgnoreError(errors.New(m.Error)) case StatusCanceled: - return nil, errors2.ErrUserCancelled + execErr = errors2.ErrUserCancelled + break StatusReaderLoop } } time.Sleep(1 * time.Second) } + if execErr != nil { + status.SetText(execErr.Error()) + } + return nil, execErr }, }) } From c2f15d2e599cc5ebd8b5421eb0c0508f5b24ae12 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 28 Sep 2023 16:55:03 +0300 Subject: [PATCH 40/53] add deleted build-dmt to Makefile again --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile 
b/Makefile index 28b53c01d..fdff565d3 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,9 @@ TARGZ ?= true build: CGO_ENABLED=0 go build -tags base,std,hazelcastinternal,hazelcastinternaltest -ldflags "$(LDFLAGS)" -o build/$(BINARY_NAME) ./cmd/clc +build-dmt: + CGO_ENABLED=0 go build -tags base,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -ldflags "$(LDFLAGS)" -o build/$(BINARY_NAME) ./cmd/clc + test: go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... From 9425bc79c094bdfb9573fe983c584a2a3b525ecd Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 11 Oct 2023 10:27:05 +0300 Subject: [PATCH 41/53] fix wait logic --- base/commands/migration/migration_stages.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index 901ba0f96..8d441a2dc 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -191,15 +191,11 @@ func saveReportToFile(ctx context.Context, ci *hazelcast.ClientInternal, migrati func waitForMigrationToBeCreated(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) error { for { - statusMap, err := ci.Client().GetMap(ctx, StatusMapName) + status, err := fetchMigrationStatus(ctx, ci, migrationID) if err != nil { return err } - ok, err := statusMap.ContainsKey(ctx, migrationID) - if err != nil { - return err - } - if ok { + if Status(status) == StatusInProgress { return nil } } @@ -288,7 +284,7 @@ func fetchMigrationErrors(ctx context.Context, ci *hazelcast.ClientInternal, mig return "", err } var errs []string - for it.HasNext() { // single iteration is enough that we are reading single result for a single migration + for it.HasNext() { row, err := it.Next() if err != nil { return "", err From 57e1df2e9146a0a65eff7fc35af27e094fe16e94 Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 11 Oct 2023 13:51:08 +0300 Subject: [PATCH 42/53] add migration id info to the logs --- base/commands/migration/migration_stages.go | 4 ++-- base/commands/migration/migration_start.go | 10 +++++++++- base/commands/migration/start_stages.go | 2 +- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index 8d441a2dc..a1524954b 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -37,8 +37,8 @@ func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelca i := i stages = append(stages, stage.Stage[any]{ ProgressMsg: fmt.Sprintf("Migrating %s: %s", d.Type, d.Name), - SuccessMsg: fmt.Sprintf("Migrated %s: %s ...", d.Type, d.Name), - FailureMsg: fmt.Sprintf("Failed migrating %s: %s ...", d.Type, d.Name), + SuccessMsg: fmt.Sprintf("Migrated %s: %s", d.Type, d.Name), + FailureMsg: fmt.Sprintf("Failed migrating %s: %s", d.Type, d.Name), Func: func(ct context.Context, status stage.Statuser[any]) (any, error) { var execErr error StatusReaderLoop: diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index d509fd082..5b430641e 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -10,8 +10,10 @@ import ( "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" clcerrors "github.com/hazelcast/hazelcast-commandline-client/errors" "github.com/hazelcast/hazelcast-commandline-client/internal/check" + 
"github.com/hazelcast/hazelcast-commandline-client/internal/output" "github.com/hazelcast/hazelcast-commandline-client/internal/plug" "github.com/hazelcast/hazelcast-commandline-client/internal/prompt" + "github.com/hazelcast/hazelcast-commandline-client/internal/serialization" ) type StartCmd struct{} @@ -72,7 +74,13 @@ Selected data structures in the source cluster will be migrated to the target cl } ec.PrintlnUnnecessary("") ec.PrintlnUnnecessary("OK Migration completed successfully.") - return nil + return ec.AddOutputRows(ctx, output.Row{ + output.Column{ + Name: "Migration ID", + Type: serialization.TypeString, + Value: mID, + }, + }) } func init() { diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index 5bf5b6f08..ce3b089a8 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -44,7 +44,7 @@ func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.S }, { ProgressMsg: "Starting the migration", - SuccessMsg: "Started the migration", + SuccessMsg: fmt.Sprintf("Started the migration with ID: %s", st.migrationID), FailureMsg: "Could not start the migration", Func: st.startStage(), }, From f57aa140a569a90058da52f7887b5aee21ebf80d Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 11 Oct 2023 14:30:37 +0300 Subject: [PATCH 43/53] fix tests --- base/commands/migration/start_stages_it_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index db3d55154..682b52084 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -42,7 +42,7 @@ func TestMigrationStages(t *testing.T) { "testdata/start/migration_success_initial.json", "testdata/start/migration_success_failure.json", }, - expectedErr: errors.New("Failed migrating IMAP: imap5 ...: some error"), + expectedErr: errors.New("Failed migrating IMAP: imap5: some error"), }, } for _, tc := range testCases { @@ -98,6 +98,7 @@ func migrationRunner(ctx context.Context, tcx it.TestContext, migrationID string for _, f := range statusMapStateFiles { b := MustValue(os.ReadFile(f)) Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) + time.Sleep(2 * time.Second) } wg.Done() } From 677d4559bfc68d126fd7b8168186778b0949fdd4 Mon Sep 17 00:00:00 2001 From: kmetin Date: Wed, 11 Oct 2023 15:30:59 +0300 Subject: [PATCH 44/53] fix tests --- base/commands/migration/start_stages_it_test.go | 6 ++++-- base/commands/migration/status_stages_it_test.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 682b52084..242e90de3 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -57,6 +57,7 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { ci := hz.NewClientInternal(tcx.Client) + createMapping(ctx, tcx) createMemberLogs(t, ctx, ci) defer removeMembersLogs(ctx, ci) var wg sync.WaitGroup @@ -74,6 +75,9 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s wg.Add(1) go migrationRunner(ctx, tcx, mID, &wg, statusMapStateFiles) wg.Wait() + if expectedErr == nil { + require.Equal(t, nil, execErr) + } if expectedErr != nil { require.Contains(t, execErr.Error(), 
expectedErr.Error()) } @@ -92,8 +96,6 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s } func migrationRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup, statusMapStateFiles []string) { - mSQL := fmt.Sprintf(`CREATE MAPPING IF NOT EXISTS %s TYPE IMap OPTIONS('keyFormat'='varchar', 'valueFormat'='json')`, migration.StatusMapName) - MustValue(tcx.Client.SQL().Execute(ctx, mSQL)) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) for _, f := range statusMapStateFiles { b := MustValue(os.ReadFile(f)) diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 044afd1b4..715a5b649 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -67,7 +67,7 @@ func statusTest(t *testing.T) { time.Sleep(1 * time.Second) statusRunner(t, mID, tcx, ctx) wg.Wait() - tcx.AssertStdoutContains("OK Connected to the migration cluster.") + tcx.AssertStdoutContains("Connected to the migration cluster") tcx.WithReset(func() { f := fmt.Sprintf("migration_report_%s.txt", mID) require.Equal(t, true, fileExists(f)) From 8cde4fd9420f026bc3b60a9ae2384973df1e2c13 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 12 Oct 2023 10:51:14 +0300 Subject: [PATCH 45/53] fix PR comment --- base/commands/migration/migration_stages.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index a1524954b..fdd8e69a3 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -19,7 +19,7 @@ import ( ) var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout while reading status: "+ - "please ensure that you are using Hazelcast's migration cluster distribution and your DMT config points to that cluster: %w", + "please ensure that you are using Hazelcast's migration cluster distribution and your DMT configuration points to that cluster: %w", context.DeadlineExceeded) func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelcast.ClientInternal, migrationID string) ([]stage.Stage[any], error) { From ccfb42509560520de525e08f53962d059462f23b Mon Sep 17 00:00:00 2001 From: kmetin Date: Mon, 16 Oct 2023 10:09:54 +0300 Subject: [PATCH 46/53] fix Serkan's comments --- .github/workflows/test-all.yaml | 2 +- Makefile | 3 +++ base/commands/migration/migration_stages.go | 22 ++++++++++++++------- base/maps/maps.go | 3 +-- 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-all.yaml b/.github/workflows/test-all.yaml index 1ecb75df5..d66fd8428 100644 --- a/.github/workflows/test-all.yaml +++ b/.github/workflows/test-all.yaml @@ -70,4 +70,4 @@ jobs: - name: "Run All Tests" run: | - make test + make test-dmt diff --git a/Makefile b/Makefile index fdff565d3..72f527b92 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,9 @@ build-dmt: test: go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... +test-dmt: + go test -tags base,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... + test-cover: go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) -coverprofile=coverage.out -coverpkg $(PACKAGES) -coverprofile=$(COVERAGE_OUT) ./... 
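
The new build-dmt and test-dmt targets rely on Go build tags to select which command sets are compiled into the binary. The file below is a hedged sketch of how a tag can gate a file; the exact tag expressions used by the real DMT command files are not visible in this patch, so the constraint and the file contents are assumptions for illustration only.

// Only compiled when the "migration" tag is set, mirroring how build-dmt and
// test-dmt pass -tags that include "migration".
//go:build migration

package main

import "fmt"

func main() {
	// Built with:    go run -tags migration main.go
	// Without the tag, this file is simply left out of the package.
	fmt.Println("DMT build: migration commands included")
}
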
diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index fdd8e69a3..9cfd5e4b7 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "strings" "time" @@ -22,10 +23,12 @@ var timeoutErr = fmt.Errorf("migration could not be completed: reached timeout w "please ensure that you are using Hazelcast's migration cluster distribution and your DMT configuration points to that cluster: %w", context.DeadlineExceeded) +var migrationStatusNotFoundErr = fmt.Errorf("migration status not found") + func createMigrationStages(ctx context.Context, ec plug.ExecContext, ci *hazelcast.ClientInternal, migrationID string) ([]stage.Stage[any], error) { childCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - if err := waitForMigrationToBeCreated(childCtx, ci, migrationID); err != nil { + if err := waitForMigrationToBeInProgress(childCtx, ci, migrationID); err != nil { return nil, fmt.Errorf("waiting migration to be created: %w", err) } var stages []stage.Stage[any] @@ -181,6 +184,9 @@ func saveReportToFile(ctx context.Context, ci *hazelcast.ClientInternal, migrati if err != nil { return err } + if report == "" { + return nil + } f, err := os.Create(fmt.Sprintf(fileName)) if err != nil { return err @@ -189,10 +195,13 @@ func saveReportToFile(ctx context.Context, ci *hazelcast.ClientInternal, migrati return os.WriteFile(fileName, []byte(report), 0600) } -func waitForMigrationToBeCreated(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) error { +func waitForMigrationToBeInProgress(ctx context.Context, ci *hazelcast.ClientInternal, migrationID string) error { for { status, err := fetchMigrationStatus(ctx, ci, migrationID) if err != nil { + if errors.Is(err, migrationStatusNotFoundErr) { + continue // migration status will not be available for a while, so we should wait for it + } return err } if Status(status) == StatusInProgress { @@ -244,6 +253,8 @@ func fetchMigrationStatus(ctx context.Context, ci *hazelcast.ClientInternal, mig return "", err } return m, nil + } else { + return "", migrationStatusNotFoundErr } return "", nil } @@ -304,11 +315,8 @@ func finalizeMigration(ctx context.Context, ec plug.ExecContext, ci *hazelcast.C if err != nil { return err } - var name string - if reportOutputDir == "" { - name = fmt.Sprintf("migration_report_%s.txt", migrationID) - } - err = saveReportToFile(ctx, ci, migrationID, name) + outFile := filepath.Join(reportOutputDir, fmt.Sprintf("migration_report_%s.txt", migrationID)) + err = saveReportToFile(ctx, ci, migrationID, outFile) if err != nil { return fmt.Errorf("saving report to file: %w", err) } diff --git a/base/maps/maps.go b/base/maps/maps.go index e85aeba63..7d065516a 100644 --- a/base/maps/maps.go +++ b/base/maps/maps.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/hazelcast/hazelcast-commandline-client/base/commands/object" "github.com/hazelcast/hazelcast-commandline-client/base/objects" "github.com/hazelcast/hazelcast-commandline-client/clc" "github.com/hazelcast/hazelcast-commandline-client/internal/output" @@ -19,7 +18,7 @@ func Indexes(ctx context.Context, ec plug.ExecContext, mapName string) error { if mapName != "" { mapNames = append(mapNames, mapName) } else { - maps, err := objects.GetAll(ctx, ec, object.Map, false) + maps, err := objects.GetAll(ctx, ec, "map", false) if err != nil { return err } From 2d916cf7895ecd78b179608756937c21e96f730e Mon 
Sep 17 00:00:00 2001 From: kmetin Date: Wed, 18 Oct 2023 22:57:56 +0300 Subject: [PATCH 47/53] fix Serkan's comment --- base/commands/migration/migration_stages.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/base/commands/migration/migration_stages.go b/base/commands/migration/migration_stages.go index 9cfd5e4b7..72f045977 100644 --- a/base/commands/migration/migration_stages.go +++ b/base/commands/migration/migration_stages.go @@ -158,6 +158,8 @@ func dataStructuresToBeMigrated(ctx context.Context, ec plug.ExecContext, migrat Type: m.Type, }) } + } else { + return nil, fmt.Errorf("no datastructures found to migrate") } return dss, nil } @@ -280,6 +282,8 @@ func fetchMigrationReport(ctx context.Context, ci *hazelcast.ClientInternal, mig return "", err } return m, nil + } else { + return "", fmt.Errorf("migration report cannot be found") } return "", nil } From 0c7028cdae9d4dd0c53fd5a322d35f0b0c3e9fef Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 19 Oct 2023 16:07:12 +0300 Subject: [PATCH 48/53] fix Yuce's comments 1 --- base/commands/migration/migration_start.go | 5 ++++- base/commands/migration/start_stages.go | 7 ++++--- base/commands/migration/status_stages.go | 5 ++--- base/commands/migration/status_stages_it_test.go | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 5b430641e..68c5bc033 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -54,7 +54,10 @@ Selected data structures in the source cluster will be migrated to the target cl } ec.PrintlnUnnecessary("") mID := MakeMigrationID() - sts := NewStartStages(ec.Logger(), mID, ec.GetStringArg(argDMTConfig)) + sts, err := NewStartStages(ec.Logger(), mID, ec.GetStringArg(argDMTConfig)) + if err != nil { + return err + } sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) 
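
NewStartStages now reports a missing migration ID as an error instead of panicking, and the call site above propagates it. Below is a minimal sketch of that constructor-returns-error pattern, using simplified placeholder types rather than the real StartStages struct.

package main

import (
	"errors"
	"fmt"
)

type StartStages struct {
	migrationID string
	configDir   string
}

// NewStartStages returns an error for bad input so the command can surface
// the problem to the user instead of crashing the whole CLI, which is what
// the panic in the earlier revision would have done.
func NewStartStages(migrationID, configDir string) (*StartStages, error) {
	if migrationID == "" {
		return nil, errors.New("migrationID is required")
	}
	return &StartStages{migrationID: migrationID, configDir: configDir}, nil
}

func main() {
	if _, err := NewStartStages("", "dmt-config"); err != nil {
		fmt.Println("error:", err)
	}
}
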
if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { return err diff --git a/base/commands/migration/start_stages.go b/base/commands/migration/start_stages.go index ce3b089a8..d2d1842ad 100644 --- a/base/commands/migration/start_stages.go +++ b/base/commands/migration/start_stages.go @@ -5,6 +5,7 @@ package migration import ( "context" "encoding/json" + "errors" "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" @@ -23,15 +24,15 @@ type StartStages struct { logger log.Logger } -func NewStartStages(logger log.Logger, migrationID, configDir string) *StartStages { +func NewStartStages(logger log.Logger, migrationID, configDir string) (*StartStages, error) { if migrationID == "" { - panic("migrationID is required") + return nil, errors.New("migrationID is required") } return &StartStages{ migrationID: migrationID, configDir: configDir, logger: logger, - } + }, nil } func (st *StartStages) Build(ctx context.Context, ec plug.ExecContext) []stage.Stage[any] { diff --git a/base/commands/migration/status_stages.go b/base/commands/migration/status_stages.go index e6d87fc5c..fbd29ef0f 100644 --- a/base/commands/migration/status_stages.go +++ b/base/commands/migration/status_stages.go @@ -54,12 +54,11 @@ func (st *StatusStages) connectStage(ec plug.ExecContext) func(context.Context, return nil, err } if len(all) == 0 { - return nil, fmt.Errorf("there are no migrations are in progress on migration cluster") + return nil, fmt.Errorf("there are no migrations in progress") } var mip MigrationInProgress m := all[0].(serialization.JSON) - err = json.Unmarshal(m, &mip) - if err != nil { + if err = json.Unmarshal(m, &mip); err != nil { return nil, fmt.Errorf("parsing migration in progress: %w", err) } st.statusMap, err = st.ci.Client().GetMap(ctx, StatusMapName) diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index 715a5b649..a5f7cd859 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -46,7 +46,7 @@ func noMigrationsStatusTest(t *testing.T) { execErr = tcx.CLC().Execute(ctx, "status") }) wg.Wait() - require.Contains(t, execErr.Error(), "there are no migrations are in progress on migration cluster") + require.Contains(t, execErr.Error(), "there are no migrations in progress") }) } From f50a1de3621a0e85a2f50804eca9223edc4fd15c Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 19 Oct 2023 16:49:19 +0300 Subject: [PATCH 49/53] fix Yuce's comments 2 --- .../commands/migration/start_stages_it_test.go | 18 +++--------------- .../migration/status_stages_it_test.go | 17 ++++++++--------- 2 files changed, 11 insertions(+), 24 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index 242e90de3..f93dc8e2f 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -67,23 +67,20 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s defer wg.Done() execErr = tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") }) - c := make(chan string, 1) - wg.Add(1) + c := make(chan string) go findMigrationID(ctx, tcx, c) mID := <-c - wg.Done() wg.Add(1) go migrationRunner(ctx, tcx, mID, &wg, statusMapStateFiles) wg.Wait() if expectedErr == nil { require.Equal(t, nil, execErr) - } - if expectedErr != nil { + } else { require.Contains(t, execErr.Error(), expectedErr.Error()) } tcx.WithReset(func() { f := 
fmt.Sprintf("migration_report_%s.txt", mID) - require.Equal(t, true, fileExists(f)) + require.Equal(t, true, paths.Exists(f)) Must(os.Remove(f)) b := MustValue(os.ReadFile(paths.ResolveLogPath("test"))) for _, m := range ci.OrderedMembers() { @@ -122,12 +119,3 @@ func findMigrationID(ctx context.Context, tcx it.TestContext, c chan string) { } } } - -func fileExists(filename string) bool { - MustValue(os.Getwd()) - _, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - return true -} diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index a5f7cd859..e4b42043e 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -14,6 +14,7 @@ import ( _ "github.com/hazelcast/hazelcast-commandline-client/base" _ "github.com/hazelcast/hazelcast-commandline-client/base/commands" "github.com/hazelcast/hazelcast-commandline-client/base/commands/migration" + "github.com/hazelcast/hazelcast-commandline-client/clc/paths" . "github.com/hazelcast/hazelcast-commandline-client/internal/check" "github.com/hazelcast/hazelcast-commandline-client/internal/it" hz "github.com/hazelcast/hazelcast-go-client" @@ -70,16 +71,14 @@ func statusTest(t *testing.T) { tcx.AssertStdoutContains("Connected to the migration cluster") tcx.WithReset(func() { f := fmt.Sprintf("migration_report_%s.txt", mID) - require.Equal(t, true, fileExists(f)) + require.Equal(t, true, paths.Exists(f)) Must(os.Remove(f)) - /* - b := MustValue(os.ReadFile(paths.DefaultLogPath(time.Now()))) - for _, m := range ci.OrderedMembers() { - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) - require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) - } - */ + b := MustValue(os.ReadFile(paths.ResolveLogPath("test"))) + for _, m := range ci.OrderedMembers() { + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log1", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log2", mID, m.UUID.String())) + require.Contains(t, string(b), fmt.Sprintf("[%s_%s] log3", mID, m.UUID.String())) + } }) }) } From db6348f191c5d907c13b6049dd891daec44761b5 Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 19 Oct 2023 18:38:55 +0300 Subject: [PATCH 50/53] try fixing tests --- .github/workflows/test-all.yaml | 2 +- Makefile | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-all.yaml b/.github/workflows/test-all.yaml index d66fd8428..1ecb75df5 100644 --- a/.github/workflows/test-all.yaml +++ b/.github/workflows/test-all.yaml @@ -70,4 +70,4 @@ jobs: - name: "Run All Tests" run: | - make test-dmt + make test diff --git a/Makefile b/Makefile index 72f527b92..0ace23886 100644 --- a/Makefile +++ b/Makefile @@ -23,10 +23,7 @@ build-dmt: CGO_ENABLED=0 go build -tags base,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -ldflags "$(LDFLAGS)" -o build/$(BINARY_NAME) ./cmd/clc test: - go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... - -test-dmt: - go test -tags base,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... + go test -tags base,std,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... 
test-cover: go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) -coverprofile=coverage.out -coverpkg $(PACKAGES) -coverprofile=$(COVERAGE_OUT) ./... From d8a8917b8cc2158921ffc3af921540007fbc3dea Mon Sep 17 00:00:00 2001 From: kmetin Date: Thu, 19 Oct 2023 18:50:50 +0300 Subject: [PATCH 51/53] try fixing tests --- .github/workflows/test-all-386.yaml | 2 +- .github/workflows/test-all.yaml | 2 +- Makefile | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-all-386.yaml b/.github/workflows/test-all-386.yaml index f839be0b3..c20513006 100644 --- a/.github/workflows/test-all-386.yaml +++ b/.github/workflows/test-all-386.yaml @@ -57,4 +57,4 @@ jobs: - name: "Run All Tests" run: | - GOARCH=386 make test TEST_FLAGS="-v -count 1 -timeout 30m" + GOARCH=386 make test-dmt TEST_FLAGS="-v -count 1 -timeout 30m" diff --git a/.github/workflows/test-all.yaml b/.github/workflows/test-all.yaml index 1ecb75df5..d66fd8428 100644 --- a/.github/workflows/test-all.yaml +++ b/.github/workflows/test-all.yaml @@ -70,4 +70,4 @@ jobs: - name: "Run All Tests" run: | - make test + make test-dmt diff --git a/Makefile b/Makefile index 0ace23886..71aaa40ee 100644 --- a/Makefile +++ b/Makefile @@ -23,6 +23,9 @@ build-dmt: CGO_ENABLED=0 go build -tags base,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -ldflags "$(LDFLAGS)" -o build/$(BINARY_NAME) ./cmd/clc test: + go test -tags base,std,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... + +test-dmt: go test -tags base,std,migration,config,home,version,hazelcastinternal,hazelcastinternaltest -p 1 $(TEST_FLAGS) ./... test-cover: From 4a9371d8712bc79ca6ccf69382301e6952401d83 Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 20 Oct 2023 12:00:05 +0300 Subject: [PATCH 52/53] fix comments --- base/commands/migration/migration_start.go | 15 +++++---- base/commands/migration/migration_status.go | 13 ++++---- .../migration/status_stages_it_test.go | 32 +++++++++---------- 3 files changed, 31 insertions(+), 29 deletions(-) diff --git a/base/commands/migration/migration_start.go b/base/commands/migration/migration_start.go index 68c5bc033..843681998 100644 --- a/base/commands/migration/migration_start.go +++ b/base/commands/migration/migration_start.go @@ -4,7 +4,6 @@ package migration import ( "context" - "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" @@ -31,7 +30,7 @@ func (StartCmd) Init(cc plug.InitContext) error { return nil } -func (StartCmd) Exec(ctx context.Context, ec plug.ExecContext) error { +func (StartCmd) Exec(ctx context.Context, ec plug.ExecContext) (err error) { ci, err := ec.ClientInternal(ctx) if err != nil { return err @@ -54,12 +53,18 @@ Selected data structures in the source cluster will be migrated to the target cl } ec.PrintlnUnnecessary("") mID := MakeMigrationID() + defer func() { + finalizeErr := finalizeMigration(ctx, ec, ci, mID, ec.Props().GetString(flagOutputDir)) + if err == nil { + err = finalizeErr + } + }() sts, err := NewStartStages(ec.Logger(), mID, ec.GetStringArg(argDMTConfig)) if err != nil { return err } sp := stage.NewFixedProvider(sts.Build(ctx, ec)...) 
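
The deferred closure above guarantees that finalizeMigration (saving member logs and the report file) runs whether or not a stage fails, while the named return value keeps the original stage error from being overwritten. Here is a self-contained sketch of the same pattern, with run and finalize standing in for stage.Execute and finalizeMigration.

package main

import (
	"errors"
	"fmt"
)

func run() error      { return errors.New("stage failed") }
func finalize() error { return errors.New("could not save report") }

// exec returns the primary error from run(); the finalizer always executes,
// and its error is reported only when run() itself succeeded.
func exec() (err error) {
	defer func() {
		if ferr := finalize(); err == nil {
			err = ferr
		}
	}()
	return run()
}

func main() {
	fmt.Println(exec()) // prints "stage failed", not the finalize error
}
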
- if _, err := stage.Execute(ctx, ec, any(nil), sp); err != nil { + if _, err = stage.Execute(ctx, ec, any(nil), sp); err != nil { return err } mStages, err := createMigrationStages(ctx, ec, ci, mID) @@ -68,10 +73,6 @@ Selected data structures in the source cluster will be migrated to the target cl } mp := stage.NewFixedProvider(mStages...) _, err = stage.Execute(ctx, ec, any(nil), mp) - err2 := finalizeMigration(ctx, ec, ci, mID, ec.Props().GetString(flagOutputDir)) - if err2 != nil { - return fmt.Errorf("finalizing migration: %w", err2) - } if err != nil { return err } diff --git a/base/commands/migration/migration_status.go b/base/commands/migration/migration_status.go index 84f6782ef..c9484a0b2 100644 --- a/base/commands/migration/migration_status.go +++ b/base/commands/migration/migration_status.go @@ -4,7 +4,6 @@ package migration import ( "context" - "fmt" "github.com/hazelcast/hazelcast-commandline-client/clc/ux/stage" "github.com/hazelcast/hazelcast-commandline-client/internal/check" @@ -24,7 +23,7 @@ func (s StatusCmd) Init(cc plug.InitContext) error { return nil } -func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { +func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) (err error) { ci, err := ec.ClientInternal(ctx) if err != nil { return err @@ -37,16 +36,18 @@ func (s StatusCmd) Exec(ctx context.Context, ec plug.ExecContext) error { if err != nil { return err } + defer func() { + finalizeErr := finalizeMigration(ctx, ec, ci, mID.(string), ec.Props().GetString(flagOutputDir)) + if err == nil { + err = finalizeErr + } + }() mStages, err := createMigrationStages(ctx, ec, ci, mID.(string)) if err != nil { return err } mp := stage.NewFixedProvider(mStages...) _, err = stage.Execute(ctx, ec, any(nil), mp) - err2 := finalizeMigration(ctx, ec, ci, mID.(string), ec.Props().GetString(flagOutputDir)) - if err2 != nil { - return fmt.Errorf("finalizing migration: %w", err2) - } if err != nil { return err } diff --git a/base/commands/migration/status_stages_it_test.go b/base/commands/migration/status_stages_it_test.go index e4b42043e..0a780c661 100644 --- a/base/commands/migration/status_stages_it_test.go +++ b/base/commands/migration/status_stages_it_test.go @@ -56,21 +56,24 @@ func statusTest(t *testing.T) { ctx := context.Background() tcx.Tester(func(tcx it.TestContext) { ci := hz.NewClientInternal(tcx.Client) - mID := preStatusRunner(t, tcx, ctx, ci) + progressList := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) + mID := preStatusRunner(t, tcx, ctx, ci, progressList) + defer postStatusRunner(ctx, mID, progressList) defer removeMembersLogs(ctx, ci) + outDir := MustValue(os.MkdirTemp("", "clc-")) var wg sync.WaitGroup wg.Add(1) go tcx.WithReset(func() { defer wg.Done() - Must(tcx.CLC().Execute(ctx, "status")) + Must(tcx.CLC().Execute(ctx, "status", "-o", outDir)) }) // statusRunner removes __datamigrations_in_progress list, so we should give some time to command to read it first time.Sleep(1 * time.Second) - statusRunner(t, mID, tcx, ctx) + statusRunner(mID, tcx, ctx) wg.Wait() tcx.AssertStdoutContains("Connected to the migration cluster") tcx.WithReset(func() { - f := fmt.Sprintf("migration_report_%s.txt", mID) + f := paths.Join(outDir, fmt.Sprintf("migration_report_%s.txt", mID)) require.Equal(t, true, paths.Exists(f)) Must(os.Remove(f)) b := MustValue(os.ReadFile(paths.ResolveLogPath("test"))) @@ -83,15 +86,12 @@ func statusTest(t *testing.T) { }) } -func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context, 
ci *hz.ClientInternal) string { +func preStatusRunner(t *testing.T, tcx it.TestContext, ctx context.Context, ci *hz.ClientInternal, progressList *hz.List) string { createMapping(ctx, tcx) createMemberLogs(t, ctx, ci) mID := migration.MakeMigrationID() - l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) - m := MustValue(json.Marshal(migration.MigrationInProgress{ - MigrationID: mID, - })) - require.Equal(t, true, MustValue(l.Add(ctx, serialization.JSON(m)))) + m := MustValue(json.Marshal(migration.MigrationInProgress{MigrationID: mID})) + require.Equal(t, true, MustValue(progressList.Add(ctx, serialization.JSON(m)))) statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_initial.json")) Must(statusMap.Set(ctx, mID, serialization.JSON(b))) @@ -114,13 +114,13 @@ func removeMembersLogs(ctx context.Context, ci *hz.ClientInternal) { } } -func statusRunner(t *testing.T, migrationID string, tcx it.TestContext, ctx context.Context) { +func statusRunner(migrationID string, tcx it.TestContext, ctx context.Context) { statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) b := MustValue(os.ReadFile("testdata/start/migration_success_completed.json")) Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - l := MustValue(tcx.Client.GetList(ctx, migration.MigrationsInProgressList)) - m := MustValue(json.Marshal(migration.MigrationInProgress{ - MigrationID: migrationID, - })) - require.Equal(t, true, MustValue(l.Remove(ctx, serialization.JSON(m)))) +} + +func postStatusRunner(ctx context.Context, migrationID string, progressList *hz.List) { + m := MustValue(json.Marshal(migration.MigrationInProgress{MigrationID: migrationID})) + MustValue(progressList.Remove(ctx, serialization.JSON(m))) } From 1037acb578c5379213a301c084ed50778296a8a2 Mon Sep 17 00:00:00 2001 From: kmetin Date: Fri, 20 Oct 2023 12:00:47 +0300 Subject: [PATCH 53/53] fix comments --- base/commands/migration/start_stages_it_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/base/commands/migration/start_stages_it_test.go b/base/commands/migration/start_stages_it_test.go index f93dc8e2f..568172ecb 100644 --- a/base/commands/migration/start_stages_it_test.go +++ b/base/commands/migration/start_stages_it_test.go @@ -60,18 +60,19 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s createMapping(ctx, tcx) createMemberLogs(t, ctx, ci) defer removeMembersLogs(ctx, ci) + outDir := MustValue(os.MkdirTemp("", "clc-")) var wg sync.WaitGroup wg.Add(1) var execErr error go tcx.WithReset(func() { defer wg.Done() - execErr = tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes") + execErr = tcx.CLC().Execute(ctx, "start", "dmt-config", "--yes", "-o", outDir) }) c := make(chan string) go findMigrationID(ctx, tcx, c) mID := <-c wg.Add(1) - go migrationRunner(ctx, tcx, mID, &wg, statusMapStateFiles) + go migrationRunner(t, ctx, tcx, mID, &wg, statusMapStateFiles) wg.Wait() if expectedErr == nil { require.Equal(t, nil, execErr) @@ -79,7 +80,7 @@ func startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s require.Contains(t, execErr.Error(), expectedErr.Error()) } tcx.WithReset(func() { - f := fmt.Sprintf("migration_report_%s.txt", mID) + f := paths.Join(outDir, fmt.Sprintf("migration_report_%s.txt", mID)) require.Equal(t, true, paths.Exists(f)) Must(os.Remove(f)) b := MustValue(os.ReadFile(paths.ResolveLogPath("test"))) @@ -92,12 +93,13 @@ func 
startMigrationTest(t *testing.T, expectedErr error, statusMapStateFiles []s }) } -func migrationRunner(ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup, statusMapStateFiles []string) { +func migrationRunner(t *testing.T, ctx context.Context, tcx it.TestContext, migrationID string, wg *sync.WaitGroup, statusMapStateFiles []string) { statusMap := MustValue(tcx.Client.GetMap(ctx, migration.StatusMapName)) for _, f := range statusMapStateFiles { b := MustValue(os.ReadFile(f)) - Must(statusMap.Set(ctx, migrationID, serialization.JSON(b))) - time.Sleep(2 * time.Second) + it.Eventually(t, func() bool { + return statusMap.Set(ctx, migrationID, serialization.JSON(b)) == nil + }) } wg.Done() }
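
The last patch replaces the fixed two-second sleep in migrationRunner with it.Eventually, retrying the status-map write until it succeeds instead of guessing how long to wait. Below is a generic sketch of such a polling helper; the timeout and interval values are assumptions, and the real helper in internal/it may behave differently.

package main

import (
	"fmt"
	"time"
)

// eventually polls cond until it returns true or the timeout elapses,
// sleeping for interval between attempts.
func eventually(timeout, interval time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	start := time.Now()
	ok := eventually(5*time.Second, 100*time.Millisecond, func() bool {
		// Stands in for statusMap.Set(...) == nil: succeeds once the
		// cluster-side mapping is ready.
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println("condition met:", ok)
}
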