diff --git a/cmd/microcloud/add.go b/cmd/microcloud/add.go
index 955f82e92..4eaf6e459 100644
--- a/cmd/microcloud/add.go
+++ b/cmd/microcloud/add.go
@@ -17,7 +17,6 @@ import (
 type cmdAdd struct {
 	common *CmdControl
 
-	flagAutoSetup     bool
 	flagWipe          bool
 	flagPreseed       bool
 	flagLookupTimeout int64
@@ -30,7 +29,6 @@ func (c *cmdAdd) Command() *cobra.Command {
 		RunE: c.Run,
 	}
 
-	cmd.Flags().BoolVar(&c.flagAutoSetup, "auto", false, "Automatic setup with default configuration")
 	cmd.Flags().BoolVar(&c.flagWipe, "wipe", false, "Wipe disks to add to MicroCeph")
 	cmd.Flags().BoolVar(&c.flagPreseed, "preseed", false, "Expect Preseed YAML for configuring MicroCloud in stdin")
 	cmd.Flags().Int64Var(&c.flagLookupTimeout, "lookup-timeout", 0, "Amount of seconds to wait for systems to show up. Defaults: 60s for interactive, 5s for automatic and preseed")
@@ -46,7 +44,6 @@ func (c *cmdAdd) Run(cmd *cobra.Command, args []string) error {
 	cfg := initConfig{
 		bootstrap:    false,
 		setupMany:    true,
-		autoSetup:    c.flagAutoSetup,
 		wipeAllDisks: c.flagWipe,
 		common:       c.common,
 		asker:        &c.common.asker,
@@ -57,7 +54,7 @@ func (c *cmdAdd) Run(cmd *cobra.Command, args []string) error {
 	cfg.lookupTimeout = DefaultLookupTimeout
 	if c.flagLookupTimeout > 0 {
 		cfg.lookupTimeout = time.Duration(c.flagLookupTimeout) * time.Second
-	} else if c.flagAutoSetup || c.flagPreseed {
+	} else if c.flagPreseed {
 		cfg.lookupTimeout = DefaultAutoLookupTimeout
 	}
 
diff --git a/cmd/microcloud/ask.go b/cmd/microcloud/ask.go
index 2403b4165..2aa92cd66 100644
--- a/cmd/microcloud/ask.go
+++ b/cmd/microcloud/ask.go
@@ -53,7 +53,7 @@ func (c *initConfig) askUpdateProfile(profile api.ProfilesPost, profiles []strin
 		}
 	}
 
-	if !c.autoSetup && len(askConflictingConfig) > 0 || len(askConflictingDevices) > 0 {
+	if len(askConflictingConfig) > 0 || len(askConflictingDevices) > 0 {
 		replace, err := c.asker.AskBool("Replace existing default profile configuration? (yes/no) [default=no]: ", "no")
 		if err != nil {
 			return nil, err
 		}
@@ -83,11 +83,9 @@ func (c *initConfig) askRetry(question string, f func() error) error {
 		if err != nil {
 			fmt.Println(err)
 
-			if !c.autoSetup {
-				retry, err = c.asker.AskBool(fmt.Sprintf("%s (yes/no) [default=yes]: ", question), "yes")
-				if err != nil {
-					return err
-				}
+			retry, err = c.asker.AskBool(fmt.Sprintf("%s (yes/no) [default=yes]: ", question), "yes")
+			if err != nil {
+				return err
 			}
 		}
 
@@ -109,6 +107,8 @@ func (c *initConfig) askMissingServices(services []types.ServiceType, stateDirs
 
 	if len(missingServices) > 0 {
 		serviceStr := strings.Join(missingServices, ", ")
+
+		// Ignore missing services when running from preseed.
 		if !c.autoSetup {
 			confirm, err := c.asker.AskBool(fmt.Sprintf("%s not found. Continue anyway? (yes/no) [default=yes]: ", serviceStr), "yes")
 			if err != nil {
@@ -278,13 +278,6 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error {
 	data := [][]string{}
 	selectedDisks := map[string]string{}
 	for peer, disks := range availableDisks {
-		// In auto mode, if there's no spare disk, then we can't add a remote storage pool, so skip local pool creation.
- if c.autoSetup && len(disks) < 2 { - logger.Infof("Skipping local storage pool creation, peer %q has too few disks", peer) - - return nil - } - sortedDisks := []api.ResourcesStorageDisk{} for _, disk := range disks { sortedDisks = append(sortedDisks, disk) @@ -297,24 +290,12 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error { for _, disk := range sortedDisks { devicePath := parseDiskPath(disk) data = append(data, []string{peer, disk.Model, units.GetByteSizeStringIEC(int64(disk.Size), 2), disk.Type, devicePath}) - - // Add the first disk for each peer. - if c.autoSetup { - _, ok := selectedDisks[peer] - if !ok { - selectedDisks[peer] = devicePath - } - } } } - var err error - wantsDisks := true - if !c.autoSetup { - wantsDisks, err = c.asker.AskBool("Would you like to set up local storage? (yes/no) [default=yes]: ", "yes") - if err != nil { - return err - } + wantsDisks, err := c.asker.AskBool("Would you like to set up local storage? (yes/no) [default=yes]: ", "yes") + if err != nil { + return err } if !wantsDisks { @@ -328,69 +309,67 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error { return fmt.Errorf("Failed to check for source.wipe extension: %w", err) } - if !c.autoSetup { - err := c.askRetry("Retry selecting disks?", func() error { - selected := map[string]string{} - sort.Sort(cli.SortColumnsNaturally(data)) - header := []string{"LOCATION", "MODEL", "CAPACITY", "TYPE", "PATH"} - table := NewSelectableTable(header, data) - fmt.Println("Select exactly one disk from each cluster member:") - err := table.Render(table.rows) - if err != nil { - return err - } + err = c.askRetry("Retry selecting disks?", func() error { + selected := map[string]string{} + sort.Sort(cli.SortColumnsNaturally(data)) + header := []string{"LOCATION", "MODEL", "CAPACITY", "TYPE", "PATH"} + table := NewSelectableTable(header, data) + fmt.Println("Select exactly one disk from each cluster member:") + err := table.Render(table.rows) + if err != nil { + return err + } - selectedRows, err := table.GetSelections() - if err != nil { - return fmt.Errorf("Failed to confirm local LXD disk selection: %w", err) - } + selectedRows, err := table.GetSelections() + if err != nil { + return fmt.Errorf("Failed to confirm local LXD disk selection: %w", err) + } + + if len(selectedRows) == 0 { + return fmt.Errorf("No disks selected") + } + + for _, entry := range selectedRows { + target := table.SelectionValue(entry, "LOCATION") + path := table.SelectionValue(entry, "PATH") - if len(selectedRows) == 0 { - return fmt.Errorf("No disks selected") + _, ok := selected[target] + if ok { + return fmt.Errorf("Failed to add local storage pool: Selected more than one disk for target peer %q", target) } - for _, entry := range selectedRows { - target := table.SelectionValue(entry, "LOCATION") - path := table.SelectionValue(entry, "PATH") + selected[target] = path + } - _, ok := selected[target] - if ok { - return fmt.Errorf("Failed to add local storage pool: Selected more than one disk for target peer %q", target) - } + if len(selected) != len(askSystems) { + return fmt.Errorf("Failed to add local storage pool: Some peers don't have an available disk") + } - selected[target] = path + if !c.wipeAllDisks && wipeable { + fmt.Println("Select which disks to wipe:") + err := table.Render(selectedRows) + if err != nil { + return err } - if len(selected) != len(askSystems) { - return fmt.Errorf("Failed to add local storage pool: Some peers don't have an available disk") + wipeRows, err := table.GetSelections() + if err != 
nil { + return fmt.Errorf("Failed to confirm which disks to wipe: %w", err) } - if !c.wipeAllDisks && wipeable { - fmt.Println("Select which disks to wipe:") - err := table.Render(selectedRows) - if err != nil { - return err - } - - wipeRows, err := table.GetSelections() - if err != nil { - return fmt.Errorf("Failed to confirm which disks to wipe: %w", err) - } - - for _, entry := range wipeRows { - target := table.SelectionValue(entry, "LOCATION") - path := table.SelectionValue(entry, "PATH") - toWipe[target] = path - } + for _, entry := range wipeRows { + target := table.SelectionValue(entry, "LOCATION") + path := table.SelectionValue(entry, "PATH") + toWipe[target] = path } + } - selectedDisks = selected + selectedDisks = selected - return nil - }) - if err != nil { - return err - } + return nil + }) + if err != nil { + return err } if len(selectedDisks) == 0 { @@ -603,21 +582,17 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { return nil } - var err error - wantsDisks := true - if !c.autoSetup { - wantsDisks, err = c.asker.AskBool("Would you like to set up distributed storage? (yes/no) [default=yes]: ", "yes") + wantsDisks, err := c.asker.AskBool("Would you like to set up distributed storage? (yes/no) [default=yes]: ", "yes") + if err != nil { + return err + } + + // Ask if the user is okay with fully remote ceph on some systems. + if len(askSystemsRemote) != availableDiskCount && wantsDisks { + wantsDisks, err = c.asker.AskBool("Unable to find disks on some systems. Continue anyway? (yes/no) [default=yes]: ", "yes") if err != nil { return err } - - // Ask if the user is okay with fully remote ceph on some systems. - if len(askSystemsRemote) != availableDiskCount && wantsDisks { - wantsDisks, err = c.asker.AskBool("Unable to find disks on some systems. Continue anyway? 
(yes/no) [default=yes]: ", "yes") - if err != nil { - return err - } - } } if !wantsDisks { @@ -663,30 +638,28 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { return nil } - if !c.autoSetup { - fmt.Println("Select from the available unpartitioned disks:") - err := table.Render(table.rows) + fmt.Println("Select from the available unpartitioned disks:") + err := table.Render(table.rows) + if err != nil { + return err + } + + selected, err = table.GetSelections() + if err != nil { + return fmt.Errorf("Invalid disk configuration: %w", err) + } + + if len(selected) > 0 && !c.wipeAllDisks { + fmt.Println("Select which disks to wipe:") + err := table.Render(selected) if err != nil { return err } - selected, err = table.GetSelections() + toWipe, err = table.GetSelections() if err != nil { return fmt.Errorf("Invalid disk configuration: %w", err) } - - if len(selected) > 0 && !c.wipeAllDisks { - fmt.Println("Select which disks to wipe:") - err := table.Render(selected) - if err != nil { - return err - } - - toWipe, err = table.GetSelections() - if err != nil { - return fmt.Errorf("Invalid disk configuration: %w", err) - } - } } targetDisks := map[string][]string{} @@ -700,11 +673,6 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { targetDisks[target] = append(targetDisks[target], path) } - insufficientDisks := !useJoinConfigRemote && len(targetDisks) < RecommendedOSDHosts - if c.autoSetup && insufficientDisks { - return fmt.Errorf("Unable to add remote storage pool: At least %d peers must have allocated disks", RecommendedOSDHosts) - } - wipeDisks = map[string]map[string]bool{} for _, entry := range toWipe { target := table.SelectionValue(entry, "LOCATION") @@ -722,6 +690,8 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { return fmt.Errorf("No disks were selected") } + insufficientDisks := !useJoinConfigRemote && len(targetDisks) < RecommendedOSDHosts + if insufficientDisks { // This error will be printed to STDOUT as a normal message, so it includes a new-line for readability. return fmt.Errorf("Disk configuration does not meet recommendations for fault tolerance. At least %d systems must supply disks.\nContinuing with this configuration will leave MicroCloud susceptible to data loss", RecommendedOSDHosts) @@ -749,7 +719,7 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { } encryptDisks := c.encryptAllDisks - if !c.autoSetup && !c.encryptAllDisks && len(selectedDisks) > 0 { + if !c.encryptAllDisks && len(selectedDisks) > 0 { var err error encryptDisks, err = c.asker.AskBool("Do you want to encrypt the selected disks? (yes/no) [default=no]: ", "no") if err != nil { @@ -760,19 +730,17 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { // If a cephfs pool has already been set up, we will extend it automatically, so no need to ask the question. 
setupCephFS := useJoinConfigRemoteFS if !useJoinConfigRemoteFS { - if !c.autoSetup { - lxd := sh.Services[types.LXD].(*service.LXDService) - ext := "storage_cephfs_create_missing" - hasCephFS, err := lxd.HasExtension(context.Background(), lxd.Name(), lxd.Address(), "", ext) - if err != nil { - return fmt.Errorf("Failed to check for the %q LXD API extension: %w", ext, err) - } + lxd := sh.Services[types.LXD].(*service.LXDService) + ext := "storage_cephfs_create_missing" + hasCephFS, err := lxd.HasExtension(context.Background(), lxd.Name(), lxd.Address(), "", ext) + if err != nil { + return fmt.Errorf("Failed to check for the %q LXD API extension: %w", ext, err) + } - if hasCephFS { - setupCephFS, err = c.asker.AskBool("Would you like to set up CephFS remote storage? (yes/no) [default=yes]: ", "yes") - if err != nil { - return err - } + if hasCephFS { + setupCephFS, err = c.asker.AskBool("Would you like to set up CephFS remote storage? (yes/no) [default=yes]: ", "yes") + if err != nil { + return err } } } @@ -882,7 +850,7 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error { } func (c *initConfig) askOVNNetwork(sh *service.Handler) error { - if c.autoSetup || sh.Services[types.MicroOVN] == nil { + if sh.Services[types.MicroOVN] == nil { return nil } @@ -1247,11 +1215,6 @@ func (c *initConfig) askNetwork(sh *service.Handler) error { return err } - if !supportsFAN && c.autoSetup { - logger.Warn("Skipping FAN network setup, some systems don't support it") - return nil - } - if !supportsFAN { proceedWithNoOverlayNetworking, err := c.asker.AskBool("FAN networking is not usable. Do you want to proceed with setting up an inoperable cluster? (yes/no) [default=no]: ", "no") if err != nil { @@ -1299,10 +1262,6 @@ func (c *initConfig) askNetwork(sh *service.Handler) error { } func (c *initConfig) askCephNetwork(sh *service.Handler) error { - if c.autoSetup { - return nil - } - availableCephNetworkInterfaces := map[string]map[string]service.DedicatedInterface{} for name, state := range c.state { if len(state.AvailableCephInterfaces) == 0 { @@ -1391,10 +1350,6 @@ func (c *initConfig) askClustered(s *service.Handler, expectedServices []types.S } if info.ServiceClustered(serviceType) { - if c.autoSetup { - return fmt.Errorf("%s is already clustered on %q, aborting setup", serviceType, info.ClusterName) - } - question := fmt.Sprintf("%q is already part of a %s cluster. Do you want to add this cluster to Microcloud? 
(add/skip) [default=add]", info.ClusterName, serviceType) validator := func(s string) error { if !shared.ValueInSlice(s, []string{"add", "skip"}) { diff --git a/cmd/microcloud/main_init.go b/cmd/microcloud/main_init.go index 25d8d1613..fc88ba2de 100644 --- a/cmd/microcloud/main_init.go +++ b/cmd/microcloud/main_init.go @@ -112,7 +112,6 @@ type initConfig struct { type cmdInit struct { common *CmdControl - flagAutoSetup bool flagLookupTimeout int64 flagWipeAllDisks bool flagEncryptAllDisks bool @@ -128,7 +127,6 @@ func (c *cmdInit) Command() *cobra.Command { RunE: c.Run, } - cmd.Flags().BoolVar(&c.flagAutoSetup, "auto", false, "Automatic setup with default configuration") cmd.Flags().BoolVar(&c.flagWipeAllDisks, "wipe", false, "Wipe disks to add to MicroCeph") cmd.Flags().BoolVar(&c.flagEncryptAllDisks, "encrypt", false, "Encrypt disks to add to MicroCeph") cmd.Flags().StringVar(&c.flagAddress, "address", "", "Address to use for MicroCloud") @@ -147,7 +145,6 @@ func (c *cmdInit) Run(cmd *cobra.Command, args []string) error { bootstrap: true, setupMany: true, address: c.flagAddress, - autoSetup: c.flagAutoSetup, wipeAllDisks: c.flagWipeAllDisks, encryptAllDisks: c.flagEncryptAllDisks, common: c.common, @@ -159,7 +156,7 @@ func (c *cmdInit) Run(cmd *cobra.Command, args []string) error { cfg.lookupTimeout = DefaultLookupTimeout if c.flagLookupTimeout > 0 { cfg.lookupTimeout = time.Duration(c.flagLookupTimeout) * time.Second - } else if c.flagAutoSetup || c.flagPreseed { + } else if c.flagPreseed { cfg.lookupTimeout = DefaultAutoLookupTimeout } @@ -183,11 +180,9 @@ func (c *initConfig) RunInteractive(cmd *cobra.Command, args []string) error { return err } - if !c.autoSetup { - c.setupMany, err = c.common.asker.AskBool("Do you want to set up more than one cluster member? (yes/no) [default=yes]: ", "yes") - if err != nil { - return err - } + c.setupMany, err = c.common.asker.AskBool("Do you want to set up more than one cluster member? (yes/no) [default=yes]: ", "yes") + if err != nil { + return err } err = c.askAddress() diff --git a/doc/how-to/add_machine.md b/doc/how-to/add_machine.md index 1c48ce028..55f112fab 100644 --- a/doc/how-to/add_machine.md +++ b/doc/how-to/add_machine.md @@ -6,5 +6,4 @@ If you want to add a machine to the MicroCloud cluster after the initialisation, sudo microcloud add Answer the prompts to add the machine. -Alternatively, you can add the `--auto` flag to accept the default configuration instead of an interactive setup. -You can also add the `--wipe` flag to automatically wipe any disks you add to the cluster. +You can add the `--wipe` flag to automatically wipe any disks you add to the cluster. diff --git a/test/includes/microcloud.sh b/test/includes/microcloud.sh index 38e396cb3..61f18c468 100644 --- a/test/includes/microcloud.sh +++ b/test/includes/microcloud.sh @@ -575,8 +575,9 @@ reset_snaps() { snap disable microceph > /dev/null 2>&1 || true # Kill any remaining processes. - if ps -e -o '%p %a' | grep -v grep | grep -qe 'ceph-' -qe 'microceph' ; then - kill -9 \$(ps -e -o '%p %a' | grep -e 'ceph-' -e 'microceph' | grep -v grep | awk '{print \$1}') || true + # Filter out the subshell too to not kill our own invocation as it shows as 'sh -c ...microceph...' in the process list. + if ps -e -o '%p %a' | grep -Ev '(grep|sh)' | grep -qe 'ceph-' -qe 'microceph' ; then + kill -9 \$(ps -e -o '%p %a' | grep -Ev '(grep|sh)' | grep -e 'ceph-' -e 'microceph' | awk '{print \$1}') || true fi # Remove modules to get rid of any kernel owned processes. 
@@ -602,8 +603,9 @@ reset_snaps() {
         snap disable microovn > /dev/null 2>&1 || true
 
         # Kill any remaining processes.
-        if ps -e -o '%p %a' | grep -v grep | grep -qe 'ovs-' -qe 'ovn-' -qe 'microovn' ; then
-            kill -9 \$(ps -e -o '%p %a' | grep -e 'ovs-' -e 'ovn-' -e 'microovn' | grep -v grep | awk '{print \$1}') || true
+        # Also filter out the subshell so we don't kill our own invocation; it shows up as 'sh -c ...microovn...' in the process list.
+        if ps -e -o '%p %a' | grep -Ev '(grep|sh)' | grep -qe 'ovs-' -qe 'ovn-' -qe 'microovn' ; then
+            kill -9 \$(ps -e -o '%p %a' | grep -Ev '(grep|sh)' | grep -e 'ovs-' -e 'ovn-' -e 'microovn' | awk '{print \$1}') || true
         fi
 
         # Wipe the snap state so we can start fresh.
diff --git a/test/main.sh b/test/main.sh
index 8478604c6..ffd0d2d94 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -208,7 +208,6 @@ testbed_setup() {
 # test groups
 run_add_tests() {
   run_test test_add_interactive "add interactive"
-  run_test test_add_auto "add auto"
 }
 
 run_instances_tests() {
@@ -219,7 +218,6 @@ run_instances_tests() {
 run_basic_tests() {
   run_test test_reuse_cluster "reuse_cluster"
   run_test test_add_services "add_services"
-  run_test test_auto "auto"
   run_test test_remove_cluster_member "remove_cluster_member"
   run_test test_non_ha "non_ha"
 }
diff --git a/test/suites/add.sh b/test/suites/add.sh
index 2ad5d299d..6524e8cf8 100644
--- a/test/suites/add.sh
+++ b/test/suites/add.sh
@@ -1,105 +1,5 @@
 #!/bin/bash
 
-test_add_auto() {
-  reset_systems 4 0 0
-
-  # Test with just LXD and MicroCloud, and no disks.
-  for m in micro01 micro02 micro03 ; do
-    lxc exec "${m}" -- snap disable microovn || true
-    lxc exec "${m}" -- snap disable microceph || true
-  done
-
-  # Disable extra nodes so we don't add them yet.
-  for m in micro03 micro04 ; do
-    lxc exec "${m}" -- snap disable microcloud
-  done
-
-  lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out"
-  lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
-
-  # Re-enable the nodes.
-  for m in micro03 micro04 ; do
-    lxc exec "${m}" -- snap enable microcloud
-    lxc exec "${m}" -- snap start microcloud
-  done
-
-  # Add the nodes.
-  lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud add --auto --lookup-timeout 10 > out"
-  lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
-
-  for m in micro01 micro02 micro03 micro04 ; do
-    validate_system_lxd "${m}" 4
-
-    # Supress the first message from LXD.
-    lxc exec "${m}" -- lxc list > /dev/null 2>&1 || true
-
-    # Ensure we created no storage devices.
-    [ "$(lxc exec "${m}" -- lxc storage ls -f csv | wc -l)" = "0" ]
-  done
-
-  # Test with all systems.
-  reset_systems 4 0 0
-
-  # Disable extra nodes so we don't add them yet.
-  for m in micro03 micro04 ; do
-    lxc exec "${m}" -- snap disable microcloud
-  done
-
-  lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out"
-  lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
-
-  # Re-enable the nodes.
-  for m in micro03 micro04 ; do
-    lxc exec "${m}" -- snap enable microcloud
-    lxc exec "${m}" -- snap start microcloud
-  done
-
-  # Add the nodes.
-  lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud add --auto --lookup-timeout 10 > out"
-  lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
-
-  for m in micro01 micro02 micro03 micro04 ; do
-    validate_system_lxd "${m}" 4
-    validate_system_microceph "${m}"
-    validate_system_microovn "${m}"
-
-    # Supress the first message from LXD.
- lxc exec "${m}" -- lxc list > /dev/null 2>&1 || true - - # Ensure we created no storage devices. - [ "$(lxc exec "${m}" -- lxc storage ls -f csv | wc -l)" = "0" ] - done - - # Test with ZFS and Ceph disks. - reset_systems 4 2 0 - - # Disable extra nodes so we don't add them yet. - # shellcheck disable=SC2043 - for m in micro04 ; do - lxc exec "${m}" -- snap disable microcloud - done - - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q - - # Re-enable the nodes. - # shellcheck disable=SC2043 - for m in micro04 ; do - lxc exec "${m}" -- snap enable microcloud - lxc exec "${m}" -- snap start microcloud - done - - # Add the nodes. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud add --auto --lookup-timeout 10 > out" - lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q - - for m in micro01 micro02 micro03 micro04 ; do - validate_system_lxd "${m}" 4 disk1 1 0 - validate_system_microceph "${m}" 0 disk2 - validate_system_microovn "${m}" - done -} - test_add_interactive() { reset_systems 4 2 2 @@ -173,6 +73,15 @@ test_add_interactive() { reset_systems 4 2 1 echo "Test growing a MicroCloud with missing services" + unset_interactive_vars + export MULTI_NODE="yes" + export LOOKUP_IFACE="enp5s0" + export LIMIT_SUBNET="yes" + export SKIP_SERVICE="yes" + export EXPECT_PEERS=2 + export SETUP_ZFS="no" + export SETUP_CEPH="no" + export SETUP_OVN="no" # Disable optional services on the initial cluster only. for m in micro01 micro02 micro03 ; do @@ -181,7 +90,8 @@ test_add_interactive() { done lxc exec micro04 -- snap disable microcloud - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" + + microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out" lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q lxc exec micro04 -- snap enable microcloud lxc exec micro04 -- snap start microcloud diff --git a/test/suites/basic.sh b/test/suites/basic.sh index 7da15316b..6c925cf31 100644 --- a/test/suites/basic.sh +++ b/test/suites/basic.sh @@ -1029,132 +1029,6 @@ test_disk_mismatch() { validate_system_microceph "micro04" } -# Test automatic setup with a variety of devices. -test_auto() { - reset_systems 2 0 0 - - lxc exec micro02 -- snap stop microcloud - - echo MicroCloud auto setup without any peers. - ! lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto > out 2>&1" || false - lxc exec micro01 -- tail -1 out | grep -q "Error: Found no available systems" - - lxc exec micro02 -- snap start microcloud - - echo Auto-create a MicroCloud with 2 systems with no disks/interfaces. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 ; do - validate_system_lxd "${m}" 2 - validate_system_microceph "${m}" - validate_system_microovn "${m}" - - # Supress the first message from LXD. - lxc exec ${m} -- lxc list > /dev/null 2>&1 || true - - # Ensure we created no storage devices. - [ "$(lxc exec ${m} -- lxc storage ls -f csv | wc -l)" = "0" ] - done - - reset_systems 2 0 1 - - echo Auto-create a MicroCloud with 2 systems with 1 interface each. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 ; do - validate_system_lxd "${m}" 2 - validate_system_microceph "${m}" - validate_system_microovn "${m}" - - # Ensure we didn't create any other network devices. - ! 
lxc exec ${m} -- lxc network ls -f csv | grep -q "^default," || false - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^UPLINK," || false - - # Ensure we created no storage devices. - [ "$(lxc exec ${m} -- lxc storage ls -f csv | wc -l)" = "0" ] - done - - - reset_systems 2 3 1 - - echo Auto-create a MicroCloud with 2 systems with 3 disks and 1 interface each. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 ; do - validate_system_lxd "${m}" 2 disk1 - validate_system_microceph "${m}" - validate_system_microovn "${m}" - - # Ensure we didn't create any other network devices. - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^default," || false - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^UPLINK," || false - - # Ensure we created no ceph storage devices. - ! lxc exec ${m} -- lxc storage ls -f csv | grep -q "^remote,ceph" || false - done - - reset_systems 3 0 0 - - echo Auto-create a MicroCloud with 3 systems with no disks/interfaces. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 micro03 ; do - validate_system_lxd "${m}" 3 - validate_system_microceph "${m}" - validate_system_microovn "${m}" - - # Supress the first message from LXD. - lxc exec ${m} -- lxc list > /dev/null 2>&1 || true - - # Ensure we created no storage devices. - [ "$(lxc exec ${m} -- lxc storage ls -f csv | wc -l)" = "0" ] - done - - reset_systems 3 0 1 - - echo Auto-create a MicroCloud with 3 systems with 1 interface each. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 micro03; do - validate_system_lxd "${m}" 3 - validate_system_microceph "${m}" - validate_system_microovn "${m}" - - # Ensure we didn't create any other network devices. - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^default," || false - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^UPLINK," || false - - # Ensure we created no storage devices. - [ "$(lxc exec ${m} -- lxc storage ls -f csv | wc -l)" = "0" ] - done - - reset_systems 3 1 1 - - echo Auto-create a MicroCloud with 3 systems with 1 disk and 1 interface each. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 micro03; do - validate_system_lxd "${m}" 3 "" 1 0 - validate_system_microceph "${m}" 0 disk1 - validate_system_microovn "${m}" - - # Ensure we didn't create any other network devices. - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^default," || false - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^UPLINK," || false - - # Ensure we created no zfs storage devices. - ! lxc exec ${m} -- lxc storage ls -f csv | grep -q "^local,zfs" || false - done - - reset_systems 3 3 1 - - echo Auto-create a MicroCloud with 3 systems with 3 disks and 1 interface each. - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto --lookup-timeout 10 > out" - for m in micro01 micro02 micro03 ; do - validate_system_lxd "${m}" 3 disk1 2 0 - validate_system_microceph "${m}" 0 disk2 disk3 - validate_system_microovn "${m}" - - # Ensure we didn't create any other network devices. - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^default," || false - ! lxc exec ${m} -- lxc network ls -f csv | grep -q "^UPLINK," || false - done -} - # services_validator: A basic validator of 3 systems with typical expected inputs. 
services_validator() { for m in micro01 micro02 micro03 ; do @@ -1250,12 +1124,6 @@ test_reuse_cluster() { validate_system_microceph micro04 1 reset_systems 3 3 3 - echo "Fail to create a MicroCloud due to an existing service if --auto specified" - lxc exec micro02 -- microceph cluster bootstrap - ! lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto > out" || true - - - echo "Fail to create a MicroCloud due to conflicting existing services" lxc exec micro03 -- microceph cluster bootstrap ! microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out" || true diff --git a/test/suites/recover.sh b/test/suites/recover.sh index 2bb740c8b..12299c36e 100644 --- a/test/suites/recover.sh +++ b/test/suites/recover.sh @@ -5,7 +5,16 @@ test_recover() { systems=("micro01" "micro02" "micro03" "micro04") - lxc exec micro01 -- sh -c "TEST_CONSOLE=0 microcloud init --auto > out" + unset_interactive_vars + export MULTI_NODE="yes" + export LOOKUP_IFACE="enp5s0" + export LIMIT_SUBNET="yes" + export EXPECT_PEERS=3 + export SETUP_ZFS="no" + export SETUP_CEPH="no" + export SETUP_OVN="no" + + microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out" for m in "${systems[@]}" ; do validate_system_lxd "${m}" 4 validate_system_microceph "${m}" @@ -13,7 +22,7 @@ test_recover() { done # MicroCluster takes a while to update the core_cluster_members table - while lxc exec micro01 -- microcloud cluster list -f csv | grep -q PENDING; do + while lxc exec micro01 --env "TEST_CONSOLE=0" -- microcloud cluster list -f csv | grep -q PENDING; do sleep 2 done @@ -21,7 +30,7 @@ test_recover() { lxc exec "${m}" -- sudo snap stop microcloud done - lxc exec micro01 -- microcloud cluster list --local -f yaml + lxc exec micro01 --env "TEST_CONSOLE=0" -- microcloud cluster list --local -f yaml lxc exec micro01 -- sh -c " TEST_CONSOLE=0 microcloud cluster list --local -f yaml | @@ -44,7 +53,7 @@ test_recover() { sleep 90 for m in micro01 micro02; do - cluster_list=$(lxc exec "${m}" -- microcloud cluster list -f csv) + cluster_list=$(lxc exec "${m}" --env "TEST_CONSOLE=0" -- microcloud cluster list -f csv) # assert_member_role(member_name, role) assert_member_role() {