cmd/microcloud: Remove obsolete checks for autoSetup
After removing the --auto flag, some checks for autoSetup became redundant and have to be removed as well.

Some checks have to be kept, as the functions containing them are also called as part of running preseed.

Signed-off-by: Julian Pelizäus <[email protected]>
roosterfish committed Sep 12, 2024
1 parent 7b58645 commit b2aaf48
Showing 2 changed files with 97 additions and 144 deletions.
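
For orientation before the diff, the commit touches two shapes of check. The first shape guarded interactive prompts and is now obsolete; the second guards functions shared with the preseed path and is kept. The sketch below is illustrative only: initConfig and askBool are hypothetical stand-ins for MicroCloud's initConfig struct and c.asker.AskBool, reduced to contrast the two shapes.

// Illustrative sketch only, not MicroCloud code: initConfig and askBool
// are hypothetical stand-ins for the real initConfig and c.asker.AskBool.
package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

var stdin = bufio.NewReader(os.Stdin)

type initConfig struct {
    autoSetup bool // true when initializing from a preseed file
}

// askBool prints the question, reads one line, and falls back to the default.
func (c *initConfig) askBool(question, defaultAnswer string) (bool, error) {
    fmt.Print(question)
    line, err := stdin.ReadString('\n')
    if err != nil {
        return false, err
    }

    answer := strings.TrimSpace(line)
    if answer == "" {
        answer = defaultAnswer
    }

    return answer == "yes", nil
}

func main() {
    c := &initConfig{autoSetup: false}

    // Obsolete shape (deleted in this commit): with --auto gone, these
    // code paths can only be reached interactively, so the guard
    //
    //     if !c.autoSetup { ... ask ... }
    //
    // is dropped and the prompt is issued unconditionally.
    wantsDisks, err := c.askBool("Would you like to set up local storage? (yes/no) [default=yes]: ", "yes")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    // Retained shape (kept in this commit): functions shared with the
    // preseed path, such as askMissingServices, still consult autoSetup
    // so a preseed run skips the prompt instead of blocking on stdin.
    confirmed := true
    if !c.autoSetup {
        confirmed, err = c.askBool("Services not found. Continue anyway? (yes/no) [default=yes]: ", "yes")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

    fmt.Println("local storage:", wantsDisks, "| continue despite missing services:", confirmed)
}

In the actual diff, the first shape simply loses its if !c.autoSetup wrapper and the body is de-indented, while retained guards such as the one in askMissingServices only gain a clarifying comment.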
cmd/microcloud/ask.go: 233 changes (94 additions, 139 deletions)
@@ -53,7 +53,7 @@ func (c *initConfig) askUpdateProfile(profile api.ProfilesPost, profiles []strin
         }
     }

-    if !c.autoSetup && len(askConflictingConfig) > 0 || len(askConflictingDevices) > 0 {
+    if len(askConflictingConfig) > 0 || len(askConflictingDevices) > 0 {
         replace, err := c.asker.AskBool("Replace existing default profile configuration? (yes/no) [default=no]: ", "no")
         if err != nil {
             return nil, err
@@ -83,11 +83,9 @@ func (c *initConfig) askRetry(question string, f func() error) error {
         if err != nil {
             fmt.Println(err)

-            if !c.autoSetup {
-                retry, err = c.asker.AskBool(fmt.Sprintf("%s (yes/no) [default=yes]: ", question), "yes")
-                if err != nil {
-                    return err
-                }
-            }
+            retry, err = c.asker.AskBool(fmt.Sprintf("%s (yes/no) [default=yes]: ", question), "yes")
+            if err != nil {
+                return err
+            }
         }

@@ -109,6 +107,8 @@ func (c *initConfig) askMissingServices(services []types.ServiceType, stateDirs

     if len(missingServices) > 0 {
         serviceStr := strings.Join(missingServices, ", ")
+
+        // Ignore missing services in case of preseed.
         if !c.autoSetup {
             confirm, err := c.asker.AskBool(fmt.Sprintf("%s not found. Continue anyway? (yes/no) [default=yes]: ", serviceStr), "yes")
             if err != nil {
@@ -278,13 +278,6 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error {
     data := [][]string{}
     selectedDisks := map[string]string{}
     for peer, disks := range availableDisks {
-        // In auto mode, if there's no spare disk, then we can't add a remote storage pool, so skip local pool creation.
-        if c.autoSetup && len(disks) < 2 {
-            logger.Infof("Skipping local storage pool creation, peer %q has too few disks", peer)
-
-            return nil
-        }
-
         sortedDisks := []api.ResourcesStorageDisk{}
         for _, disk := range disks {
             sortedDisks = append(sortedDisks, disk)
@@ -297,24 +290,12 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error {
         for _, disk := range sortedDisks {
             devicePath := parseDiskPath(disk)
             data = append(data, []string{peer, disk.Model, units.GetByteSizeStringIEC(int64(disk.Size), 2), disk.Type, devicePath})
-
-            // Add the first disk for each peer.
-            if c.autoSetup {
-                _, ok := selectedDisks[peer]
-                if !ok {
-                    selectedDisks[peer] = devicePath
-                }
-            }
         }
     }

-    var err error
-    wantsDisks := true
-    if !c.autoSetup {
-        wantsDisks, err = c.asker.AskBool("Would you like to set up local storage? (yes/no) [default=yes]: ", "yes")
-        if err != nil {
-            return err
-        }
-    }
+    wantsDisks, err := c.asker.AskBool("Would you like to set up local storage? (yes/no) [default=yes]: ", "yes")
+    if err != nil {
+        return err
+    }

     if !wantsDisks {
@@ -328,69 +309,67 @@ func (c *initConfig) askLocalPool(sh *service.Handler) error {
         return fmt.Errorf("Failed to check for source.wipe extension: %w", err)
     }

-    if !c.autoSetup {
-        err := c.askRetry("Retry selecting disks?", func() error {
-            selected := map[string]string{}
-            sort.Sort(cli.SortColumnsNaturally(data))
-            header := []string{"LOCATION", "MODEL", "CAPACITY", "TYPE", "PATH"}
-            table := NewSelectableTable(header, data)
-            fmt.Println("Select exactly one disk from each cluster member:")
-            err := table.Render(table.rows)
-            if err != nil {
-                return err
-            }
-
-            selectedRows, err := table.GetSelections()
-            if err != nil {
-                return fmt.Errorf("Failed to confirm local LXD disk selection: %w", err)
-            }
-
-            if len(selectedRows) == 0 {
-                return fmt.Errorf("No disks selected")
-            }
-
-            for _, entry := range selectedRows {
-                target := table.SelectionValue(entry, "LOCATION")
-                path := table.SelectionValue(entry, "PATH")
-
-                _, ok := selected[target]
-                if ok {
-                    return fmt.Errorf("Failed to add local storage pool: Selected more than one disk for target peer %q", target)
-                }
-
-                selected[target] = path
-            }
-
-            if len(selected) != len(askSystems) {
-                return fmt.Errorf("Failed to add local storage pool: Some peers don't have an available disk")
-            }
-
-            if !c.wipeAllDisks && wipeable {
-                fmt.Println("Select which disks to wipe:")
-                err := table.Render(selectedRows)
-                if err != nil {
-                    return err
-                }
-
-                wipeRows, err := table.GetSelections()
-                if err != nil {
-                    return fmt.Errorf("Failed to confirm which disks to wipe: %w", err)
-                }
-
-                for _, entry := range wipeRows {
-                    target := table.SelectionValue(entry, "LOCATION")
-                    path := table.SelectionValue(entry, "PATH")
-                    toWipe[target] = path
-                }
-            }
-
-            selectedDisks = selected
-
-            return nil
-        })
-        if err != nil {
-            return err
-        }
-    }
+    err = c.askRetry("Retry selecting disks?", func() error {
+        selected := map[string]string{}
+        sort.Sort(cli.SortColumnsNaturally(data))
+        header := []string{"LOCATION", "MODEL", "CAPACITY", "TYPE", "PATH"}
+        table := NewSelectableTable(header, data)
+        fmt.Println("Select exactly one disk from each cluster member:")
+        err := table.Render(table.rows)
+        if err != nil {
+            return err
+        }
+
+        selectedRows, err := table.GetSelections()
+        if err != nil {
+            return fmt.Errorf("Failed to confirm local LXD disk selection: %w", err)
+        }
+
+        if len(selectedRows) == 0 {
+            return fmt.Errorf("No disks selected")
+        }
+
+        for _, entry := range selectedRows {
+            target := table.SelectionValue(entry, "LOCATION")
+            path := table.SelectionValue(entry, "PATH")
+
+            _, ok := selected[target]
+            if ok {
+                return fmt.Errorf("Failed to add local storage pool: Selected more than one disk for target peer %q", target)
+            }
+
+            selected[target] = path
+        }
+
+        if len(selected) != len(askSystems) {
+            return fmt.Errorf("Failed to add local storage pool: Some peers don't have an available disk")
+        }
+
+        if !c.wipeAllDisks && wipeable {
+            fmt.Println("Select which disks to wipe:")
+            err := table.Render(selectedRows)
+            if err != nil {
+                return err
+            }
+
+            wipeRows, err := table.GetSelections()
+            if err != nil {
+                return fmt.Errorf("Failed to confirm which disks to wipe: %w", err)
+            }
+
+            for _, entry := range wipeRows {
+                target := table.SelectionValue(entry, "LOCATION")
+                path := table.SelectionValue(entry, "PATH")
+                toWipe[target] = path
+            }
+        }
+
+        selectedDisks = selected
+
+        return nil
+    })
+    if err != nil {
+        return err
+    }

     if len(selectedDisks) == 0 {
@@ -603,21 +582,17 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
         return nil
     }

-    var err error
-    wantsDisks := true
-    if !c.autoSetup {
-        wantsDisks, err = c.asker.AskBool("Would you like to set up distributed storage? (yes/no) [default=yes]: ", "yes")
-        if err != nil {
-            return err
-        }
-
-        // Ask if the user is okay with fully remote ceph on some systems.
-        if len(askSystemsRemote) != availableDiskCount && wantsDisks {
-            wantsDisks, err = c.asker.AskBool("Unable to find disks on some systems. Continue anyway? (yes/no) [default=yes]: ", "yes")
-            if err != nil {
-                return err
-            }
-        }
-    }
+    wantsDisks, err := c.asker.AskBool("Would you like to set up distributed storage? (yes/no) [default=yes]: ", "yes")
+    if err != nil {
+        return err
+    }
+
+    // Ask if the user is okay with fully remote ceph on some systems.
+    if len(askSystemsRemote) != availableDiskCount && wantsDisks {
+        wantsDisks, err = c.asker.AskBool("Unable to find disks on some systems. Continue anyway? (yes/no) [default=yes]: ", "yes")
+        if err != nil {
+            return err
+        }
+    }

     if !wantsDisks {
@@ -663,30 +638,28 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
         return nil
     }

-    if !c.autoSetup {
-        fmt.Println("Select from the available unpartitioned disks:")
-        err := table.Render(table.rows)
-        if err != nil {
-            return err
-        }
-
-        selected, err = table.GetSelections()
-        if err != nil {
-            return fmt.Errorf("Invalid disk configuration: %w", err)
-        }
-
-        if len(selected) > 0 && !c.wipeAllDisks {
-            fmt.Println("Select which disks to wipe:")
-            err := table.Render(selected)
-            if err != nil {
-                return err
-            }
-
-            toWipe, err = table.GetSelections()
-            if err != nil {
-                return fmt.Errorf("Invalid disk configuration: %w", err)
-            }
-        }
-    }
+    fmt.Println("Select from the available unpartitioned disks:")
+    err := table.Render(table.rows)
+    if err != nil {
+        return err
+    }
+
+    selected, err = table.GetSelections()
+    if err != nil {
+        return fmt.Errorf("Invalid disk configuration: %w", err)
+    }
+
+    if len(selected) > 0 && !c.wipeAllDisks {
+        fmt.Println("Select which disks to wipe:")
+        err := table.Render(selected)
+        if err != nil {
+            return err
+        }
+
+        toWipe, err = table.GetSelections()
+        if err != nil {
+            return fmt.Errorf("Invalid disk configuration: %w", err)
+        }
+    }

     targetDisks := map[string][]string{}
@@ -700,11 +673,6 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
         targetDisks[target] = append(targetDisks[target], path)
     }

-    insufficientDisks := !useJoinConfigRemote && len(targetDisks) < RecommendedOSDHosts
-    if c.autoSetup && insufficientDisks {
-        return fmt.Errorf("Unable to add remote storage pool: At least %d peers must have allocated disks", RecommendedOSDHosts)
-    }
-
     wipeDisks = map[string]map[string]bool{}
     for _, entry := range toWipe {
         target := table.SelectionValue(entry, "LOCATION")
@@ -722,6 +690,8 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
         return fmt.Errorf("No disks were selected")
     }

+    insufficientDisks := !useJoinConfigRemote && len(targetDisks) < RecommendedOSDHosts
+
     if insufficientDisks {
         // This error will be printed to STDOUT as a normal message, so it includes a new-line for readability.
         return fmt.Errorf("Disk configuration does not meet recommendations for fault tolerance. At least %d systems must supply disks.\nContinuing with this configuration will leave MicroCloud susceptible to data loss", RecommendedOSDHosts)
@@ -749,7 +719,7 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
     }

     encryptDisks := c.encryptAllDisks
-    if !c.autoSetup && !c.encryptAllDisks && len(selectedDisks) > 0 {
+    if !c.encryptAllDisks && len(selectedDisks) > 0 {
         var err error
         encryptDisks, err = c.asker.AskBool("Do you want to encrypt the selected disks? (yes/no) [default=no]: ", "no")
         if err != nil {
@@ -760,19 +730,17 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
     // If a cephfs pool has already been set up, we will extend it automatically, so no need to ask the question.
     setupCephFS := useJoinConfigRemoteFS
     if !useJoinConfigRemoteFS {
-        if !c.autoSetup {
-            lxd := sh.Services[types.LXD].(*service.LXDService)
-            ext := "storage_cephfs_create_missing"
-            hasCephFS, err := lxd.HasExtension(context.Background(), lxd.Name(), lxd.Address(), "", ext)
-            if err != nil {
-                return fmt.Errorf("Failed to check for the %q LXD API extension: %w", ext, err)
-            }
-
-            if hasCephFS {
-                setupCephFS, err = c.asker.AskBool("Would you like to set up CephFS remote storage? (yes/no) [default=yes]: ", "yes")
-                if err != nil {
-                    return err
-                }
-            }
-        }
+        lxd := sh.Services[types.LXD].(*service.LXDService)
+        ext := "storage_cephfs_create_missing"
+        hasCephFS, err := lxd.HasExtension(context.Background(), lxd.Name(), lxd.Address(), "", ext)
+        if err != nil {
+            return fmt.Errorf("Failed to check for the %q LXD API extension: %w", ext, err)
+        }
+
+        if hasCephFS {
+            setupCephFS, err = c.asker.AskBool("Would you like to set up CephFS remote storage? (yes/no) [default=yes]: ", "yes")
+            if err != nil {
+                return err
+            }
+        }
     }
@@ -882,7 +850,7 @@ func (c *initConfig) askRemotePool(sh *service.Handler) error {
 }

 func (c *initConfig) askOVNNetwork(sh *service.Handler) error {
-    if c.autoSetup || sh.Services[types.MicroOVN] == nil {
+    if sh.Services[types.MicroOVN] == nil {
         return nil
     }

@@ -1247,11 +1215,6 @@ func (c *initConfig) askNetwork(sh *service.Handler) error {
         return err
     }

-    if !supportsFAN && c.autoSetup {
-        logger.Warn("Skipping FAN network setup, some systems don't support it")
-        return nil
-    }
-
     if !supportsFAN {
         proceedWithNoOverlayNetworking, err := c.asker.AskBool("FAN networking is not usable. Do you want to proceed with setting up an inoperable cluster? (yes/no) [default=no]: ", "no")
         if err != nil {
@@ -1299,10 +1262,6 @@ func (c *initConfig) askNetwork(sh *service.Handler) error {
 }

 func (c *initConfig) askCephNetwork(sh *service.Handler) error {
-    if c.autoSetup {
-        return nil
-    }
-
     availableCephNetworkInterfaces := map[string]map[string]service.DedicatedInterface{}
     for name, state := range c.state {
         if len(state.AvailableCephInterfaces) == 0 {
@@ -1391,10 +1350,6 @@ func (c *initConfig) askClustered(s *service.Handler, expectedServices []types.S
     }

     if info.ServiceClustered(serviceType) {
-        if c.autoSetup {
-            return fmt.Errorf("%s is already clustered on %q, aborting setup", serviceType, info.ClusterName)
-        }
-
         question := fmt.Sprintf("%q is already part of a %s cluster. Do you want to add this cluster to Microcloud? (add/skip) [default=add]", info.ClusterName, serviceType)
         validator := func(s string) error {
             if !shared.ValueInSlice(s, []string{"add", "skip"}) {
cmd/microcloud/main_init.go: 8 changes (3 additions, 5 deletions)
@@ -180,11 +180,9 @@ func (c *initConfig) RunInteractive(cmd *cobra.Command, args []string) error {
         return err
     }

-    if !c.autoSetup {
-        c.setupMany, err = c.common.asker.AskBool("Do you want to set up more than one cluster member? (yes/no) [default=yes]: ", "yes")
-        if err != nil {
-            return err
-        }
-    }
+    c.setupMany, err = c.common.asker.AskBool("Do you want to set up more than one cluster member? (yes/no) [default=yes]: ", "yes")
+    if err != nil {
+        return err
+    }

     err = c.askAddress()
