47 changes: 0 additions & 47 deletions internal/cluster/common.go
@@ -188,53 +188,6 @@ func getKnownMastersNodesIds(c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, er
return masterNodesIds, nil
}

func HostsInStatus(c *common.Cluster, statuses []string) (int, int, int) {
mappedMastersByRole := MapMasterHostsByStatus(c)
mappedArbitersByRole := MapArbiterHostsByStatus(c)
mappedWorkersByRole := MapWorkersHostsByStatus(c)
mastersInSomeInstallingStatus := 0
arbitersInSomeInstallingStatus := 0
workersInSomeInstallingStatus := 0

for _, status := range statuses {
mastersInSomeInstallingStatus += len(mappedMastersByRole[status])
arbitersInSomeInstallingStatus += len(mappedArbitersByRole[status])
workersInSomeInstallingStatus += len(mappedWorkersByRole[status])
}
return mastersInSomeInstallingStatus, arbitersInSomeInstallingStatus, workersInSomeInstallingStatus
}

func MapMasterHostsByStatus(c *common.Cluster) map[string][]*models.Host {
return mapHostsByStatus(c, models.HostRoleMaster)
}

func MapArbiterHostsByStatus(c *common.Cluster) map[string][]*models.Host {
return mapHostsByStatus(c, models.HostRoleArbiter)
}

func MapWorkersHostsByStatus(c *common.Cluster) map[string][]*models.Host {
return mapHostsByStatus(c, models.HostRoleWorker)
}

func mapHostsByStatus(c *common.Cluster, role models.HostRole) map[string][]*models.Host {
hostMap := make(map[string][]*models.Host)
for _, host := range c.Hosts {
if role != "" && common.GetEffectiveRole(host) != role {
continue
}
if _, ok := hostMap[swag.StringValue(host.Status)]; ok {
hostMap[swag.StringValue(host.Status)] = append(hostMap[swag.StringValue(host.Status)], host)
} else {
hostMap[swag.StringValue(host.Status)] = []*models.Host{host}
}
}
return hostMap
}

func MapHostsByStatus(c *common.Cluster) map[string][]*models.Host {
return mapHostsByStatus(c, "")
}

func UpdateMachineNetwork(db *gorm.DB, cluster *common.Cluster, machineNetwork []string) error {
if len(machineNetwork) > 2 {
return common.NewApiError(http.StatusInternalServerError,
49 changes: 3 additions & 46 deletions internal/cluster/transition.go
@@ -257,7 +257,7 @@ func (th *transitionHandler) createClusterCompletionStatusInfo(ctx context.Conte
statusInfo = StatusInfoDegraded
statusInfo += ". Failed OLM operators: " + strings.Join(statuses[models.OperatorTypeOlm][models.OperatorStatusFailed], ", ")
} else {
_, _, installedWorkers := HostsInStatus(cluster, []string{models.HostStatusInstalled})
_, _, installedWorkers := common.HostsInStatus(cluster, []string{models.HostStatusInstalled})
if installedWorkers < common.NumberOfWorkers(cluster) {
statusInfo = StatusInfoNotAllWorkersInstalled
}
@@ -418,7 +418,7 @@ func (th *transitionHandler) IsFinalizing(sw stateswitch.StateSwitch, args state
sCluster, ok := sw.(*stateCluster)
installedStatus := []string{models.HostStatusInstalled}

if ok && th.enoughMastersAndWorkers(sCluster, installedStatus) {
if ok && common.HasEnoughMastersAndWorkers(sCluster.cluster, installedStatus) {
th.log.Infof("Cluster %s has at least required number of installed hosts, "+
"cluster is finalizing.", sCluster.cluster.ID)
return true, nil
@@ -431,7 +431,7 @@ func (th *transitionHandler) IsInstalling(sw stateswitch.StateSwitch, args state
sCluster, _ := sw.(*stateCluster)
installingStatuses := []string{models.HostStatusInstalling, models.HostStatusInstallingInProgress,
models.HostStatusInstalled, models.HostStatusInstallingPendingUserAction, models.HostStatusPreparingSuccessful}
return th.enoughMastersAndWorkers(sCluster, installingStatuses), nil
return common.HasEnoughMastersAndWorkers(sCluster.cluster, installingStatuses), nil
}

// check if we should stay in installing state
@@ -507,49 +507,6 @@ func (th *transitionHandler) PostUpdateFinalizingAMSConsoleUrl(sw stateswitch.St
return nil
}

// enoughMastersAndWorkers returns whether the number of master and worker nodes in the specified cluster with the given status
// meets the required criteria. The conditions are as follows:
// - For SNO (Single Node OpenShift), there must be exactly one master node and zero worker nodes.
// - For High Availability cluster, the number of master nodes should match the user's request, and not less than the minimum. The worker node requirement depends on this request:
// If the user requested at least two workers, there must be at least two, indicating non-schedulable masters were intended.
// If the user requested fewer than two workers, any number of workers is acceptable.
// - For TNA Clusters the same conditions apply as for High Availability Clusters, but we also need to check that at least one arbiter node is in the correct status.
func (th *transitionHandler) enoughMastersAndWorkers(sCluster *stateCluster, statuses []string) bool {
mastersInSomeInstallingStatus, arbitersInSomeInstallingStatus, workersInSomeInstallingStatus := HostsInStatus(sCluster.cluster, statuses)

if sCluster.cluster.ControlPlaneCount == 1 {
return mastersInSomeInstallingStatus == common.AllowedNumberOfMasterHostsInNoneHaMode &&
workersInSomeInstallingStatus == common.AllowedNumberOfWorkersInNoneHaMode
}

// hosts roles are known at this stage
masters, arbiters, workers, _ := common.GetHostsByEachRole(&sCluster.cluster.Cluster, false)
numberOfExpectedMasters := len(masters)
numberOfExpectedArbiters := len(arbiters)

minMasterHostsNeeded := common.MinMasterHostsNeededForInstallationInHaMode
if numberOfExpectedArbiters != 0 {
minMasterHostsNeeded = common.MinMasterHostsNeededForInstallationInHaArbiterMode
// validate arbiters
if arbitersInSomeInstallingStatus == 0 {
return false
}
}

// validate masters
if numberOfExpectedMasters < minMasterHostsNeeded ||
mastersInSomeInstallingStatus < numberOfExpectedMasters {
return false
}

numberOfExpectedWorkers := len(workers)

// validate workers
return numberOfExpectedWorkers < common.MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode ||
numberOfExpectedWorkers >= common.MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode &&
workersInSomeInstallingStatus >= common.MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode
}

// check if installation reach to timeout
func (th *transitionHandler) IsInstallationTimedOut(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) {
sCluster, ok := sw.(*stateCluster)
59 changes: 59 additions & 0 deletions internal/common/common.go
@@ -781,3 +781,62 @@ func GetDefaultHighAvailabilityAndMasterCountParams(highAvailabilityMode *string
func IsClusterTopologyHighlyAvailableArbiter(cluster *Cluster) bool {
return funk.NotEmpty(GetHostsByRole(cluster, models.HostRoleArbiter))
}

func HostsInStatus(c *Cluster, statuses []string) (masters, arbiters, workers int) {
for _, host := range c.Hosts {
if funk.ContainsString(statuses, swag.StringValue(host.Status)) {
switch GetEffectiveRole(host) {
case models.HostRoleMaster, models.HostRoleBootstrap:
masters++
case models.HostRoleArbiter:
arbiters++
case models.HostRoleWorker:
workers++
}
}
}
return
}

// HasEnoughMastersAndWorkers returns whether the number of master and worker nodes in the specified cluster that are in
// any of the given statuses meets the required criteria. The conditions are as follows:
// - For SNO (Single Node OpenShift), there must be exactly one master node and zero worker nodes.
// - For a High Availability cluster, the number of master nodes must match the user's request and be no less than the minimum. The worker requirement depends on that request:
// If the user requested at least two workers, there must be at least two, indicating non-schedulable masters were intended.
// If the user requested fewer than two workers, any number of workers is acceptable.
// - For TNA clusters, the same conditions apply as for High Availability clusters, but at least one arbiter node must also be in the correct status.
func HasEnoughMastersAndWorkers(c *Cluster, statuses []string) bool {
mastersInStatus, arbitersInStatus, workersInStatus := HostsInStatus(c, statuses)

if c.ControlPlaneCount == 1 {
return mastersInStatus == AllowedNumberOfMasterHostsInNoneHaMode &&
workersInStatus == AllowedNumberOfWorkersInNoneHaMode
}

// host roles are known at this stage
masters, arbiters, workers, _ := GetHostsByEachRole(&c.Cluster, false)
numberOfExpectedMasters := len(masters)
numberOfExpectedArbiters := len(arbiters)

minMasterHostsNeeded := MinMasterHostsNeededForInstallationInHaMode
if numberOfExpectedArbiters != 0 {
minMasterHostsNeeded = MinMasterHostsNeededForInstallationInHaArbiterMode
// validate arbiters
if arbitersInStatus == 0 {
return false
}
}

// validate masters
if numberOfExpectedMasters < minMasterHostsNeeded ||
mastersInStatus < numberOfExpectedMasters {
return false
}

numberOfExpectedWorkers := len(workers)

// validate workers
return numberOfExpectedWorkers < MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode ||
numberOfExpectedWorkers >= MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode &&
workersInStatus >= MinimumNumberOfWorkersForNonSchedulableMastersClusterInHaMode
}
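
For context, a minimal usage sketch (not part of this diff) of the relocated helpers, written as if from within the repository. It assumes the module's import paths (github.com/openshift/assisted-service/internal/common and github.com/openshift/assisted-service/models) and a *common.Cluster whose Hosts are already populated, as in the transition handlers above; reportInstallProgress is a hypothetical caller, not code from this PR.

package example

import (
	"fmt"

	"github.com/openshift/assisted-service/internal/common"
	"github.com/openshift/assisted-service/models"
)

// reportInstallProgress illustrates the new call sites exposed by this change.
func reportInstallProgress(cluster *common.Cluster) {
	// Count hosts per effective role that have already reached the "installed" status.
	masters, arbiters, workers := common.HostsInStatus(cluster, []string{models.HostStatusInstalled})
	fmt.Printf("installed hosts: %d masters, %d arbiters, %d workers\n", masters, arbiters, workers)

	// Mirror transitionHandler.IsFinalizing: the cluster can be considered finalizing only when
	// enough masters (and, for TNA clusters, at least one arbiter) and workers are installed.
	if common.HasEnoughMastersAndWorkers(cluster, []string{models.HostStatusInstalled}) {
		fmt.Println("cluster has enough installed hosts to start finalizing")
	}
}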