/*
Copyright 2019 The Tekton Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"knative.dev/pkg/apis"
)

const (
	// ReasonCouldntGetTask indicates that the reason for the failure status is that the
	// Task couldn't be found
	ReasonCouldntGetTask = "CouldntGetTask"

	// ReasonFailedResolution indicates that the reason for the failure status is
	// that references within the TaskRun could not be resolved
	ReasonFailedResolution = "TaskRunResolutionFailed"

	// ReasonFailedValidation indicates that the reason for the failure status is
	// that the TaskRun failed runtime validation
	ReasonFailedValidation = "TaskRunValidationFailed"

	// ReasonRunning indicates that the reason for the in-progress status is that the TaskRun
	// is just starting to be reconciled
	ReasonRunning = "Running"

	// ReasonTimedOut indicates that the TaskRun has taken longer than its configured timeout
	ReasonTimedOut = "TaskRunTimeout"

	// ReasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to
	// a ResourceQuota in the namespace
	ReasonExceededResourceQuota = "ExceededResourceQuota"

	// ReasonExceededNodeResources indicates that the TaskRun's pod has failed to start due
	// to resource constraints on the node
	ReasonExceededNodeResources = "ExceededNodeResources"

	// ReasonSucceeded indicates that the reason for the finished status is that all of the steps
	// completed successfully
	ReasonSucceeded = "Succeeded"

	// ReasonFailed indicates that the reason for the failure status is unknown or that one of the steps failed
	ReasonFailed = "Failed"
)

// SidecarsReady returns true if all of the Pod's sidecars are Ready or
// Terminated.
func SidecarsReady(podStatus corev1.PodStatus) bool {
	if podStatus.Phase != corev1.PodRunning {
		return false
	}
	for _, s := range podStatus.ContainerStatuses {
		if !isContainerSidecar(s.Name) {
			continue
		}
		if s.State.Running != nil && s.Ready {
			continue
		}
		if s.State.Terminated != nil {
			continue
		}
		return false
	}
	return true
}

// MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status.
func MakeTaskRunStatus(tr v1alpha1.TaskRun, pod *corev1.Pod, taskSpec v1alpha1.TaskSpec) v1alpha1.TaskRunStatus {
	trs := &tr.Status
	if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown {
		// If the Succeeded condition is missing or still Unknown, the TaskRun has just started running.
		trs.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionUnknown,
			Reason:  ReasonRunning,
			Message: "Not all Steps in the Task have finished executing",
		})
	}

	trs.PodName = pod.Name

	trs.Steps = []v1alpha1.StepState{}
	trs.Sidecars = []v1alpha1.SidecarState{}
	for _, s := range pod.Status.ContainerStatuses {
		if isContainerStep(s.Name) {
			trs.Steps = append(trs.Steps, v1alpha1.StepState{
				ContainerState: *s.State.DeepCopy(),
				Name:           trimStepPrefix(s.Name),
				ContainerName:  s.Name,
				ImageID:        s.ImageID,
			})
		} else if isContainerSidecar(s.Name) {
			trs.Sidecars = append(trs.Sidecars, v1alpha1.SidecarState{
				Name:    trimSidecarPrefix(s.Name),
				ImageID: s.ImageID,
			})
		}
	}

	// The TaskRun is complete if every step has terminated, or the Pod has reached a
	// definitely complete phase (Succeeded or Failed).
	complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed

	if complete {
		updateCompletedTaskRun(trs, pod)
	} else {
		updateIncompleteTaskRun(trs, pod)
	}

	// Sort step states according to the order of the steps in the Task spec.
	trs.Steps = sortTaskRunStepOrder(trs.Steps, taskSpec.Steps)

	return *trs
}

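// updateCompletedTaskRun sets the Succeeded condition and the completion time
// on a TaskRun whose Pod has finished.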
func updateCompletedTaskRun(trs *v1alpha1.TaskRunStatus, pod *corev1.Pod) {
	if didTaskRunFail(pod) {
		msg := getFailureMessage(pod)
		trs.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionFalse,
			Reason:  ReasonFailed,
			Message: msg,
		})
	} else {
		trs.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionTrue,
			Reason:  ReasonSucceeded,
			Message: "All Steps have completed executing",
		})
	}
	// Update the TaskRun's completion time.
	trs.CompletionTime = &metav1.Time{Time: time.Now()}
}

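// updateIncompleteTaskRun updates the Succeeded condition of a TaskRun whose
// Pod is still Pending or Running.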
func updateIncompleteTaskRun(trs *v1alpha1.TaskRunStatus, pod *corev1.Pod) {
	switch pod.Status.Phase {
	case corev1.PodRunning:
		trs.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionUnknown,
			Reason:  ReasonRunning,
			Message: "Not all Steps in the Task have finished executing",
		})
	case corev1.PodPending:
		var reason, msg string
		if IsPodExceedingNodeResources(pod) {
			reason = ReasonExceededNodeResources
			msg = "TaskRun Pod exceeded available resources"
		} else {
			reason = "Pending"
			msg = getWaitingMessage(pod)
		}
		trs.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionUnknown,
			Reason:  reason,
			Message: msg,
		})
	}
}

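// didTaskRunFail returns true if the Pod failed or if any step container
// terminated with a non-zero exit code.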
func didTaskRunFail(pod *corev1.Pod) bool {
	f := pod.Status.Phase == corev1.PodFailed
	for _, s := range pod.Status.ContainerStatuses {
		if isContainerStep(s.Name) {
			if s.State.Terminated != nil {
				f = f || s.State.Terminated.ExitCode != 0
			}
		}
	}
	return f
}

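// areStepsComplete returns true if the Pod is running and every step container
// has terminated.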
func areStepsComplete(pod *corev1.Pod) bool {
	stepsComplete := len(pod.Status.ContainerStatuses) > 0 && pod.Status.Phase == corev1.PodRunning
	for _, s := range pod.Status.ContainerStatuses {
		if isContainerStep(s.Name) {
			if s.State.Terminated == nil {
				stepsComplete = false
			}
		}
	}
	return stepsComplete
}

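// getFailureMessage returns a human-readable message describing why the Pod
// failed, preferring the first step container that exited with a non-zero code.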
func getFailureMessage(pod *corev1.Pod) string {
	// First, try to surface an error about the actual build step that failed.
	for _, status := range pod.Status.ContainerStatuses {
		term := status.State.Terminated
		if term != nil && term.ExitCode != 0 {
			return fmt.Sprintf("%q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s",
				status.Name, term.ExitCode, status.ImageID,
				pod.Namespace, pod.Name, status.Name)
		}
	}
	// Next, return the Pod's status message if it has one.
	if pod.Status.Message != "" {
		return pod.Status.Message
	}
	// Lastly, fall back on a generic error message.
	return "build failed for unspecified reasons."
}

// IsPodExceedingNodeResources returns true if the Pod's status indicates there
// are insufficient resources to schedule the Pod.
func IsPodExceedingNodeResources(pod *corev1.Pod) bool {
	for _, podStatus := range pod.Status.Conditions {
		if podStatus.Reason == corev1.PodReasonUnschedulable && strings.Contains(podStatus.Message, "Insufficient") {
			return true
		}
	}
	return false
}

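// getWaitingMessage returns a human-readable message describing why the Pod is
// still waiting to start.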
func getWaitingMessage(pod *corev1.Pod) string {
	// First, try to surface the reason a build step is pending or unknown.
	for _, status := range pod.Status.ContainerStatuses {
		wait := status.State.Waiting
		if wait != nil && wait.Message != "" {
			return fmt.Sprintf("build step %q is pending with reason %q",
				status.Name, wait.Message)
		}
	}
	// Next, try to surface the underlying reason by inspecting any Pod condition that is not True.
	for _, podStatus := range pod.Status.Conditions {
		if podStatus.Status != corev1.ConditionTrue {
			return fmt.Sprintf("pod status %q:%q; message: %q",
				podStatus.Type,
				podStatus.Status,
				podStatus.Message)
		}
	}
	// Next, return the Pod's status message if it has one.
	if pod.Status.Message != "" {
		return pod.Status.Message
	}

	// Lastly, fall back on a generic pending message.
	return "Pending"
}

// sortTaskRunStepOrder sorts the StepStates in the same order as the original
// TaskSpec steps.
func sortTaskRunStepOrder(taskRunSteps []v1alpha1.StepState, taskSpecSteps []v1alpha1.Step) []v1alpha1.StepState {
	trt := &stepStateSorter{
		taskRunSteps: taskRunSteps,
	}
	trt.mapForSort = trt.constructTaskStepsSorter(taskSpecSteps)
	sort.Sort(trt)
	return trt.taskRunSteps
}

// stepStateSorter implements a sorting mechanism to align the order of the steps in TaskRun
// with the spec steps in Task.
type stepStateSorter struct {
	taskRunSteps []v1alpha1.StepState
	mapForSort   map[string]int
}

// constructTaskStepsSorter constructs a map matching the names of
// the steps to their indices for a task.
func (trt *stepStateSorter) constructTaskStepsSorter(taskSpecSteps []v1alpha1.Step) map[string]int {
	sorter := make(map[string]int)
	for index, step := range taskSpecSteps {
		sorter[step.Name] = index
	}
	return sorter
}

// changeIndex moves the step at the given index to its desired position, based on the
// order of the steps in the Task. Instead of swapping an element with its neighbour,
// we swap it directly into the desired index.
func (trt *stepStateSorter) changeIndex(index int) {
	// If the current index already matches the desired index, do nothing;
	// otherwise swap the element into its desired index.
	desiredIndex, exist := trt.mapForSort[trt.taskRunSteps[index].Name]
	if exist && index != desiredIndex {
		trt.taskRunSteps[desiredIndex], trt.taskRunSteps[index] = trt.taskRunSteps[index], trt.taskRunSteps[desiredIndex]
	}
}

// Len, Swap and Less implement sort.Interface. The actual reordering happens in
// Swap; Less always returns true so that sort.Sort visits every element.
func (trt *stepStateSorter) Len() int { return len(trt.taskRunSteps) }

func (trt *stepStateSorter) Swap(i, j int) {
	trt.changeIndex(j)
	// Index j never reaches the last element, so when i reaches the end of the
	// slice we also check whether the last element needs a swap.
	if i == trt.Len()-1 {
		trt.changeIndex(i)
	}
}

func (trt *stepStateSorter) Less(i, j int) bool {
	// The reordering logic is complicated, so it lives in Swap, which decides whether
	// and how to change the index. Returning true here makes sort.Sort iterate over
	// all the elements of the array and call Swap for each of them.
	return true
}