package controller

import (
"context"
"math/rand"
"reflect"
"strings"
"time"

. "github.com/onsi/ginkgo/v2"
Expand All @@ -35,177 +37,161 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"

	cronjobv1 "tutorial.kubebuilder.io/project/api/v1"
)

// +kubebuilder:docs-gen:collapse=Imports
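
/*
We'll need a couple of helpers before writing the specs themselves. The first
checks whether a condition of a given type is present with the expected status.
Per the Kubernetes API conventions, every condition should also carry a
non-zero LastTransitionTime, so we validate that as well.
*/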

// assertCondition reports whether a condition of the given type exists with
// the expected status and a properly set (non-zero) LastTransitionTime.
func assertCondition(conditions []metav1.Condition, conditionType string, expectedStatus metav1.ConditionStatus) bool {
	for _, cond := range conditions {
		if cond.Type == conditionType {
			return cond.Status == expectedStatus && !cond.LastTransitionTime.IsZero()
		}
	}
	return false
}
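
/*
For the Available condition checked below to pass, the controller has to set it
during reconciliation. As a rough sketch of what that might look like in the
reconciler (assuming it uses the condition helpers from
k8s.io/apimachinery/pkg/api/meta; the Reason and Message shown here are
illustrative, not prescribed by this test):

	meta.SetStatusCondition(&cronJob.Status.Conditions, metav1.Condition{
		Type:    "Available",
		Status:  metav1.ConditionTrue,
		Reason:  "ActiveJobsRunning",
		Message: "CronJob has active Jobs",
	})

SetStatusCondition fills in LastTransitionTime automatically when the status
changes, which is exactly what assertCondition verifies.
*/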

// randomString manually implements random string generation for unique test
// names (kept manual for compatibility with older Ginkgo versions). It is
// seeded per Ginkgo parallel process so parallel specs don't collide.
func randomString(length int) string {
	r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(GinkgoParallelProcess())))
	chars := []rune("abcdefghijklmnopqrstuvwxyz0123456789")
	var sb strings.Builder
	for i := 0; i < length; i++ {
		sb.WriteRune(chars[r.Intn(len(chars))])
	}
	return sb.String()
}
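
/*
The first step to writing a simple integration test is to actually create an
instance of CronJob you can run tests against. Note that when we create a stub
CronJob, the CronJob also needs stubs of its required downstream objects:
without the stubbed Job template spec and Pod template spec below, the
Kubernetes API server will not be able to create the CronJob.
*/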

// createTestCronJob creates a stub CronJob with the given schedule and suspend
// flag, filling out only the required downstream fields.
func createTestCronJob(ctx context.Context, name, namespace, schedule string, suspend bool) *cronjobv1.CronJob {
	cj := &cronjobv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: cronjobv1.CronJobSpec{
			Schedule: schedule,
			Suspend:  ptr.To(suspend),
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							// For simplicity, we only fill out the required fields.
							Containers: []v1.Container{{
								Name:  "test-container",
								Image: "busybox",
							}},
							RestartPolicy: v1.RestartPolicyOnFailure,
						},
					},
				},
			},
		},
	}
	Expect(k8sClient.Create(ctx, cj)).To(Succeed())
	return cj
}
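
/*
We also need a helper that creates a Job owned by a given CronJob. Setting the
controller owner reference is what makes the Job belong to, and be tracked by,
the CronJob. Note that your CronJob's GroupVersionKind is required to set up
this owner reference.
*/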

// createOwnedJob creates a Job owned by the given CronJob and then marks the
// given number of its pods as active.
func createOwnedJob(ctx context.Context, cj *cronjobv1.CronJob, jobName string, active int32) *batchv1.Job {
	gvk := cronjobv1.GroupVersion.WithKind(reflect.TypeOf(cronjobv1.CronJob{}).Name())
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jobName,
			Namespace: cj.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(cj, gvk),
			},
		},
		Spec: cj.Spec.JobTemplate.Spec,
	}

	Expect(k8sClient.Create(ctx, job)).To(Succeed())

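	/*
		Note that you cannot manage status values while creating a resource: the
		status field is managed separately to reflect the current state of the
		resource, so it must be updated after the resource has been created. It is
		also recommended to use status conditions for this; for further information see:
		https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	*/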
	// Update job status
	job.Status.Active = active
	Expect(k8sClient.Status().Update(ctx, job)).To(Succeed())
	return job
}

var _ = Describe("CronJob controller", func() {
	const (
		timeout  = time.Second * 15
		interval = time.Millisecond * 500
	)
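
	/*
		In the assertions below, timeout and interval are Go Duration values of our
		choosing; they bound how long, and how often, Eventually and Consistently
		poll before giving up.
	*/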

Context("When updating CronJob Status", func() {
It("Should increase CronJob Status.Active count when new Jobs are created", func() {
By("By creating a new CronJob")
ctx := context.Background()
cronJob := &cronjobv1.CronJob{
TypeMeta: metav1.TypeMeta{
APIVersion: "batch.tutorial.kubebuilder.io/v1",
Kind: "CronJob",
},
ObjectMeta: metav1.ObjectMeta{
Name: CronjobName,
Namespace: CronjobNamespace,
},
Spec: cronjobv1.CronJobSpec{
Schedule: "1 * * * *",
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
// For simplicity, we only fill out the required fields.
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
// For simplicity, we only fill out the required fields.
Containers: []v1.Container{
{
Name: "test-container",
Image: "test-image",
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
},
},
},
},
}
Expect(k8sClient.Create(ctx, cronJob)).To(Succeed())
	// Use unique names for each test to prevent interference.
	var (
		ctx            context.Context
		cronJobName    string
		ns             string
		namespacedName types.NamespacedName
	)

	// Set up a fresh test namespace before each test.
	BeforeEach(func() {
		ctx = context.Background()
		ns = "cronjob-test-" + randomString(5)
		cronJobName = "test-cj-" + randomString(5)
		namespacedName = types.NamespacedName{Name: cronJobName, Namespace: ns}

		// Create the test namespace.
		nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}
		Expect(k8sClient.Create(ctx, nsObj)).To(Succeed())
	})

	// Clean up after each test.
	AfterEach(func() {
		nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}
		Expect(k8sClient.Delete(ctx, nsObj)).To(Succeed())
	})

Context("Basic CronJob reconciliation", func() {
It("Should create and reconcile CronJob successfully", func() {
By("Creating initial CronJob")
createTestCronJob(ctx, cronJobName, ns, "*/1 * * * *", false)

			/*
				After creating this CronJob, let's check that its Spec fields match what we
				passed in. Because the apiserver may not have finished creating the CronJob
				after our Create() call, we use Gomega's Eventually() instead of Expect():
				it repeatedly runs the assertions every interval until either they succeed
				or the attempts exceed the timeout.
			*/
			createdCj := &cronjobv1.CronJob{}
			Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, namespacedName, createdCj)).To(Succeed())
				g.Expect(createdCj.Spec.Schedule).To(Equal("*/1 * * * *"))
				g.Expect(*createdCj.Spec.Suspend).To(BeFalse())
			}, timeout, interval).Should(Succeed())
		})
	})

Context("Active Jobs tracking", func() {
It("Should update Active count when Jobs are created", func() {
By("Creating base CronJob")
cj := createTestCronJob(ctx, cronJobName, ns, "*/1 * * * *", false)

By("Verifying initial state has no active jobs")
createdCj := &cronjobv1.CronJob{}
Consistently(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, namespacedName, createdCj)).To(Succeed())
				g.Expect(createdCj.Status.Active).To(BeEmpty())
			}, time.Second*3, interval).Should(Succeed())

By("Creating owned Job")
job := createOwnedJob(ctx, cj, "test-job-1", 1)

By("Verifying Active count updates")
Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, namespacedName, createdCj)).To(Succeed())
				g.Expect(createdCj.Status.Active).To(HaveLen(1))
				g.Expect(createdCj.Status.Active[0].Name).To(Equal(job.Name))
			}, timeout, interval).Should(Succeed())

By("By checking that the CronJob status conditions are properly set")
By("Verifying Available condition is set")
Eventually(func(g Gomega) {
				g.Expect(k8sClient.Get(ctx, namespacedName, createdCj)).To(Succeed())
				g.Expect(assertCondition(createdCj.Status.Conditions, "Available", metav1.ConditionTrue)).To(BeTrue())
			}, timeout, interval).Should(Succeed())
		})
	})

})

/*