Skip to content

Commit b685eea

Browse files
committed
feat: rebase to v1.11 cluster api core
1 parent 9543e87 commit b685eea

File tree

8 files changed

+65

-60

lines changed

bootstrap/eks/api/v1beta2/nodeadmconfig_types.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import (
44
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
55
"k8s.io/apimachinery/pkg/runtime"
66

7-
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
7+
clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
88
)
99

1010
// NodeadmConfigSpec defines the desired state of NodeadmConfig.
@@ -82,12 +82,12 @@ const (
8282
)
8383

8484
// GetConditions returns the observations of the operational state of the NodeadmConfig resource.
85-
func (r *NodeadmConfig) GetConditions() clusterv1.Conditions {
85+
func (r *NodeadmConfig) GetConditions() clusterv1beta1.Conditions {
8686
return r.Status.Conditions
8787
}
8888

8989
// SetConditions sets the underlying service state of the NodeadmConfig to the predescribed clusterv1.Conditions.
90-
func (r *NodeadmConfig) SetConditions(conditions clusterv1.Conditions) {
90+
func (r *NodeadmConfig) SetConditions(conditions clusterv1beta1.Conditions) {
9191
r.Status.Conditions = conditions
9292
}
9393

@@ -115,7 +115,7 @@ type NodeadmConfigStatus struct {
115115

116116
// Conditions defines current service state of the NodeadmConfig.
117117
// +optional
118-
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
118+
Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
119119
}
120120

121121
// +kubebuilder:object:root=true

bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -286,19 +286,18 @@ func newCluster(name string) *clusterv1.Cluster {
286286
},
287287
Spec: clusterv1.ClusterSpec{
288288
ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
289-
Name: name,
290-
Kind: "AWSManagedControlPlane",
289+
Name: name,
290+
Kind: "AWSManagedControlPlane",
291+
APIGroup: ekscontrolplanev1.GroupVersion.Group,
291292
},
292293
},
293294
Status: clusterv1.ClusterStatus{
294295
Initialization: clusterv1.ClusterInitializationStatus{
295296
InfrastructureProvisioned: ptr.To(true),
297+
ControlPlaneInitialized: ptr.To(true),
296298
},
297299
},
298300
}
299-
cluster.Status.Initialization.ControlPlaneInitialized = ptr.To(true)
300-
v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneReadyCondition)
301-
v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition)
302301
return cluster
303302
}
304303

bootstrap/eks/controllers/nodeadmconfig_controller.go

Lines changed: 34 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -26,12 +26,12 @@ import (
2626
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
2727
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
2828
"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
29-
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
29+
clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
30+
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
3031
bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
31-
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
3232
"sigs.k8s.io/cluster-api/feature"
3333
"sigs.k8s.io/cluster-api/util"
34-
"sigs.k8s.io/cluster-api/util/conditions"
34+
v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
3535
kubeconfigutil "sigs.k8s.io/cluster-api/util/kubeconfig"
3636
"sigs.k8s.io/cluster-api/util/patch"
3737
"sigs.k8s.io/cluster-api/util/predicates"
@@ -110,11 +110,11 @@ func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
110110

111111
// set up defer block for updating config
112112
defer func() {
113-
conditions.SetSummary(config,
114-
conditions.WithConditions(
113+
v1beta1conditions.SetSummary(config,
114+
v1beta1conditions.WithConditions(
115115
eksbootstrapv1.DataSecretAvailableCondition,
116116
),
117-
conditions.WithStepCounter(),
117+
v1beta1conditions.WithStepCounter(),
118118
)
119119

120120
patchOpts := []patch.Option{}
@@ -153,35 +153,39 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust
153153
}
154154
}
155155

156-
if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" {
156+
if cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" {
157157
return ctrl.Result{}, errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider")
158158
}
159159

160-
if !cluster.Status.InfrastructureReady {
160+
if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) {
161161
log.Info("Cluster infrastructure is not ready")
162-
conditions.MarkFalse(config,
162+
v1beta1conditions.MarkFalse(config,
163163
eksbootstrapv1.DataSecretAvailableCondition,
164164
eksbootstrapv1.WaitingForClusterInfrastructureReason,
165-
clusterv1.ConditionSeverityInfo, "")
165+
clusterv1beta1.ConditionSeverityInfo, "")
166166
return ctrl.Result{}, nil
167167
}
168168

169-
if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
169+
if !ptr.Deref(cluster.Status.Initialization.ControlPlaneInitialized, false) {
170170
log.Info("Control Plane has not yet been initialized")
171-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "")
171+
v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1beta1.ConditionSeverityInfo, "")
172172
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
173173
}
174174

175175
controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
176-
if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil {
176+
if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Namespace}, controlPlane); err != nil {
177177
return ctrl.Result{}, errors.Wrap(err, "failed to get control plane")
178178
}
179179
// Check if control plane is ready
180-
if !conditions.IsTrue(controlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) {
180+
if !v1beta1conditions.IsTrue(controlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) {
181181
log.Info("Waiting for control plane to be ready")
182-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition,
182+
v1beta1conditions.MarkFalse(
183+
config,
184+
eksbootstrapv1.DataSecretAvailableCondition,
183185
eksbootstrapv1.DataSecretGenerationFailedReason,
184-
clusterv1.ConditionSeverityInfo, "Control plane is not initialized yet")
186+
clusterv1beta1.ConditionSeverityInfo,
187+
"Control plane is not initialized yet",
188+
)
185189
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
186190
}
187191
log.Info("Control plane is ready, proceeding with userdata generation")
@@ -191,12 +195,12 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust
191195
files, err := fileResolver.ResolveFiles(ctx, config.Namespace, config.Spec.Files)
192196
if err != nil {
193197
log.Info("Failed to resolve files for user data")
194-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
198+
v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
195199
return ctrl.Result{}, err
196200
}
197201

198202
serviceCIDR := ""
199-
if cluster.Spec.ClusterNetwork != nil && cluster.Spec.ClusterNetwork.Services != nil && len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
203+
if len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
200204
serviceCIDR = cluster.Spec.ClusterNetwork.Services.CIDRBlocks[0]
201205
}
202206
nodeInput := &userdata.NodeadmInput{
@@ -235,9 +239,9 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust
235239
ca, err := extractCAFromSecret(ctx, r.Client, obj)
236240
if err != nil {
237241
log.Error(err, "Failed to extract CA from kubeconfig secret")
238-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition,
242+
v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition,
239243
eksbootstrapv1.DataSecretGenerationFailedReason,
240-
clusterv1.ConditionSeverityWarning,
244+
clusterv1beta1.ConditionSeverityWarning,
241245
"Failed to extract CA from kubeconfig secret: %v", err)
242246
return ctrl.Result{}, err
243247
}
@@ -250,18 +254,18 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust
250254
userDataScript, err := userdata.NewNodeadmUserdata(nodeInput)
251255
if err != nil {
252256
log.Error(err, "Failed to create a worker join configuration")
253-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "")
257+
v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
254258
return ctrl.Result{}, err
255259
}
256260

257261
// store userdata as secret
258262
if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil {
259263
log.Error(err, "Failed to store bootstrap data")
260-
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "")
264+
v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
261265
return ctrl.Result{}, err
262266
}
263267

264-
conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
268+
v1beta1conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
265269
return ctrl.Result{}, nil
266270
}
267271

@@ -299,7 +303,7 @@ func (r *NodeadmConfigReconciler) storeBootstrapData(ctx context.Context, cluste
299303

300304
config.Status.DataSecretName = ptr.To(secret.Name)
301305
config.Status.Ready = true
302-
conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
306+
v1beta1conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
303307
return nil
304308
}
305309

@@ -354,7 +358,7 @@ func (r *NodeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
354358

355359
if feature.Gates.Enabled(feature.MachinePool) {
356360
b = b.Watches(
357-
&expclusterv1.MachinePool{},
361+
&clusterv1beta1.MachinePool{},
358362
handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
359363
)
360364
}
@@ -367,7 +371,7 @@ func (r *NodeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
367371
err = c.Watch(
368372
source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
369373
handler.EnqueueRequestsFromMapFunc((r.ClusterToNodeadmConfigs)),
370-
predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())),
374+
predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())),
371375
)
372376
if err != nil {
373377
return errors.Wrap(err, "failed adding watch for Clusters to controller manager")
@@ -384,7 +388,7 @@ func (r *NodeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o
384388
if !ok {
385389
klog.Errorf("Expected a Machine but got a %T", o)
386390
}
387-
if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig") {
391+
if m.Spec.Bootstrap.ConfigRef.IsDefined() && m.Spec.Bootstrap.ConfigRef.APIGroup == eksbootstrapv1.GroupVersion.Group && m.Spec.Bootstrap.ConfigRef.Kind == eksConfigKind {
388392
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
389393
result = append(result, ctrl.Request{NamespacedName: name})
390394
}
@@ -396,7 +400,7 @@ func (r *NodeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o
396400
func (r *NodeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
397401
result := []ctrl.Request{}
398402

399-
m, ok := o.(*expclusterv1.MachinePool)
403+
m, ok := o.(*clusterv1beta1.MachinePool)
400404
if !ok {
401405
klog.Errorf("Expected a MachinePool but got a %T", o)
402406
}
@@ -432,8 +436,8 @@ func (r *NodeadmConfigReconciler) ClusterToNodeadmConfigs(_ context.Context, o c
432436
}
433437

434438
for _, m := range machineList.Items {
435-
if m.Spec.Bootstrap.ConfigRef != nil &&
436-
m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig").GroupKind() {
439+
if m.Spec.Bootstrap.ConfigRef.IsDefined() &&
440+
m.Spec.Bootstrap.ConfigRef.GroupKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig").GroupKind() {
437441
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
438442
result = append(result, ctrl.Request{NamespacedName: name})
439443
}

bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,16 +28,15 @@ import (
2828
"sigs.k8s.io/controller-runtime/pkg/client"
2929

3030
eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
31-
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
32-
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
31+
clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
32+
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
3333
)
3434

3535
func TestNodeadmConfigReconciler_CreateSecret(t *testing.T) {
3636
g := NewWithT(t)
3737

3838
amcp := newAMCP("test-cluster")
3939
endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443}
40-
amcp.Spec.ControlPlaneEndpoint = endpoint
4140
cluster := newCluster(amcp.Name)
4241
cluster.Spec.ControlPlaneEndpoint = endpoint
4342
newStatus := cluster.Status
@@ -76,7 +75,6 @@ func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) {
7675

7776
amcp := newAMCP("test-cluster")
7877
endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443}
79-
amcp.Spec.ControlPlaneEndpoint = endpoint
8078
cluster := newCluster(amcp.Name)
8179
cluster.Spec.ControlPlaneEndpoint = endpoint
8280
newStatus := cluster.Status
@@ -96,7 +94,7 @@ func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) {
9694
cfg.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", mp.Name))
9795
cfg.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{
9896
Kind: "MachinePool",
99-
APIVersion: expclusterv1.GroupVersion.String(),
97+
APIVersion: clusterv1beta1.GroupVersion.String(),
10098
Name: mp.Name,
10199
UID: types.UID(fmt.Sprintf("%s uid", mp.Name)),
102100
}}
@@ -137,7 +135,6 @@ func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) {
137135

138136
amcp := newAMCP("test-cluster")
139137
endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443}
140-
amcp.Spec.ControlPlaneEndpoint = endpoint
141138
cluster := newCluster(amcp.Name)
142139
cluster.Spec.ControlPlaneEndpoint = endpoint
143140
newStatus := cluster.Status

bootstrap/eks/controllers/nodeadmconfig_controller_test.go

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,11 @@ import (
88
. "github.com/onsi/gomega"
99
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1010
"k8s.io/apimachinery/pkg/types"
11+
"k8s.io/utils/ptr"
1112

1213
eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
13-
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
14+
clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
15+
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
1416
)
1517

1618
func TestNodeadmConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) {
@@ -21,7 +23,9 @@ func TestNodeadmConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T)
2123
config := newNodeadmConfig(machine)
2224

2325
cluster.Status = clusterv1.ClusterStatus{
24-
InfrastructureReady: false,
26+
Initialization: clusterv1.ClusterInitializationStatus{
27+
InfrastructureProvisioned: ptr.To(false),
28+
},
2529
}
2630

2731
reconciler := NodeadmConfigReconciler{
@@ -42,7 +46,9 @@ func TestNodeadmConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t
4246
config := newNodeadmConfig(machine)
4347

4448
cluster.Status = clusterv1.ClusterStatus{
45-
InfrastructureReady: true,
49+
Initialization: clusterv1.ClusterInitializationStatus{
50+
InfrastructureProvisioned: ptr.To(true),
51+
},
4652
}
4753

4854
reconciler := NodeadmConfigReconciler{
@@ -71,14 +77,13 @@ func newNodeadmConfig(machine *clusterv1.Machine) *eksbootstrapv1.NodeadmConfig
7177
config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
7278
{
7379
Kind: "Machine",
74-
APIVersion: clusterv1.GroupVersion.String(),
80+
APIVersion: clusterv1beta1.GroupVersion.String(),
7581
Name: machine.Name,
7682
UID: types.UID(fmt.Sprintf("%s uid", machine.Name)),
7783
},
7884
}
7985
config.Status.DataSecretName = &machine.Name
8086
machine.Spec.Bootstrap.ConfigRef.Name = config.Name
81-
machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace
8287
}
8388
if machine != nil {
8489
config.ObjectMeta.Name = machine.Name
@@ -93,7 +98,6 @@ func newNodeadmConfig(machine *clusterv1.Machine) *eksbootstrapv1.NodeadmConfig
9398
}
9499
config.Status.DataSecretName = &machine.Name
95100
machine.Spec.Bootstrap.ConfigRef.Name = config.Name
96-
machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace
97101
}
98102
return config
99103
}

test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,13 +28,13 @@ import (
2828
. "github.com/onsi/gomega"
2929
corev1 "k8s.io/api/core/v1"
3030
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31-
ref "k8s.io/client-go/tools/reference"
3231
"k8s.io/utils/ptr"
3332

3433
infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
3534
eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
3635
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
3736
"sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared"
37+
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
3838
"sigs.k8s.io/cluster-api/test/framework"
3939
"sigs.k8s.io/cluster-api/util"
4040
)
@@ -181,9 +181,11 @@ var _ = ginkgo.Describe("EKS Cluster upgrade test", func() {
181181
WaitForMachinesToBeUpgraded: e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes"),
182182
}
183183
if nodeadmConfigTemplate != nil {
184-
nodeadmRef, err := ref.GetReference(initScheme(), nodeadmConfigTemplate)
185-
Expect(err).To(BeNil(), "object should have ref")
186-
input.UpgradeBootstrapTemplate = nodeadmRef
184+
input.UpgradeBootstrapTemplate = clusterv1.ContractVersionedObjectReference{
185+
Kind: "NodeadmConfigTemplate",
186+
APIGroup: eksbootstrapv1.GroupVersion.Group,
187+
Name: nodeadmConfigTemplate.Name,
188+
}
187189
}
188190
if awsMT != nil {
189191
input.UpgradeMachineTemplate = ptr.To(awsMT.Name)

0 commit comments

Comments
 (0)