From 33c245f352d11ddcc735a8d7c119730bccbff84f Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 14 Oct 2025 16:44:09 +0200 Subject: [PATCH 1/2] KCP: Implement CanUpdateMachine --- .../hooks/v1alpha1/inplaceupdate_types.go | 5 + controlplane/kubeadm/controllers/alias.go | 3 + .../internal/controllers/controller.go | 19 +- .../kubeadm/internal/controllers/inplace.go | 23 +- .../controllers/inplace_canupdatemachine.go | 460 +++++++ .../inplace_canupdatemachine_test.go | 1154 +++++++++++++++++ .../internal/controllers/inplace_test.go | 130 ++ .../kubeadm/internal/controllers/scale.go | 26 +- .../internal/controllers/scale_test.go | 3 +- .../controllers/{upgrade.go => update.go} | 0 .../{upgrade_test.go => update_test.go} | 0 controlplane/kubeadm/internal/filters.go | 4 +- controlplane/kubeadm/main.go | 2 +- exp/runtime/client/client.go | 3 + .../topology/cluster/patches/engine.go | 3 +- .../topology/cluster/patches/engine_test.go | 2 +- .../external/external_patch_generator_test.go | 4 + .../topology/cluster/patches/patch.go | 94 +- internal/runtime/client/client.go | 33 + internal/runtime/client/client_test.go | 144 ++ internal/runtime/client/fake/fake_client.go | 26 +- internal/util/patch/patch.go | 123 ++ .../patches => util/patch}/patch_test.go | 110 +- internal/util/ssa/patch.go | 12 + .../handler_integration_test.go | 4 + 25 files changed, 2217 insertions(+), 170 deletions(-) create mode 100644 controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go create mode 100644 controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go create mode 100644 controlplane/kubeadm/internal/controllers/inplace_test.go rename controlplane/kubeadm/internal/controllers/{upgrade.go => update.go} (100%) rename controlplane/kubeadm/internal/controllers/{upgrade_test.go => update_test.go} (100%) create mode 100644 internal/util/patch/patch.go rename internal/{controllers/topology/cluster/patches => util/patch}/patch_test.go (81%) diff --git 
a/api/runtime/hooks/v1alpha1/inplaceupdate_types.go b/api/runtime/hooks/v1alpha1/inplaceupdate_types.go index 83fed16fc787..1f2accce966a 100644 --- a/api/runtime/hooks/v1alpha1/inplaceupdate_types.go +++ b/api/runtime/hooks/v1alpha1/inplaceupdate_types.go @@ -90,6 +90,11 @@ type Patch struct { Patch []byte `json:"patch,omitempty"` } +// IsDefined returns true if one of the fields of Patch is set. +func (p *Patch) IsDefined() bool { + return p.PatchType != "" || len(p.Patch) > 0 +} + // CanUpdateMachine is the hook that will be called to determine if an extension // can handle specific machine changes for in-place updates. func CanUpdateMachine(*CanUpdateMachineRequest, *CanUpdateMachineResponse) {} diff --git a/controlplane/kubeadm/controllers/alias.go b/controlplane/kubeadm/controllers/alias.go index b0733f825be5..924a5d808550 100644 --- a/controlplane/kubeadm/controllers/alias.go +++ b/controlplane/kubeadm/controllers/alias.go @@ -27,12 +27,14 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/controllers" + runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" ) // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. 
type KubeadmControlPlaneReconciler struct { Client client.Client SecretCachingClient client.Client + RuntimeClient runtimeclient.Client ClusterCache clustercache.ClusterCache EtcdDialTimeout time.Duration @@ -50,6 +52,7 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg return (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ Client: r.Client, SecretCachingClient: r.SecretCachingClient, + RuntimeClient: r.RuntimeClient, ClusterCache: r.ClusterCache, EtcdDialTimeout: r.EtcdDialTimeout, EtcdCallTimeout: r.EtcdCallTimeout, diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 2f53c2081db3..fb1ec3aa22a3 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -46,6 +46,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" @@ -80,6 +81,7 @@ const ( type KubeadmControlPlaneReconciler struct { Client client.Client SecretCachingClient client.Client + RuntimeClient runtimeclient.Client controller controller.Controller recorder record.EventRecorder ClusterCache clustercache.ClusterCache @@ -97,10 +99,13 @@ type KubeadmControlPlaneReconciler struct { managementClusterUncached internal.ManagementCluster ssaCache ssa.Cache - // Only used for testing - overrideTryInPlaceUpdateFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToInPlaceUpdate *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, ctrl.Result, error) - overrideScaleUpControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) - 
overrideScaleDownControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToDelete *clusterv1.Machine) (ctrl.Result, error) + // Only used for testing. + overrideTryInPlaceUpdateFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToInPlaceUpdate *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, ctrl.Result, error) + overrideScaleUpControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) + overrideScaleDownControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToDelete *clusterv1.Machine) (ctrl.Result, error) + overridePreflightChecksFunc func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result + overrideCanUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) + overrideCanExtensionsUpdateMachine func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) } func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { @@ -115,6 +120,9 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg "EtcdDialTimeout and EtcdCallTimeout must not be 0 and " + "RemoteConditionsGracePeriod must not be < 2m") } + if feature.Gates.Enabled(feature.InPlaceUpdates) && r.RuntimeClient == nil { + return errors.New("RuntimeClient must not be nil when InPlaceUpdates feature gate is enabled") + } predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "kubeadmcontrolplane") c, err := ctrl.NewControllerManagedBy(mgr). 
@@ -814,7 +822,8 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro if err != nil { return errors.Wrapf(err, "failed to update Machine: %s", klog.KObj(m)) } - // Note: Ensure ControlPlane has the latest version of the Machine. + // Note: Ensure ControlPlane has the latest version of the Machine. This is required because + // e.g. the in-place update code that is called later has to use the latest version of the Machine. controlPlane.Machines[machineName] = updatedMachine if _, ok := controlPlane.MachinesNotUpToDate[machineName]; ok { controlPlane.MachinesNotUpToDate[machineName] = updatedMachine diff --git a/controlplane/kubeadm/internal/controllers/inplace.go b/controlplane/kubeadm/internal/controllers/inplace.go index 3e67f66ea7ac..e25b5e84dcdb 100644 --- a/controlplane/kubeadm/internal/controllers/inplace.go +++ b/controlplane/kubeadm/internal/controllers/inplace.go @@ -19,6 +19,7 @@ package controllers import ( "context" + "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -35,6 +36,26 @@ func (r *KubeadmControlPlaneReconciler) tryInPlaceUpdate( return r.overrideTryInPlaceUpdateFunc(ctx, controlPlane, machineToInPlaceUpdate, machineUpToDateResult) } - // Always fallback to scale down until in-place is implemented. + // Run preflight checks to ensure that the control plane is stable before proceeding with in-place update operation. + if resultForAllMachines := r.preflightChecks(ctx, controlPlane); !resultForAllMachines.IsZero() { + // We should not block a scale down of an unhealthy Machine that would work. + if result := r.preflightChecks(ctx, controlPlane, machineToInPlaceUpdate); result.IsZero() { + // Fallback to scale down. 
+ return true, ctrl.Result{}, nil + } + + return false, resultForAllMachines, nil + } + + canUpdate, err := r.canUpdateMachine(ctx, machineToInPlaceUpdate, machineUpToDateResult) + if err != nil { + return false, ctrl.Result{}, errors.Wrapf(err, "failed to determine if Machine %s can be updated in-place", machineToInPlaceUpdate.Name) + } + + if !canUpdate { + return true, ctrl.Result{}, nil + } + + // Always fallback to scale down until triggering in-place updates is implemented. return true, ctrl.Result{}, nil } diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go new file mode 100644 index 000000000000..d8c0aaabaa75 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go @@ -0,0 +1,460 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "runtime/debug" + "strings" + + jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/util/compare" + patchutil "sigs.k8s.io/cluster-api/internal/util/patch" + "sigs.k8s.io/cluster-api/internal/util/ssa" +) + +func (r *KubeadmControlPlaneReconciler) canUpdateMachine(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) { + if r.overrideCanUpdateMachineFunc != nil { + return r.overrideCanUpdateMachineFunc(ctx, machine, machineUpToDateResult) + } + + log := ctrl.LoggerFrom(ctx) + + // Machine cannot be updated in-place if the feature gate is not enabled. + if !feature.Gates.Enabled(feature.InPlaceUpdates) { + return false, nil + } + + // Machine cannot be updated in-place if the UpToDate func was not able to provide all objects, + // e.g. if the InfraMachine or KubeadmConfig was deleted. 
+ if machineUpToDateResult.DesiredMachine == nil || + machineUpToDateResult.CurrentInfraMachine == nil || + machineUpToDateResult.DesiredInfraMachine == nil || + machineUpToDateResult.CurrentKubeadmConfig == nil || + machineUpToDateResult.DesiredKubeadmConfig == nil { + return false, nil + } + + extensionHandlers, err := r.RuntimeClient.GetAllExtensions(ctx, runtimehooksv1.CanUpdateMachine, machine) + if err != nil { + return false, err + } + // Machine cannot be updated in-place if no CanUpdateMachine extensions are registered. + if len(extensionHandlers) == 0 { + return false, nil + } + + canUpdateMachine, reasons, err := r.canExtensionsUpdateMachine(ctx, machine, machineUpToDateResult, extensionHandlers) + if err != nil { + return false, err + } + if !canUpdateMachine { + log.Info(fmt.Sprintf("Machine cannot be updated in-place: %s", strings.Join(reasons, ",")), "Machine", klog.KObj(machine)) + return false, nil + } + return true, nil +} + +// canExtensionsUpdateMachine calls CanUpdateMachine extensions to decide if a Machine can be updated in-place. +// Note: This is following the same general structure that is used in the Apply func in +// internal/controllers/topology/cluster/patches/engine.go. +func (r *KubeadmControlPlaneReconciler) canExtensionsUpdateMachine(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if r.overrideCanExtensionsUpdateMachine != nil { + return r.overrideCanExtensionsUpdateMachine(ctx, machine, machineUpToDateResult, extensionHandlers) + } + + log := ctrl.LoggerFrom(ctx) + + // Create the CanUpdateMachine request. + req, err := createRequest(ctx, r.Client, machine, machineUpToDateResult) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to generate CanUpdateMachine request") + } + + var reasons []string + for _, extensionHandler := range extensionHandlers { + // Call CanUpdateMachine extension. 
+ resp := &runtimehooksv1.CanUpdateMachineResponse{} + if err := r.RuntimeClient.CallExtension(ctx, runtimehooksv1.CanUpdateMachine, machine, extensionHandler, req, resp); err != nil { + return false, nil, err + } + + // Apply patches from the CanUpdateMachine response to the request. + if err := applyPatchesToRequest(ctx, req, resp); err != nil { + return false, nil, errors.Wrapf(err, "failed to apply patches from extension %s to the CanUpdateMachine request", extensionHandler) + } + + // Check if current and desired objects are now matching. + var matches bool + matches, reasons, err = matchesMachine(req) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to compare current and desired objects after calling extension %s", extensionHandler) + } + if matches { + return true, nil, nil + } + log.V(5).Info(fmt.Sprintf("Machine cannot be updated in-place yet after calling extension %s: %s", extensionHandler, strings.Join(reasons, ",")), "Machine", klog.KObj(&req.Current.Machine)) + } + + return false, reasons, nil +} + +func createRequest(ctx context.Context, c client.Client, currentMachine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (*runtimehooksv1.CanUpdateMachineRequest, error) { + // DeepCopy objects to avoid mutations. + currentMachineForDiff := currentMachine.DeepCopy() + currentKubeadmConfigForDiff := machineUpToDateResult.CurrentKubeadmConfig.DeepCopy() + currentInfraMachineForDiff := machineUpToDateResult.CurrentInfraMachine.DeepCopy() + + desiredMachineForDiff := machineUpToDateResult.DesiredMachine.DeepCopy() + desiredKubeadmConfigForDiff := machineUpToDateResult.DesiredKubeadmConfig.DeepCopy() + desiredInfraMachineForDiff := machineUpToDateResult.DesiredInfraMachine.DeepCopy() + + // Sync in-place mutable changes from desired to current KubeadmConfig / InfraMachine. + // Note: Writing these fields is handled by syncMachines and not the responsibility of in-place updates. 
+ // Note: Desired KubeadmConfig / InfraMachine already contain the latest labels & annotations. + currentKubeadmConfigForDiff.SetLabels(desiredKubeadmConfigForDiff.GetLabels()) + currentKubeadmConfigForDiff.SetAnnotations(desiredKubeadmConfigForDiff.GetAnnotations()) + currentInfraMachineForDiff.SetLabels(desiredInfraMachineForDiff.GetLabels()) + currentInfraMachineForDiff.SetAnnotations(desiredInfraMachineForDiff.GetAnnotations()) + + // Apply defaulting to current / desired Machine / KubeadmConfig / InfraMachine. + // Machine + // Note: currentMachineForDiff doesn't need a dry-run as it was just written in syncMachines and then + // update in controlPlane to ensure the Machine we get here is the latest version of the Machine. + // Note: desiredMachineForDiff needs a dry-run because otherwise we have unintended diffs, e.g. dataSecretName, + // providerID and nodeDeletionTimeout don't exist on the newly computed desired Machine. + if err := ssa.Patch(ctx, c, kcpManagerName, desiredMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for desired Machine") + } + // InfraMachine + // Note: Both currentInfraMachineForDiff and desiredInfraMachineForDiff need a dry-run to ensure changes + // in defaulting logic and fields added by other controllers don't lead to an unintended diff. + // TODO(in-place) change the fieldManager to the one we use later when we trigger the in-place update. 
+ if err := ssa.Patch(ctx, c, kcpManagerName, currentInfraMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for current InfraMachine") + } + if err := ssa.Patch(ctx, c, kcpManagerName, desiredInfraMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for desired InfraMachine") + } + // KubeadmConfig + // Note: Both currentKubeadmConfigForDiff and desiredKubeadmConfigForDiff don't need a dry-run as + // PrepareKubeadmConfigsForDiff already has to perfectly handle differences between current / desired + // KubeadmConfig. Otherwise the regular rollout logic would not detect correctly if a Machine needs a rollout. + // Note: KubeadmConfig doesn't have a defaulting webhook and no API defaulting anymore. + desiredKubeadmConfigForDiff, currentKubeadmConfigForDiff = internal.PrepareKubeadmConfigsForDiff(desiredKubeadmConfigForDiff, currentKubeadmConfigForDiff, true) + + // Cleanup objects and create request. 
+ req := &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *cleanupMachine(currentMachineForDiff), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *cleanupMachine(desiredMachineForDiff), + }, + } + var err error + req.Current.BootstrapConfig, err = convertToRawExtension(cleanupKubeadmConfig(currentKubeadmConfigForDiff)) + if err != nil { + return nil, err + } + req.Desired.BootstrapConfig, err = convertToRawExtension(cleanupKubeadmConfig(desiredKubeadmConfigForDiff)) + if err != nil { + return nil, err + } + req.Current.InfrastructureMachine, err = convertToRawExtension(cleanupUnstructured(currentInfraMachineForDiff)) + if err != nil { + return nil, err + } + req.Desired.InfrastructureMachine, err = convertToRawExtension(cleanupUnstructured(desiredInfraMachineForDiff)) + if err != nil { + return nil, err + } + + return req, nil +} + +func cleanupMachine(machine *clusterv1.Machine) *clusterv1.Machine { + return &clusterv1.Machine{ + // Set GVK because object is later marshalled with json.Marshal. + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: machine.Name, + Namespace: machine.Namespace, + Labels: machine.Labels, + Annotations: machine.Annotations, + }, + Spec: *machine.Spec.DeepCopy(), + } +} + +func cleanupKubeadmConfig(kubeadmConfig *bootstrapv1.KubeadmConfig) *bootstrapv1.KubeadmConfig { + return &bootstrapv1.KubeadmConfig{ + // Set GVK because object is later marshalled with json.Marshal. 
+ TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfig.Name, + Namespace: kubeadmConfig.Namespace, + Labels: kubeadmConfig.Labels, + Annotations: kubeadmConfig.Annotations, + }, + Spec: *kubeadmConfig.Spec.DeepCopy(), + } +} + +func cleanupUnstructured(u *unstructured.Unstructured) *unstructured.Unstructured { + cleanedUpU := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": u.GetAPIVersion(), + "kind": u.GetKind(), + "spec": u.Object["spec"], + }, + } + cleanedUpU.SetName(u.GetName()) + cleanedUpU.SetNamespace(u.GetNamespace()) + cleanedUpU.SetLabels(u.GetLabels()) + cleanedUpU.SetAnnotations(u.GetAnnotations()) + return cleanedUpU +} + +func convertToRawExtension(object runtime.Object) (runtime.RawExtension, error) { + objectBytes, err := json.Marshal(object) + if err != nil { + return runtime.RawExtension{}, errors.Wrap(err, "failed to marshal object to JSON") + } + + objectUnstructured, ok := object.(*unstructured.Unstructured) + if !ok { + objectUnstructured = &unstructured.Unstructured{} + // Note: This only succeeds if object has apiVersion & kind set (which is always the case). + if err := json.Unmarshal(objectBytes, objectUnstructured); err != nil { + return runtime.RawExtension{}, errors.Wrap(err, "failed to Unmarshal object into Unstructured") + } + } + + // Note: Raw and Object are always both set and Object is always set as an Unstructured + // to simplify subsequent code in matchesUnstructuredSpec. 
+ return runtime.RawExtension{ + Raw: objectBytes, + Object: objectUnstructured, + }, nil +} + +func applyPatchesToRequest(ctx context.Context, req *runtimehooksv1.CanUpdateMachineRequest, resp *runtimehooksv1.CanUpdateMachineResponse) error { + if resp.MachinePatch.IsDefined() { + if err := applyPatchToMachine(ctx, &req.Current.Machine, resp.MachinePatch); err != nil { + return err + } + } + + if resp.BootstrapConfigPatch.IsDefined() { + if _, err := applyPatchToObject(ctx, &req.Current.BootstrapConfig, resp.BootstrapConfigPatch); err != nil { + return err + } + } + + if resp.InfrastructureMachinePatch.IsDefined() { + if _, err := applyPatchToObject(ctx, &req.Current.InfrastructureMachine, resp.InfrastructureMachinePatch); err != nil { + return err + } + } + + return nil +} + +func applyPatchToMachine(ctx context.Context, currentMachine *clusterv1.Machine, machinePath runtimehooksv1.Patch) error { + // Note: Machine needs special handling because it is not a runtime.RawExtension. Simply converting it here to + // a runtime.RawExtension so we can avoid making the code in applyPatchToObject more complex. + currentMachineRaw, err := convertToRawExtension(currentMachine) + if err != nil { + return err + } + + machineChanged, err := applyPatchToObject(ctx, ¤tMachineRaw, machinePath) + if err != nil { + return err + } + + if !machineChanged { + return nil + } + + // Note: json.Unmarshal can't be used directly on *currentMachine as json.Unmarshal does not unset fields. + patchedCurrentMachine := &clusterv1.Machine{} + if err := json.Unmarshal(currentMachineRaw.Raw, patchedCurrentMachine); err != nil { + return err + } + *currentMachine = *patchedCurrentMachine + return nil +} + +// applyPatchToObject applies the patch to the obj. +// Note: This is following the same general structure that is used in the applyPatchToRequest func in +// internal/controllers/topology/cluster/patches/engine.go. 
+func applyPatchToObject(ctx context.Context, obj *runtime.RawExtension, patch runtimehooksv1.Patch) (objChanged bool, reterr error) { + log := ctrl.LoggerFrom(ctx) + + if patch.PatchType == "" { + return false, errors.Errorf("failed to apply patch: patchType is not set") + } + + defer func() { + if r := recover(); r != nil { + log.Info(fmt.Sprintf("Observed a panic when applying patch: %v\n%s", r, string(debug.Stack()))) + reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("observed a panic when applying patch: %v", r)}) + } + }() + + // Create a copy of obj.Raw. + // The patches will be applied to the copy and then only spec changes will be copied back to the obj. + patchedObject := bytes.Clone(obj.Raw) + var err error + + switch patch.PatchType { + case runtimehooksv1.JSONPatchType: + log.V(5).Info("Accumulating JSON patch", "patch", string(patch.Patch)) + jsonPatch, err := jsonpatch.DecodePatch(patch.Patch) + if err != nil { + log.Error(err, "Failed to apply patch: error decoding json patch (RFC6902)", "patch", string(patch.Patch)) + return false, errors.Wrap(err, "failed to apply patch: error decoding json patch (RFC6902)") + } + + if len(jsonPatch) == 0 { + // Return if there are no patches, nothing to do. + return false, nil + } + + patchedObject, err = jsonPatch.Apply(patchedObject) + if err != nil { + log.Error(err, "Failed to apply patch: error applying json patch (RFC6902)", "patch", string(patch.Patch)) + return false, errors.Wrap(err, "failed to apply patch: error applying json patch (RFC6902)") + } + case runtimehooksv1.JSONMergePatchType: + if len(patch.Patch) == 0 || bytes.Equal(patch.Patch, []byte("{}")) { + // Return if there are no patches, nothing to do. 
+ return false, nil + } + + log.V(5).Info("Accumulating JSON merge patch", "patch", string(patch.Patch)) + patchedObject, err = jsonpatch.MergePatch(patchedObject, patch.Patch) + if err != nil { + log.Error(err, "Failed to apply patch: error applying json merge patch (RFC7386)", "patch", string(patch.Patch)) + return false, errors.Wrap(err, "failed to apply patch: error applying json merge patch (RFC7386)") + } + default: + return false, errors.Errorf("failed to apply patch: unknown patchType %s", patch.PatchType) + } + + // Overwrite the spec of obj with the spec of the patchedObject, + // to ensure that we only pick up changes to the spec. + if err := patchutil.PatchSpec(obj, patchedObject); err != nil { + return false, errors.Wrap(err, "failed to apply patch to object") + } + + return true, nil +} + +func matchesMachine(req *runtimehooksv1.CanUpdateMachineRequest) (bool, []string, error) { + var reasons []string + match, diff, err := matchesMachineSpec(&req.Current.Machine, &req.Desired.Machine) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match Machine") + } + if !match { + reasons = append(reasons, fmt.Sprintf("Machine cannot be updated in-place: %s", diff)) + } + match, diff, err = matchesUnstructuredSpec(req.Current.BootstrapConfig, req.Desired.BootstrapConfig) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match KubeadmConfig") + } + if !match { + reasons = append(reasons, fmt.Sprintf("KubeadmConfig cannot be updated in-place: %s", diff)) + } + match, diff, err = matchesUnstructuredSpec(req.Current.InfrastructureMachine, req.Desired.InfrastructureMachine) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match %s", req.Current.InfrastructureMachine.Object.GetObjectKind().GroupVersionKind().Kind) + } + if !match { + reasons = append(reasons, fmt.Sprintf("%s cannot be updated in-place: %s", req.Current.InfrastructureMachine.Object.GetObjectKind().GroupVersionKind().Kind, diff)) + } + + if 
len(reasons) > 0 { + return false, reasons, nil + } + + return true, nil, nil +} + +func matchesMachineSpec(patched, desired *clusterv1.Machine) (equal bool, diff string, matchErr error) { + // Note: Wrapping Machine specs in a Machine for proper formatting of the diff. + return compare.Diff( + &clusterv1.Machine{ + Spec: patched.Spec, + }, + &clusterv1.Machine{ + Spec: desired.Spec, + }, + ) +} + +func matchesUnstructuredSpec(patched, desired runtime.RawExtension) (equal bool, diff string, matchErr error) { + // Note: Both patched and desired objects are always Unstructured as createRequest and + // applyPatchToObject are always setting objects as Unstructured. + patchedUnstructured, ok := patched.Object.(*unstructured.Unstructured) + if !ok { + return false, "", errors.Errorf("patched object is not an Unstructured") + } + desiredUnstructured, ok := desired.Object.(*unstructured.Unstructured) + if !ok { + return false, "", errors.Errorf("desired object is not an Unstructured") + } + // Note: Wrapping Unstructured specs in an Unstructured for proper formatting of the diff. + return compare.Diff( + &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": patchedUnstructured.Object["spec"], + }, + }, + &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": desiredUnstructured.Object["spec"], + }, + }, + ) +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go new file mode 100644 index 000000000000..4becff527bd0 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go @@ -0,0 +1,1154 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "testing" + + . "github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + "sigs.k8s.io/cluster-api/bootstrap/kubeadm/defaulting" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + "sigs.k8s.io/cluster-api/feature" + fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" + "sigs.k8s.io/cluster-api/internal/util/compare" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func Test_canUpdateMachine(t *testing.T) { + machineToInPlaceUpdate := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + }, + } + nonEmptyMachineUpToDateResult := internal.UpToDateResult{ + // No real content needed for this, fields should just not be nil, + EligibleForInPlaceUpdate: true, + DesiredMachine: &clusterv1.Machine{}, + CurrentInfraMachine: &unstructured.Unstructured{}, + DesiredInfraMachine: &unstructured.Unstructured{}, + CurrentKubeadmConfig: &bootstrapv1.KubeadmConfig{}, + 
DesiredKubeadmConfig: &bootstrapv1.KubeadmConfig{}, + } + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + canUpdateMachineGVH, err := catalog.GroupVersionHook(runtimehooksv1.CanUpdateMachine) + if err != nil { + panic("unable to compute GVH") + } + + tests := []struct { + name string + machineUpToDateResult internal.UpToDateResult + enableInPlaceUpdatesFeatureGate bool + canExtensionsUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) + getAllExtensionsResponses map[runtimecatalog.GroupVersionHook][]string + wantCanExtensionsUpdateMachineCalled bool + wantCanUpdateMachine bool + wantError bool + wantErrorMessage string + }{ + { + name: "Return false if feature gate is not enabled", + enableInPlaceUpdatesFeatureGate: false, + wantCanUpdateMachine: false, + }, + { + name: "Return false if objects in machineUpToDateResult are nil", + enableInPlaceUpdatesFeatureGate: true, + wantCanUpdateMachine: false, + }, + { + name: "Return false if no CanUpdateMachine extensions registered", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{}, + wantCanUpdateMachine: false, + }, + { + name: "Return false if canExtensionsUpdateMachine returns false", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension"}, + }, + canExtensionsUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if len(extensionHandlers) != 1 || extensionHandlers[0] != "test-update-extension" { + return false, nil, errors.Errorf("unexpected error") + } + return false, []string{"can not update"}, 
nil + }, + wantCanExtensionsUpdateMachineCalled: true, + wantCanUpdateMachine: false, + }, + { + name: "Return true if canExtensionsUpdateMachine returns true", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension"}, + }, + canExtensionsUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if len(extensionHandlers) != 1 || extensionHandlers[0] != "test-update-extension" { + return false, nil, errors.Errorf("unexpected error") + } + return true, nil, nil + }, + wantCanExtensionsUpdateMachineCalled: true, + wantCanUpdateMachine: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + if tt.enableInPlaceUpdatesFeatureGate { + utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.InPlaceUpdates, true) + } + + runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCatalog(catalog). + WithGetAllExtensionResponses(tt.getAllExtensionsResponses). 
+ Build() + + var canExtensionsUpdateMachineCalled bool + r := &KubeadmControlPlaneReconciler{ + RuntimeClient: runtimeClient, + overrideCanExtensionsUpdateMachine: func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + canExtensionsUpdateMachineCalled = true + return tt.canExtensionsUpdateMachineFunc(ctx, machine, machineUpToDateResult, extensionHandlers) + }, + } + + canUpdateMachine, err := r.canUpdateMachine(ctx, machineToInPlaceUpdate, tt.machineUpToDateResult) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(canUpdateMachine).To(Equal(tt.wantCanUpdateMachine)) + + g.Expect(canExtensionsUpdateMachineCalled).To(Equal(tt.wantCanExtensionsUpdateMachineCalled), "canExtensionsUpdateMachineCalled: actual: %t expected: %t", canExtensionsUpdateMachineCalled, tt.wantCanExtensionsUpdateMachineCalled) + }) + } +} + +func Test_canExtensionsUpdateMachine(t *testing.T) { + currentMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.MachineSpec{ + Version: "v1.30.0", + }, + } + desiredMachine := currentMachine.DeepCopy() + desiredMachine.Spec.Version = "v1.31.0" + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + }, + } + desiredKubeadmConfig := currentKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + + currentInfraMachine := &unstructured.Unstructured{ + Object: 
map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": metav1.NamespaceDefault, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + desiredInfraMachine := currentInfraMachine.DeepCopy() + _ = unstructured.SetNestedField(desiredInfraMachine.Object, "in-place updated world", "spec", "hello") + + responseWithEmptyPatches := &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte("[]"), + }, + InfrastructureMachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + }, + BootstrapConfigPatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte("{}"), + }, + } + patchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/version","value":"v1.31.0"}]`), + } + patchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/clusterConfiguration/etcd/local/imageTag","value":"3.6.4-0"}]`), + } + patchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/hello","value":"in-place updated world"}]`), + } + emptyPatch := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + } + + tests := []struct { + name string + machineUpToDateResult internal.UpToDateResult + extensionHandlers []string + callExtensionResponses 
map[string]runtimehooksv1.ResponseObject + callExtensionExpectedChanges map[string]func(runtime.Object) + wantCanUpdateMachine bool + wantReasons []string + wantError bool + wantErrorMessage string + }{ + { + name: "Return true if current and desired objects are equal and no patches are returned", + // Note: canExtensionsUpdateMachine should never be called if the objects are equal, but this is a simple first test case. + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: currentMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: currentInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: currentKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": responseWithEmptyPatches, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return false if current and desired objects are not equal and no patches are returned", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": responseWithEmptyPatches, + }, + wantCanUpdateMachine: false, + wantReasons: []string{ + `Machine cannot be updated in-place: &v1beta2.Machine{ + TypeMeta: {}, + ObjectMeta: {}, + Spec: v1beta2.MachineSpec{ + ClusterName: "", + Bootstrap: {}, + InfrastructureRef: {}, +- Version: "v1.30.0", ++ Version: "v1.31.0", + ProviderID: "", + FailureDomain: "", + ... 
// 3 identical fields + }, + Status: {}, + }`, + `KubeadmConfig cannot be updated in-place: &unstructured.Unstructured{ + Object: map[string]any{ + "spec": map[string]any{ +- "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.5.0-0")}}}, ++ "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.6.4-0")}}}, + "format": string("cloud-config"), + "initConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + "joinConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + }, + }, + }`, + `TestInfrastructureMachine cannot be updated in-place: &unstructured.Unstructured{ +- Object: map[string]any{"spec": map[string]any{"hello": string("world")}}, ++ Object: map[string]any{"spec": map[string]any{"hello": string("in-place updated world")}}, + }`, + }, + }, + { + name: "Return true if current and desired objects are not equal and patches are returned that account for all diffs", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: patchToUpdateInfraMachine, + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return true if current and desired objects are not equal and patches are returned that account for all diffs (multiple extensions)", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + 
CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension-1", "test-update-extension-2", "test-update-extension-3"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension-1": &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: patchToUpdateMachine, + }, + "test-update-extension-2": &runtimehooksv1.CanUpdateMachineResponse{ + InfrastructureMachinePatch: patchToUpdateInfraMachine, + }, + "test-update-extension-3": &runtimehooksv1.CanUpdateMachineResponse{ + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + }, + callExtensionExpectedChanges: map[string]func(runtime.Object){ + "test-update-extension-2": func(object runtime.Object) { + if machine, ok := object.(*clusterv1.Machine); ok { + // After the call to test-update-extension-1 we expect that patchToUpdateMachine is already applied. + machine.Spec.Version = "v1.31.0" + } + }, + "test-update-extension-3": func(object runtime.Object) { + if machine, ok := object.(*clusterv1.Machine); ok { + // After the call to test-update-extension-1 we expect that patchToUpdateMachine is already applied. + machine.Spec.Version = "v1.31.0" + } + if infraMachine, ok := object.(*unstructured.Unstructured); ok { + // After the call to test-update-extension-2 we expect that patchToUpdateInfraMachine is already applied. 
+ _ = unstructured.SetNestedField(infraMachine.Object, "in-place updated world", "spec", "hello") + } + }, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return false if current and desired objects are not equal and patches are returned that only account for some diffs", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: emptyPatch, + BootstrapConfigPatch: emptyPatch, + }, + }, + wantCanUpdateMachine: false, + wantReasons: []string{ + `KubeadmConfig cannot be updated in-place: &unstructured.Unstructured{ + Object: map[string]any{ + "spec": map[string]any{ +- "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.5.0-0")}}}, ++ "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.6.4-0")}}}, + "format": string("cloud-config"), + "initConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + "joinConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + }, + }, + }`, + `TestInfrastructureMachine cannot be updated in-place: &unstructured.Unstructured{ +- Object: map[string]any{"spec": map[string]any{"hello": string("world")}}, ++ Object: map[string]any{"spec": map[string]any{"hello": string("in-place updated world")}}, + }`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + fakeClient := fake.NewClientBuilder(). 
+ WithObjects(currentMachine, currentInfraMachine, currentKubeadmConfig). + Build() + + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCatalog(catalog). + WithCallExtensionValidations(validateCanUpdateMachineRequests(currentMachine, tt.machineUpToDateResult, tt.callExtensionExpectedChanges)). + WithCallExtensionResponses(tt.callExtensionResponses). + Build() + + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + RuntimeClient: runtimeClient, + } + + canUpdateMachine, reasons, err := r.canExtensionsUpdateMachine(ctx, currentMachine, tt.machineUpToDateResult, tt.extensionHandlers) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(canUpdateMachine).To(Equal(tt.wantCanUpdateMachine)) + g.Expect(reasons).To(Equal(tt.wantReasons)) + }) + } +} + +func validateCanUpdateMachineRequests(currentMachine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, callExtensionExpectedChanges map[string]func(runtime.Object)) func(name string, object runtimehooksv1.RequestObject) error { + return func(name string, req runtimehooksv1.RequestObject) error { + switch req := req.(type) { + case *runtimehooksv1.CanUpdateMachineRequest: + // Compare Machine + currentMachine := currentMachine.DeepCopy() + currentMachine.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) + currentMachine.ResourceVersion = "" // cleanupMachine drops ResourceVersion. 
+ if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentMachine) + } + if d := diff(req.Current.Machine, *currentMachine); d != "" { + return fmt.Errorf("expected currentMachine to be equal, got diff: %s", d) + } + desiredMachine := machineUpToDateResult.DesiredMachine.DeepCopy() + desiredMachine.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) + desiredMachine.ResourceVersion = "" // cleanupMachine drops ResourceVersion. + if d := diff(req.Desired.Machine, *desiredMachine); d != "" { + return fmt.Errorf("expected desiredMachine to be equal, got diff: %s", d) + } + + // Compare KubeadmConfig + currentKubeadmConfig := machineUpToDateResult.CurrentKubeadmConfig.DeepCopy() + currentKubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + currentKubeadmConfig.ResourceVersion = "" // cleanupKubeadmConfig drops ResourceVersion. + defaulting.ApplyPreviousKubeadmConfigDefaults(&currentKubeadmConfig.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentKubeadmConfig) + } + currentKubeadmConfigBytes, _ := json.Marshal(currentKubeadmConfig) + if d := diff(req.Current.BootstrapConfig.Raw, currentKubeadmConfigBytes); d != "" { + return fmt.Errorf("expected currentKubeadmConfig to be equal, got diff: %s", d) + } + desiredKubeadmConfig := machineUpToDateResult.DesiredKubeadmConfig.DeepCopy() + desiredKubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + desiredKubeadmConfig.ResourceVersion = "" // cleanupKubeadmConfig drops ResourceVersion. + defaulting.ApplyPreviousKubeadmConfigDefaults(&desiredKubeadmConfig.Spec) // PrepareKubeadmConfigsForDiff applies defaults. 
+ desiredKubeadmConfigBytes, _ := json.Marshal(desiredKubeadmConfig) + if d := diff(req.Desired.BootstrapConfig.Raw, desiredKubeadmConfigBytes); d != "" { + return fmt.Errorf("expected desiredKubeadmConfig to be equal, got diff: %s", d) + } + + // Compare InfraMachine + currentInfraMachine := machineUpToDateResult.CurrentInfraMachine.DeepCopy() + currentInfraMachine.SetResourceVersion("") // cleanupUnstructured drops ResourceVersion. + if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentInfraMachine) + } + currentInfraMachineBytes, _ := json.Marshal(currentInfraMachine) + reqCurrentInfraMachineBytes := bytes.TrimSuffix(req.Current.InfrastructureMachine.Raw, []byte("\n")) // Note: Somehow PatchSpec introduces a trailing \n. + if d := diff(reqCurrentInfraMachineBytes, currentInfraMachineBytes); d != "" { + return fmt.Errorf("expected currentInfraMachine to be equal, got diff: %s", d) + } + desiredInfraMachine := machineUpToDateResult.DesiredInfraMachine.DeepCopy() + desiredInfraMachine.SetResourceVersion("") // cleanupUnstructured drops ResourceVersion. 
+ desiredInfraMachineBytes, _ := json.Marshal(desiredInfraMachine) + if d := diff(req.Desired.InfrastructureMachine.Raw, desiredInfraMachineBytes); d != "" { + return fmt.Errorf("expected desiredInfraMachine to be equal, got diff: %s", d) + } + + return nil + default: + return fmt.Errorf("unhandled request type %T", req) + } + } +} + +func Test_createRequest(t *testing.T) { + g := NewWithT(t) + + ns, err := env.CreateNamespace(ctx, "in-place-create-request") + g.Expect(err).ToNot(HaveOccurred()) + + currentMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "cluster-1", + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "cluster-1", + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "machine-to-in-place-update", + }, + }, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineKind, + Name: "machine-to-in-place-update", + }, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDeletionTimeoutSeconds: ptr.To[int32](10), + }, + Version: "v1.30.0", + }, + Status: clusterv1.MachineStatus{ + NodeRef: clusterv1.MachineNodeReference{ + Name: "machine-to-in-place-update", + }, + }, + } + currentMachineCleanedUp := currentMachine.DeepCopy() + currentMachineCleanedUp.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // cleanupMachine adds GVK. + currentMachineCleanedUp.Status = clusterv1.MachineStatus{} // cleanupMachine drops status. 
+ currentMachineWithFieldsSetByMachineController := currentMachine.DeepCopy() + currentMachineWithFieldsSetByMachineController.Spec.ProviderID = "test://provider-id" + currentMachineWithFieldsSetByMachineController.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + currentMachineWithFieldsSetByMachineControllerCleanedUp := currentMachineCleanedUp.DeepCopy() + currentMachineWithFieldsSetByMachineControllerCleanedUp.Spec.ProviderID = "test://provider-id" + currentMachineWithFieldsSetByMachineControllerCleanedUp.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + + desiredMachine := currentMachine.DeepCopy() + desiredMachine.Spec.Version = "v1.31.0" + desiredMachineCleanedUp := desiredMachine.DeepCopy() + desiredMachineCleanedUp.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // cleanupMachine adds GVK. + desiredMachineCleanedUp.Status = clusterv1.MachineStatus{} // cleanupMachine drops status. + desiredMachineWithFieldsSetByMachineControllerCleanedUp := desiredMachineCleanedUp.DeepCopy() + desiredMachineWithFieldsSetByMachineControllerCleanedUp.Spec.ProviderID = "test://provider-id" + desiredMachineWithFieldsSetByMachineControllerCleanedUp.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{{ + Name: "v", + Value: ptr.To("8"), + }}, + }, + }, + }, + Status: bootstrapv1.KubeadmConfigStatus{ + ObservedGeneration: 5, + 
}, + } + currentKubeadmConfigCleanedUp := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigCleanedUp.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // cleanupKubeadmConfig adds GVK. + currentKubeadmConfigCleanedUp.Status = bootstrapv1.KubeadmConfigStatus{} // cleanupKubeadmConfig drops status. + defaulting.ApplyPreviousKubeadmConfigDefaults(&currentKubeadmConfigCleanedUp.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + currentKubeadmConfigWithOutdatedLabelsAndAnnotations := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigWithOutdatedLabelsAndAnnotations.Labels["outdated-label-1"] = "outdated-label-value-1" + currentKubeadmConfigWithOutdatedLabelsAndAnnotations.Annotations["outdated-annotation-1"] = "outdated-annotation-value-1" + currentKubeadmConfigWithInitConfiguration := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigWithInitConfiguration.Spec.InitConfiguration.NodeRegistration = currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration.NodeRegistration + currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + + desiredKubeadmConfig := currentKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + desiredKubeadmConfigCleanedUp := desiredKubeadmConfig.DeepCopy() + desiredKubeadmConfigCleanedUp.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // cleanupKubeadmConfig adds GVK. + desiredKubeadmConfigCleanedUp.Status = bootstrapv1.KubeadmConfigStatus{} // cleanupKubeadmConfig drops status. + defaulting.ApplyPreviousKubeadmConfigDefaults(&desiredKubeadmConfigCleanedUp.Spec) // PrepareKubeadmConfigsForDiff applies defaults. 
+ + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": ns.Name, + "labels": map[string]interface{}{ + "label-1": "label-value-1", + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "foo": "hello world", + }, + "status": map[string]interface{}{ + "foo": "hello world", + }, + }, + } + currentInfraMachineCleanedUp := currentInfraMachine.DeepCopy() + unstructured.RemoveNestedField(currentInfraMachineCleanedUp.Object, "status") // cleanupUnstructured drops status. + currentInfraMachineWithOutdatedLabelsAndAnnotations := currentInfraMachine.DeepCopy() + currentInfraMachineWithOutdatedLabelsAndAnnotations.SetLabels(map[string]string{"outdated-label-1": "outdated-label-value-1"}) + currentInfraMachineWithOutdatedLabelsAndAnnotations.SetAnnotations(map[string]string{"outdated-annotation-1": "outdated-annotation-value-1"}) + currentInfraMachineWithFieldsSetByMachineController := currentInfraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(currentInfraMachineWithFieldsSetByMachineController.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + currentInfraMachineWithFieldsSetByMachineControllerCleanedUp := currentInfraMachineCleanedUp.DeepCopy() + g.Expect(unstructured.SetNestedField(currentInfraMachineWithFieldsSetByMachineControllerCleanedUp.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + + desiredInfraMachine := currentInfraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(desiredInfraMachine.Object, "hello in-place 
updated world", "spec", "foo")).To(Succeed()) + desiredInfraMachineCleanedUp := desiredInfraMachine.DeepCopy() + unstructured.RemoveNestedField(desiredInfraMachineCleanedUp.Object, "status") // cleanupUnstructured drops status. + desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp := desiredInfraMachineCleanedUp.DeepCopy() + g.Expect(unstructured.SetNestedField(desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + + tests := []struct { + name string + currentMachine *clusterv1.Machine + currentInfraMachine *unstructured.Unstructured + currentKubeadmConfig *bootstrapv1.KubeadmConfig + desiredMachine *clusterv1.Machine + desiredInfraMachine *unstructured.Unstructured + desiredKubeadmConfig *bootstrapv1.KubeadmConfig + modifyMachineAfterCreate func(ctx context.Context, c client.Client, machine *clusterv1.Machine) error + modifyInfraMachineAfterCreate func(ctx context.Context, c client.Client, infraMachine *unstructured.Unstructured) error + modifyUpToDateResult func(result *internal.UpToDateResult) + wantReq *runtimehooksv1.CanUpdateMachineRequest + wantError bool + wantErrorMessage string + }{ + { + name: "Should prepare all objects for diff", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + // Objects have been cleaned up for the diff. 
+ Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: syncs BootstrapConfig/InfraMachine labels and annotations", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyUpToDateResult: func(result *internal.UpToDateResult) { + // Modify the UpToDateResult before it is passed into createRequest. + // This covers the scenario where the "local" current objects are outdated. + result.CurrentInfraMachine = currentInfraMachineWithOutdatedLabelsAndAnnotations + result.CurrentKubeadmConfig = currentKubeadmConfigWithOutdatedLabelsAndAnnotations + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + // Current / desired BootstrapConfig / InfraMachine all contain the latest labels / annotations. 
+ Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: desiredMachine picks up changes from currentMachine via SSA dry-run", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyMachineAfterCreate: func(ctx context.Context, c client.Client, machine *clusterv1.Machine) error { + // Write additional fields like the Machine controller would do. + machineOrig := machine.DeepCopy() + machine.Spec.ProviderID = "test://provider-id" + machine.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + return c.Patch(ctx, machine, client.MergeFrom(machineOrig)) + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + // currentMachine always contained the fields written by the Machine controller + // as we pass the Machine object after modifyMachineAfterCreate into createRequest. + Machine: *currentMachineWithFieldsSetByMachineControllerCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + // desiredMachine picked up the fields written by the Machine controller via SSA dry-run. 
+ Machine: *desiredMachineWithFieldsSetByMachineControllerCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: desiredInfraMachine picks up changes from currentMachine via SSA dry-run", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyInfraMachineAfterCreate: func(ctx context.Context, c client.Client, infraMachine *unstructured.Unstructured) error { + // Write additional fields like the Infra Machine controller would do. + infraMachineOrig := infraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(infraMachine.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + return c.Patch(ctx, infraMachine, client.MergeFrom(infraMachineOrig)) + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + // currentInfraMachine always contained the fields written by the InfraMachine controller + // as we pass the InfraMachine object after modifyInfraMachineAfterCreate into createRequest. + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineWithFieldsSetByMachineControllerCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + // desiredInfraMachine picked up the fields written by the InfraMachine controller via SSA dry-run. 
+ InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: currentKubeadmConfig & currentKubeadmConfig are prepared for diff", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfigWithInitConfiguration, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + // currentKubeadmConfig was converted from InitConfiguration to JoinConfiguration via PrepareKubeadmConfigsForDiff. + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + g := NewWithT(t) + + // TODO(in-place) change the fieldManagers to the correct ones later after the SSA refactoring + currentMachineForPatch := tt.currentMachine.DeepCopy() + currentMachineForPatch.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // Has to be set for env.PatchAndWait + g.Expect(env.PatchAndWait(ctx, currentMachineForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentMachine)).To(Succeed()) + }) + currentInfraMachineForPatch := tt.currentInfraMachine.DeepCopy() + 
g.Expect(env.PatchAndWait(ctx, currentInfraMachineForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentInfraMachine)).To(Succeed()) + }) + currentKubeadmConfigForPatch := tt.currentKubeadmConfig.DeepCopy() + currentKubeadmConfigForPatch.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // Has to be set for env.PatchAndWait + g.Expect(env.PatchAndWait(ctx, currentKubeadmConfigForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentKubeadmConfig)).To(Succeed()) + }) + if tt.modifyMachineAfterCreate != nil { + g.Expect(tt.modifyMachineAfterCreate(ctx, env.Client, currentMachineForPatch)).To(Succeed()) + } + if tt.modifyInfraMachineAfterCreate != nil { + g.Expect(tt.modifyInfraMachineAfterCreate(ctx, env.Client, currentInfraMachineForPatch)).To(Succeed()) + } + + upToDateResult := internal.UpToDateResult{ + CurrentInfraMachine: currentInfraMachineForPatch, + CurrentKubeadmConfig: currentKubeadmConfigForPatch, + DesiredMachine: tt.desiredMachine, + DesiredInfraMachine: tt.desiredInfraMachine, + DesiredKubeadmConfig: tt.desiredKubeadmConfig, + } + if tt.modifyUpToDateResult != nil { + tt.modifyUpToDateResult(&upToDateResult) + } + + req, err := createRequest(ctx, env.Client, currentMachineForPatch, upToDateResult) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(req).To(BeComparableTo(tt.wantReq)) + }) + } +} + +func Test_applyPatchesToRequest(t *testing.T) { + currentMachine := &clusterv1.Machine{ + // Set GVK because this is required by convertToRawExtension. 
+ TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.MachineSpec{ + Version: "v1.30.0", + }, + } + patchedMachine := currentMachine.DeepCopy() + patchedMachine.Spec.Version = "v1.31.0" + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + // Set GVK because this is required by convertToRawExtension. + TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + }, + } + patchedKubeadmConfig := currentKubeadmConfig.DeepCopy() + patchedKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": metav1.NamespaceDefault, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + patchedInfraMachine := currentInfraMachine.DeepCopy() + _ = unstructured.SetNestedField(patchedInfraMachine.Object, "in-place updated world", "spec", "hello") + + responseWithEmptyPatches := &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + 
Patch: []byte("[]"), + }, + InfrastructureMachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + }, + BootstrapConfigPatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte("{}"), + }, + } + patchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/version","value":"v1.31.0"}]`), + } + patchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/clusterConfiguration/etcd/local/imageTag","value":"3.6.4-0"}]`), + } + patchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/hello","value":"in-place updated world"}]`), + } + jsonMergePatchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"version":"v1.31.0"}}`), + } + jsonMergePatchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"clusterConfiguration":{"etcd":{"local":{"imageTag":"3.6.4-0"}}}}}`), + } + jsonMergePatchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"hello":"in-place updated world"}}`), + } + patchToUpdateMachineStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + patchToUpdateKubeadmConfigStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + patchToUpdateInfraMachineStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + + tests := 
[]struct { + name string + req *runtimehooksv1.CanUpdateMachineRequest + resp *runtimehooksv1.CanUpdateMachineResponse + wantReq *runtimehooksv1.CanUpdateMachineRequest + wantError bool + wantErrorMessage string + }{ + { + name: "No changes with no patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: responseWithEmptyPatches, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + }, + { + name: "Changes with patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: patchToUpdateInfraMachine, + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *patchedMachine, + InfrastructureMachine: mustConvertToRawExtension(patchedInfraMachine), + BootstrapConfig: mustConvertToRawExtension(patchedKubeadmConfig), + }, + }, + }, + { + name: "Changes with JSON merge patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: 
mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: jsonMergePatchToUpdateMachine, + InfrastructureMachinePatch: jsonMergePatchToUpdateInfraMachine, + BootstrapConfigPatch: jsonMergePatchToUpdateKubeadmConfig, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *patchedMachine, + InfrastructureMachine: mustConvertToRawExtension(patchedInfraMachine), + BootstrapConfig: mustConvertToRawExtension(patchedKubeadmConfig), + }, + }, + }, + { + name: "No changes with status patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: patchToUpdateMachineStatus, + InfrastructureMachinePatch: patchToUpdateInfraMachineStatus, + BootstrapConfigPatch: patchToUpdateKubeadmConfigStatus, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + }, + { + name: "Error if PatchType is not set but Patch is", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: runtimehooksv1.Patch{ + // PatchType is missing + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + }, + }, + wantError: true, 
+ wantErrorMessage: "failed to apply patch: patchType is not set", + }, + { + name: "Error if PatchType is set to an unknown value", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + MachinePatch: runtimehooksv1.Patch{ + PatchType: "UnknownType", + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + }, + }, + wantError: true, + wantErrorMessage: "failed to apply patch: unknown patchType UnknownType", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := applyPatchesToRequest(ctx, tt.req, tt.resp) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + // Compare only the objects and avoid comparing runtime.RawExtension.Raw because + // Raw is slightly non-deterministic because it doesn't guarantee order of map keys. 
+ g.Expect(tt.req.Current.Machine).To(BeComparableTo(tt.wantReq.Current.Machine)) + g.Expect(tt.req.Current.InfrastructureMachine.Object).To(BeComparableTo(tt.wantReq.Current.InfrastructureMachine.Object)) + g.Expect(tt.req.Current.BootstrapConfig.Object).To(BeComparableTo(tt.wantReq.Current.BootstrapConfig.Object)) + }) + } +} + +func diff(a, b any) string { + _, d, err := compare.Diff(a, b) + if err != nil { + return fmt.Sprintf("error during diff: %v", err) + } + return d +} + +func mustConvertToRawExtension(object runtime.Object) runtime.RawExtension { + raw, err := convertToRawExtension(object) + if err != nil { + panic(err) + } + return raw +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_test.go b/controlplane/kubeadm/internal/controllers/inplace_test.go new file mode 100644 index 000000000000..a6356277ac02 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" +) + +func Test_tryInPlaceUpdate(t *testing.T) { + machineToInPlaceUpdate := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + }, + } + + tests := []struct { + name string + preflightChecksFunc func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result + canUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) + wantCanUpdateMachineCalled bool + wantFallbackToScaleDown bool + wantError bool + wantErrorMessage string + wantRes ctrl.Result + }{ + { + name: "Requeue if preflight checks for all Machines failed", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} + }, + wantRes: ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, + }, + { + name: "Fallback to scale down if checks for all Machines failed, but checks succeed when excluding machineToInPlaceUpdate", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + if len(excludeFor) == 1 && excludeFor[0] == machineToInPlaceUpdate { + return ctrl.Result{} // If machineToInPlaceUpdate is excluded preflight checks succeed => scale down + } + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} + }, + wantFallbackToScaleDown: true, + }, + { + name: "Return error if canUpdateMachine returns an error", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ 
*clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return false, errors.New("canUpdateMachine error") + }, + wantCanUpdateMachineCalled: true, + wantError: true, + wantErrorMessage: "failed to determine if Machine machine-to-in-place-update can be updated in-place: canUpdateMachine error", + }, + { + name: "Fallback to scale down if canUpdateMachine returns false", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return false, nil + }, + wantCanUpdateMachineCalled: true, + wantFallbackToScaleDown: true, + }, + { + name: "Trigger in-place update if canUpdateMachine returns true", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return true, nil + }, + wantCanUpdateMachineCalled: true, + // TODO(in-place): Will be modified once tryInPlaceUpdate triggers in-place updates. + wantFallbackToScaleDown: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var canUpdateMachineCalled bool + r := &KubeadmControlPlaneReconciler{ + overridePreflightChecksFunc: func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + return tt.preflightChecksFunc(ctx, controlPlane, excludeFor...) 
+ }, + overrideCanUpdateMachineFunc: func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) { + canUpdateMachineCalled = true + return tt.canUpdateMachineFunc(ctx, machine, machineUpToDateResult) + }, + } + + fallbackToScaleDown, res, err := r.tryInPlaceUpdate(ctx, nil, machineToInPlaceUpdate, internal.UpToDateResult{}) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(res).To(Equal(tt.wantRes)) + g.Expect(fallbackToScaleDown).To(Equal(tt.wantFallbackToScaleDown)) + + g.Expect(canUpdateMachineCalled).To(Equal(tt.wantCanUpdateMachineCalled), "canUpdateMachineCalled: actual: %t expected: %t", canUpdateMachineCalled, tt.wantCanUpdateMachineCalled) + }) + } +} diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 671d9de75ea3..515e9c979ed9 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -53,7 +53,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte } log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). - Info("Machine created (scale up)", + Info("Machine created (init)", "Machine", klog.KObj(newMachine), newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name), newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name)) @@ -70,8 +70,8 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, log := ctrl.LoggerFrom(ctx) // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. 
- if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() { - return result, err + if result := r.preflightChecks(ctx, controlPlane); !result.IsZero() { + return result, nil } fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) @@ -109,8 +109,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( // Run preflight checks ensuring the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. // Given that we're scaling down, we can exclude the machineToDelete from the preflight checks. - if result, err := r.preflightChecks(ctx, controlPlane, machineToDelete); err != nil || !result.IsZero() { - return result, err + if result := r.preflightChecks(ctx, controlPlane, machineToDelete); !result.IsZero() { + return result, nil } workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) @@ -158,13 +158,17 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( // If the control plane is not passing preflight checks, it requeue. // // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneAndMachinesConditions before this. -func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam +func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + if r.overridePreflightChecksFunc != nil { + return r.overridePreflightChecksFunc(ctx, controlPlane, excludeFor...) + } + log := ctrl.LoggerFrom(ctx) // If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet, // so it is considered ok to proceed. 
if controlPlane.Machines.Len() == 0 { - return ctrl.Result{}, nil + return ctrl.Result{} } if feature.Gates.Enabled(feature.ClusterTopology) { @@ -182,7 +186,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con } log.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated", v)) controlPlane.PreflightCheckResults.TopologyVersionMismatch = true - return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} } } @@ -190,7 +194,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con if controlPlane.HasDeletingMachine() { controlPlane.PreflightCheckResults.HasDeletingMachine = true log.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) - return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil + return ctrl.Result{RequeueAfter: deleteRequeueAfter} } // Check machine health conditions; if there are conditions with False or Unknown, then wait. 
@@ -247,10 +251,10 @@ loopmachines: "Waiting for control plane to pass preflight checks to continue reconciliation: %v", aggregatedError) log.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error()) - return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} } - return ctrl.Result{}, nil + return ctrl.Result{} } func preflightCheckCondition(kind string, obj *clusterv1.Machine, conditionType string) error { diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 728a6bb6ae1a..84cfa119f654 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -792,8 +792,7 @@ func TestPreflightChecks(t *testing.T) { KCP: tt.kcp, Machines: collections.FromMachines(tt.machines...), } - result, err := r.preflightChecks(context.TODO(), controlPlane) - g.Expect(err).ToNot(HaveOccurred()) + result := r.preflightChecks(context.TODO(), controlPlane) g.Expect(result).To(BeComparableTo(tt.expectResult)) g.Expect(controlPlane.PreflightCheckResults).To(Equal(tt.expectPreflight)) }) diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/update.go similarity index 100% rename from controlplane/kubeadm/internal/controllers/upgrade.go rename to controlplane/kubeadm/internal/controllers/update.go diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/update_test.go similarity index 100% rename from controlplane/kubeadm/internal/controllers/upgrade_test.go rename to controlplane/kubeadm/internal/controllers/update_test.go diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index a0e986c9359d..b0aa09bcc5ba 100644 --- a/controlplane/kubeadm/internal/filters.go +++ 
b/controlplane/kubeadm/internal/filters.go @@ -200,7 +200,7 @@ func matchesInfraMachine( desiredInfraMachine, err := desiredstate.ComputeDesiredInfraMachine(ctx, c, kcp, cluster, machine.Name, currentInfraMachine) if err != nil { - return "", nil, nil, false, errors.Wrapf(err, "failed to match InfraMachine") + return "", nil, nil, false, errors.Wrapf(err, "failed to match %s", currentInfraMachine.GetKind()) } // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. @@ -290,7 +290,7 @@ func matchesKubeadmConfig( } // PrepareKubeadmConfigsForDiff cleans up all fields that are not relevant for the comparison. -func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bootstrapv1.KubeadmConfig, convertCurrentInitConfigurationToJoinConfiguration bool) (desired *bootstrapv1.KubeadmConfig, current *bootstrapv1.KubeadmConfig) { +func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bootstrapv1.KubeadmConfig, convertCurrentInitConfigurationToJoinConfiguration bool) (desired, current *bootstrapv1.KubeadmConfig) { // DeepCopy to ensure the passed in KubeadmConfigs are not modified. // This has to be done because we eventually want to be able to apply the desiredKubeadmConfig // (without the modifications that we make here). diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index 0576fa26ad8c..b89bcddeda20 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -493,7 +493,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { EtcdCallTimeout: etcdCallTimeout, EtcdLogger: etcdLogger, RemoteConditionsGracePeriod: remoteConditionsGracePeriod, - //RuntimeClient: runtimeClient, // TODO(in-place): enable once we want to use it, also validate in SetupWithManager that RuntimeClient is set if feature gate is enabled. 
+ RuntimeClient: runtimeClient, }).SetupWithManager(ctx, mgr, concurrency(kubeadmControlPlaneConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubeadmControlPlane") os.Exit(1) diff --git a/exp/runtime/client/client.go b/exp/runtime/client/client.go index 3fb39f763414..211a0313396e 100644 --- a/exp/runtime/client/client.go +++ b/exp/runtime/client/client.go @@ -86,6 +86,9 @@ type Client interface { // Unregister unregisters the ExtensionConfig. Unregister(extensionConfig *runtimev1.ExtensionConfig) error + // GetAllExtensions gets all the ExtensionHandlers registered for the hook. + GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object) ([]string, error) + // CallAllExtensions calls all the ExtensionHandler registered for the hook. CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 60f524e6e6e8..cb66e9aa55a6 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -41,6 +41,7 @@ import ( "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/external" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/inline" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" + patchutil "sigs.k8s.io/cluster-api/internal/util/patch" ) // Engine is a patch engine which applies patches defined in a ClusterBlueprint to a ClusterState. @@ -504,7 +505,7 @@ func applyPatchToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatche // Overwrite the spec of template.Template with the spec of the patchedTemplate, // to ensure that we only pick up changes to the spec. 
- if err := patchTemplateSpec(&requestItem.Object, patchedTemplate); err != nil { + if err := patchutil.PatchSpec(&requestItem.Object, patchedTemplate); err != nil { log.Error(err, fmt.Sprintf("Failed to apply patch to template with uid %q", requestItem.UID)) return errors.Wrap(err, "failed to apply patch to template") } diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index 4a6efb503212..3e56e6dca1e7 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -965,7 +965,7 @@ func TestApply(t *testing.T) { } runtimeClient = fakeruntimeclient.NewRuntimeClientBuilder(). WithCallExtensionResponses(tt.externalPatchResponses). - WithCallExtensionValidations(func(req runtimehooksv1.RequestObject) error { + WithCallExtensionValidations(func(_ string, req runtimehooksv1.RequestObject) error { switch req := req.(type) { case *runtimehooksv1.GeneratePatchesRequest: for _, item := range req.Items { diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go index a298910dddda..48702956035d 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go @@ -124,6 +124,10 @@ func (f *fakeRuntimeClient) Unregister(_ *runtimev1.ExtensionConfig) error { panic("implement me") } +func (f *fakeRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object) ([]string, error) { + panic("implement me") +} + func (f *fakeRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { panic("implement me") } diff --git 
a/internal/controllers/topology/cluster/patches/patch.go b/internal/controllers/topology/cluster/patches/patch.go index 174d4538959d..06b1bb6dac38 100644 --- a/internal/controllers/topology/cluster/patches/patch.go +++ b/internal/controllers/topology/cluster/patches/patch.go @@ -21,16 +21,15 @@ import ( "context" "encoding/json" "fmt" - "strings" jsonpatch "github.com/evanphx/json-patch/v5" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/cluster-api/internal/contract" + patchutil "sigs.k8s.io/cluster-api/internal/util/patch" ) // PatchOption represents an option for the patchObject and patchTemplate funcs. @@ -88,12 +87,12 @@ func patchUnstructured(ctx context.Context, original, modified *unstructured.Uns patched := original.DeepCopy() // copySpec overwrites patched.destSpecPath with modified.srcSpecPath. - if err := copySpec(copySpecInput{ - src: modified, - dest: patched, - srcSpecPath: srcSpecPath, - destSpecPath: destSpecPath, - fieldsToPreserve: patchOptions.preserveFields, + if err := patchutil.CopySpec(patchutil.CopySpecInput{ + Src: modified, + Dest: patched, + SrcSpecPath: srcSpecPath, + DestSpecPath: destSpecPath, + FieldsToPreserve: patchOptions.preserveFields, }); err != nil { return errors.Wrapf(err, "failed to apply patch to %s %s", original.GetKind(), klog.KObj(original)) } @@ -135,82 +134,3 @@ func calculateDiff(original, patched *unstructured.Unstructured) ([]byte, error) } return diff, nil } - -// patchTemplateSpec overwrites spec in templateJSON with spec of patchedTemplateBytes. -func patchTemplateSpec(templateJSON *runtime.RawExtension, patchedTemplateBytes []byte) error { - // Convert templates to Unstructured. 
- template, err := bytesToUnstructured(templateJSON.Raw) - if err != nil { - return errors.Wrap(err, "failed to convert template to Unstructured") - } - patchedTemplate, err := bytesToUnstructured(patchedTemplateBytes) - if err != nil { - return errors.Wrap(err, "failed to convert patched template to Unstructured") - } - - // Copy spec from patchedTemplate to template. - if err := copySpec(copySpecInput{ - src: patchedTemplate, - dest: template, - srcSpecPath: "spec", - destSpecPath: "spec", - }); err != nil { - return errors.Wrap(err, "failed to apply patch to template") - } - - // Marshal template and store it in templateJSON. - templateBytes, err := template.MarshalJSON() - if err != nil { - return errors.Wrapf(err, "failed to marshal patched template") - } - templateJSON.Object = template - templateJSON.Raw = templateBytes - return nil -} - -type copySpecInput struct { - src *unstructured.Unstructured - dest *unstructured.Unstructured - srcSpecPath string - destSpecPath string - fieldsToPreserve []contract.Path -} - -// copySpec copies a field from a srcSpecPath in src to a destSpecPath in dest, -// while preserving fieldsToPreserve. -func copySpec(in copySpecInput) error { - // Backup fields that should be preserved from dest. - preservedFields := map[string]interface{}{} - for _, field := range in.fieldsToPreserve { - value, found, err := unstructured.NestedFieldNoCopy(in.dest.Object, field...) - if !found { - // Continue if the field does not exist in src. fieldsToPreserve don't have to exist. - continue - } else if err != nil { - return errors.Wrapf(err, "failed to get field %q from %s %s", strings.Join(field, "."), in.dest.GetKind(), klog.KObj(in.dest)) - } - preservedFields[strings.Join(field, ".")] = value - } - - // Get spec from src. - srcSpec, found, err := unstructured.NestedFieldNoCopy(in.src.Object, strings.Split(in.srcSpecPath, ".")...) - if !found { - // Return if srcSpecPath does not exist in src, nothing to do. 
- return nil - } else if err != nil { - return errors.Wrapf(err, "failed to get field %q from %s %s", in.srcSpecPath, in.src.GetKind(), klog.KObj(in.src)) - } - - // Set spec in dest. - if err := unstructured.SetNestedField(in.dest.Object, srcSpec, strings.Split(in.destSpecPath, ".")...); err != nil { - return errors.Wrapf(err, "failed to set field %q on %s %s", in.destSpecPath, in.dest.GetKind(), klog.KObj(in.dest)) - } - - // Restore preserved fields. - for path, value := range preservedFields { - if err := unstructured.SetNestedField(in.dest.Object, value, strings.Split(path, ".")...); err != nil { - return errors.Wrapf(err, "failed to set field %q on %s %s", path, in.dest.GetKind(), klog.KObj(in.dest)) - } - } - return nil -} diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go index 1d260e7450fe..be7bb57bd1a9 100644 --- a/internal/runtime/client/client.go +++ b/internal/runtime/client/client.go @@ -172,6 +172,39 @@ func (c *client) Unregister(extensionConfig *runtimev1.ExtensionConfig) error { return nil } +func (c *client) GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object) ([]string, error) { + hookName := runtimecatalog.HookName(hook) + log := ctrl.LoggerFrom(ctx).WithValues("hook", hookName) + ctx = ctrl.LoggerInto(ctx, log) + gvh, err := c.catalog.GroupVersionHook(hook) + if err != nil { + return nil, errors.Wrapf(err, "failed to get extension handlers for hook %q: failed to compute GroupVersionHook", hookName) + } + + registrations, err := c.registry.List(gvh.GroupHook()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get extension handlers for hook %q", gvh.GroupHook()) + } + + log.V(4).Info(fmt.Sprintf("Getting all extensions of hook %q", hookName)) + matchingRegistrations := []string{} + for _, registration := range registrations { + // Compute whether the object the get is being made for matches the namespaceSelector + namespaceMatches, err := c.matchNamespace(ctx, 
registration.NamespaceSelector, forObject.GetNamespace()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get extension handlers for hook %q: failed to get extension handler %q", gvh.GroupHook(), registration.Name) + } + // If the object namespace isn't matched by the registration NamespaceSelector don't return it. + if !namespaceMatches { + log.V(5).Info(fmt.Sprintf("skipping extension handler %q as object '%s/%s' does not match selector %q of ExtensionConfig", registration.Name, forObject.GetNamespace(), forObject.GetName(), registration.NamespaceSelector)) + continue + } + matchingRegistrations = append(matchingRegistrations, registration.Name) + } + + return matchingRegistrations, nil +} + // CallAllExtensions calls all the ExtensionHandlers registered for the hook. // The ExtensionHandlers are called sequentially. The function exits immediately after any of the ExtensionHandlers return an error. // This ensures we don't end up waiting for timeout from multiple unreachable Extensions. 
diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index 99dcd50d9109..ccd269723b6d 100644 --- a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -1027,6 +1027,150 @@ func TestPrepareRequest(t *testing.T) { }) } +func TestClient_GetAllExtensions(t *testing.T) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "kubernetes.io/metadata.name": "foo", + }, + }, + } + nsDifferent := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "different", + Labels: map[string]string{ + "kubernetes.io/metadata.name": "different", + }, + }, + } + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + Namespace: "foo", + }, + } + clusterDifferentNamespace := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + Namespace: "different", + }, + } + + extensionConfig := runtimev1.ExtensionConfig{ + Spec: runtimev1.ExtensionConfigSpec{ + ClientConfig: runtimev1.ClientConfig{ + // Set a fake URL, in test cases where we start the test server the URL will be overridden. + URL: "https://127.0.0.1/", + CABundle: testcerts.CACert, + }, + // The extensions in this ExtensionConfig will be only registered for the foo namespace. 
+ NamespaceSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{ns.Name}, + }, + }, + }, + }, + Status: runtimev1.ExtensionConfigStatus{ + Handlers: []runtimev1.ExtensionHandler{ + { + Name: "first-extension", + RequestHook: runtimev1.GroupVersionHook{ + APIVersion: fakev1alpha1.GroupVersion.String(), + Hook: "FakeHook", + }, + TimeoutSeconds: 1, + FailurePolicy: runtimev1.FailurePolicyFail, + }, + { + Name: "second-extension", + RequestHook: runtimev1.GroupVersionHook{ + APIVersion: fakev1alpha1.GroupVersion.String(), + Hook: "FakeHook", + }, + TimeoutSeconds: 1, + FailurePolicy: runtimev1.FailurePolicyFail, + }, + { + Name: "third-extension", + RequestHook: runtimev1.GroupVersionHook{ + APIVersion: fakev1alpha1.GroupVersion.String(), + Hook: "FakeHook", + }, + TimeoutSeconds: 1, + FailurePolicy: runtimev1.FailurePolicyFail, + }, + }, + }, + } + + tests := []struct { + name string + registeredExtensionConfigs []runtimev1.ExtensionConfig + hook runtimecatalog.Hook + cluster *clusterv1.Cluster + wantExtensions []string + wantErr bool + }{ + { + name: "should return extensions if ExtensionHandlers are registered for the hook", + registeredExtensionConfigs: []runtimev1.ExtensionConfig{extensionConfig}, + hook: fakev1alpha1.FakeHook, + cluster: cluster, + wantExtensions: []string{"first-extension", "second-extension", "third-extension"}, + }, + { + name: "should return no extensions if ExtensionHandlers are registered for the hook in a different namespace", + registeredExtensionConfigs: []runtimev1.ExtensionConfig{extensionConfig}, + hook: fakev1alpha1.FakeHook, + cluster: clusterDifferentNamespace, + wantExtensions: []string{}, + }, + { + name: "should return no extensions if no ExtensionHandlers are registered for the hook", + registeredExtensionConfigs: []runtimev1.ExtensionConfig{}, + hook: fakev1alpha1.SecondFakeHook, + cluster: 
cluster, + wantExtensions: []string{}, + }, + { + name: "should return error if hook is not registered in the catalog", + registeredExtensionConfigs: []runtimev1.ExtensionConfig{}, + hook: "UnknownHook", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + cat := runtimecatalog.New() + _ = fakev1alpha1.AddToCatalog(cat) + _ = fakev1alpha2.AddToCatalog(cat) + fakeClient := fake.NewClientBuilder(). + WithObjects(ns, nsDifferent). + Build() + c := New(Options{ + Catalog: cat, + Registry: registry(tt.registeredExtensionConfigs), + Client: fakeClient, + }) + + gotExtensions, err := c.GetAllExtensions(context.Background(), tt.hook, tt.cluster) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(gotExtensions).To(ConsistOf(tt.wantExtensions)) + }) + } +} + func TestClient_CallAllExtensions(t *testing.T) { ns := &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ diff --git a/internal/runtime/client/fake/fake_client.go b/internal/runtime/client/fake/fake_client.go index 039f70a129b3..1565931c1bad 100644 --- a/internal/runtime/client/fake/fake_client.go +++ b/internal/runtime/client/fake/fake_client.go @@ -34,10 +34,11 @@ import ( type RuntimeClientBuilder struct { ready bool catalog *runtimecatalog.Catalog + getAllResponses map[runtimecatalog.GroupVersionHook][]string callAllResponses map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject callAllValidations func(object runtimehooksv1.RequestObject) error callResponses map[string]runtimehooksv1.ResponseObject - callValidations func(object runtimehooksv1.RequestObject) error + callValidations func(name string, object runtimehooksv1.RequestObject) error } // NewRuntimeClientBuilder returns a new builder for the fake runtime client. 
@@ -51,6 +52,12 @@ func (f *RuntimeClientBuilder) WithCatalog(catalog *runtimecatalog.Catalog) *Run return f } +// WithGetAllExtensionResponses can be used to dictate the responses for GetAllExtensions. +func (f *RuntimeClientBuilder) WithGetAllExtensionResponses(responses map[runtimecatalog.GroupVersionHook][]string) *RuntimeClientBuilder { + f.getAllResponses = responses + return f +} + // WithCallAllExtensionResponses can be used to dictate the responses for CallAllExtensions. func (f *RuntimeClientBuilder) WithCallAllExtensionResponses(responses map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject) *RuntimeClientBuilder { f.callAllResponses = responses @@ -70,7 +77,7 @@ func (f *RuntimeClientBuilder) WithCallExtensionResponses(responses map[string]r } // WithCallExtensionValidations can be used to validate the incoming request for CallExtensions. -func (f *RuntimeClientBuilder) WithCallExtensionValidations(callValidations func(object runtimehooksv1.RequestObject) error) *RuntimeClientBuilder { +func (f *RuntimeClientBuilder) WithCallExtensionValidations(callValidations func(name string, object runtimehooksv1.RequestObject) error) *RuntimeClientBuilder { f.callValidations = callValidations return f } @@ -85,6 +92,7 @@ func (f *RuntimeClientBuilder) MarkReady(ready bool) *RuntimeClientBuilder { func (f *RuntimeClientBuilder) Build() *RuntimeClient { return &RuntimeClient{ isReady: f.ready, + getAllResponses: f.getAllResponses, callAllResponses: f.callAllResponses, callAllValidations: f.callAllValidations, callResponses: f.callResponses, @@ -100,14 +108,24 @@ var _ runtimeclient.Client = &RuntimeClient{} type RuntimeClient struct { isReady bool catalog *runtimecatalog.Catalog + getAllResponses map[runtimecatalog.GroupVersionHook][]string callAllResponses map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject callAllValidations func(object runtimehooksv1.RequestObject) error callResponses map[string]runtimehooksv1.ResponseObject - 
callValidations func(object runtimehooksv1.RequestObject) error + callValidations func(name string, object runtimehooksv1.RequestObject) error callAllTracker map[string]int } +// GetAllExtensions implements Client. +func (fc *RuntimeClient) GetAllExtensions(_ context.Context, hook runtimecatalog.Hook, _ metav1.Object) ([]string, error) { + gvh, err := fc.catalog.GroupVersionHook(hook) + if err != nil { + return nil, errors.Wrap(err, "failed to compute GVH") + } + return fc.getAllResponses[gvh], nil +} + // CallAllExtensions implements Client. func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, _ metav1.Object, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error { defer func() { @@ -145,7 +163,7 @@ func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecata // CallExtension implements Client. func (fc *RuntimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hook, _ metav1.Object, name string, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { if fc.callValidations != nil { - if err := fc.callValidations(req); err != nil { + if err := fc.callValidations(name, req); err != nil { return err } } diff --git a/internal/util/patch/patch.go b/internal/util/patch/patch.go new file mode 100644 index 000000000000..39a06afe8c11 --- /dev/null +++ b/internal/util/patch/patch.go @@ -0,0 +1,123 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package patch contains patch utils.
+package patch
+
+import (
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cluster-api/internal/contract"
+)
+
+// PatchSpec overwrites spec in object with spec of patchedObjectBytes.
+func PatchSpec(object *runtime.RawExtension, patchedObjectBytes []byte) error { //nolint:revive // not going to call this func ObjectSpec to avoid stuttering
+	objectUnstructured, err := bytesToUnstructured(object.Raw)
+	if err != nil {
+		return errors.Wrap(err, "failed to convert object to Unstructured")
+	}
+	patchedObjectUnstructured, err := bytesToUnstructured(patchedObjectBytes)
+	if err != nil {
+		return errors.Wrap(err, "failed to convert patched object to Unstructured")
+	}
+
+	// Copy spec from patchedObjectUnstructured to objectUnstructured.
+	if err := CopySpec(CopySpecInput{
+		Src:          patchedObjectUnstructured,
+		Dest:         objectUnstructured,
+		SrcSpecPath:  "spec",
+		DestSpecPath: "spec",
+	}); err != nil {
+		return errors.Wrap(err, "failed to apply patch to object")
+	}
+
+	// Marshal objectUnstructured and store it in object.
+	objectBytes, err := objectUnstructured.MarshalJSON()
+	if err != nil {
+		return errors.Wrapf(err, "failed to marshal patched object")
+	}
+	object.Object = objectUnstructured
+	object.Raw = objectBytes
+	return nil
+}
+
+// CopySpecInput is a struct containing the input parameters of CopySpec.
+type CopySpecInput struct {
+	Src              *unstructured.Unstructured
+	Dest             *unstructured.Unstructured
+	SrcSpecPath      string
+	DestSpecPath     string
+	FieldsToPreserve []contract.Path
+}
+
+// CopySpec copies a field from a srcSpecPath in src to a destSpecPath in dest,
+// while preserving fieldsToPreserve.
+func CopySpec(in CopySpecInput) error {
+	// Backup fields that should be preserved from dest.
+	preservedFields := map[string]interface{}{}
+	for _, field := range in.FieldsToPreserve {
+		value, found, err := unstructured.NestedFieldNoCopy(in.Dest.Object, field...)
+		if !found {
+			// Continue if the field does not exist in dest. fieldsToPreserve don't have to exist.
+			continue
+		} else if err != nil {
+			return errors.Wrapf(err, "failed to get field %q from %s %s", strings.Join(field, "."), in.Dest.GetKind(), klog.KObj(in.Dest))
+		}
+		preservedFields[strings.Join(field, ".")] = value
+	}
+
+	// Get spec from src.
+	srcSpec, found, err := unstructured.NestedFieldNoCopy(in.Src.Object, strings.Split(in.SrcSpecPath, ".")...)
+	if !found {
+		// Return if srcSpecPath does not exist in src, nothing to do.
+		return nil
+	} else if err != nil {
+		return errors.Wrapf(err, "failed to get field %q from %s %s", in.SrcSpecPath, in.Src.GetKind(), klog.KObj(in.Src))
+	}
+
+	// Set spec in dest.
+	if err := unstructured.SetNestedField(in.Dest.Object, srcSpec, strings.Split(in.DestSpecPath, ".")...); err != nil {
+		return errors.Wrapf(err, "failed to set field %q on %s %s", in.DestSpecPath, in.Dest.GetKind(), klog.KObj(in.Dest))
+	}
+
+	// Restore preserved fields.
+	for path, value := range preservedFields {
+		if err := unstructured.SetNestedField(in.Dest.Object, value, strings.Split(path, ".")...); err != nil {
+			return errors.Wrapf(err, "failed to set field %q on %s %s", path, in.Dest.GetKind(), klog.KObj(in.Dest))
+		}
+	}
+	return nil
+}
+
+// unstructuredDecoder is used to decode byte arrays into Unstructured objects.
+var unstructuredDecoder = serializer.NewCodecFactory(nil).UniversalDeserializer()
+
+// bytesToUnstructured provides a utility method that converts a (JSON) byte array into an Unstructured object.
+func bytesToUnstructured(b []byte) (*unstructured.Unstructured, error) {
+	// Unmarshal the JSON.
+ u := &unstructured.Unstructured{} + if _, _, err := unstructuredDecoder.Decode(b, nil, u); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal object from json") + } + + return u, nil +} diff --git a/internal/controllers/topology/cluster/patches/patch_test.go b/internal/util/patch/patch_test.go similarity index 81% rename from internal/controllers/topology/cluster/patches/patch_test.go rename to internal/util/patch/patch_test.go index 8037891bb10e..c95e3d55d394 100644 --- a/internal/controllers/topology/cluster/patches/patch_test.go +++ b/internal/util/patch/patch_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package patches +package patch import ( "testing" @@ -28,29 +28,29 @@ import ( func TestCopySpec(t *testing.T) { tests := []struct { name string - input copySpecInput + input CopySpecInput want *unstructured.Unstructured wantErr bool }{ { name: "Field both in src and dest, no-op when equal", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": "A", }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": "A", }, }, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -62,23 +62,23 @@ func TestCopySpec(t *testing.T) { }, { name: "Field both in src and dest, overwrite dest when different", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": "A", }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": 
"A-different", }, }, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -90,8 +90,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Nested field both in src and dest, no-op when equal", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -102,7 +102,7 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -113,8 +113,8 @@ func TestCopySpec(t *testing.T) { }, }, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -130,8 +130,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Nested field both in src and dest, overwrite dest when different", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -142,7 +142,7 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -153,8 +153,8 @@ func TestCopySpec(t *testing.T) { }, }, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -170,19 +170,19 @@ func TestCopySpec(t *testing.T) { }, { name: "Field only in src, copy to dest", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: 
&unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "foo": "bar", }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{}, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -194,8 +194,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Nested field only in src, copy to dest", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -206,11 +206,11 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{}, }, - srcSpecPath: "spec", - destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -226,8 +226,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Copy field from spec.template.spec in src to spec in dest", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -238,11 +238,11 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{}, }, - srcSpecPath: "spec.template.spec", - destSpecPath: "spec", + SrcSpecPath: "spec.template.spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -254,8 +254,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Copy field from spec.template.spec in src to spec in dest (overwrite when different)", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + 
Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -266,15 +266,15 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": "A-different", }, }, }, - srcSpecPath: "spec.template.spec", - destSpecPath: "spec", + SrcSpecPath: "spec.template.spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -286,8 +286,8 @@ func TestCopySpec(t *testing.T) { }, { name: "Field both in src and dest, overwrite when different and preserve fields", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -308,7 +308,7 @@ func TestCopySpec(t *testing.T) { }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "machineTemplate": map[string]interface{}{ @@ -325,9 +325,9 @@ func TestCopySpec(t *testing.T) { }, }, }, - srcSpecPath: "spec.template.spec", - destSpecPath: "spec", - fieldsToPreserve: []contract.Path{ + SrcSpecPath: "spec.template.spec", + DestSpecPath: "spec", + FieldsToPreserve: []contract.Path{ {"spec", "machineTemplate", "infrastructureRef"}, {"spec", "replicas"}, {"spec", "version"}, @@ -353,23 +353,23 @@ func TestCopySpec(t *testing.T) { }, { name: "Field not in src, no-op", - input: copySpecInput{ - src: &unstructured.Unstructured{ + input: CopySpecInput{ + Src: &unstructured.Unstructured{ Object: map[string]interface{}{ "differentSpec": map[string]interface{}{ "B": "B", }, }, }, - dest: &unstructured.Unstructured{ + Dest: &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "A": "A", }, }, }, - srcSpecPath: "spec", - 
destSpecPath: "spec", + SrcSpecPath: "spec", + DestSpecPath: "spec", }, want: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -385,14 +385,14 @@ func TestCopySpec(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := copySpec(tt.input) + err := CopySpec(tt.input) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(tt.input.dest).To(BeComparableTo(tt.want)) + g.Expect(tt.input.Dest).To(BeComparableTo(tt.want)) }) } } diff --git a/internal/util/ssa/patch.go b/internal/util/ssa/patch.go index 4ff6dfa84d80..f94f0416518b 100644 --- a/internal/util/ssa/patch.go +++ b/internal/util/ssa/patch.go @@ -35,6 +35,14 @@ type Option interface { ApplyToOptions(*Options) } +// WithDryRun enables the DryRunAll option. +type WithDryRun struct{} + +// ApplyToOptions applies WithDryRun to the given Options. +func (w WithDryRun) ApplyToOptions(in *Options) { + in.WithDryRun = true +} + // WithCachingProxy enables caching for the patch request. // The original and modified object will be used to generate an // identifier for the request. @@ -53,6 +61,7 @@ func (w WithCachingProxy) ApplyToOptions(in *Options) { // Options contains the options for the Patch func. type Options struct { + WithDryRun bool WithCachingProxy bool Cache Cache Original client.Object @@ -103,6 +112,9 @@ func Patch(ctx context.Context, c client.Client, fieldManager string, modified c client.ForceOwnership, client.FieldOwner(fieldManager), } + if options.WithDryRun { + applyOptions = append(applyOptions, client.DryRunAll) + } // Note: Intentionally not including the name of the object in the error message // as during create the name might be random generated in every reconcile. // If these errors are written to conditions this would lead to an infinite reconcile. 
diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go index 092d2fcd9041..0419df80694a 100644 --- a/test/extension/handlers/topologymutation/handler_integration_test.go +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -421,6 +421,10 @@ type injectRuntimeClient struct { runtimeExtension TopologyMutationHook } +func (i *injectRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object) ([]string, error) { + panic("implement me") +} + func (i injectRuntimeClient) CallExtension(ctx context.Context, hook runtimecatalog.Hook, _ metav1.Object, _ string, req runtimehooksv1.RequestObject, resp runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { // Note: We have to copy the requests. Otherwise we could get side effect by Runtime Extensions // modifying the request instead of properly returning a response. Also after Unmarshal, From 99ea82dab076dcda0a19332b8fdf4cb716663af9 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Mon, 20 Oct 2025 12:12:12 +0200 Subject: [PATCH 2/2] Fix review findings --- .../kubeadm/internal/controllers/inplace.go | 5 +++-- .../controllers/inplace_canupdatemachine.go | 5 ++++- .../inplace_canupdatemachine_test.go | 10 ++++++++++ .../kubeadm/internal/controllers/scale.go | 4 ++++ exp/runtime/client/client.go | 8 ++++---- .../external/external_patch_generator_test.go | 8 ++++---- internal/runtime/client/client.go | 20 ++++++++++++++----- internal/runtime/client/client_test.go | 10 ++++++++++ internal/runtime/client/fake/fake_client.go | 8 ++++---- internal/util/ssa/patch.go | 3 ++- .../handler_integration_test.go | 6 +++--- 11 files changed, 63 insertions(+), 24 deletions(-) diff --git a/controlplane/kubeadm/internal/controllers/inplace.go b/controlplane/kubeadm/internal/controllers/inplace.go index e25b5e84dcdb..ac4409668b8c 100644 --- 
a/controlplane/kubeadm/internal/controllers/inplace.go +++ b/controlplane/kubeadm/internal/controllers/inplace.go @@ -38,9 +38,10 @@ func (r *KubeadmControlPlaneReconciler) tryInPlaceUpdate( // Run preflight checks to ensure that the control plane is stable before proceeding with in-place update operation. if resultForAllMachines := r.preflightChecks(ctx, controlPlane); !resultForAllMachines.IsZero() { - // We should not block a scale down of an unhealthy Machine that would work. + // If the control plane is not stable, check if the issues are only for machineToInPlaceUpdate. if result := r.preflightChecks(ctx, controlPlane, machineToInPlaceUpdate); result.IsZero() { - // Fallback to scale down. + // The issues are only for machineToInPlaceUpdate, fallback to scale down. + // Note: The consequence of this is that a Machine with issues is scaled down and not in-place updated. return true, ctrl.Result{}, nil } diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go index d8c0aaabaa75..246d63435eb7 100644 --- a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go @@ -74,13 +74,16 @@ func (r *KubeadmControlPlaneReconciler) canUpdateMachine(ctx context.Context, ma if len(extensionHandlers) == 0 { return false, nil } + if len(extensionHandlers) > 1 { + return false, errors.Errorf("found multiple CanUpdateMachine hooks (%s) (more than one is not supported yet)", strings.Join(extensionHandlers, ",")) + } canUpdateMachine, reasons, err := r.canExtensionsUpdateMachine(ctx, machine, machineUpToDateResult, extensionHandlers) if err != nil { return false, err } if !canUpdateMachine { - log.Info(fmt.Sprintf("Machine cannot be updated in-place: %s", strings.Join(reasons, ",")), "Machine", klog.KObj(machine)) + log.Info(fmt.Sprintf("Machine cannot be updated in-place by extensions: %s", 
strings.Join(reasons, ",")), "Machine", klog.KObj(machine)) return false, nil } return true, nil diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go index 4becff527bd0..52fe84b37e86 100644 --- a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go @@ -95,6 +95,16 @@ func Test_canUpdateMachine(t *testing.T) { getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{}, wantCanUpdateMachine: false, }, + { + name: "Return error if more than one CanUpdateMachine extensions registered", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension-1", "test-update-extension-2"}, + }, + wantError: true, + wantErrorMessage: "found multiple CanUpdateMachine hooks (test-update-extension-1,test-update-extension-2) (more than one is not supported yet)", + }, { name: "Return false if canExtensionsUpdateMachine returns false", enableInPlaceUpdatesFeatureGate: true, diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 515e9c979ed9..90af3f894a07 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -163,6 +163,10 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con return r.overridePreflightChecksFunc(ctx, controlPlane, excludeFor...) } + // Reset PreflightCheckResults in case this function is called multiple times (e.g. for in-place update code paths) + // Note: The PreflightCheckResults field is only written by this func, so this is safe. 
+ controlPlane.PreflightCheckResults = internal.PreflightCheckResults{} + log := ctrl.LoggerFrom(ctx) // If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet, diff --git a/exp/runtime/client/client.go b/exp/runtime/client/client.go index 211a0313396e..714817f5f648 100644 --- a/exp/runtime/client/client.go +++ b/exp/runtime/client/client.go @@ -20,7 +20,7 @@ package client import ( "context" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" @@ -87,11 +87,11 @@ type Client interface { Unregister(extensionConfig *runtimev1.ExtensionConfig) error // GetAllExtensions gets all the ExtensionHandlers registered for the hook. - GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object) ([]string, error) + GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject client.Object) ([]string, error) // CallAllExtensions calls all the ExtensionHandler registered for the hook. - CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error + CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject client.Object, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error // CallExtension calls the ExtensionHandler with the given name. 
- CallExtension(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, name string, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, opts ...CallExtensionOption) error + CallExtension(ctx context.Context, hook runtimecatalog.Hook, forObject client.Object, name string, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, opts ...CallExtensionOption) error } diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go index 48702956035d..b20946ed8275 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go @@ -21,8 +21,8 @@ import ( "testing" . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" + "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" @@ -124,15 +124,15 @@ func (f *fakeRuntimeClient) Unregister(_ *runtimev1.ExtensionConfig) error { panic("implement me") } -func (f *fakeRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object) ([]string, error) { +func (f *fakeRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ client.Object) ([]string, error) { panic("implement me") } -func (f *fakeRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { +func (f *fakeRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ client.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { panic("implement me") } -func (f *fakeRuntimeClient) 
CallExtension(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object, _ string, request runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { +func (f *fakeRuntimeClient) CallExtension(_ context.Context, _ runtimecatalog.Hook, _ client.Object, _ string, request runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { // Keep a copy of the request object. // We keep a copy because the request is modified after the call is made. So we keep a copy to perform assertions. f.callExtensionRequest = request.DeepCopyObject().(runtimehooksv1.RequestObject) diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go index be7bb57bd1a9..9550bb1eb7fe 100644 --- a/internal/runtime/client/client.go +++ b/internal/runtime/client/client.go @@ -41,9 +41,11 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/transport" + "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" @@ -172,7 +174,7 @@ func (c *client) Unregister(extensionConfig *runtimev1.ExtensionConfig) error { return nil } -func (c *client) GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object) ([]string, error) { +func (c *client) GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject ctrlclient.Object) ([]string, error) { hookName := runtimecatalog.HookName(hook) log := ctrl.LoggerFrom(ctx).WithValues("hook", hookName) ctx = ctrl.LoggerInto(ctx, log) @@ -180,13 +182,17 @@ func (c *client) GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, if err != nil { return nil, errors.Wrapf(err, "failed to get extension 
handlers for hook %q: failed to compute GroupVersionHook", hookName) } + forObjectGVK, err := apiutil.GVKForObject(forObject, c.client.Scheme()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get extension handlers for hook %q: failed to get GroupVersionKind for the object the hook is executed for", hookName) + } registrations, err := c.registry.List(gvh.GroupHook()) if err != nil { return nil, errors.Wrapf(err, "failed to get extension handlers for hook %q", gvh.GroupHook()) } - log.V(4).Info(fmt.Sprintf("Getting all extensions of hook %q", hookName)) + log.V(4).Info(fmt.Sprintf("Getting all extensions of hook %q for %s %s", hookName, forObjectGVK.Kind, klog.KObj(forObject))) matchingRegistrations := []string{} for _, registration := range registrations { // Compute whether the object the get is being made for matches the namespaceSelector @@ -210,7 +216,7 @@ func (c *client) GetAllExtensions(ctx context.Context, hook runtimecatalog.Hook, // This ensures we don't end up waiting for timeout from multiple unreachable Extensions. // See CallExtension for more details on when an ExtensionHandler returns an error. // The aggregated result of the ExtensionHandlers is updated into the response object passed to the function. 
-func (c *client) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error { +func (c *client) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject ctrlclient.Object, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error { hookName := runtimecatalog.HookName(hook) log := ctrl.LoggerFrom(ctx).WithValues("hook", hookName) ctx = ctrl.LoggerInto(ctx, log) @@ -218,6 +224,10 @@ func (c *client) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook if err != nil { return errors.Wrapf(err, "failed to call extension handlers for hook %q: failed to compute GroupVersionHook", hookName) } + forObjectGVK, err := apiutil.GVKForObject(forObject, c.client.Scheme()) + if err != nil { + return errors.Wrapf(err, "failed to call extension handlers for hook %q: failed to get GroupVersionKind for the object the hook is executed for", hookName) + } // Make sure the request is compatible with the hook. if err := c.catalog.ValidateRequest(gvh, request); err != nil { return errors.Wrapf(err, "failed to call extension handlers for hook %q: request object is invalid for hook", gvh.GroupHook()) @@ -232,7 +242,7 @@ func (c *client) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook return errors.Wrapf(err, "failed to call extension handlers for hook %q", gvh.GroupHook()) } - log.V(4).Info(fmt.Sprintf("Calling all extensions of hook %q", hookName)) + log.V(4).Info(fmt.Sprintf("Calling all extensions of hook %q for %s %s", hookName, forObjectGVK.Kind, klog.KObj(forObject))) responses := []runtimehooksv1.ResponseObject{} for _, registration := range registrations { // Creates a new instance of the response parameter. @@ -304,7 +314,7 @@ func aggregateSuccessfulResponses(aggregatedResponse runtimehooksv1.ResponseObje // Nb. FailurePolicy does not affect the following kinds of errors: // - Internal errors. 
Examples: hooks is incompatible with ExtensionHandler, ExtensionHandler information is missing. // - Error when ExtensionHandler returns a response with `Status` set to `Failure`. -func (c *client) CallExtension(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, name string, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, opts ...runtimeclient.CallExtensionOption) error { +func (c *client) CallExtension(ctx context.Context, hook runtimecatalog.Hook, forObject ctrlclient.Object, name string, request runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, opts ...runtimeclient.CallExtensionOption) error { // Calculate the options. options := &runtimeclient.CallExtensionOptions{} for _, opt := range opts { diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index ccd269723b6d..9947cb775c92 100644 --- a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -1148,10 +1148,15 @@ func TestClient_GetAllExtensions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + scheme := runtime.NewScheme() + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + cat := runtimecatalog.New() _ = fakev1alpha1.AddToCatalog(cat) _ = fakev1alpha2.AddToCatalog(cat) fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). WithObjects(ns, nsDifferent). Build() c := New(Options{ @@ -1346,10 +1351,15 @@ func TestClient_CallAllExtensions(t *testing.T) { } } + scheme := runtime.NewScheme() + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + cat := runtimecatalog.New() _ = fakev1alpha1.AddToCatalog(cat) _ = fakev1alpha2.AddToCatalog(cat) fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). WithObjects(ns). 
Build() c := New(Options{ diff --git a/internal/runtime/client/fake/fake_client.go b/internal/runtime/client/fake/fake_client.go index 1565931c1bad..9b9159a49c1c 100644 --- a/internal/runtime/client/fake/fake_client.go +++ b/internal/runtime/client/fake/fake_client.go @@ -22,7 +22,7 @@ import ( "fmt" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" @@ -118,7 +118,7 @@ type RuntimeClient struct { } // GetAllExtensions implements Client. -func (fc *RuntimeClient) GetAllExtensions(_ context.Context, hook runtimecatalog.Hook, _ metav1.Object) ([]string, error) { +func (fc *RuntimeClient) GetAllExtensions(_ context.Context, hook runtimecatalog.Hook, _ client.Object) ([]string, error) { gvh, err := fc.catalog.GroupVersionHook(hook) if err != nil { return nil, errors.Wrap(err, "failed to compute GVH") @@ -127,7 +127,7 @@ func (fc *RuntimeClient) GetAllExtensions(_ context.Context, hook runtimecatalog } // CallAllExtensions implements Client. -func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, _ metav1.Object, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error { +func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, _ client.Object, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject) error { defer func() { fc.callAllTracker[runtimecatalog.HookName(hook)]++ }() @@ -161,7 +161,7 @@ func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecata } // CallExtension implements Client. 
-func (fc *RuntimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hook, _ metav1.Object, name string, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { +func (fc *RuntimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hook, _ client.Object, name string, req runtimehooksv1.RequestObject, response runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { if fc.callValidations != nil { if err := fc.callValidations(name, req); err != nil { return err diff --git a/internal/util/ssa/patch.go b/internal/util/ssa/patch.go index f94f0416518b..dad0ab7b7f06 100644 --- a/internal/util/ssa/patch.go +++ b/internal/util/ssa/patch.go @@ -130,7 +130,8 @@ func Patch(ctx context.Context, c client.Client, fieldManager string, modified c // Recover gvk e.g. for logging. modified.GetObjectKind().SetGroupVersionKind(gvk) - if options.WithCachingProxy { + // Add the request to the cache only if dry-run was not used. + if options.WithCachingProxy && !options.WithDryRun { // If the SSA call did not update the object, add the request to the cache. 
if options.Original.GetResourceVersion() == modifiedUnstructured.GetResourceVersion() { options.Cache.Add(requestIdentifier) diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go index 0419df80694a..ef8098b14778 100644 --- a/test/extension/handlers/topologymutation/handler_integration_test.go +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -421,11 +421,11 @@ type injectRuntimeClient struct { runtimeExtension TopologyMutationHook } -func (i *injectRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object) ([]string, error) { +func (i *injectRuntimeClient) GetAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ client.Object) ([]string, error) { panic("implement me") } -func (i injectRuntimeClient) CallExtension(ctx context.Context, hook runtimecatalog.Hook, _ metav1.Object, _ string, req runtimehooksv1.RequestObject, resp runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { +func (i injectRuntimeClient) CallExtension(ctx context.Context, hook runtimecatalog.Hook, _ client.Object, _ string, req runtimehooksv1.RequestObject, resp runtimehooksv1.ResponseObject, _ ...runtimeclient.CallExtensionOption) error { // Note: We have to copy the requests. Otherwise we could get side effect by Runtime Extensions // modifying the request instead of properly returning a response. Also after Unmarshal, // only the Raw fields in runtime.RawExtension fields should be filled out and Object should be nil. 
@@ -500,6 +500,6 @@ func (i injectRuntimeClient) Unregister(_ *runtimev1.ExtensionConfig) error { panic("implement me") } -func (i injectRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { +func (i injectRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ client.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { panic("implement me") }