diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index fb66f1b1453d..26d264f0995f 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -797,13 +797,6 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro
             continue
         }
 
-        // Cleanup managed fields of all Machines.
-        // We do this so that Machines that were created/patched before the controller adopted Server-Side-Apply (SSA)
-        // (< v1.4.0) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-        // "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations.
-        if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, m, kcpManagerName); err != nil {
-            return errors.Wrapf(err, "failed to update Machine: failed to adjust the managedFields of the Machine %s", klog.KObj(m))
-        }
         // Update Machine to propagate in-place mutable fields from KCP.
         updatedMachine, err := r.updateMachine(ctx, m, controlPlane.KCP, controlPlane.Cluster)
         if err != nil {
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index 20f6f760168b..2be1a060fe67 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -1803,7 +1803,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
             InfrastructureRef: *infraMachineRef,
             Version:           "v1.25.3",
             FailureDomain:     fd,
-            ProviderID:        "provider-id",
+            // ProviderID is intentionally not set here, this field is set by the Machine controller.
             Deletion: clusterv1.MachineDeletionSpec{
                 NodeDrainTimeoutSeconds:        duration5s,
                 NodeVolumeDetachTimeoutSeconds: duration5s,
@@ -1811,8 +1811,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
             },
         },
     }
-    // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
-    g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner("manager"))).To(Succeed())
+    g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(kcpManagerName))).To(Succeed())
 
     // Existing machine that is in deleting state
     deletingMachine := &clusterv1.Machine{
@@ -1845,7 +1844,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
             ReadinessGates: mandatoryMachineReadinessGates,
         },
     }
-    g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
+    g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(kcpManagerName))).To(Succeed())
     // Delete the machine to put it in the deleting state
     g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
     // Wait till the machine is marked for deletion
@@ -2094,13 +2093,13 @@
     g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
 
     // Verify ManagedFields
-    g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+    g.Expect(updatedDeletingMachine.ManagedFields).Should(
         ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
-        "deleting machine should not contain an entry for SSA manager",
+        "deleting machine should contain an entry for SSA manager",
     )
-    g.Expect(updatedDeletingMachine.ManagedFields).Should(
-        ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
-        "in-place mutable machine should still contain an entry for old manager",
+    g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+        ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
+        "deleting machine should not contain an entry for old manager",
     )
 
     // Verify the machine labels and annotations are unchanged.
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go
index 9e12bcd7ac85..679294153355 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller.go
@@ -286,19 +286,6 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope) error {
         }
     }
 
-    // Loop over all MachineSets and cleanup managed fields.
-    // We do this so that MachineSets that were created/patched before (< v1.4.0) the controller adopted
-    // Server-Side-Apply (SSA) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-    // "capi-machinedeployment" and then we would not be able to e.g. drop labels and annotations.
-    // Note: We are cleaning up managed fields for all MachineSets, so we're able to remove this code in a few
-    // Cluster API releases. If we do this only for selected MachineSets, we would have to keep this code forever.
-    for idx := range s.machineSets {
-        machineSet := s.machineSets[idx]
-        if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, machineSet, machineDeploymentManagerName); err != nil {
-            return errors.Wrapf(err, "failed to clean up managedFields of MachineSet %s", klog.KObj(machineSet))
-        }
-    }
-
     templateExists := s.infrastructureTemplateExists && (!md.Spec.Template.Spec.Bootstrap.ConfigRef.IsDefined() || s.bootstrapTemplateExists)
 
     if ptr.Deref(md.Spec.Paused, false) {
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
index 8b2b26e09a09..7631075c898a 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
@@ -33,7 +33,6 @@ import (
 
     clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
     "sigs.k8s.io/cluster-api/controllers/external"
-    "sigs.k8s.io/cluster-api/internal/util/ssa"
     "sigs.k8s.io/cluster-api/util"
     v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
     "sigs.k8s.io/cluster-api/util/patch"
@@ -500,196 +499,6 @@ func TestMachineDeploymentReconciler(t *testing.T) {
     })
 }
 
-func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testing.T) {
-    setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
-        t.Helper()
-
-        t.Log("Creating the namespace")
-        ns, err := env.CreateNamespace(ctx, machineDeploymentNamespace)
-        g.Expect(err).ToNot(HaveOccurred())
-
-        t.Log("Creating the Cluster")
-        cluster := &clusterv1.Cluster{
-            ObjectMeta: metav1.ObjectMeta{
-                Namespace: ns.Name,
-                Name:      "test-cluster",
-            },
-            Spec: clusterv1.ClusterSpec{
-                ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
-                    APIGroup: builder.ControlPlaneGroupVersion.Group,
-                    Kind:     builder.GenericControlPlaneKind,
-                    Name:     "cp1",
-                },
-            },
-        }
-        g.Expect(env.Create(ctx, cluster)).To(Succeed())
-
-        t.Log("Creating the Cluster Kubeconfig Secret")
-        g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
-
-        return ns, cluster
-    }
-
-    teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace, cluster *clusterv1.Cluster) {
-        t.Helper()
-
-        t.Log("Deleting the Cluster")
-        g.Expect(env.Delete(ctx, cluster)).To(Succeed())
-        t.Log("Deleting the namespace")
-        g.Expect(env.Delete(ctx, ns)).To(Succeed())
-    }
-
-    g := NewWithT(t)
-    namespace, testCluster := setup(t, g)
-    defer teardown(t, g, namespace, testCluster)
-
-    labels := map[string]string{
-        "foo":                      "bar",
-        clusterv1.ClusterNameLabel: testCluster.Name,
-    }
-    version := "v1.10.3"
-    deployment := &clusterv1.MachineDeployment{
-        ObjectMeta: metav1.ObjectMeta{
-            GenerateName: "md-",
-            Namespace:    namespace.Name,
-            Labels: map[string]string{
-                clusterv1.ClusterNameLabel: testCluster.Name,
-            },
-        },
-        Spec: clusterv1.MachineDeploymentSpec{
-            Paused:      ptr.To(true), // Set this to true as we do not want to test the other parts of the reconciler in this test.
-            ClusterName: testCluster.Name,
-            Replicas:    ptr.To[int32](2),
-            Selector: metav1.LabelSelector{
-                // We're using the same labels for spec.selector and spec.template.labels.
-                MatchLabels: labels,
-            },
-            Rollout: clusterv1.MachineDeploymentRolloutSpec{
-                Strategy: clusterv1.MachineDeploymentRolloutStrategy{
-                    Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
-                    RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{
-                        MaxUnavailable: intOrStrPtr(0),
-                        MaxSurge:       intOrStrPtr(1),
-                    },
-                },
-            },
-            Deletion: clusterv1.MachineDeploymentDeletionSpec{
-                Order: clusterv1.OldestMachineSetDeletionOrder,
-            },
-            Template: clusterv1.MachineTemplateSpec{
-                ObjectMeta: clusterv1.ObjectMeta{
-                    Labels: labels,
-                },
-                Spec: clusterv1.MachineSpec{
-                    ClusterName: testCluster.Name,
-                    Version:     version,
-                    InfrastructureRef: clusterv1.ContractVersionedObjectReference{
-                        APIGroup: clusterv1.GroupVersionInfrastructure.Group,
-                        Kind:     "GenericInfrastructureMachineTemplate",
-                        Name:     "md-template",
-                    },
-                    Bootstrap: clusterv1.Bootstrap{
-                        DataSecretName: ptr.To("data-secret-name"),
-                    },
-                },
-            },
-        },
-    }
-    msListOpts := []client.ListOption{
-        client.InNamespace(namespace.Name),
-        client.MatchingLabels(labels),
-    }
-
-    // Create infrastructure template resource.
-    infraResource := map[string]interface{}{
-        "kind":       "GenericInfrastructureMachine",
-        "apiVersion": clusterv1.GroupVersionInfrastructure.String(),
-        "metadata":   map[string]interface{}{},
-        "spec": map[string]interface{}{
-            "size": "3xlarge",
-        },
-    }
-    infraTmpl := &unstructured.Unstructured{
-        Object: map[string]interface{}{
-            "kind":       "GenericInfrastructureMachineTemplate",
-            "apiVersion": clusterv1.GroupVersionInfrastructure.String(),
-            "metadata": map[string]interface{}{
-                "name":      "md-template",
-                "namespace": namespace.Name,
-            },
-            "spec": map[string]interface{}{
-                "template": infraResource,
-            },
-        },
-    }
-    t.Log("Creating the infrastructure template")
-    g.Expect(env.Create(ctx, infraTmpl)).To(Succeed())
-
-    // Create the MachineDeployment object and expect Reconcile to be called.
-    t.Log("Creating the MachineDeployment")
-    g.Expect(env.Create(ctx, deployment)).To(Succeed())
-
-    // Create a MachineSet for the MachineDeployment.
-    classicManagerMS := &clusterv1.MachineSet{
-        TypeMeta: metav1.TypeMeta{
-            Kind:       "MachineSet",
-            APIVersion: clusterv1.GroupVersion.String(),
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      deployment.Name + "-" + "classic-ms",
-            Namespace: testCluster.Namespace,
-            Labels:    labels,
-        },
-        Spec: clusterv1.MachineSetSpec{
-            ClusterName: testCluster.Name,
-            Replicas:    ptr.To[int32](0),
-            Selector: metav1.LabelSelector{
-                MatchLabels: labels,
-            },
-            Template: clusterv1.MachineTemplateSpec{
-                ObjectMeta: clusterv1.ObjectMeta{
-                    Labels: labels,
-                },
-                Spec: clusterv1.MachineSpec{
-                    ClusterName: testCluster.Name,
-                    InfrastructureRef: clusterv1.ContractVersionedObjectReference{
-                        APIGroup: clusterv1.GroupVersionInfrastructure.Group,
-                        Kind:     "GenericInfrastructureMachineTemplate",
-                        Name:     "md-template",
-                    },
-                    Bootstrap: clusterv1.Bootstrap{
-                        DataSecretName: ptr.To("data-secret-name"),
-                    },
-                    Version: version,
-                },
-            },
-        },
-    }
-    ssaManagerMS := classicManagerMS.DeepCopy()
-    ssaManagerMS.Name = deployment.Name + "-" + "ssa-ms"
-
-    // Create one using the "old manager".
-    g.Expect(env.Create(ctx, classicManagerMS, client.FieldOwner("manager"))).To(Succeed())
-
-    // Create one using SSA.
-    g.Expect(env.Patch(ctx, ssaManagerMS, client.Apply, client.FieldOwner(machineDeploymentManagerName), client.ForceOwnership)).To(Succeed())
-
-    // Verify that for both the MachineSets the ManagedFields are updated.
-    g.Eventually(func(g Gomega) {
-        machineSets := &clusterv1.MachineSetList{}
-        g.Expect(env.List(ctx, machineSets, msListOpts...)).To(Succeed())
-
-        g.Expect(machineSets.Items).To(HaveLen(2))
-        for _, ms := range machineSets.Items {
-            // Verify the ManagedFields are updated.
-            g.Expect(ms.GetManagedFields()).Should(
-                ContainElement(ssa.MatchManagedFieldsEntry(machineDeploymentManagerName, metav1.ManagedFieldsOperationApply)))
-            g.Expect(ms.GetManagedFields()).ShouldNot(
-                ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)))
-        }
-    }).Should(Succeed())
-}
-
 func TestMachineSetToDeployments(t *testing.T) {
     g := NewWithT(t)
 
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index c39bf6537a2c..b8415a610814 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -544,14 +544,6 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e
             }
         }
 
-        // Cleanup managed fields of all Machines.
-        // We do this so that Machines that were created/patched before the controller adopted Server-Side-Apply (SSA)
-        // (< v1.4.0) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-        // "capi-machineset" and then we would not be able to e.g. drop labels and annotations.
-        if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, m, machineSetManagerName); err != nil {
-            return ctrl.Result{}, errors.Wrapf(err, "failed to update machine: failed to adjust the managedFields of the Machine %q", m.Name)
-        }
-
         // Update Machine to propagate in-place mutable fields from the MachineSet.
         updatedMachine, err := r.computeDesiredMachine(machineSet, m)
         if err != nil {
diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go
index 5990445b666e..89e6f9554b66 100644
--- a/internal/controllers/machineset/machineset_controller_test.go
+++ b/internal/controllers/machineset/machineset_controller_test.go
@@ -1244,7 +1244,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) {
             Kind:       "Machine",
         },
         ObjectMeta: metav1.ObjectMeta{
-            UID:       "abc-123-uid",
             Name:      "in-place-mutating-machine",
             Namespace: namespace.Name,
             Labels: map[string]string{
@@ -1274,7 +1273,7 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) {
             },
         },
     }
-    g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner(classicManager))).To(Succeed())
+    g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed())
 
     deletingMachine := &clusterv1.Machine{
         TypeMeta: metav1.TypeMeta{
@@ -1282,7 +1281,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) {
             Kind:       "Machine",
         },
         ObjectMeta: metav1.ObjectMeta{
-            UID:       "abc-123-uid",
             Name:      "deleting-machine",
             Namespace: namespace.Name,
             Labels:    map[string]string{},
@@ -1301,7 +1299,7 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) {
             },
         },
     }
-    g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
+    g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed())
     // Delete the machine to put it in the deleting state
     g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
     // Wait till the machine is marked for deletion
@@ -1476,13 +1474,13 @@
     g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
 
     // Verify ManagedFields
-    g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+    g.Expect(updatedDeletingMachine.ManagedFields).Should(
         ContainElement(ssa.MatchManagedFieldsEntry(machineSetManagerName, metav1.ManagedFieldsOperationApply)),
-        "deleting machine should not contain an entry for SSA manager",
+        "deleting machine should contain an entry for SSA manager",
     )
-    g.Expect(updatedDeletingMachine.ManagedFields).Should(
-        ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
-        "in-place mutable machine should still contain an entry for old manager",
+    g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+        ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
+        "deleting machine should not contain an entry for old manager",
     )
 
     // Verify in-place mutable fields are still the same.
diff --git a/internal/util/ssa/managedfields.go b/internal/util/ssa/managedfields.go
index 7db03983a006..d3725e53894b 100644
--- a/internal/util/ssa/managedfields.go
+++ b/internal/util/ssa/managedfields.go
@@ -23,9 +23,7 @@ import (
 
     "github.com/pkg/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
-    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 
     "sigs.k8s.io/cluster-api/internal/contract"
 )
@@ -95,79 +93,6 @@ func DropManagedFields(ctx context.Context, c client.Client, obj client.Object,
     return c.Patch(ctx, obj, client.MergeFrom(base))
 }
 
-// CleanUpManagedFieldsForSSAAdoption deletes the managedFields entries on the object that belong to "manager" (Operation=Update)
-// if there is no field yet that is managed by `ssaManager`.
-// It adds an "empty" entry in managedFields of the object if no field is currently managed by `ssaManager`.
-//
-// In previous versions of Cluster API (< v1.4.0) we were writing objects with Create and Patch which resulted in fields
-// being owned by the "manager". After switching to Server-Side-Apply (SSA), fields will be owned by `ssaManager`.
-//
-// If we want to be able to drop fields that were previously owned by the "manager" we have to ensure that
-// fields are not co-owned by "manager" and `ssaManager`. Otherwise, when we drop the fields with SSA
-// (i.e. `ssaManager`) the fields would remain as they are still owned by "manager".
-// The following code will do a one-time update on the managed fields to drop all entries for "manager".
-// We won't do this on subsequent reconciles. This case will be identified by checking if `ssaManager` owns any fields.
-// Dropping all existing "manager" entries (which could also be from other controllers) is safe, as we assume that if
-// other controllers are still writing fields on the object they will just do it again and thus gain ownership again.
-func CleanUpManagedFieldsForSSAAdoption(ctx context.Context, c client.Client, obj client.Object, ssaManager string) error {
-    // Return if `ssaManager` already owns any fields.
-    if hasFieldsManagedBy(obj, ssaManager) {
-        return nil
-    }
-
-    // Since there is no field managed by `ssaManager` it means that
-    // this is the first time this object is being processed after the controller calling this function
-    // started to use SSA patches.
-    // It is required to clean-up managedFields from entries created by the regular patches.
-    // This will ensure that `ssaManager` will be able to modify the fields that
-    // were originally owned by "manager".
-    base := obj.DeepCopyObject().(client.Object)
-
-    // Remove managedFieldEntry for manager=manager and operation=update to prevent having two managers holding values.
-    originalManagedFields := obj.GetManagedFields()
-    managedFields := make([]metav1.ManagedFieldsEntry, 0, len(originalManagedFields))
-    for i := range originalManagedFields {
-        if originalManagedFields[i].Manager == classicManager &&
-            originalManagedFields[i].Operation == metav1.ManagedFieldsOperationUpdate {
-            continue
-        }
-        managedFields = append(managedFields, originalManagedFields[i])
-    }
-
-    // Add a seeding managedFieldEntry for SSA executed by the management controller, to prevent SSA to create/infer
-    // a default managedFieldEntry when the first SSA is applied.
-    // More specifically, if an existing object doesn't have managedFields when applying the first SSA the API server
-    // creates an entry with operation=Update (kind of guessing where the object comes from), but this entry ends up
-    // acting as a co-ownership and we want to prevent this.
-    // NOTE: fieldV1Map cannot be empty, so we add metadata.name which will be cleaned up at the first SSA patch of the same fieldManager.
-    fieldV1Map := map[string]interface{}{
-        "f:metadata": map[string]interface{}{
-            "f:name": map[string]interface{}{},
-        },
-    }
-    fieldV1, err := json.Marshal(fieldV1Map)
-    if err != nil {
-        return errors.Wrap(err, "failed to create seeding fieldV1Map for cleaning up legacy managed fields")
-    }
-    now := metav1.Now()
-    gvk, err := apiutil.GVKForObject(obj, c.Scheme())
-    if err != nil {
-        return errors.Wrapf(err, "failed to get GroupVersionKind of object %s", klog.KObj(obj))
-    }
-    managedFields = append(managedFields, metav1.ManagedFieldsEntry{
-        Manager:    ssaManager,
-        Operation:  metav1.ManagedFieldsOperationApply,
-        APIVersion: gvk.GroupVersion().String(),
-        Time:       &now,
-        FieldsType: "FieldsV1",
-        FieldsV1:   &metav1.FieldsV1{Raw: fieldV1},
-    })
-
-    obj.SetManagedFields(managedFields)
-
-    return c.Patch(ctx, obj, client.MergeFrom(base))
-}
-
 // hasFieldsManagedBy returns true if any of the fields in obj are managed by manager.
 func hasFieldsManagedBy(obj client.Object, manager string) bool {
     managedFields := obj.GetManagedFields()
diff --git a/internal/util/ssa/managedfields_test.go b/internal/util/ssa/managedfields_test.go
index cb90f9e0fa57..24ea8eabad62 100644
--- a/internal/util/ssa/managedfields_test.go
+++ b/internal/util/ssa/managedfields_test.go
@@ -267,247 +267,3 @@ func TestDropManagedFieldsWithFakeClient(t *testing.T) {
         })
     }
 }
-
-func TestCleanUpManagedFieldsForSSAAdoption(t *testing.T) {
-    ctx := context.Background()
-
-    ssaManager := "ssa-manager"
-
-    type managedFields struct {
-        Manager   string
-        Operation metav1.ManagedFieldsOperationType
-    }
-    tests := []struct {
-        name              string
-        createManager     string
-        updateManager     string
-        wantManagedFields []managedFields
-    }{
-        {
-            name:          "should add an entry for ssaManager if it does not have one",
-            createManager: "unknown-manager",
-            wantManagedFields: []managedFields{
-                {
-                    Manager:   "unknown-manager",
-                    Operation: metav1.ManagedFieldsOperationUpdate,
-                },
-                {
-                    Manager:   ssaManager,
-                    Operation: metav1.ManagedFieldsOperationApply,
-                },
-            },
-        },
-        {
-            name:          "should add an entry for ssaManager and drop entry for classic manager if it exists",
-            createManager: classicManager,
-            wantManagedFields: []managedFields{
-                {
-                    Manager:   ssaManager,
-                    Operation: metav1.ManagedFieldsOperationApply,
-                },
-            },
-        },
-        {
-            name:          "should keep the entry for ssa-manager if it already has one (no-op)",
-            createManager: ssaManager,
-            wantManagedFields: []managedFields{
-                {
-                    Manager:   ssaManager,
-                    Operation: metav1.ManagedFieldsOperationApply,
-                },
-            },
-        },
-        {
-            name:          "should keep the entry with ssa-manager if it already has one - should not drop other manager entries (no-op)",
-            createManager: classicManager,
-            updateManager: ssaManager,
-            wantManagedFields: []managedFields{
-                {
-                    Manager:   classicManager,
-                    Operation: metav1.ManagedFieldsOperationUpdate,
-                },
-                {
-                    Manager:   ssaManager,
-                    Operation: metav1.ManagedFieldsOperationApply,
-                },
-            },
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            g := NewWithT(t)
-
-            createCM := &corev1.ConfigMap{
-                // Have to set TypeMeta explicitly when using SSA with typed objects.
-                TypeMeta: metav1.TypeMeta{
-                    APIVersion: "v1",
-                    Kind:       "ConfigMap",
-                },
-                ObjectMeta: metav1.ObjectMeta{
-                    Name:      "cm-1",
-                    Namespace: "default",
-                },
-                Data: map[string]string{
-                    "test-key": "test-value",
-                },
-            }
-            updateCM := createCM.DeepCopy()
-            updateCM.Data["test-key-update"] = "test-value-update"
-
-            if tt.createManager == ssaManager {
-                // If createManager is ssaManager, use SSA
-                g.Expect(env.Client.Patch(ctx, createCM, client.Apply, client.FieldOwner(tt.createManager))).To(Succeed())
-            } else {
-                // Otherwise use regular Create
-                g.Expect(env.Client.Create(ctx, createCM, client.FieldOwner(tt.createManager))).To(Succeed())
-            }
-            t.Cleanup(func() {
-                g.Expect(env.CleanupAndWait(ctx, createCM)).To(Succeed())
-            })
-
-            if tt.updateManager != "" {
-                // If updateManager is set, update the object with SSA
-                g.Expect(env.Client.Patch(ctx, updateCM, client.Apply, client.FieldOwner(tt.updateManager))).To(Succeed())
-            }
-
-            // Validate object has exactly the field manager we expect.
-            gotObj := createCM.DeepCopyObject().(client.Object)
-            g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(createCM), gotObj)).To(Succeed())
-            expectedManager := []string{tt.createManager}
-            if tt.updateManager != "" {
-                expectedManager = append(expectedManager, tt.updateManager)
-            }
-            var gotManager []string
-            for _, managedFields := range gotObj.GetManagedFields() {
-                gotManager = append(gotManager, managedFields.Manager)
-            }
-            g.Expect(gotManager).To(ConsistOf(expectedManager))
-
-            // Cleanup managed fields.
-            g.Expect(CleanUpManagedFieldsForSSAAdoption(ctx, env.Client, gotObj, ssaManager)).Should(Succeed())
-
-            // Validate object has been cleaned up correctly
-            gotObj = createCM.DeepCopyObject().(client.Object)
-            g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(createCM), gotObj)).To(Succeed())
-            g.Expect(gotObj.GetManagedFields()).To(HaveLen(len(tt.wantManagedFields)))
-            for _, mf := range tt.wantManagedFields {
-                g.Expect(gotObj.GetManagedFields()).Should(
-                    ContainElement(MatchManagedFieldsEntry(mf.Manager, mf.Operation)))
-            }
-        })
-    }
-}
-
-func TestCleanUpManagedFieldsForSSAAdoptionWithFakeClient(t *testing.T) {
-    ctx := context.Background()
-
-    ssaManager := "ssa-manager"
-
-    objWithoutAnyManager := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "cm-1",
-            Namespace: "default",
-        },
-        Data: map[string]string{
-            "test-key": "test-value",
-        },
-    }
-    objWithOnlyClassicManager := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "cm-1",
-            Namespace: "default",
-            ManagedFields: []metav1.ManagedFieldsEntry{{
-                Manager:    classicManager,
-                Operation:  metav1.ManagedFieldsOperationUpdate,
-                FieldsType: "FieldsV1",
-                APIVersion: "v1",
-            }},
-        },
-        Data: map[string]string{
-            "test-key": "test-value",
-        },
-    }
-    objWithOnlySSAManager := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "cm-1",
-            Namespace: "default",
-            ManagedFields: []metav1.ManagedFieldsEntry{{
-                Manager:    ssaManager,
-                Operation:  metav1.ManagedFieldsOperationApply,
-                FieldsType: "FieldsV1",
-                APIVersion: "v1",
-            }},
-        },
-        Data: map[string]string{
-            "test-key": "test-value",
-        },
-    }
-    objWithClassicManagerAndSSAManager := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "cm-1",
-            Namespace: "default",
-            ManagedFields: []metav1.ManagedFieldsEntry{
-                {
-                    Manager:    classicManager,
-                    Operation:  metav1.ManagedFieldsOperationUpdate,
-                    FieldsType: "FieldsV1",
-                    APIVersion: "v1",
-                },
-                {
-                    Manager:    ssaManager,
-                    Operation:  metav1.ManagedFieldsOperationApply,
-                    FieldsType: "FieldsV1",
-                    APIVersion: "v1",
-                },
-            },
-        },
-        Data: map[string]string{
-            "test-key": "test-value",
-        },
-    }
-
-    tests := []struct {
-        name                       string
-        obj                        client.Object
-        wantEntryForClassicManager bool
-    }{
-        {
-            name:                       "should add an entry for ssaManager if it does not have one",
-            obj:                        objWithoutAnyManager,
-            wantEntryForClassicManager: false,
-        },
-        {
-            name:                       "should add an entry for ssaManager and drop entry for classic manager if it exists",
-            obj:                        objWithOnlyClassicManager,
-            wantEntryForClassicManager: false,
-        },
-        {
-            name:                       "should keep the entry for ssa-manager if it already has one (no-op)",
-            obj:                        objWithOnlySSAManager,
-            wantEntryForClassicManager: false,
-        },
-        {
-            name:                       "should keep the entry with ssa-manager if it already has one - should not drop other manager entries (no-op)",
-            obj:                        objWithClassicManagerAndSSAManager,
-            wantEntryForClassicManager: true,
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            g := NewWithT(t)
-            fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).WithReturnManagedFields().Build()
-            g.Expect(CleanUpManagedFieldsForSSAAdoption(ctx, fakeClient, tt.obj, ssaManager)).Should(Succeed())
-            g.Expect(tt.obj.GetManagedFields()).Should(
-                ContainElement(MatchManagedFieldsEntry(ssaManager, metav1.ManagedFieldsOperationApply)))
-            if tt.wantEntryForClassicManager {
-                g.Expect(tt.obj.GetManagedFields()).Should(
-                    ContainElement(MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)))
-            } else {
-                g.Expect(tt.obj.GetManagedFields()).ShouldNot(
-                    ContainElement(MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)))
-            }
-        })
-    }
-}