From a174d0e8dc28e84dfb0a002c1fced81267a231b3 Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Thu, 10 Apr 2025 17:41:23 +0200
Subject: [PATCH 1/5] Move v1beta1 conditions utils to deprecated

---
 .golangci.yml | 4 +-
 .../controllers/kubeadmconfig_controller.go | 54 ++--
 .../kubeadmconfig_controller_test.go | 37 +--
 cmd/clusterctl/client/cluster/mover.go | 5 +-
 cmd/clusterctl/client/cluster/mover_test.go | 12 +-
 cmd/clusterctl/client/tree/tree.go | 4 +-
 cmd/clusterctl/client/tree/tree_test.go | 40 +--
 cmd/clusterctl/client/tree/util.go | 20 +-
 .../remote/cluster_cache_healthcheck_test.go | 4 +-
 controllers/remote/cluster_cache_tracker.go | 5 +-
 .../remote/cluster_cache_tracker_test.go | 4 +-
 .../kubeadm/internal/control_plane.go | 7 +-
 .../kubeadm/internal/control_plane_test.go | 18 +-
 .../internal/controllers/controller.go | 39 +--
 .../internal/controllers/controller_test.go | 38 +--
 .../kubeadm/internal/controllers/helpers.go | 8 +-
 .../internal/controllers/helpers_test.go | 4 +-
 .../internal/controllers/remediation.go | 38 +--
 .../internal/controllers/remediation_test.go | 82 +++---
 .../kubeadm/internal/controllers/scale.go | 7 +-
 .../internal/controllers/scale_test.go | 42 +--
 .../kubeadm/internal/controllers/status.go | 14 +-
 .../internal/controllers/status_test.go | 8 +-
 .../internal/workload_cluster_conditions.go | 79 +++---
 .../workload_cluster_conditions_test.go | 152 +++++-----
 .../controllers/machinepool_controller.go | 9 +-
 .../machinepool_controller_noderef.go | 8 +-
 .../machinepool_controller_phases.go | 18 +-
 .../machinepool_controller_test.go | 48 ++--
 .../controllers/extensionconfig_controller.go | 6 +-
 internal/apis/core/v1alpha3/conversion.go | 6 +-
 .../controllers/cluster/cluster_controller.go | 19 +-
 .../cluster/cluster_controller_phases.go | 26 +-
 .../cluster/cluster_controller_phases_test.go | 8 +-
 .../cluster/cluster_controller_test.go | 10 +-
 .../clusterclass_controller_status.go | 16 +-
 .../clusterresourceset_controller.go | 14 +-
 .../clusterresourceset_controller_test.go | 4 +-
 .../controllers/machine/machine_controller.go | 44 +--
 .../machine/machine_controller_noderef.go | 18 +-
 .../machine/machine_controller_phases.go | 16 +-
 .../machine/machine_controller_status.go | 5 +-
 .../machine/machine_controller_status_test.go | 6 +-
 .../machine/machine_controller_test.go | 72 ++---
 .../machinedeployment_controller.go | 6 +-
 .../machinedeployment_controller_test.go | 4 +-
 .../machinedeployment_sync.go | 12 +-
 .../machinedeployment_sync_test.go | 12 +-
 .../machinehealthcheck_controller.go | 20 +-
 .../machinehealthcheck_controller_test.go | 34 +--
 .../machinehealthcheck_status_matcher_test.go | 4 +-
 .../machinehealthcheck_targets.go | 33 +--
 .../machinehealthcheck_targets_test.go | 8 +-
 .../machineset/machineset_controller.go | 37 +--
 .../machineset/machineset_controller_test.go | 60 ++--
 .../machineset/machineset_delete_policy.go | 7 +-
 .../topology/cluster/cluster_controller.go | 7 +-
 .../cluster/cluster_controller_test.go | 18 +-
 .../topology/cluster/conditions.go | 30 +-
 .../topology/cluster/conditions_test.go | 4 +-
 internal/util/tree/tree_test.go | 36 +--
 internal/webhooks/cluster.go | 7 +-
 internal/webhooks/cluster_test.go | 24 +-
 test/e2e/cluster_upgrade_runtimesdk.go | 19 +-
 test/e2e/cluster_upgrade_test.go | 4 +-
 test/e2e/node_drain.go | 20 +-
 test/framework/machine_helpers.go | 4 +-
 .../dockermachinepool_controller.go | 22 +-
 .../dockermachinepool_controller_phases.go | 8 +-
 .../backends/docker/dockercluster_backend.go | 23 +-
 .../backends/docker/dockermachine_backend.go | 46 +--
 .../inmemory/inmemorymachine_backend.go | 63 +++--
 .../inmemorymachine_controller_test.go | 30 +-
 .../controllers/dockercluster_controller.go | 8 +-
 .../controllers/dockermachine_controller.go | 8 +-
 util/collections/machine_collection.go | 6 +-
 util/collections/machine_filters.go | 14 +-
 util/collections/machine_filters_test.go | 52 ++--
 .../{ => deprecated/v1beta1}/doc.go | 4 +-
 .../{ => deprecated/v1beta1}/getter.go | 2 +-
 .../{ => deprecated/v1beta1}/getter_test.go | 2 +-
 .../{ => deprecated/v1beta1}/matcher.go | 2 +-
 .../{ => deprecated/v1beta1}/matcher_test.go | 2 +-
 .../{ => deprecated/v1beta1}/matchers.go | 2 +-
 .../{ => deprecated/v1beta1}/merge.go | 2 +-
 .../v1beta1}/merge_strategies.go | 2 +-
 .../v1beta1}/merge_strategies_test.go | 2 +-
 .../{ => deprecated/v1beta1}/merge_test.go | 2 +-
 .../{ => deprecated/v1beta1}/patch.go | 2 +-
 .../{ => deprecated/v1beta1}/patch_test.go | 2 +-
 .../{ => deprecated/v1beta1}/setter.go | 2 +-
 .../{ => deprecated/v1beta1}/setter_test.go | 2 +-
 .../{ => deprecated/v1beta1}/suite_test.go | 2 +-
 .../{ => deprecated/v1beta1}/unstructured.go | 2 +-
 .../v1beta1}/unstructured_test.go | 2 +-
 util/patch/patch.go | 14 +-
 util/patch/patch_test.go | 266 +++++++++---------
 util/predicates/cluster_predicates.go | 7 +-
 util/predicates/cluster_predicates_test.go | 6 +-
 99 files changed, 1076 insertions(+), 1021 deletions(-)
 rename util/conditions/{ => deprecated/v1beta1}/doc.go (88%)
 rename util/conditions/{ => deprecated/v1beta1}/getter.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/getter_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/matcher.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/matcher_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/matchers.go (98%)
 rename util/conditions/{ => deprecated/v1beta1}/merge.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/merge_strategies.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/merge_strategies_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/merge_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/patch.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/patch_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/setter.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/setter_test.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/suite_test.go (97%)
 rename util/conditions/{ => deprecated/v1beta1}/unstructured.go (99%)
 rename util/conditions/{ => deprecated/v1beta1}/unstructured_test.go (99%)

diff --git a/.golangci.yml b/.golangci.yml
index ba99dc864290..18d31989dab0 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -183,6 +183,8 @@ linters-settings:
       - pkg: sigs.k8s.io/cluster-api/internal/webhooks/runtime
         alias: runtimewebhooks
       # CAPI utils
+      - pkg: sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1
+        alias: v1beta1conditions
       - pkg: sigs.k8s.io/cluster-api/util/conditions/v1beta2
         alias: v1beta2conditions
       - pkg: sigs.k8s.io/cluster-api/internal/topology/names
@@ -386,4 +388,4 @@ issues:
     # Ignore non-constant format string in call to condition utils
     - linters:
         - govet
-      text: "non-constant format string in call to sigs\\.k8s\\.io\\/cluster-api\\/util\\/conditions\\."
+      text: "non-constant format string in call to sigs\\.k8s\\.io\\/cluster-api\\/util\\/conditions\\/deprecated\\/v1beta1\\."
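Every hunk that follows applies the same mechanical change enforced by the importas rule above: the old "sigs.k8s.io/cluster-api/util/conditions" import becomes the deprecated path under the v1beta1conditions alias, and call sites keep the same helper names. A minimal, hypothetical sketch of the before/after shape at a caller (the package and function names here are illustrative and not part of this patch; the identifiers are taken from the hunks below):

// Illustrative sketch only, not part of this patch: the shape of the caller-side
// change repeated throughout the series. Helper names stay the same; only the
// import path and the enforced alias differ.
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

func markControlPlaneInitialized(cluster *clusterv1.Cluster) {
	// Before: conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition),
	// with "sigs.k8s.io/cluster-api/util/conditions" imported under its default name.
	v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition)
}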
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index ef78a06f474c..f3277cd53f85 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -55,7 +55,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" @@ -227,8 +227,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Attempt to Patch the KubeadmConfig object and status after each reconciliation if no error occurs. defer func() { // always update the readyCondition; the summary is represented using the "1 of x completed" notation. - conditions.SetSummary(config, - conditions.WithConditions( + v1beta1conditions.SetSummary(config, + v1beta1conditions.WithConditions( bootstrapv1.DataSecretAvailableCondition, bootstrapv1.CertificatesAvailableCondition, ), @@ -294,7 +294,7 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c // Wait for the infrastructure to be ready. case !cluster.Status.InfrastructureReady: log.Info("Cluster infrastructure is not ready, waiting") - conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -307,7 +307,7 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil): config.Status.Ready = true config.Status.DataSecretName = configOwner.DataSecretName() - conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -324,7 +324,7 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c case config.Status.Ready: // Based on existing code paths status.Ready is only true if status.dataSecretName is set // So we can assume that the DataSecret is available. - conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -355,7 +355,8 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c } // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
- if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return r.handleClusterNotInitialized(ctx, scope) } @@ -479,8 +480,9 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // initialize the DataSecretAvailableCondition if missing. // this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing // using the DataSecretGeneratedFailedReason - if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -589,7 +591,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex util.ObjectKey(scope.Cluster)) } if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, @@ -599,7 +601,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex return ctrl.Result{}, err } - conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -613,7 +615,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -625,7 +627,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex users, err := r.resolveUsers(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + 
v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -688,7 +690,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) util.ObjectKey(scope.Cluster), ) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, @@ -698,7 +700,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) return ctrl.Result{}, err } if err := certificates.EnsureAllExist(); err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, @@ -707,7 +709,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) }) return ctrl.Result{}, err } - conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -755,7 +757,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -767,7 +769,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) users, err := r.resolveUsers(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -780,7 +782,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope 
*Scope) if discoveryFile := scope.Config.Spec.JoinConfiguration.Discovery.File; discoveryFile != nil && discoveryFile.KubeConfig != nil { kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -853,7 +855,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S util.ObjectKey(scope.Cluster), ) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, @@ -863,7 +865,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S return ctrl.Result{}, err } if err := certificates.EnsureAllExist(); err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, @@ -873,7 +875,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S return ctrl.Result{}, err } - conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -908,7 +910,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -920,7 +922,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S users, err := r.resolveUsers(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + 
v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -933,7 +935,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S if discoveryFile := scope.Config.Spec.JoinConfiguration.Discovery.File; discoveryFile != nil && discoveryFile.KubeConfig != nil { kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -1415,7 +1417,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope } scope.Config.Status.DataSecretName = ptr.To(secret.Name) scope.Config.Status.Ready = true - conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) v1beta2conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 30b094829037..bd0cdb0d9255 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" @@ -489,7 +489,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "validhost", Port: 6443} cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, configName) @@ -552,7 +552,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC // TODO: extract this kind of code into a setup function that puts the state of objects into an initialized controlplane (implies secrets exist) cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, 
clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, "control-plane-init-cfg") @@ -598,7 +598,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, "control-plane-init-cfg") addKubeadmConfigToMachine(controlPlaneInitConfig, controlPlaneInitMachine) @@ -637,14 +637,15 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: workerJoinConfig.Namespace, Name: workerJoinConfig.Name}, actualConfig)).To(Succeed()) // At this point the DataSecretAvailableCondition should not be set. CertificatesAvailableCondition should be true. - g.Expect(conditions.Get(actualConfig, bootstrapv1.DataSecretAvailableCondition)).To(BeNil()) + // TODO (v1beta2): test for v1beta2 conditions + g.Expect(v1beta1conditions.Get(actualConfig, bootstrapv1.DataSecretAvailableCondition)).To(BeNil()) assertHasTrueCondition(g, myclient, request, bootstrapv1.CertificatesAvailableCondition) } func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsReady(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} useCases := []struct { @@ -731,7 +732,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} useCases := []struct { @@ -843,7 +844,7 @@ func TestBootstrapDataFormat(t *testing.T) { cluster.Status.InfrastructureReady = true cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} if tc.clusterInitialized { - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) } var machine *clusterv1.Machine @@ -934,7 +935,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine 
:= newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1006,7 +1007,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1257,7 +1258,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1449,7 +1450,7 @@ func TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1522,7 +1523,7 @@ func TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1915,7 +1916,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques // Setup work for an initialized cluster clusterName := "my-cluster" cluster := builder.Cluster(metav1.NamespaceDefault, clusterName).Build() - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Status.InfrastructureReady = true cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: "example.com", @@ -2725,7 +2726,8 @@ func assertHasFalseCondition(g *WithT, myclient client.Client, req ctrl.Request, configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(ctx, configKey, config)).To(Succeed()) - c := conditions.Get(config, t) + // TODO (v1beta2): test for v1beta2 conditions + c := v1beta1conditions.Get(config, t) g.Expect(c).ToNot(BeNil()) g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) g.Expect(c.Severity).To(Equal(s)) @@ -2741,7 +2743,8 @@ func assertHasTrueCondition(g *WithT, myclient client.Client, req ctrl.Request, } configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(ctx, configKey, config)).To(Succeed()) - c := conditions.Get(config, t) + // TODO (v1beta2): test for v1beta2 conditions + c := v1beta1conditions.Get(config, t) g.Expect(c).ToNot(BeNil()) 
g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } @@ -2750,7 +2753,7 @@ func TestKubeadmConfigReconciler_Reconcile_v1beta2_conditions(t *testing.T) { // Setup work for an initialized cluster clusterName := "my-cluster" cluster := builder.Cluster(metav1.NamespaceDefault, clusterName).Build() - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Status.InfrastructureReady = true cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: "example.com", diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 36f53a14fa93..f6a654fbf156 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -40,7 +40,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/yaml" ) @@ -246,7 +246,8 @@ func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *obj } // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. - if !conditions.IsTrue(clusterObj, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(clusterObj, clusterv1.ControlPlaneInitializedCondition) { errList = append(errList, errors.Errorf("cannot start the move operation while the control plane for %q %s/%s is not yet initialized", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())) continue } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 5bcdf8ee8796..14e96b9ee298 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -42,7 +42,7 @@ import ( clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test/providers/infrastructure" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) type moveTestsFields struct { @@ -1486,7 +1486,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), }, }, }, @@ -1535,7 +1535,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.FalseCondition(clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityInfo, ""), + *v1beta1conditions.FalseCondition(clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityInfo, ""), }, }, }, @@ -1566,7 +1566,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - 
*conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), }, }, }, @@ -1596,7 +1596,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), }, }, }, @@ -1646,7 +1646,7 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), }, }, }, diff --git a/cmd/clusterctl/client/tree/tree.go b/cmd/clusterctl/client/tree/tree.go index 3c746e2d2371..f0eed2699020 100644 --- a/cmd/clusterctl/client/tree/tree.go +++ b/cmd/clusterctl/client/tree/tree.go @@ -411,7 +411,7 @@ func createGroupNode(sibling client.Object, siblingReady *clusterv1.Condition, o if objReady != nil { objReady.LastTransitionTime = minLastTransitionTime(objReady, siblingReady) objReady.Message = "" - setReadyCondition(groupNode, objReady) + setReadyV1Beta1Condition(groupNode, objReady) } return groupNode } @@ -487,7 +487,7 @@ func updateGroupNode(groupObj client.Object, groupReady *clusterv1.Condition, ob if groupReady != nil { groupReady.LastTransitionTime = minLastTransitionTime(objReady, groupReady) groupReady.Message = "" - setReadyCondition(groupObj, groupReady) + setReadyV1Beta1Condition(groupObj, groupReady) } } diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index 32af74333a38..6c8ad30915eb 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -247,10 +247,10 @@ func Test_hasSameAvailableReadyUptoDateStatusAndReason(t *testing.T) { } func Test_hasSameReadyStatusSeverityAndReason(t *testing.T) { - readyTrue := conditions.TrueCondition(clusterv1.ReadyCondition) - readyFalseReasonInfo := conditions.FalseCondition(clusterv1.ReadyCondition, "Reason", clusterv1.ConditionSeverityInfo, "message falseInfo1") - readyFalseAnotherReasonInfo := conditions.FalseCondition(clusterv1.ReadyCondition, "AnotherReason", clusterv1.ConditionSeverityInfo, "message falseInfo1") - readyFalseReasonWarning := conditions.FalseCondition(clusterv1.ReadyCondition, "Reason", clusterv1.ConditionSeverityWarning, "message falseInfo1") + readyTrue := v1beta1conditions.TrueCondition(clusterv1.ReadyCondition) + readyFalseReasonInfo := v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "Reason", clusterv1.ConditionSeverityInfo, "message falseInfo1") + readyFalseAnotherReasonInfo := v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "AnotherReason", clusterv1.ConditionSeverityInfo, "message falseInfo1") + readyFalseReasonWarning := v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "Reason", clusterv1.ConditionSeverityWarning, "message falseInfo1") type args struct 
{ a *clusterv1.Condition @@ -1083,7 +1083,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { func Test_Add_NoEcho(t *testing.T) { parent := fakeCluster("parent", - withClusterCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withClusterV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ) type args struct { @@ -1102,7 +1102,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: nil, obj: fakeMachine("my-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: true, @@ -1113,7 +1113,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: false, @@ -1124,7 +1124,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(conditions.FalseCondition(clusterv1.ReadyCondition, "", clusterv1.ConditionSeverityInfo, "")), + withMachineCondition(v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "", clusterv1.ConditionSeverityInfo, "")), ), }, wantNode: true, @@ -1135,7 +1135,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{Echo: true}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: true, @@ -1304,11 +1304,11 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: fakeMachine("second-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1320,14 +1320,14 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), fakeMachine("second-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: fakeMachine("third-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1339,10 +1339,10 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), fakeMachine("second-machine", - withMachineCondition(conditions.TrueCondition(clusterv1.ReadyCondition)), + 
withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: VirtualObject("ns", "NotAMachine", "other-object"), @@ -1414,9 +1414,9 @@ func withClusterAnnotation(name, value string) func(*clusterv1.Cluster) { } } -func withClusterCondition(c *clusterv1.Condition) func(*clusterv1.Cluster) { +func withClusterV1Beta1Condition(c *clusterv1.Condition) func(*clusterv1.Cluster) { return func(m *clusterv1.Cluster) { - conditions.Set(m, c) + v1beta1conditions.Set(m, c) } } @@ -1447,7 +1447,7 @@ func fakeMachine(name string, options ...machineOption) *clusterv1.Machine { func withMachineCondition(c *clusterv1.Condition) func(*clusterv1.Machine) { return func(m *clusterv1.Machine) { - conditions.Set(m, c) + v1beta1conditions.Set(m, c) } } diff --git a/cmd/clusterctl/client/tree/util.go b/cmd/clusterctl/client/tree/util.go index 451c469e6e89..3a9058a61ad0 100644 --- a/cmd/clusterctl/client/tree/util.go +++ b/cmd/clusterctl/client/tree/util.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -84,7 +84,7 @@ func GetReadyCondition(obj client.Object) *clusterv1.Condition { if getter == nil { return nil } - return conditions.Get(getter, clusterv1.ReadyCondition) + return v1beta1conditions.Get(getter, clusterv1.ReadyCondition) } // GetAllV1Beta2Conditions returns the other conditions (all the conditions except ready) for an object, if defined. @@ -140,16 +140,16 @@ func setUpToDateV1Beta2Condition(obj client.Object, upToDate *metav1.Condition) } } -func setReadyCondition(obj client.Object, ready *clusterv1.Condition) { +func setReadyV1Beta1Condition(obj client.Object, ready *clusterv1.Condition) { setter := objToSetter(obj) if setter == nil { return } - conditions.Set(setter, ready) + v1beta1conditions.Set(setter, ready) } -func objToGetter(obj client.Object) conditions.Getter { - if getter, ok := obj.(conditions.Getter); ok { +func objToGetter(obj client.Object) v1beta1conditions.Getter { + if getter, ok := obj.(v1beta1conditions.Getter); ok { return getter } @@ -157,12 +157,12 @@ func objToGetter(obj client.Object) conditions.Getter { if !ok { return nil } - getter := conditions.UnstructuredGetter(objUnstructured) + getter := v1beta1conditions.UnstructuredGetter(objUnstructured) return getter } -func objToSetter(obj client.Object) conditions.Setter { - if setter, ok := obj.(conditions.Setter); ok { +func objToSetter(obj client.Object) v1beta1conditions.Setter { + if setter, ok := obj.(v1beta1conditions.Setter); ok { return setter } @@ -170,7 +170,7 @@ func objToSetter(obj client.Object) conditions.Setter { if !ok { return nil } - setter := conditions.UnstructuredSetter(objUnstructured) + setter := v1beta1conditions.UnstructuredSetter(objUnstructured) return setter } diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index 14ed0960f6ec..9feae7611c04 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -39,7 +39,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestClusterCacheHealthCheck(t 
*testing.T) { @@ -98,7 +98,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { }, } g.Expect(env.CreateAndWait(ctx, testCluster)).To(Succeed()) - conditions.MarkTrue(testCluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(testCluster, clusterv1.ControlPlaneInitializedCondition) testCluster.Status.InfrastructureReady = true g.Expect(k8sClient.Status().Update(ctx, testCluster)).To(Succeed()) diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index 37aeb6bd1025..094e3c12491e 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -50,7 +50,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/certs" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) const ( @@ -699,7 +699,8 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health return false, nil } - if !cluster.Status.InfrastructureReady || !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !cluster.Status.InfrastructureReady || !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { // If the infrastructure or control plane aren't marked as ready, we should requeue and wait. return false, nil } diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index eb5aff958bde..fb9dd72627dd 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -37,7 +37,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func mapper(_ context.Context, i client.Object) []reconcile.Request { @@ -114,7 +114,7 @@ func TestClusterCacheTracker(t *testing.T) { }, } g.Expect(k8sClient.Create(ctx, clusterA)).To(Succeed()) - conditions.MarkTrue(clusterA, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(clusterA, clusterv1.ControlPlaneInitializedCondition) clusterA.Status.InfrastructureReady = true g.Expect(k8sClient.Status().Update(ctx, clusterA)).To(Succeed()) diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 60915172327b..ed5ab5d45845 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -35,7 +35,7 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/failuredomains" "sigs.k8s.io/cluster-api/util/patch" ) @@ -423,11 +423,12 @@ func (c *ControlPlane) StatusToLogKeyAndValues(newMachine, deletedMachine *clust notes = append(notes, "marked for remediation") } + // TODO (v1beta2): test for v1beta2 conditions for _, condition := range controlPlaneMachineHealthConditions { - if conditions.IsUnknown(m, condition) { + if v1beta1conditions.IsUnknown(m, condition) { notes = append(notes, strings.Replace(string(condition), "Healthy", " health unknown", -1)) } - if conditions.IsFalse(m, 
condition) { + if v1beta1conditions.IsFalse(m, condition) { notes = append(notes, strings.Replace(string(condition), "Healthy", " not healthy", -1)) } } diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index d255806b8111..e5b852b172bd 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -30,7 +30,7 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestControlPlane(t *testing.T) { @@ -151,14 +151,14 @@ func TestHasMachinesToBeRemediated(t *testing.T) { healthyMachineNotProvisioned := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine1"}} // healthy machine (with MachineHealthCheckSucceded == true) healthyMachineProvisioned := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine2"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node1"}}} - conditions.MarkTrue(healthyMachineProvisioned, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkTrue(healthyMachineProvisioned, clusterv1.MachineHealthCheckSucceededCondition) // unhealthy machine NOT eligible for KCP remediation (with MachineHealthCheckSucceded == False, but without MachineOwnerRemediated condition) unhealthyMachineNOTOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineNOTOwnerRemediated"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node2"}}} - conditions.MarkFalse(unhealthyMachineNOTOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + v1beta1conditions.MarkFalse(unhealthyMachineNOTOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") // unhealthy machine eligible for KCP remediation (with MachineHealthCheckSucceded == False, with MachineOwnerRemediated condition) unhealthyMachineOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineOwnerRemediated"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node3"}}} - conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") - conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + v1beta1conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + v1beta1conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") t.Run("One unhealthy machine to be remediated by KCP", func(t *testing.T) { c := ControlPlane{ @@ -219,14 +219,14 @@ func 
TestHasHealthyMachineStillProvisioning(t *testing.T) { // unhealthy machine (with MachineHealthCheckSucceded condition) still provisioning (without NodeRef) unhealthyMachineStillProvisioning1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineStillProvisioning1"}} - conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") - conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + v1beta1conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + v1beta1conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") // unhealthy machine (with MachineHealthCheckSucceded condition) provisioned (with NodeRef) unhealthyMachineProvisioned1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineProvisioned1"}} unhealthyMachineProvisioned1.Status.NodeRef = &corev1.ObjectReference{} - conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") - conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + v1beta1conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + v1beta1conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") t.Run("Healthy machine still provisioning", func(t *testing.T) { c := ControlPlane{ diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index d7725a8f9c51..8d430e34036a 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -51,7 +51,7 @@ import ( "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" @@ -250,7 +250,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. // status without waiting for a full resync (by default 10 minutes). // Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled. // The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive. 
- if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) { res = ctrl.Result{RequeueAfter: 20 * time.Second} } } @@ -312,8 +313,8 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *controlplanev1.KubeadmControlPlane, options ...patch.Option) error { // Always update the readyCondition by summarizing the state of other conditions. - conditions.SetSummary(kcp, - conditions.WithConditions( + v1beta1conditions.SetSummary(kcp, + v1beta1conditions.WithConditions( controlplanev1.MachinesCreatedCondition, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.ResizedCondition, @@ -427,7 +428,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. - conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), conditions.AddSourceRef()) + v1beta1conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), v1beta1conditions.AddSourceRef()) // Updates conditions reporting the status of static pods and the status of the etcd cluster. // NOTE: Conditions reporting KCP operation progress like e.g. Resized or SpecUpToDate are inlined with the rest of the execution. @@ -462,14 +463,14 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", machine, strings.Join(messages, ","))) } log.Info(fmt.Sprintf("Rolling out Control Plane machines: %s", strings.Join(allMessages, ",")), "machinesNeedingRollout", machinesNeedingRollout.Names()) - conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout)) + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout)) return r.upgradeControlPlane(ctx, controlPlane, machinesNeedingRollout) default: // make sure last upgrade operation is marked as completed. // NOTE: we are checking the condition already exists in order to avoid to set this condition at the first // reconciliation/before a rolling upgrade actually starts. 
- if conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) { - conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) + if v1beta1conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) { + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) } } @@ -482,7 +483,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl case numMachines < desiredReplicas && numMachines == 0: // Create new Machine w/ init log.Info("Initializing control plane", "desired", desiredReplicas, "existing", numMachines) - conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") return r.initializeControlPlane(ctx, controlPlane) // We are scaling up case numMachines < desiredReplicas && numMachines > 0: @@ -548,7 +549,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context certificates := secret.NewCertificatesForInitialControlPlane(config.ClusterConfiguration) controllerRef := metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) if err := certificates.LookupOrGenerateCached(ctx, r.SecretCachingClient, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { - conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition, @@ -570,7 +571,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context return errors.Wrap(err, "error in ensuring cluster certificates ownership") } - conditions.MarkTrue(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition) v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition, @@ -606,7 +607,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. // However, during delete we are hiding the counter (1 of x) because it does not make sense given that // all the machines are deleted in parallel. - conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), conditions.AddSourceRef()) + v1beta1conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), v1beta1conditions.AddSourceRef()) // Gets all machines, not just control plane machines. 
allMachines, err := r.managementCluster.GetMachinesForCluster(ctx, controlPlane.Cluster) @@ -629,7 +630,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con // Verify that only control plane machines remain if len(allMachines) != len(controlPlane.Machines) || len(allMachinePools.Items) != 0 { log.Info("Waiting for worker nodes to be deleted first") - conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") controlPlane.DeletingReason = controlplanev1.KubeadmControlPlaneDeletingWaitingForWorkersDeletionV1Beta2Reason names := objectsPendingDeleteNames(allMachines, allMachinePools, controlPlane.Cluster) @@ -683,7 +684,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con log.Info("Waiting for control plane Machines to not exist anymore") - conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") message := "" if len(machines) > 0 { @@ -892,8 +893,8 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneAndMachinesConditio // Note: The Machine controller uses the ControlPlaneInitialized condition on the Cluster instead for // the same check. We don't use the ControlPlaneInitialized condition from the Cluster here because KCP // Reconcile does (currently) not get triggered from condition changes to the Cluster object. - // TODO: Once we moved to v1beta2 conditions we should use the `Initialized` condition instead. - controlPlaneInitialized := conditions.Get(controlPlane.KCP, controlplanev1.AvailableCondition) + // TODO (v1beta2): Once we moved to v1beta2 conditions we should use the `Initialized` condition instead. + controlPlaneInitialized := v1beta1conditions.Get(controlPlane.KCP, controlplanev1.AvailableCondition) if !controlPlane.KCP.Status.Initialized || controlPlaneInitialized == nil || controlPlaneInitialized.Status != corev1.ConditionTrue { // Overwrite conditions to InspectionFailed. @@ -1101,7 +1102,8 @@ func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context // Potential inconsistencies between the list of members and the list of machines/nodes are // surfaced using the EtcdClusterHealthyCondition; if this condition is true, meaning no inconsistencies exists, return early. - if conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) { return nil } @@ -1172,7 +1174,8 @@ func (r *KubeadmControlPlaneReconciler) reconcilePreTerminateHook(ctx context.Co } // Return early because the Machine controller is not yet waiting for the pre-terminate hook. 
- c := conditions.Get(deletingMachine, clusterv1.PreTerminateDeleteHookSucceededCondition) + // TODO (v1beta2): test for v1beta2 conditions + c := v1beta1conditions.Get(deletingMachine, clusterv1.PreTerminateDeleteHookSucceededCondition) if c == nil || c.Status != corev1.ConditionFalse || c.Reason != clusterv1.WaitingExternalHookReason { return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 5630aaf3757f..1ab570f1ee02 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -62,7 +62,7 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" @@ -1382,7 +1382,7 @@ kubernetesVersion: metav1.16.1 g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) - g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) g.Expect(v1beta2conditions.IsFalse(kcp, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition)).To(BeTrue()) s, err := secret.GetFromNamespacedName(ctx, env, client.ObjectKey{Namespace: cluster.Namespace, Name: "foo"}, secret.ClusterCA) @@ -1623,7 +1623,7 @@ kubernetesVersion: metav1.16.1`, g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) - g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) // Verify that the kubeconfig is using the custom CA kBytes, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster)) @@ -2160,7 +2160,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition KCP: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKCP.DeepCopy() kcp.Status.Initialized = false - conditions.MarkFalse(kcp, controlplanev1.AvailableCondition, "", clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(kcp, controlplanev1.AvailableCondition, "", clusterv1.ConditionSeverityError, "") v1beta2conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -2965,7 +2965,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - conditions.MarkTrue(m, clusterv1.PreTerminateDeleteHookSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.PreTerminateDeleteHookSucceededCondition) return m }(), }, @@ -2987,7 +2987,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, "some-other-reason", 
clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, "some-other-reason", clusterv1.ConditionSeverityInfo, "some message") return m }(), }, @@ -3011,7 +3011,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { machine.Name: machine, // Leadership will be forwarded to this Machine. deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") return m }(), }, @@ -3037,13 +3037,13 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() m.DeletionTimestamp.Time = m.DeletionTimestamp.Add(-1 * time.Duration(1) * time.Second) // Make sure this (the oldest) Machine is selected to run the pre-terminate hook. - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") return m }(), deletingMachineWithKCPPreTerminateHook.Name + "-2": func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() m.Name += "-2" - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") return m }(), }, @@ -3068,7 +3068,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") return m }(), }, @@ -3102,7 +3102,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { machine.Name: machine, deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "some message") return m }(), }, @@ -3846,8 +3846,9 @@ func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *contr } func setKCPHealthy(kcp *controlplanev1.KubeadmControlPlane) { - conditions.MarkTrue(kcp, 
controlplanev1.ControlPlaneComponentsHealthyCondition)
- conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthyCondition)
+ // TODO (v1beta2): use v1beta2 conditions
+ v1beta1conditions.MarkTrue(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)
+ v1beta1conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthyCondition)
}
func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ready bool) (*clusterv1.Machine, *corev1.Node) {
@@ -3911,11 +3912,12 @@ func setMachineHealthy(m *clusterv1.Machine) {
Kind: "Node",
Name: "node-1",
}
- conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyCondition)
- conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyCondition)
- conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyCondition)
- conditions.MarkTrue(m, controlplanev1.MachineEtcdPodHealthyCondition)
- conditions.MarkTrue(m, controlplanev1.MachineEtcdMemberHealthyCondition)
+ // TODO (v1beta2): use v1beta2 conditions
+ v1beta1conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyCondition)
+ v1beta1conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyCondition)
+ v1beta1conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyCondition)
+ v1beta1conditions.MarkTrue(m, controlplanev1.MachineEtcdPodHealthyCondition)
+ v1beta1conditions.MarkTrue(m, controlplanev1.MachineEtcdMemberHealthyCondition)
}
// newCluster return a CAPI cluster object.
diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go
index fde7392a114b..421d50aeb720 100644
--- a/controlplane/kubeadm/internal/controllers/helpers.go
+++ b/controlplane/kubeadm/internal/controllers/helpers.go
@@ -41,7 +41,7 @@ import (
"sigs.k8s.io/cluster-api/internal/util/ssa"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/certs"
- "sigs.k8s.io/cluster-api/util/conditions"
+ v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
"sigs.k8s.io/cluster-api/util/kubeconfig"
"sigs.k8s.io/cluster-api/util/patch"
@@ -217,7 +217,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte
})
if err != nil {
// Safe to return early here since no resources have been created yet.
- conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.InfrastructureTemplateCloningFailedReason, + v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.InfrastructureTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) return nil, errors.Wrap(err, "failed to clone infrastructure template") } @@ -226,7 +226,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte // Clone the bootstrap configuration bootstrapRef, err := r.generateKubeadmConfig(ctx, kcp, cluster, bootstrapSpec, machine.Name) if err != nil { - conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.BootstrapTemplateCloningFailedReason, + v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.BootstrapTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) errs = append(errs, errors.Wrap(err, "failed to generate bootstrap config")) } @@ -236,7 +236,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte machine.Spec.Bootstrap.ConfigRef = bootstrapRef if err := r.createMachine(ctx, kcp, machine); err != nil { - conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.MachineGenerationFailedReason, + v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.MachineGenerationFailedReason, clusterv1.ConditionSeverityError, err.Error()) errs = append(errs, errors.Wrap(err, "failed to create Machine")) } diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 48390ab339c8..c3e570ab861e 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" ) @@ -466,7 +466,7 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { kcp.Spec.MachineTemplate.InfrastructureRef.Name = "something_invalid" _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, nil) g.Expect(err).To(HaveOccurred()) - g.Expect(&kcp.GetConditions()[0]).Should(conditions.HaveSameStateOf(&clusterv1.Condition{ + g.Expect(&kcp.GetConditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ Type: controlplanev1.MachinesCreatedCondition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityError, diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index e230bb8ab895..100ff4170f7b 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -56,7 +56,7 @@ func (r 
*KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C continue } - shouldCleanup := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) + shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) shouldCleanupV1Beta2 := v1beta2conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && v1beta2conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) if !(shouldCleanup || shouldCleanupV1Beta2) { @@ -70,7 +70,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } if shouldCleanup { - conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) + v1beta1conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) } if shouldCleanupV1Beta2 { @@ -174,7 +174,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if controlPlane.Cluster.Spec.Topology != nil && controlPlane.Cluster.Spec.Topology.Version != controlPlane.KCP.Spec.Version { message := fmt.Sprintf("KubeadmControlPlane can't remediate while waiting for a version upgrade to %s to be propagated from Cluster.spec.topology", controlPlane.Cluster.Spec.Topology.Version) log.Info(fmt.Sprintf("A control plane machine needs remediation, but %s. Skipping remediation", message)) - conditions.MarkFalse(machineToBeRemediated, + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, @@ -211,7 +211,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance. if controlPlane.Machines.Len() <= 1 { log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", controlPlane.Machines.Len()) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -225,7 +225,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST NOT have healthy machines still being provisioned. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasHealthyMachineStillProvisioning() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -239,7 +239,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasDeletingMachine() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -255,7 +255,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if controlPlane.IsEtcdManaged() { canSafelyRemediate, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, machineToBeRemediated) if err != nil { - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -267,7 +267,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } if !canSafelyRemediate { log.Info("A control plane machine needs remediation, but removing this machine could result in etcd quorum loss. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -297,7 +297,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C etcdLeaderCandidate := controlPlane.HealthyMachines().Newest() if etcdLeaderCandidate == nil { log.Info("A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. Skipping remediation") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ @@ -310,7 +310,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } if err := workloadCluster.ForwardEtcdLeadership(ctx, machineToBeRemediated, etcdLeaderCandidate); err != nil { log.Error(err, "Failed to move etcd leadership to candidate machine", "candidate", klog.KObj(etcdLeaderCandidate)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -327,7 +327,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Delete the machine if err := r.Client.Delete(ctx, machineToBeRemediated); err != nil { - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -343,7 +343,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Also, setting DeletionTimestamp doesn't mean the Machine is actually deleted (deletion takes some time). log.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToBeRemediated)...). 
Info("Deleting Machine (remediating unhealthy Machine)") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -448,8 +448,9 @@ func pickMachineToBeRemediated(i, j *clusterv1.Machine, isEtcdManaged bool) bool // pickMachineToBeRemediatedByConditionState returns true if condition t report issue on machine i and not on machine j, // false if the vice-versa apply, or nil if condition t doesn't provide a discriminating criteria for picking one machine or another for remediation. func pickMachineToBeRemediatedByConditionState(i, j *clusterv1.Machine, t clusterv1.ConditionType) *bool { - iCondition := conditions.IsTrue(i, t) - jCondition := conditions.IsTrue(j, t) + // TODO (v1beta2): test for v1beta2 conditions + iCondition := v1beta1conditions.IsTrue(i, t) + jCondition := v1beta1conditions.IsTrue(j, t) if !iCondition && jCondition { return ptr.To(true) @@ -529,7 +530,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin // Check if remediation can happen because retryPeriod is passed. if lastRemediationTime.Add(retryPeriod).After(reconciliationTime) { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed in the latest %s. Skipping remediation", retryPeriod)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -545,7 +546,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin maxRetry := int(*controlPlane.KCP.Spec.RemediationStrategy.MaxRetry) if remediationInProgressData.RetryCount >= maxRetry { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed %d times (MaxRetry %d). 
Skipping remediation", remediationInProgressData.RetryCount, maxRetry)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, @@ -636,7 +637,8 @@ func (r *KubeadmControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co } // Check member health as reported by machine's health conditions - if !conditions.IsTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) { targetUnhealthyMembers++ unhealthyMembers = append(unhealthyMembers, fmt.Sprintf("%s (%s)", etcdMember, machine.Name)) continue diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 02c2b6bc31bd..5f58d19d3116 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -170,7 +170,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { if err := env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { return err } - if c := conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition); c != nil { + if c := v1beta1conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition); c != nil { return errors.Errorf("condition %s still exists", clusterv1.MachineOwnerRemediatedCondition) } if c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition); c != nil { @@ -250,7 +250,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m) @@ -320,7 +320,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(ret.IsZero()).To(BeTrue()) // Remediation skipped g.Expect(err).ToNot(HaveOccurred()) - 
assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -371,7 +371,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -427,7 +427,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -486,7 +486,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") 
assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -539,7 +539,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -585,7 +585,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") + assertMachineV1beta1Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") g.Expect(env.Cleanup(ctx, m)).To(Succeed()) @@ -615,7 +615,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine deletion to complete before triggering remediation") 
g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) @@ -645,7 +645,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) @@ -676,7 +676,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) @@ -719,7 +719,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) @@ -764,7 +764,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - 
assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) @@ -812,7 +812,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -863,7 +863,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -899,7 +899,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(mi.Name)) g.Expect(remediationData.RetryCount).To(Equal(i - 1)) - assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) @@ -955,7 +955,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { 
g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1008,7 +1008,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1061,7 +1061,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1115,7 +1115,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1169,7 +1169,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, 
clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1217,7 +1217,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. Skipping remediation") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this Machine because there is no healthy Machine to forward etcd leadership to") @@ -1268,7 +1268,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1304,7 +1304,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(mi.Name)) g.Expect(remediationData.RetryCount).To(Equal(i - 4)) - assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) @@ -1375,7 +1375,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m1.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m1, 
clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) @@ -1411,7 +1411,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m2.Name)) g.Expect(remediationData.RetryCount).To(Equal(1)) - assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m1) @@ -1487,7 +1487,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m2.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m2) @@ -1524,7 +1524,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m3.Name)) g.Expect(remediationData.RetryCount).To(Equal(1)) - assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m3.Namespace, Name: m3.Name}, m3) @@ -1601,8 +1601,8 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.Machine).To(Equal(m2.Name)) g.Expect(remediationData.RetryCount).To(Equal(0)) - assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, 
clusterv1.ConditionSeverityWarning, "") - assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + assertMachineV1beta1Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, "Waiting for remediation") @@ -2031,8 +2031,8 @@ type machineOption func(*clusterv1.Machine) func withMachineHealthCheckFailed() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkFalse(machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") - conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, @@ -2050,8 +2050,8 @@ func withMachineHealthCheckFailed() machineOption { func withStuckRemediation() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkTrue(machine, clusterv1.MachineHealthCheckSucceededCondition) - conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkTrue(machine, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, @@ -2069,25 +2069,25 @@ func withStuckRemediation() machineOption { func withHealthyEtcdMember() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) + v1beta1conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) } } func withUnhealthyEtcdMember() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "") } } func withHealthyAPIServerPod() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkTrue(machine, 
controlplanev1.MachineAPIServerPodHealthyCondition) + v1beta1conditions.MarkTrue(machine, controlplanev1.MachineAPIServerPodHealthyCondition) } } func withUnhealthyAPIServerPod() machineOption { return func(machine *clusterv1.Machine) { - conditions.MarkFalse(machine, controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "") } } @@ -2169,12 +2169,12 @@ func getDeletingMachine(namespace, name string, options ...machineOption) *clust return m } -func assertMachineCondition(ctx context.Context, g *WithT, m *clusterv1.Machine, t clusterv1.ConditionType, status corev1.ConditionStatus, reason string, severity clusterv1.ConditionSeverity, message string) { +func assertMachineV1beta1Condition(ctx context.Context, g *WithT, m *clusterv1.Machine, t clusterv1.ConditionType, status corev1.ConditionStatus, reason string, severity clusterv1.ConditionSeverity, message string) { g.Eventually(func() error { if err := env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { return err } - c := conditions.Get(m, t) + c := v1beta1conditions.Get(m, t) if c == nil { return errors.Errorf("condition %q was nil", t) } diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 40f742b89a7a..5d8e2cfe8d84 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/version" ) @@ -262,8 +262,9 @@ loopmachines: return ctrl.Result{}, nil } -func preflightCheckCondition(kind string, obj conditions.Getter, condition clusterv1.ConditionType) error { - c := conditions.Get(obj, condition) +func preflightCheckCondition(kind string, obj v1beta1conditions.Getter, condition clusterv1.ConditionType) error { + // TODO (v1beta2): test for v1beta2 conditions + c := v1beta1conditions.Get(obj, condition) if c == nil { return errors.Errorf("%s %s does not have %s condition", kind, obj.GetName(), condition) } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 7cd15736ddec..4b6debc60ab0 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { @@ -613,11 +613,11 @@ func TestPreflightChecks(t *testing.T) { }, Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "fooReason", clusterv1.ConditionSeverityError, ""), - 
*conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "fooReason", clusterv1.ConditionSeverityError, ""), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, }}, }, @@ -643,11 +643,11 @@ func TestPreflightChecks(t *testing.T) { }, Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "fooReason", clusterv1.ConditionSeverityError, ""), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "fooReason", clusterv1.ConditionSeverityError, ""), }, }}, }, @@ -667,8 +667,8 @@ func TestPreflightChecks(t *testing.T) { Status: controlplanev1.KubeadmControlPlaneStatus{ Deprecated: &controlplanev1.KubeadmControlPlaneDeprecatedStatus{V1Beta1: &controlplanev1.KubeadmControlPlaneV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), - *conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), }, }}, }, @@ -682,11 +682,11 @@ func TestPreflightChecks(t *testing.T) { }, Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + 
*v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, }}, }, @@ -744,7 +744,7 @@ func TestPreflightCheckCondition(t *testing.T) { Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.FalseCondition(condition, "fooReason", clusterv1.ConditionSeverityError, ""), + *v1beta1conditions.FalseCondition(condition, "fooReason", clusterv1.ConditionSeverityError, ""), }, }}, }, @@ -757,7 +757,7 @@ func TestPreflightCheckCondition(t *testing.T) { Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.UnknownCondition(condition, "fooReason", ""), + *v1beta1conditions.UnknownCondition(condition, "fooReason", ""), }, }}, }, @@ -770,7 +770,7 @@ func TestPreflightCheckCondition(t *testing.T) { Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(condition), + *v1beta1conditions.TrueCondition(condition), }, }}, }, diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index 68a52fca8104..cbaad73a5830 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -78,24 +78,24 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro switch { // We are scaling up case replicas < desiredReplicas: - conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas) + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas) // We are scaling down case replicas > desiredReplicas: - conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) default: // make sure last resize operation is marked as completed. 
// NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). readyMachines := controlPlane.Machines.Filter(collections.IsReady()) if int32(len(readyMachines)) == replicas { - conditions.MarkTrue(controlPlane.KCP, controlplanev1.ResizedCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.ResizedCondition) } // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) } workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) @@ -112,7 +112,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro // This only gets initialized once and does not change if the kubeadm config map goes away. if status.HasKubeadmConfig { controlPlane.KCP.Status.Initialized = true - conditions.MarkTrue(controlPlane.KCP, controlplanev1.AvailableCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.AvailableCondition) } if controlPlane.KCP.Status.Deprecated.V1Beta1.ReadyReplicas > 0 { diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 6eac17deffe2..4671a5840e00 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -2040,8 +2040,8 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T g.Expect(kcp.Status.Deprecated.V1Beta1.FailureMessage).To(BeNil()) g.Expect(kcp.Status.Deprecated.V1Beta1.FailureReason).To(BeEquivalentTo("")) g.Expect(kcp.Status.Initialized).To(BeTrue()) - g.Expect(conditions.IsTrue(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) - g.Expect(conditions.IsTrue(kcp, controlplanev1.MachinesCreatedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(kcp, controlplanev1.MachinesCreatedCondition)).To(BeTrue()) g.Expect(kcp.Status.Ready).To(BeTrue()) } @@ -2200,7 +2200,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.Ready).To(BeFalse()) - g.Expect(conditions.IsTrue(kcp, controlplanev1.MachinesCreatedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(kcp, controlplanev1.MachinesCreatedCondition)).To(BeTrue()) } func kubeadmConfigMap() *corev1.ConfigMap { diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 7137f1928886..0c047604ab64 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -38,7 +38,7 
@@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -56,7 +56,7 @@ func (w *Workload) UpdateEtcdConditions(ctx context.Context, controlPlane *Contr func (w *Workload) updateExternalEtcdConditions(_ context.Context, controlPlane *ControlPlane) { // When KCP is not responsible for external etcd, we are reporting only health at KCP level. - conditions.MarkTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) // Note: KCP is going to stop setting the `EtcdClusterHealthy` condition to true in case of external etcd. // This will allow tools managing the external etcd instance to use the `EtcdClusterHealthy` to report back status into @@ -72,7 +72,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane controlPlaneNodes, err := w.getControlPlaneNodes(ctx) if err != nil { for _, m := range controlPlane.Machines { - conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member") + v1beta1conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member") v1beta2conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -82,7 +82,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane }) } - conditions.MarkUnknown(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members") + v1beta1conditions.MarkUnknown(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members") v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, @@ -115,7 +115,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane // Update etcd member healthy conditions for machines being deleted (machines where we cannot rely on the status of kubelet/etcd member). for _, machine := range controlPlane.Machines.Filter(collections.HasDeletionTimestamp) { - conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -137,7 +137,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane // If not found, report the issue on the machine. 
member := etcdutil.MemberForName(currentMembers, machine.Status.NodeRef.Name) if member == nil { - conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports the cluster is composed by members %s, but the member hosted on this Machine is not included", etcdutil.MemberNames(currentMembers)) + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports the cluster is composed by members %s, but the member hosted on this Machine is not included", etcdutil.MemberNames(currentMembers)) v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -163,7 +163,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane } } if len(alarmList) > 0 { - conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", strings.Join(alarmList, ", ")) + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", strings.Join(alarmList, ", ")) v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -175,7 +175,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane } // Otherwise consider the member healthy - conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) + v1beta1conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -261,7 +261,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, nodeNames) if err != nil { for _, m := range machines { - conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: %s", err) + v1beta1conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: %s", err) v1beta2conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -277,7 +277,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines // While creating a new client, forFirstAvailableNode also reads the status for the endpoint we are connected to; check if the endpoint has errors. 
if len(etcdClient.Errors) > 0 { for _, m := range machines { - conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint %s reports errors: %s", etcdClient.Endpoint, strings.Join(etcdClient.Errors, ", ")) + v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint %s reports errors: %s", etcdClient.Endpoint, strings.Join(etcdClient.Errors, ", ")) v1beta2conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -293,7 +293,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines currentMembers, err := etcdClient.Members(ctx) if err != nil { for _, m := range machines { - conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members") + v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members") v1beta2conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -309,7 +309,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines alarms, err := etcdClient.Alarms(ctx) if err != nil { for _, m := range machines { - conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd alarms") + v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd alarms") v1beta2conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -345,7 +345,8 @@ func getNodeNamesSortedByLastKnownEtcdHealth(nodes *corev1.NodeList, machines co } eligibleNodes.Insert(node.Name) - if c := conditions.Get(machine, controlplanev1.MachineEtcdMemberHealthyCondition); c != nil { + // TODO (v1beta2): test for v1beta2 conditions + if c := v1beta1conditions.Get(machine, controlplanev1.MachineEtcdMemberHealthyCondition); c != nil { nodeEtcdHealthyCondition[node.Name] = *c continue } @@ -406,7 +407,7 @@ func compareMachinesAndMembers(controlPlane *ControlPlane, nodes *corev1.NodeLis if !found { // Surface there is a machine without etcd member on machine's EtcdMemberHealthy condition. // The same info will also surface into the EtcdClusterHealthy condition on kcp. 
- conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member") + v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member") v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, @@ -495,7 +496,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * for i := range controlPlane.Machines { machine := controlPlane.Machines[i] for _, condition := range allMachinePodConditions { - conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: %v", err) + v1beta1conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: %v", err) } for _, condition := range allMachinePodV1beta2Conditions { @@ -508,7 +509,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } } - conditions.MarkUnknown(controlPlane.KCP, controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: %v", err) + v1beta1conditions.MarkUnknown(controlPlane.KCP, controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: %v", err) v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, @@ -567,7 +568,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * // If the machine is deleting, report all the conditions as deleting if !machine.ObjectMeta.DeletionTimestamp.IsZero() { for _, condition := range allMachinePodConditions { - conditions.MarkFalse(machine, condition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machine, condition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } for _, condition := range allMachinePodV1beta2Conditions { @@ -586,7 +587,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * // NOTE: We are assuming unreachable as a temporary condition, leaving to MHC // the responsibility to determine if the node is unhealthy or not. 
for _, condition := range allMachinePodConditions { - conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedReason, "Node is unreachable") + v1beta1conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedReason, "Node is unreachable") } for _, condition := range allMachinePodV1beta2Conditions { @@ -624,7 +625,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } if !found { for _, condition := range allMachinePodConditions { - conditions.MarkFalse(machine, condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node") + v1beta1conditions.MarkFalse(machine, condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node") } for _, condition := range allMachinePodV1beta2Conditions { @@ -680,7 +681,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // If node ready is unknown there is a good chance that kubelet is not updating mirror pods, so we consider pod status // to be unknown as well without further investigations. if nodeReadyUnknown(node) { - conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale") + v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -700,7 +701,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if err := w.Client.Get(ctx, podKey, &pod); err != nil { // If there is an error getting the Pod, do not set any conditions. if apierrors.IsNotFound(err) { - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod %s is missing", podKey.Name) + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod %s is missing", podKey.Name) v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -710,7 +711,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste }) return } - conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status") + v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -731,7 +732,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // Check if the container is still to be scheduled // NOTE: This should never happen for static pods, however this check is implemented for completeness. 
if podCondition(pod, corev1.PodScheduled) != corev1.ConditionTrue { - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -745,7 +746,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // Check if the container is still running init containers // NOTE: As of today there are not init containers in static pods generated by kubeadm, however this check is implemented for completeness. if podCondition(pod, corev1.PodInitialized) != corev1.ConditionTrue { - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -757,7 +758,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } // If there are no error from containers, report provisioning without further details. - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -772,7 +773,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodReady condition means the pod is able to service requests if podCondition(pod, corev1.PodReady) == corev1.ConditionTrue { - conditions.MarkTrue(machine, staticPodCondition) + v1beta1conditions.MarkTrue(machine, staticPodCondition) v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -797,7 +798,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } if len(containerWaitingMessages) > 0 { if terminatedWithError { - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerWaitingMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerWaitingMessages, ", ")) v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -809,7 +810,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } // Note: Some error cases cannot be caught when container state == "Waiting", // e.g., "waiting.reason: ErrImagePull" is an error, but since LastTerminationState does not exist, this cannot be differentiated from "PodProvisioningReason" - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, strings.Join(containerWaitingMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, strings.Join(containerWaitingMessages, ", ")) v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -828,7 +829,7 @@ func (w 
*Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } } if len(containerTerminatedMessages) > 0 { - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerTerminatedMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerTerminatedMessages, ", ")) v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -841,7 +842,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // If the pod is not yet ready, most probably it is waiting for startup or readiness probes. // Report this as part of the provisioning process because the corresponding control plane component is not ready yet. - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -853,7 +854,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodSucceeded means that all containers in the pod have voluntarily terminated // with a container exit code of 0, and the system is not going to restart any of these containers. // NOTE: This should never happen for the static pods running control plane components. - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -865,7 +866,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodFailed means that all containers in the pod have terminated, and at least one container has // terminated in a failure (exited with a non-zero exit code or was stopped by the system). // NOTE: This should never happen for the static pods running control plane components. - conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") + v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -876,7 +877,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste case corev1.PodUnknown: // PodUnknown means that for some reason the state of the pod could not be obtained, typically due // to an error in communicating with the host of the pod. 
- conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status") + v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status") v1beta2conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, @@ -930,7 +931,7 @@ func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesT for i := range input.controlPlane.Machines { machine := input.controlPlane.Machines[i] for _, condition := range input.machineConditions { - if machineCondition := conditions.Get(machine, condition); machineCondition != nil { + if machineCondition := v1beta1conditions.Get(machine, condition); machineCondition != nil { switch machineCondition.Status { case corev1.ConditionTrue: kcpMachinesWithTrue.Insert(machine.Name) @@ -955,31 +956,31 @@ func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesT input.kcpErrors = append(input.kcpErrors, fmt.Sprintf("Following Machines are reporting %s errors: %s", input.note, strings.Join(sets.List(kcpMachinesWithErrors), ", "))) } if len(input.kcpErrors) > 0 { - conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityError, strings.Join(input.kcpErrors, "; ")) + v1beta1conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityError, strings.Join(input.kcpErrors, "; ")) return } // In case of no errors and at least one machine with warnings, report false, warnings. if len(kcpMachinesWithWarnings) > 0 { - conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityWarning, "Following Machines are reporting %s warnings: %s", input.note, strings.Join(sets.List(kcpMachinesWithWarnings), ", ")) + v1beta1conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityWarning, "Following Machines are reporting %s warnings: %s", input.note, strings.Join(sets.List(kcpMachinesWithWarnings), ", ")) return } // In case of no errors, no warning, and at least one machine with info, report false, info. if len(kcpMachinesWithInfo) > 0 { - conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityInfo, "Following Machines are reporting %s info: %s", input.note, strings.Join(sets.List(kcpMachinesWithInfo), ", ")) + v1beta1conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityInfo, "Following Machines are reporting %s info: %s", input.note, strings.Join(sets.List(kcpMachinesWithInfo), ", ")) return } // In case of no errors, no warning, no Info, and at least one machine with true conditions, report true. if len(kcpMachinesWithTrue) > 0 { - conditions.MarkTrue(input.controlPlane.KCP, input.condition) + v1beta1conditions.MarkTrue(input.controlPlane.KCP, input.condition) return } // Otherwise, if there is at least one machine with unknown, report unknown. 
if len(kcpMachinesWithUnknown) > 0 { - conditions.MarkUnknown(input.controlPlane.KCP, input.condition, input.unknownReason, "Following Machines are reporting unknown %s status: %s", input.note, strings.Join(sets.List(kcpMachinesWithUnknown), ", ")) + v1beta1conditions.MarkUnknown(input.controlPlane.KCP, input.condition, input.unknownReason, "Following Machines are reporting unknown %s status: %s", input.note, strings.Join(sets.List(kcpMachinesWithUnknown), ", ")) return } diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index 59b889cb6d67..a5d03ee17620 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -187,10 +187,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { injectClient: &fakeClient{ listErr: errors.New("something went wrong"), }, - expectedKCPCondition: conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members"), + expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -271,7 +271,7 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), expectedKCPV1Beta2Condition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionFalse, @@ -293,10 +293,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { injectEtcdClientGenerator: &fakeEtcdClientGenerator{ forNodesErr: errors.New("something went wrong"), }, - expectedKCPCondition: conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnknownReason, "Following Machines are reporting unknown etcd member status: m1"), + expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, 
controlplanev1.EtcdClusterUnknownReason, "Following Machines are reporting unknown etcd member status: m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: something went wrong"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: something went wrong"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -332,10 +332,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Errors: []string{"something went wrong"}, }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint n1 reports errors: %s", "something went wrong"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint n1 reports errors: %s", "something went wrong"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -370,10 +370,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -417,10 +417,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, 
controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", "NOSPACE"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", "NOSPACE"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -482,13 +482,13 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { return nil, errors.Wrapf(kerrors.NewAggregate(errs), "could not establish a connection to etcd members hosted on %s", strings.Join(nodeNames, ",")) }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m2"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m2"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, "m2": { - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member"), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -571,13 +571,13 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { return nil, errors.Wrapf(kerrors.NewAggregate(errs), "could not establish a connection to etcd members hosted on %s", strings.Join(nodeNames, ",")) }, }, - expectedKCPCondition: conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), + expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, "m2": { - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, }, expectedKCPV1Beta2Condition: &metav1.Condition{ @@ -616,7 +616,7 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { w.updateManagedEtcdConditions(ctx, controlPane) if tt.expectedKCPCondition != nil { - g.Expect(*conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(conditions.MatchCondition(*tt.expectedKCPCondition)) + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } if tt.expectedKCPV1Beta2Condition != nil { g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) @@ -624,7 +624,7 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - 
g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for Machine %s", m.Name) + g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for Machine %s", m.Name) g.Expect(m.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], v1beta2conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) } @@ -659,7 +659,7 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), + expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), expectedKCPV1Beta2Condition: nil, }, } @@ -677,7 +677,7 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { w.updateExternalEtcdConditions(ctx, controlPane) if tt.expectedKCPCondition != nil { - g.Expect(*conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(conditions.MatchCondition(*tt.expectedKCPCondition)) + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } if tt.expectedKCPV1Beta2Condition != nil { g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) @@ -725,13 +725,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { injectClient: &fakeClient{ listErr: errors.New("failed to list Nodes"), }, - expectedKCPCondition: conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: failed to list Nodes"), + expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: failed to list Nodes"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), - *conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), - *conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), - *conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), 
+ *v1beta1conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), }, }, expectedKCPV1Beta2Condition: metav1.Condition{ @@ -816,7 +816,7 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), expectedKCPV1Beta2Condition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionFalse, @@ -834,13 +834,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1", withUnreachableTaint())}, }, }, - expectedKCPCondition: conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnknownReason, "Following Machines are reporting unknown control plane status: m1"), + expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnknownReason, "Following Machines are reporting unknown control plane status: m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), - *conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), - *conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), - *conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), + *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), }, }, expectedKCPV1Beta2Condition: metav1.Condition{ @@ -895,13 +895,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { injectClient: &fakeClient{ list: &corev1.NodeList{}, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, 
clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), - *conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), - *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), - *conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), }, }, expectedKCPV1Beta2Condition: metav1.Condition{ @@ -946,13 +946,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { ), }, }, - expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), + expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), - *conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), - *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, 
controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), }, }, expectedKCPV1Beta2Condition: metav1.Condition{ @@ -1001,13 +1001,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { ), }, }, - expectedKCPCondition: conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), + expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), }, }, expectedKCPV1Beta2Condition: metav1.Condition{ @@ -1060,12 +1060,12 @@ func TestUpdateStaticPodConditions(t *testing.T) { // no static pod for etcd }, }, - expectedKCPCondition: conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), + expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), expectedMachineConditions: map[string]clusterv1.Conditions{ "m1": { - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), // no condition for etcd Pod }, }, @@ -1102,13 +1102,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { w.UpdateStaticPodConditions(ctx, controlPane) if tt.expectedKCPCondition != nil { - g.Expect(*conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(conditions.MatchCondition(*tt.expectedKCPCondition)) + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) + g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) g.Expect(m.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], 
v1beta2conditions.IgnoreLastTransitionTime(true))) } }) @@ -1137,7 +1137,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { { name: "if node Ready is unknown, assume pod status is stale", node: fakeNode(nodeName, withReadyCondition(corev1.ConditionUnknown)), - expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale"), + expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, @@ -1151,7 +1151,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { getErr: apierrors.NewNotFound(schema.ParseGroupResource("Pod"), component), }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod kube-component-node is missing"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod kube-component-node is missing"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1165,7 +1165,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { getErr: errors.New("get failure"), }, node: fakeNode(nodeName), - expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status"), + expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, @@ -1184,7 +1184,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1204,7 +1204,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1224,7 +1224,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, ""), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, ""), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1243,7 +1243,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.TrueCondition(condition), + expectedCondition: 
*v1beta1conditions.TrueCondition(condition), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionTrue, @@ -1266,7 +1266,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting something"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting something"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1294,7 +1294,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Waiting something"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Waiting something"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1317,7 +1317,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Something failed"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Something failed"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1335,7 +1335,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1353,7 +1353,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1371,7 +1371,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, @@ -1389,7 +1389,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { }, }, node: fakeNode(nodeName), - expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown 
status"), + expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status"), expectedV1Beta2Condition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, @@ -1408,7 +1408,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { } w.updateStaticPodCondition(ctx, machine, *tt.node, component, condition, v1beta2Condition) - g.Expect(*conditions.Get(machine, condition)).To(conditions.MatchCondition(tt.expectedCondition)) + g.Expect(*v1beta1conditions.Get(machine, condition)).To(v1beta1conditions.MatchCondition(tt.expectedCondition)) g.Expect(*v1beta2conditions.Get(machine, v1beta2Condition)).To(v1beta2conditions.MatchCondition(tt.expectedV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) }) } @@ -1565,7 +1565,7 @@ func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { machines: []*clusterv1.Machine{ fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityError)), }, - expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Following Machines are reporting %s errors: %s", note, "m1")), + expectedCondition: *v1beta1conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Following Machines are reporting %s errors: %s", note, "m1")), }, { name: "input kcp errors", @@ -1573,35 +1573,35 @@ func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { fakeMachine("m1", withMachineReadyCondition(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)), }, kcpErrors: []string{"something error"}, - expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, "something error"), + expectedCondition: *v1beta1conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, "something error"), }, { name: "kcp machines with warnings", machines: []*clusterv1.Machine{ fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityWarning)), }, - expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityWarning, fmt.Sprintf("Following Machines are reporting %s warnings: %s", note, "m1")), + expectedCondition: *v1beta1conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityWarning, fmt.Sprintf("Following Machines are reporting %s warnings: %s", note, "m1")), }, { name: "kcp machines with info", machines: []*clusterv1.Machine{ fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityInfo)), }, - expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityInfo, fmt.Sprintf("Following Machines are reporting %s info: %s", note, "m1")), + expectedCondition: *v1beta1conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityInfo, fmt.Sprintf("Following Machines are reporting %s info: %s", note, "m1")), }, { name: "kcp machines with true", machines: []*clusterv1.Machine{ fakeMachine("m1", withMachineReadyCondition(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)), }, - expectedCondition: *conditions.TrueCondition(conditionType), + expectedCondition: *v1beta1conditions.TrueCondition(conditionType), }, { name: "kcp machines with unknown", machines: []*clusterv1.Machine{ fakeMachine("m1", withMachineReadyCondition(corev1.ConditionUnknown, 
clusterv1.ConditionSeverityNone)), }, - expectedCondition: *conditions.UnknownCondition(conditionType, unknownReason, fmt.Sprintf("Following Machines are reporting unknown %s status: %s", note, "m1")), + expectedCondition: *v1beta1conditions.UnknownCondition(conditionType, unknownReason, fmt.Sprintf("Following Machines are reporting unknown %s status: %s", note, "m1")), }, } @@ -1623,7 +1623,7 @@ func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { } aggregateConditionsFromMachinesToKCP(input) - g.Expect(*conditions.Get(input.controlPlane.KCP, conditionType)).To(conditions.MatchCondition(tt.expectedCondition)) + g.Expect(*v1beta1conditions.Get(input.controlPlane.KCP, conditionType)).To(v1beta1conditions.MatchCondition(tt.expectedCondition)) }) } } diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index b8abd6c071c7..c66719196392 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -47,7 +47,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -194,8 +194,8 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) // TODO(jpang): add support for metrics. // Always update the readyCondition with the summary of the machinepool conditions. - conditions.SetSummary(mp, - conditions.WithConditions( + v1beta1conditions.SetSummary(mp, + v1beta1conditions.WithConditions( clusterv1.BootstrapReadyCondition, clusterv1.InfrastructureReadyCondition, expv1.ReplicasReadyCondition, @@ -363,7 +363,8 @@ func (r *MachinePoolReconciler) reconcileDeleteExternal(ctx context.Context, mac func (r *MachinePoolReconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.Cluster) error { log := ctrl.LoggerFrom(ctx) - if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.V(5).Info("Skipping node watching setup because control plane is not initialized") return nil } diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go index 88e8c3897213..cfbd2f59ea23 100644 --- a/exp/internal/controllers/machinepool_controller_noderef.go +++ b/exp/internal/controllers/machinepool_controller_noderef.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -70,7 +70,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope) readyReplicas = mp.Status.Deprecated.V1Beta1.ReadyReplicas } if mp.Status.Replicas == readyReplicas && len(mp.Status.NodeRefs) == int(readyReplicas) { - conditions.MarkTrue(mp, expv1.ReplicasReadyCondition) + v1beta1conditions.MarkTrue(mp, expv1.ReplicasReadyCondition) return ctrl.Result{}, nil } @@ -128,12 +128,12 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope) 
if mp.Status.Replicas != mp.Status.Deprecated.V1Beta1.ReadyReplicas || len(nodeRefsResult.references) != int(mp.Status.Deprecated.V1Beta1.ReadyReplicas) { log.Info("Not enough ready replicas or node references", "nodeRefs", len(nodeRefsResult.references), "readyReplicas", mp.Status.ReadyReplicas, "replicas", mp.Status.Replicas) - conditions.MarkFalse(mp, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(mp, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } // At this point, the required number of replicas are ready - conditions.MarkTrue(mp, expv1.ReplicasReadyCondition) + v1beta1conditions.MarkTrue(mp, expv1.ReplicasReadyCondition) return ctrl.Result{}, nil } diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index e7b7f4394f92..a0b80a931549 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -44,7 +44,7 @@ import ( "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/labels" "sigs.k8s.io/cluster-api/util/labels/format" @@ -213,9 +213,9 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, s *scope } // Report a summary of current status of the bootstrap object defined for this machine pool. - conditions.SetMirror(m, clusterv1.BootstrapReadyCondition, - conditions.UnstructuredGetter(bootstrapConfig), - conditions.WithFallbackValue(ready, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.SetMirror(m, clusterv1.BootstrapReadyCondition, + v1beta1conditions.UnstructuredGetter(bootstrapConfig), + v1beta1conditions.WithFallbackValue(ready, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), ) if !ready { @@ -240,7 +240,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, s *scope // If dataSecretName is set without a ConfigRef, this means the user brought their own bootstrap data. 
if m.Spec.Template.Spec.Bootstrap.DataSecretName != nil { m.Status.BootstrapReady = true - conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition) + v1beta1conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition) return ctrl.Result{}, nil } @@ -271,7 +271,7 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s * mp.Status.Deprecated.V1Beta1.FailureMessage = ptr.To(fmt.Sprintf("MachinePool infrastructure resource %v with name %q has been deleted after being ready", mp.Spec.Template.Spec.InfrastructureRef.GroupVersionKind(), mp.Spec.Template.Spec.InfrastructureRef.Name)) } - conditions.MarkFalse(mp, clusterv1.InfrastructureReadyCondition, clusterv1.IncorrectExternalRefReason, clusterv1.ConditionSeverityError, fmt.Sprintf("could not find infra reference of kind %s with name %s", mp.Spec.Template.Spec.InfrastructureRef.Kind, mp.Spec.Template.Spec.InfrastructureRef.Name)) + v1beta1conditions.MarkFalse(mp, clusterv1.InfrastructureReadyCondition, clusterv1.IncorrectExternalRefReason, clusterv1.ConditionSeverityError, fmt.Sprintf("could not find infra reference of kind %s with name %s", mp.Spec.Template.Spec.InfrastructureRef.Kind, mp.Spec.Template.Spec.InfrastructureRef.Name)) } return ctrl.Result{}, err } @@ -289,9 +289,9 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s * mp.Status.InfrastructureReady = ready // Report a summary of current status of the infrastructure object defined for this machine pool. - conditions.SetMirror(mp, clusterv1.InfrastructureReadyCondition, - conditions.UnstructuredGetter(infraConfig), - conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.SetMirror(mp, clusterv1.InfrastructureReadyCondition, + v1beta1conditions.UnstructuredGetter(infraConfig), + v1beta1conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), ) clusterClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster)) diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go index 594f7cef476c..54110dc35cf8 100644 --- a/exp/internal/controllers/machinepool_controller_test.go +++ b/exp/internal/controllers/machinepool_controller_test.go @@ -46,7 +46,7 @@ import ( externalfake "sigs.k8s.io/cluster-api/controllers/external/fake" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -1022,7 +1022,7 @@ func TestMachinePoolConditions(t *testing.T) { infrastructureReady bool expectError bool beforeFunc func(bootstrap, infra *unstructured.Unstructured, mp *expv1.MachinePool, nodeList *corev1.NodeList) - conditionAssertFunc func(t *testing.T, getter conditions.Getter) + conditionAssertFunc func(t *testing.T, getter v1beta1conditions.Getter) }{ { name: "all conditions true", @@ -1043,7 +1043,7 @@ func TestMachinePoolConditions(t *testing.T) { } mp.Status.Deprecated.V1Beta1.ReadyReplicas = 2 }, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) @@ -1067,12 +1067,12 @@ func TestMachinePoolConditions(t *testing.T) { }, }) }, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + 
conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(getter, clusterv1.BootstrapReadyCondition)).To(BeTrue()) - infraReadyCondition := conditions.Get(getter, clusterv1.BootstrapReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.BootstrapReadyCondition)).To(BeTrue()) + infraReadyCondition := v1beta1conditions.Get(getter, clusterv1.BootstrapReadyCondition) g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(infraReadyCondition.Reason).To(Equal("Custom reason")) }, @@ -1081,16 +1081,16 @@ func TestMachinePoolConditions(t *testing.T) { name: "bootstrap not ready with fallback condition", bootstrapReady: false, infrastructureReady: true, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(getter, clusterv1.BootstrapReadyCondition)).To(BeTrue()) - bootstrapReadyCondition := conditions.Get(getter, clusterv1.BootstrapReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.BootstrapReadyCondition)).To(BeTrue()) + bootstrapReadyCondition := v1beta1conditions.Get(getter, clusterv1.BootstrapReadyCondition) g.Expect(bootstrapReadyCondition.Status).To(Equal(corev1.ConditionFalse)) - g.Expect(conditions.Has(getter, clusterv1.ReadyCondition)).To(BeTrue()) - readyCondition := conditions.Get(getter, clusterv1.ReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.ReadyCondition)).To(BeTrue()) + readyCondition := v1beta1conditions.Get(getter, clusterv1.ReadyCondition) g.Expect(readyCondition.Status).To(Equal(corev1.ConditionFalse)) }, }, @@ -1108,13 +1108,13 @@ func TestMachinePoolConditions(t *testing.T) { }, }) }, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) - infraReadyCondition := conditions.Get(getter, clusterv1.InfrastructureReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) + infraReadyCondition := v1beta1conditions.Get(getter, clusterv1.InfrastructureReadyCondition) g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(infraReadyCondition.Reason).To(Equal("Custom reason")) }, @@ -1123,16 +1123,16 @@ func TestMachinePoolConditions(t *testing.T) { name: "infrastructure not ready with fallback condition", bootstrapReady: true, infrastructureReady: false, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) - infraReadyCondition := conditions.Get(getter, clusterv1.InfrastructureReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) + infraReadyCondition := v1beta1conditions.Get(getter, clusterv1.InfrastructureReadyCondition) g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse)) - g.Expect(conditions.Has(getter, clusterv1.ReadyCondition)).To(BeTrue()) - readyCondition := conditions.Get(getter, clusterv1.ReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.ReadyCondition)).To(BeTrue()) + readyCondition := 
v1beta1conditions.Get(getter, clusterv1.ReadyCondition) g.Expect(readyCondition.Status).To(Equal(corev1.ConditionFalse)) }, }, @@ -1147,12 +1147,12 @@ func TestMachinePoolConditions(t *testing.T) { Name: "does-not-exist", } }, - conditionAssertFunc: func(t *testing.T, getter conditions.Getter) { + conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) - infraReadyCondition := conditions.Get(getter, clusterv1.InfrastructureReadyCondition) + g.Expect(v1beta1conditions.Has(getter, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) + infraReadyCondition := v1beta1conditions.Get(getter, clusterv1.InfrastructureReadyCondition) g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse)) }, }, @@ -1211,9 +1211,9 @@ func TestMachinePoolConditions(t *testing.T) { // adds a condition list to an external object. func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { existingConditions := clusterv1.Conditions{} - if cs := conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { + if cs := v1beta1conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { existingConditions = cs } existingConditions = append(existingConditions, newConditions...) - conditions.UnstructuredSetter(u).SetConditions(existingConditions) + v1beta1conditions.UnstructuredSetter(u).SetConditions(existingConditions) } diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go index 7cec8b923e30..725154b1ad7b 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller.go @@ -39,7 +39,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -231,7 +231,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl discoveredExtension, err := runtimeClient.Discover(ctx, extensionConfig.DeepCopy()) if err != nil { modifiedExtensionConfig := extensionConfig.DeepCopy() - conditions.MarkFalse(modifiedExtensionConfig, runtimev1.RuntimeExtensionDiscoveredCondition, runtimev1.DiscoveryFailedReason, clusterv1.ConditionSeverityError, "Error in discovery: %v", err) + v1beta1conditions.MarkFalse(modifiedExtensionConfig, runtimev1.RuntimeExtensionDiscoveredCondition, runtimev1.DiscoveryFailedReason, clusterv1.ConditionSeverityError, "Error in discovery: %v", err) v1beta2conditions.Set(modifiedExtensionConfig, metav1.Condition{ Type: runtimev1.ExtensionConfigDiscoveredV1Beta2Condition, Status: metav1.ConditionFalse, @@ -241,7 +241,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl return modifiedExtensionConfig, errors.Wrapf(err, "failed to discover ExtensionConfig %s", klog.KObj(extensionConfig)) } - conditions.MarkTrue(discoveredExtension, runtimev1.RuntimeExtensionDiscoveredCondition) + v1beta1conditions.MarkTrue(discoveredExtension, runtimev1.RuntimeExtensionDiscoveredCondition) v1beta2conditions.Set(discoveredExtension, 
metav1.Condition{ Type: runtimev1.ExtensionConfigDiscoveredV1Beta2Condition, Status: metav1.ConditionTrue, diff --git a/internal/apis/core/v1alpha3/conversion.go b/internal/apis/core/v1alpha3/conversion.go index c887f395eb4b..b8da09735674 100644 --- a/internal/apis/core/v1alpha3/conversion.go +++ b/internal/apis/core/v1alpha3/conversion.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -54,7 +54,7 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { // will be "now". See https://github.com/kubernetes-sigs/cluster-api/issues/3798#issuecomment-708619826 for more // discussion. if src.Status.ControlPlaneInitialized { - conditions.MarkTrue(dst, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(dst, clusterv1.ControlPlaneInitializedCondition) } // Manually restore data. @@ -97,7 +97,7 @@ func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { } // Set the v1alpha3 boolean status field if the v1alpha4 condition was true - if conditions.IsTrue(src, clusterv1.ControlPlaneInitializedCondition) { + if v1beta1conditions.IsTrue(src, clusterv1.ControlPlaneInitializedCondition) { dst.Status.ControlPlaneInitialized = true } diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 82fa596f9200..7e1a9289f127 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -51,7 +51,7 @@ import ( "sigs.k8s.io/cluster-api/internal/hooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" @@ -259,8 +259,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clusterv1.Cluster, options ...patch.Option) error { // Always update the readyCondition by summarizing the state of other conditions. 
- conditions.SetSummary(cluster, - conditions.WithConditions( + v1beta1conditions.SetSummary(cluster, + v1beta1conditions.WithConditions( clusterv1.ControlPlaneReadyCondition, clusterv1.InfrastructureReadyCondition, ), @@ -447,7 +447,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R return ctrl.Result{}, nil } // All good - the control plane resource has been deleted - conditions.MarkFalse(cluster, clusterv1.ControlPlaneReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(cluster, clusterv1.ControlPlaneReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") } if s.controlPlane != nil { @@ -487,7 +487,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R return ctrl.Result{}, nil } // All good - the infra resource has been deleted - conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") } if s.infraCluster != nil { @@ -715,7 +715,7 @@ func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, s *sc return ctrl.Result{}, nil } - if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.V(4).Info("Skipping reconcileControlPlaneInitialized because control plane already initialized") return ctrl.Result{}, nil } @@ -730,12 +730,12 @@ func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, s *sc for _, m := range machines { if util.IsControlPlaneMachine(m) && m.Status.NodeRef != nil { - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) return ctrl.Result{}, nil } } - conditions.MarkFalse(cluster, clusterv1.ControlPlaneInitializedCondition, clusterv1.MissingNodeRefReason, clusterv1.ConditionSeverityInfo, "Waiting for the first control plane machine to have its status.nodeRef set") + v1beta1conditions.MarkFalse(cluster, clusterv1.ControlPlaneInitializedCondition, clusterv1.MissingNodeRefReason, clusterv1.ConditionSeverityInfo, "Waiting for the first control plane machine to have its status.nodeRef set") return ctrl.Result{}, nil } @@ -759,7 +759,8 @@ func (r *Reconciler) controlPlaneMachineToCluster(ctx context.Context, o client. 
return nil } - if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return nil } diff --git a/internal/controllers/cluster/cluster_controller_phases.go b/internal/controllers/cluster/cluster_controller_phases.go index 32a6b1092ee7..7620af851739 100644 --- a/internal/controllers/cluster/cluster_controller_phases.go +++ b/internal/controllers/cluster/cluster_controller_phases.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" @@ -183,7 +183,7 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr // if the cluster is not deleted, and the cluster is not using a ClusterClass, mark the infrastructure as ready to unblock other provisioning workflows. if s.cluster.DeletionTimestamp.IsZero() { cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) } return ctrl.Result{}, nil } @@ -220,12 +220,12 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr } // Report a summary of current status of the infrastructure object defined for this cluster. - fallBack := conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, "") + fallBack := v1beta1conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, "") if !s.cluster.DeletionTimestamp.IsZero() { - fallBack = conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + fallBack = v1beta1conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } - conditions.SetMirror(cluster, clusterv1.InfrastructureReadyCondition, - conditions.UnstructuredGetter(s.infraCluster), + v1beta1conditions.SetMirror(cluster, clusterv1.InfrastructureReadyCondition, + v1beta1conditions.UnstructuredGetter(s.infraCluster), fallBack, ) @@ -306,12 +306,12 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope) (ctrl. } // Report a summary of current status of the control plane object defined for this cluster. 
- fallBack := conditions.WithFallbackValue(ready, clusterv1.WaitingForControlPlaneFallbackReason, clusterv1.ConditionSeverityInfo, "") + fallBack := v1beta1conditions.WithFallbackValue(ready, clusterv1.WaitingForControlPlaneFallbackReason, clusterv1.ConditionSeverityInfo, "") if !s.cluster.DeletionTimestamp.IsZero() { - fallBack = conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + fallBack = v1beta1conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } - conditions.SetMirror(cluster, clusterv1.ControlPlaneReadyCondition, - conditions.UnstructuredGetter(s.controlPlane), + v1beta1conditions.SetMirror(cluster, clusterv1.ControlPlaneReadyCondition, + v1beta1conditions.UnstructuredGetter(s.controlPlane), fallBack, ) @@ -322,15 +322,15 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope) (ctrl. // Update cluster.Status.ControlPlaneInitialized if it hasn't already been set // Determine if the control plane provider is initialized. - if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { initialized, err := external.IsInitialized(s.controlPlane) if err != nil { return ctrl.Result{}, err } if initialized { - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) } else { - conditions.MarkFalse(cluster, clusterv1.ControlPlaneInitializedCondition, clusterv1.WaitingForControlPlaneProviderInitializedReason, clusterv1.ConditionSeverityInfo, "Waiting for control plane provider to indicate the control plane has been initialized") + v1beta1conditions.MarkFalse(cluster, clusterv1.ControlPlaneInitializedCondition, clusterv1.WaitingForControlPlaneProviderInitializedReason, clusterv1.ConditionSeverityInfo, "Waiting for control plane provider to indicate the control plane has been initialized") } } diff --git a/internal/controllers/cluster/cluster_controller_phases_test.go b/internal/controllers/cluster/cluster_controller_phases_test.go index 51820ce90b3f..1dfb9b886a69 100644 --- a/internal/controllers/cluster/cluster_controller_phases_test.go +++ b/internal/controllers/cluster/cluster_controller_phases_test.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" externalfake "sigs.k8s.io/cluster-api/controllers/external/fake" capierrors "sigs.k8s.io/cluster-api/errors" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -95,7 +95,7 @@ func TestClusterReconcileInfrastructure(t *testing.T) { expectErr: false, check: func(g *GomegaWithT, in *clusterv1.Cluster) { g.Expect(in.Status.InfrastructureReady).To(BeTrue()) - g.Expect(conditions.IsTrue(in, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(in, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) }, }, { @@ -461,8 +461,8 @@ func TestClusterReconcileControlPlane(t *testing.T) { }, expectErr: false, check: func(g *GomegaWithT, in *clusterv1.Cluster) { - g.Expect(conditions.IsTrue(in, clusterv1.ControlPlaneReadyCondition)).To(BeTrue()) - g.Expect(conditions.IsTrue(in, clusterv1.ControlPlaneInitializedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(in, clusterv1.ControlPlaneReadyCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(in, 
clusterv1.ControlPlaneInitializedCondition)).To(BeTrue()) }, }, { diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 7e55c17fe742..e9793ecdd998 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -226,7 +226,7 @@ func TestClusterReconciler(t *testing.T) { g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, env) g.Expect(err).ToNot(HaveOccurred()) - conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) @@ -237,7 +237,7 @@ func TestClusterReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition) + return v1beta1conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition) }, timeout).Should(BeTrue()) }) @@ -418,7 +418,7 @@ func TestClusterReconciler(t *testing.T) { if err := env.Get(ctx, key, cluster); err != nil { return false } - return conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + return v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) }, timeout).Should(BeTrue()) }) } @@ -922,5 +922,5 @@ func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { res, err := r.reconcileControlPlaneInitialized(ctx, s) g.Expect(res.IsZero()).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse()) + g.Expect(v1beta1conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse()) } diff --git a/internal/controllers/clusterclass/clusterclass_controller_status.go b/internal/controllers/clusterclass/clusterclass_controller_status.go index a3a79e5ea212..5cbf750d0ae5 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_status.go +++ b/internal/controllers/clusterclass/clusterclass_controller_status.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -35,7 +35,7 @@ func updateStatus(ctx context.Context, s *scope) { func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1.ClusterClass, outdatedRefs []outdatedRef, reconcileExternalReferencesError error) { if reconcileExternalReferencesError != nil { - conditions.MarkUnknown(clusterClass, + v1beta1conditions.MarkUnknown(clusterClass, clusterv1.ClusterClassRefVersionsUpToDateCondition, clusterv1.ClusterClassRefVersionsUpToDateInternalErrorReason, "Please check controller logs for errors", @@ -54,8 +54,8 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. 
for _, outdatedRef := range outdatedRefs { msg = append(msg, fmt.Sprintf("* Ref %q should be %q", refString(outdatedRef.Outdated), refString(outdatedRef.UpToDate))) } - conditions.Set(clusterClass, - conditions.FalseCondition( + v1beta1conditions.Set(clusterClass, + v1beta1conditions.FalseCondition( clusterv1.ClusterClassRefVersionsUpToDateCondition, clusterv1.ClusterClassOutdatedRefVersionsReason, clusterv1.ConditionSeverityWarning, @@ -71,8 +71,8 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. return } - conditions.Set(clusterClass, - conditions.TrueCondition(clusterv1.ClusterClassRefVersionsUpToDateCondition), + v1beta1conditions.Set(clusterClass, + v1beta1conditions.TrueCondition(clusterv1.ClusterClassRefVersionsUpToDateCondition), ) v1beta2conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition, @@ -83,7 +83,7 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. func setVariablesReconciledCondition(_ context.Context, clusterClass *clusterv1.ClusterClass, variableDiscoveryError error) { if variableDiscoveryError != nil { - conditions.MarkFalse(clusterClass, + v1beta1conditions.MarkFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition, clusterv1.VariableDiscoveryFailedReason, clusterv1.ConditionSeverityError, @@ -98,7 +98,7 @@ func setVariablesReconciledCondition(_ context.Context, clusterClass *clusterv1. return } - conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) v1beta2conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassVariablesReadyV1Beta2Condition, Status: metav1.ConditionTrue, diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller.go b/internal/controllers/clusterresourceset/clusterresourceset_controller.go index 4e7f0e76c204..106b675a76ec 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" resourcepredicates "sigs.k8s.io/cluster-api/internal/controllers/clusterresourceset/predicates" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" @@ -172,7 +172,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re clusters, err := r.getClustersByClusterResourceSetSelector(ctx, clusterResourceSet) if err != nil { log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "ClusterResourceSet", klog.KObj(clusterResourceSet)) - conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ClusterMatchFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ClusterMatchFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -314,7 +314,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust 
unstructuredObj, err := r.getResource(ctx, resource, cluster.GetNamespace()) if err != nil { if err == ErrSecretTypeNotSupported { - conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.WrongSecretTypeReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.WrongSecretTypeReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -322,7 +322,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust Message: fmt.Sprintf("Secret type of resource %s is not supported", resource.Name), }) } else { - conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RetrievingResourceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RetrievingResourceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -380,7 +380,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { - conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RemoteClusterClientFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RemoteClusterClientFailedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -437,7 +437,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust if err := resourceScope.apply(ctx, remoteClient); err != nil { isSuccessful = false log.Error(err, "Failed to apply ClusterResourceSet resource", resource.Kind, klog.KRef(clusterResourceSet.Namespace, resource.Name)) - conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ApplyFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ApplyFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -458,7 +458,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust return kerrors.NewAggregate(errList) } - conditions.MarkTrue(clusterResourceSet, addonsv1.ResourcesAppliedCondition) + v1beta1conditions.MarkTrue(clusterResourceSet, addonsv1.ResourcesAppliedCondition) v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionTrue, diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go b/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go index 128a09f5f32a..7945eadfb55a 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go 
@@ -35,7 +35,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/internal/test/envtest" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -893,7 +893,7 @@ metadata: crs := &addonsv1.ClusterResourceSet{} g.Expect(env.Get(ctx, clusterResourceSetKey, crs)).To(Succeed()) - appliedCondition := conditions.Get(crs, addonsv1.ResourcesAppliedCondition) + appliedCondition := v1beta1conditions.Get(crs, addonsv1.ResourcesAppliedCondition) g.Expect(appliedCondition).NotTo(BeNil()) g.Expect(appliedCondition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(appliedCondition.Reason).To(Equal(addonsv1.ApplyFailedReason)) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 3937facbd481..7eb5439e31cb 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -58,7 +58,7 @@ import ( "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" @@ -287,8 +287,8 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding it // after provisioning - e.g. when a MHC condition exists - or during the deletion process). - conditions.SetSummary(machine, - conditions.WithConditions( + v1beta1conditions.SetSummary(machine, + v1beta1conditions.WithConditions( // Infrastructure problems should take precedence over all the other conditions clusterv1.InfrastructureReadyCondition, // Bootstrap comes after, but it is relevant only during initial machine provisioning. 
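The patchMachine hunks above and below simply swap the summary helpers over to the relocated package. As a minimal, hypothetical sketch of the same pattern from a caller's point of view (the summarizeMachineReady wrapper and the standalone main are illustrative and not part of this patch), the deprecated/v1beta1 package keeps the exact option API it had under util/conditions:

```go
package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

// summarizeMachineReady mirrors the patchMachine change: only the import path
// and alias change, the SetSummary option API stays the same.
func summarizeMachineReady(machine *clusterv1.Machine) {
	v1beta1conditions.SetSummary(machine,
		v1beta1conditions.WithConditions(
			// Infrastructure problems take precedence over the other conditions.
			clusterv1.InfrastructureReadyCondition,
			clusterv1.BootstrapReadyCondition,
		),
		// Show a step counter only while the Machine is still provisioning.
		v1beta1conditions.WithStepCounterIf(machine.ObjectMeta.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil),
		v1beta1conditions.WithStepCounterIfOnly(
			clusterv1.BootstrapReadyCondition,
			clusterv1.InfrastructureReadyCondition,
		),
	)
}

func main() {
	machine := &clusterv1.Machine{}
	v1beta1conditions.MarkTrue(machine, clusterv1.InfrastructureReadyCondition)
	v1beta1conditions.MarkTrue(machine, clusterv1.BootstrapReadyCondition)
	summarizeMachineReady(machine)
	// The summary lands on the Ready condition; with both source conditions
	// true this reports true.
	fmt.Println(v1beta1conditions.IsTrue(machine, clusterv1.ReadyCondition))
}
```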
@@ -298,8 +298,8 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust clusterv1.MachineOwnerRemediatedCondition, clusterv1.DrainingSucceededCondition, ), - conditions.WithStepCounterIf(machine.ObjectMeta.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil), - conditions.WithStepCounterIfOnly( + v1beta1conditions.WithStepCounterIf(machine.ObjectMeta.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIfOnly( clusterv1.BootstrapReadyCondition, clusterv1.InfrastructureReadyCondition, ), @@ -464,12 +464,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } slices.Sort(hooks) log.Info("Waiting for pre-drain hooks to succeed", "hooks", strings.Join(hooks, ",")) - conditions.MarkFalse(m, clusterv1.PreDrainDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m, clusterv1.PreDrainDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "") s.deletingReason = clusterv1.MachineDeletingWaitingForPreDrainHookV1Beta2Reason s.deletingMessage = fmt.Sprintf("Waiting for pre-drain hooks to succeed (hooks: %s)", strings.Join(hooks, ",")) return ctrl.Result{}, nil } - conditions.MarkTrue(m, clusterv1.PreDrainDeleteHookSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.PreDrainDeleteHookSucceededCondition) // Drain node before deletion and issue a patch in order to make this operation visible to the users. if r.isNodeDrainAllowed(m) { @@ -488,8 +488,9 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } // The DrainingSucceededCondition never exists before the node is drained for the first time. 
- if conditions.Get(m, clusterv1.DrainingSucceededCondition) == nil { - conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, "Draining the node before deletion") + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.Get(m, clusterv1.DrainingSucceededCondition) == nil { + v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, "Draining the node before deletion") } s.deletingReason = clusterv1.MachineDeletingDrainingNodeV1Beta2Reason s.deletingMessage = fmt.Sprintf("Drain not completed yet (started at %s):", m.Status.Deletion.NodeDrainStartTime.Format(time.RFC3339)) @@ -502,7 +503,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result result, err := r.drainNode(ctx, s) if err != nil { - conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) s.deletingReason = clusterv1.MachineDeletingDrainingNodeV1Beta2Reason s.deletingMessage = "Error draining Node, please check controller logs for errors" r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDrainNode", "error draining Machine's node %q: %v", m.Status.NodeRef.Name, err) @@ -513,7 +514,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result return result, nil } - conditions.MarkTrue(m, clusterv1.DrainingSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.DrainingSucceededCondition) r.recorder.Eventf(m, corev1.EventTypeNormal, "SuccessfulDrainNode", "success draining Machine's node %q", m.Status.NodeRef.Name) } @@ -529,8 +530,9 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } // The VolumeDetachSucceededCondition never exists before we wait for volume detachment for the first time. 
- if conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil { - conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil { + v1beta1conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") } s.deletingReason = clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason s.deletingMessage = fmt.Sprintf("Waiting for Node volumes to be detached (started at %s)", m.Status.Deletion.WaitForNodeVolumeDetachStartTime.Format(time.RFC3339)) @@ -545,7 +547,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result if !result.IsZero() { return result, nil } - conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition) r.recorder.Eventf(m, corev1.EventTypeNormal, "NodeVolumesDetached", "success waiting for node volumes detaching Machine's node %q", m.Status.NodeRef.Name) } } @@ -561,12 +563,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } slices.Sort(hooks) log.Info("Waiting for pre-terminate hooks to succeed", "hooks", strings.Join(hooks, ",")) - conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededCondition, clusterv1.WaitingExternalHookReason, clusterv1.ConditionSeverityInfo, "") s.deletingReason = clusterv1.MachineDeletingWaitingForPreTerminateHookV1Beta2Reason s.deletingMessage = fmt.Sprintf("Waiting for pre-terminate hooks to succeed (hooks: %s)", strings.Join(hooks, ",")) return ctrl.Result{}, nil } - conditions.MarkTrue(m, clusterv1.PreTerminateDeleteHookSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.PreTerminateDeleteHookSucceededCondition) infrastructureDeleted, err := r.reconcileDeleteInfrastructure(ctx, s) if err != nil { @@ -610,7 +612,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result }) if waitErr != nil { log.Error(deleteNodeErr, "Timed out deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) - conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr) // If the node deletion timeout is not expired yet, requeue the Machine for reconciliation. 
@@ -876,7 +878,7 @@ func (r *Reconciler) drainNode(ctx context.Context, s *scope) (ctrl.Result, erro r.reconcileDeleteCache.Add(cache.NewReconcileEntry(machine, time.Now().Add(drainRetryInterval))) conditionMessage := evictionResult.ConditionMessage(machine.Status.Deletion.NodeDrainStartTime) - conditions.MarkFalse(machine, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, conditionMessage) + v1beta1conditions.MarkFalse(machine, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, conditionMessage) s.deletingReason = clusterv1.MachineDeletingDrainingNodeV1Beta2Reason s.deletingMessage = conditionMessage podsFailedEviction := []*corev1.Pod{} @@ -990,7 +992,7 @@ func (r *Reconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, func (r *Reconciler) reconcileDeleteBootstrap(ctx context.Context, s *scope) (bool, error) { if s.bootstrapConfig == nil && s.bootstrapConfigIsNotFound { - conditions.MarkFalse(s.machine, clusterv1.BootstrapReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.machine, clusterv1.BootstrapReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") return true, nil } @@ -1007,7 +1009,7 @@ func (r *Reconciler) reconcileDeleteBootstrap(ctx context.Context, s *scope) (bo func (r *Reconciler) reconcileDeleteInfrastructure(ctx context.Context, s *scope) (bool, error) { if s.infraMachine == nil && s.infraMachineIsNotFound { - conditions.MarkFalse(s.machine, clusterv1.InfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.machine, clusterv1.InfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") return true, nil } @@ -1052,7 +1054,7 @@ func (r *Reconciler) shouldAdopt(m *clusterv1.Machine) bool { func (r *Reconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.Cluster) error { log := ctrl.LoggerFrom(ctx) - if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.V(5).Info("Skipping node watching setup because control plane is not initialized") return nil } diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index a7ca11ed8537..9cb1e738dabc 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/labels" ) @@ -70,7 +70,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, // Check that the Machine has a valid ProviderID. 
if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { log.Info("Waiting for infrastructure provider to report spec.providerID", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name)) - conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -92,17 +92,17 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, // While a NodeRef is set in the status, failing to get that node means the node is deleted. // If Status.NodeRef is not set before, node still can be in the provisioning state. if machine.Status.NodeRef != nil { - conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "") return ctrl.Result{}, errors.Wrapf(err, "no matching Node for Machine %q in namespace %q", machine.Name, machine.Namespace) } - conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityWarning, "Waiting for a node with matching ProviderID to exist") + v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityWarning, "Waiting for a node with matching ProviderID to exist") log.Info("Infrastructure provider reporting spec.providerID, matching Kubernetes node is not yet available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", *machine.Spec.ProviderID) // No need to requeue here. Nodes emit an event that triggers reconciliation. return ctrl.Result{}, nil } s.nodeGetError = err r.recorder.Event(machine, corev1.EventTypeWarning, "Failed to retrieve Node by ProviderID", err.Error()) - conditions.MarkUnknown(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID") + v1beta1conditions.MarkUnknown(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID") return ctrl.Result{}, err } s.node = node @@ -158,22 +158,22 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, } if s.infraMachine == nil || !s.infraMachine.GetDeletionTimestamp().IsZero() { - conditions.MarkFalse(s.machine, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.machine, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Do the remaining node health checks, then set the node health to true if all checks pass. 
status, message := summarizeNodeConditions(s.node) if status == corev1.ConditionFalse { - conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, message) + v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, message) return ctrl.Result{}, nil } if status == corev1.ConditionUnknown { - conditions.MarkUnknown(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, message) + v1beta1conditions.MarkUnknown(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, message) return ctrl.Result{}, nil } - conditions.MarkTrue(machine, clusterv1.MachineNodeHealthyCondition) + v1beta1conditions.MarkTrue(machine, clusterv1.MachineNodeHealthyCondition) return ctrl.Result{}, nil } diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index 0e73d5bebccd..676cca58a2bd 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -38,7 +38,7 @@ import ( capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -189,7 +189,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Res // If the bootstrap data is populated, set ready and return. if m.Spec.Bootstrap.DataSecretName != nil { m.Status.BootstrapReady = true - conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition) + v1beta1conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition) return ctrl.Result{}, nil } @@ -206,11 +206,11 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Res } // Report a summary of current status of the bootstrap object defined for this machine. - fallBack := conditions.WithFallbackValue(*dataSecretCreated, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, "") + fallBack := v1beta1conditions.WithFallbackValue(*dataSecretCreated, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, "") if !s.machine.DeletionTimestamp.IsZero() { - fallBack = conditions.WithFallbackValue(*dataSecretCreated, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + fallBack = v1beta1conditions.WithFallbackValue(*dataSecretCreated, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } - conditions.SetMirror(m, clusterv1.BootstrapReadyCondition, conditions.UnstructuredGetter(s.bootstrapConfig), fallBack) + v1beta1conditions.SetMirror(m, clusterv1.BootstrapReadyCondition, v1beta1conditions.UnstructuredGetter(s.bootstrapConfig), fallBack) if !s.bootstrapConfig.GetDeletionTimestamp().IsZero() { return ctrl.Result{}, nil @@ -299,11 +299,11 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr } // Report a summary of current status of the InfrastructureMachine for this Machine. 
- fallBack := conditions.WithFallbackValue(*provisioned, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, "") + fallBack := v1beta1conditions.WithFallbackValue(*provisioned, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, "") if !s.machine.DeletionTimestamp.IsZero() { - fallBack = conditions.WithFallbackValue(*provisioned, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + fallBack = v1beta1conditions.WithFallbackValue(*provisioned, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } - conditions.SetMirror(m, clusterv1.InfrastructureReadyCondition, conditions.UnstructuredGetter(s.infraMachine), fallBack) + v1beta1conditions.SetMirror(m, clusterv1.InfrastructureReadyCondition, v1beta1conditions.UnstructuredGetter(s.infraMachine), fallBack) if !s.infraMachine.GetDeletionTimestamp().IsZero() { return ctrl.Result{}, nil diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index f10feb761bf2..cb7f8ed2f030 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -34,7 +34,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -263,7 +263,8 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl return } - controlPlaneInitialized := conditions.Get(cluster, clusterv1.ControlPlaneInitializedCondition) + // TODO (v1beta2): test for v1beta2 conditions + controlPlaneInitialized := v1beta1conditions.Get(cluster, clusterv1.ControlPlaneInitializedCondition) if controlPlaneInitialized == nil || controlPlaneInitialized.Status != corev1.ConditionTrue { setNodeConditions(machine, metav1.ConditionUnknown, clusterv1.MachineNodeInspectionFailedV1Beta2Reason, diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index f8314ca0247c..5b3bc1acb895 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -32,7 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/kubeconfig" ) @@ -704,7 +704,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { name: "Cluster control plane is not initialized", cluster: func() *clusterv1.Cluster { c := defaultCluster.DeepCopy() - conditions.MarkFalse(c, clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(c, clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityError, "") return c }(), machine: defaultMachine.DeepCopy(), @@ -2534,7 +2534,7 @@ func TestReconcileMachinePhases(t *testing.T) { return false } g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseDeleting)) - nodeHealthyCondition := conditions.Get(machine, 
clusterv1.MachineNodeHealthyCondition) + nodeHealthyCondition := v1beta1conditions.Get(machine, clusterv1.MachineNodeHealthyCondition) g.Expect(nodeHealthyCondition).ToNot(BeNil()) g.Expect(nodeHealthyCondition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(nodeHealthyCondition.Reason).To(Equal(clusterv1.DeletingReason)) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 80257098a333..de2cccc3aeb3 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -50,7 +50,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/cache" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -492,10 +492,10 @@ func TestMachine_Reconcile(t *testing.T) { if err := env.Get(ctx, key, machine); err != nil { return false } - if !conditions.Has(machine, clusterv1.InfrastructureReadyCondition) { + if !v1beta1conditions.Has(machine, clusterv1.InfrastructureReadyCondition) { return false } - readyCondition := conditions.Get(machine, clusterv1.ReadyCondition) + readyCondition := v1beta1conditions.Get(machine, clusterv1.ReadyCondition) return readyCondition.Status == corev1.ConditionTrue }, timeout).Should(BeTrue()) @@ -1095,15 +1095,15 @@ func TestMachineConditions(t *testing.T) { bootstrapReady: true, beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { // since these conditions are set by an external controller - conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) - conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedCondition) + v1beta1conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedCondition) }, conditionsToAssert: []*clusterv1.Condition{ - conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), - conditions.TrueCondition(clusterv1.BootstrapReadyCondition), - conditions.TrueCondition(clusterv1.MachineOwnerRemediatedCondition), - conditions.TrueCondition(clusterv1.MachineHealthCheckSucceededCondition), - conditions.TrueCondition(clusterv1.ReadyCondition), + v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), + v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyCondition), + v1beta1conditions.TrueCondition(clusterv1.MachineOwnerRemediatedCondition), + v1beta1conditions.TrueCondition(clusterv1.MachineHealthCheckSucceededCondition), + v1beta1conditions.TrueCondition(clusterv1.ReadyCondition), }, }, { @@ -1121,7 +1121,7 @@ func TestMachineConditions(t *testing.T) { }) }, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, }, { @@ -1129,8 +1129,8 @@ func TestMachineConditions(t *testing.T) { infraReady: false, bootstrapReady: true, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, 
clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), }, }, { @@ -1148,7 +1148,7 @@ func TestMachineConditions(t *testing.T) { }) }, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.BootstrapReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, }, { @@ -1156,8 +1156,8 @@ func TestMachineConditions(t *testing.T) { infraReady: true, bootstrapReady: false, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.BootstrapReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), }, }, // Assert summary conditions @@ -1167,7 +1167,7 @@ func TestMachineConditions(t *testing.T) { infraReady: false, bootstrapReady: false, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), }, }, { @@ -1175,10 +1175,10 @@ func TestMachineConditions(t *testing.T) { infraReady: true, bootstrapReady: true, beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { - conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed") + v1beta1conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed") }, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed"), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed"), }, }, { @@ -1186,10 +1186,10 @@ func TestMachineConditions(t *testing.T) { infraReady: true, bootstrapReady: true, beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { - conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") }, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, ""), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.NodeNotFoundReason, 
clusterv1.ConditionSeverityWarning, ""), }, }, { @@ -1206,10 +1206,10 @@ func TestMachineConditions(t *testing.T) { }}, wantErr: true, conditionsToAssert: []*clusterv1.Condition{ - conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), - conditions.TrueCondition(clusterv1.BootstrapReadyCondition), - conditions.TrueCondition(clusterv1.ReadyCondition), - conditions.UnknownCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID"), + v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), + v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyCondition), + v1beta1conditions.TrueCondition(clusterv1.ReadyCondition), + v1beta1conditions.UnknownCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID"), }, }, { @@ -1217,10 +1217,10 @@ func TestMachineConditions(t *testing.T) { infraReady: true, bootstrapReady: true, beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { - conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "") }, conditionsToAssert: []*clusterv1.Condition{ - conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, ""), + v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, ""), }, }, } @@ -1736,7 +1736,7 @@ func TestDrainNode(t *testing.T) { g.Expect(err.Error()).To(BeComparableTo(tt.wantErr)) } - gotCondition := conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) + gotCondition := v1beta1conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) if tt.wantCondition == nil { g.Expect(gotCondition).To(BeNil()) } else { @@ -1858,7 +1858,7 @@ func TestDrainNode_withCaching(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(res).To(BeComparableTo(ctrl.Result{RequeueAfter: drainRetryInterval})) // Condition should report the one Pod that has been evicted. - gotCondition := conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) + gotCondition := v1beta1conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) g.Expect(gotCondition).ToNot(BeNil()) // Cleanup for easier comparison gotCondition.LastTransitionTime = metav1.Time{} @@ -3498,16 +3498,16 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { // adds a condition list to an external object. func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { existingConditions := clusterv1.Conditions{} - if cs := conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { + if cs := v1beta1conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { existingConditions = cs } existingConditions = append(existingConditions, newConditions...) - conditions.UnstructuredSetter(u).SetConditions(existingConditions) + v1beta1conditions.UnstructuredSetter(u).SetConditions(existingConditions) } // asserts the conditions set on the Getter object. // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). 
-func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clusterv1.Condition) { +func assertConditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { t.Helper() for _, condition := range conditions { @@ -3518,16 +3518,16 @@ func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clust // asserts whether a condition of type is set on the Getter object // when the condition is true, asserting the reason/severity/message // for the condition are avoided. -func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1.Condition) { +func assertCondition(t *testing.T, from v1beta1conditions.Getter, condition *clusterv1.Condition) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(from, condition.Type)).To(BeTrue()) + g.Expect(v1beta1conditions.Has(from, condition.Type)).To(BeTrue()) if condition.Status == corev1.ConditionTrue { - g.Expect(conditions.IsTrue(from, condition.Type)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(from, condition.Type)).To(BeTrue()) } else { - conditionToBeAsserted := conditions.Get(from, condition.Type) + conditionToBeAsserted := v1beta1conditions.Get(from, condition.Type) g.Expect(conditionToBeAsserted.Status).To(Equal(condition.Status)) g.Expect(conditionToBeAsserted.Severity).To(Equal(condition.Severity)) g.Expect(conditionToBeAsserted.Reason).To(Equal(condition.Reason)) diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 451a566a5ff8..31a84bfdcc1a 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" @@ -202,8 +202,8 @@ type scope struct { func patchMachineDeployment(ctx context.Context, patchHelper *patch.Helper, md *clusterv1.MachineDeployment, options ...patch.Option) error { // Always update the readyCondition by summarizing the state of other conditions. 
- conditions.SetSummary(md, - conditions.WithConditions( + v1beta1conditions.SetSummary(md, + v1beta1conditions.WithConditions( clusterv1.MachineDeploymentAvailableCondition, ), ) diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 53895718c2c0..d20020fe6a9d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -470,7 +470,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { g.Eventually(func() bool { key := client.ObjectKey{Name: deployment.Name, Namespace: deployment.Namespace} g.Expect(env.Get(ctx, key, deployment)).To(Succeed()) - return conditions.IsTrue(deployment, clusterv1.MachineDeploymentAvailableCondition) + return v1beta1conditions.IsTrue(deployment, clusterv1.MachineDeploymentAvailableCondition) }, timeout).Should(BeTrue()) // Validate that the controller set the cluster name label in selector. diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 539214c5f836..87ec03d79068 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" "sigs.k8s.io/cluster-api/internal/util/hash" "sigs.k8s.io/cluster-api/internal/util/ssa" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -489,19 +489,19 @@ func (r *Reconciler) syncDeploymentStatus(allMSs []*clusterv1.MachineSet, newMS } if availableReplicas >= minReplicasNeeded { // NOTE: The structure of calculateStatus() does not allow us to update the machinedeployment directly, we can only update the status obj it returns. Ideally, we should change calculateStatus() --> updateStatus() to be consistent with the rest of the code base, until then, we update conditions here. - conditions.MarkTrue(md, clusterv1.MachineDeploymentAvailableCondition) + v1beta1conditions.MarkTrue(md, clusterv1.MachineDeploymentAvailableCondition) } else { - conditions.MarkFalse(md, clusterv1.MachineDeploymentAvailableCondition, clusterv1.WaitingForAvailableMachinesReason, clusterv1.ConditionSeverityWarning, "Minimum availability requires %d replicas, current %d available", minReplicasNeeded, md.Status.AvailableReplicas) + v1beta1conditions.MarkFalse(md, clusterv1.MachineDeploymentAvailableCondition, clusterv1.WaitingForAvailableMachinesReason, clusterv1.ConditionSeverityWarning, "Minimum availability requires %d replicas, current %d available", minReplicasNeeded, md.Status.AvailableReplicas) } if newMS != nil { // Report a summary of current status of the MachineSet object owned by this MachineDeployment. 
- conditions.SetMirror(md, clusterv1.MachineSetReadyCondition, + v1beta1conditions.SetMirror(md, clusterv1.MachineSetReadyCondition, newMS, - conditions.WithFallbackValue(false, clusterv1.WaitingForMachineSetFallbackReason, clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.WithFallbackValue(false, clusterv1.WaitingForMachineSetFallbackReason, clusterv1.ConditionSeverityInfo, ""), ) } else { - conditions.MarkFalse(md, clusterv1.MachineSetReadyCondition, clusterv1.WaitingForMachineSetFallbackReason, clusterv1.ConditionSeverityInfo, "MachineSet not found") + v1beta1conditions.MarkFalse(md, clusterv1.MachineSetReadyCondition, clusterv1.WaitingForMachineSetFallbackReason, clusterv1.ConditionSeverityInfo, "MachineSet not found") } // Set v1beta replica counters on MD status. diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index c4fac8757f2a..bfc0513110a0 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -37,7 +37,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestCalculateStatus(t *testing.T) { @@ -886,7 +886,7 @@ func assertMachineSet(g *WithT, actualMS *clusterv1.MachineSet, expectedMS *clus // asserts the conditions set on the Getter object. // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). -func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clusterv1.Condition) { +func assertConditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { t.Helper() for _, condition := range conditions { @@ -897,16 +897,16 @@ func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clust // asserts whether a condition of type is set on the Getter object // when the condition is true, asserting the reason/severity/message // for the condition are avoided. 
-func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1.Condition) { +func assertCondition(t *testing.T, from v1beta1conditions.Getter, condition *clusterv1.Condition) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(from, condition.Type)).To(BeTrue()) + g.Expect(v1beta1conditions.Has(from, condition.Type)).To(BeTrue()) if condition.Status == corev1.ConditionTrue { - conditions.IsTrue(from, condition.Type) + g.Expect(v1beta1conditions.IsTrue(from, condition.Type)).To(BeTrue()) } else { - conditionToBeAsserted := conditions.Get(from, condition.Type) + conditionToBeAsserted := v1beta1conditions.Get(from, condition.Type) g.Expect(conditionToBeAsserted.Status).To(Equal(condition.Status)) g.Expect(conditionToBeAsserted.Severity).To(Equal(condition.Severity)) g.Expect(conditionToBeAsserted.Reason).To(Equal(condition.Reason)) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 4a3fc0ea99ad..557112ba5da7 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -51,7 +51,7 @@ import ( "sigs.k8s.io/cluster-api/internal/controllers/machine" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -203,7 +203,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster // If the cluster is already initialized, get the remote cluster cache to use as a client.Reader. 
var remoteClient client.Client - if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { var err error remoteClient, err = r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { @@ -276,7 +276,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster // Remediation not allowed, the number of not started or unhealthy machines either exceeds maxUnhealthy (or) not within unhealthyRange m.Status.RemediationsAllowed = 0 - conditions.Set(m, &clusterv1.Condition{ + v1beta1conditions.Set(m, &clusterv1.Condition{ Type: clusterv1.RemediationAllowedCondition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityWarning, @@ -342,7 +342,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster // Remediation is allowed so unhealthyMachineCount is within unhealthyRange (or) maxUnhealthy - unhealthyMachineCount >= 0 m.Status.RemediationsAllowed = remediationCount - conditions.MarkTrue(m, clusterv1.RemediationAllowedCondition) + v1beta1conditions.MarkTrue(m, clusterv1.RemediationAllowedCondition) v1beta2conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineHealthCheckRemediationAllowedV1Beta2Condition, @@ -418,7 +418,8 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg errList := []error{} for _, t := range unhealthy { logger := logger.WithValues("Machine", klog.KObj(t.Machine), "Node", klog.KObj(t.Node)) - condition := conditions.Get(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) + // TODO (v1beta2): test for v1beta2 conditions + condition := v1beta1conditions.Get(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) if annotations.IsPaused(cluster, t.Machine) { logger.Info("Machine has failed health check, but machine is paused so skipping remediation", "reason", condition.Reason, "message", condition.Message) @@ -439,7 +440,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg from, err := external.Get(ctx, r.Client, m.Spec.RemediationTemplate) if err != nil { - conditions.MarkFalse(m, clusterv1.ExternalRemediationTemplateAvailableCondition, clusterv1.ExternalRemediationTemplateNotFoundReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(m, clusterv1.ExternalRemediationTemplateAvailableCondition, clusterv1.ExternalRemediationTemplateNotFoundReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineExternallyRemediatedV1Beta2Condition, @@ -475,7 +476,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg logger.Info("Machine has failed health check, creating an external remediation request", "remediation request name", to.GetName(), "reason", condition.Reason, "message", condition.Message) // Create the external clone. 
if err := r.Client.Create(ctx, to); err != nil { - conditions.MarkFalse(m, clusterv1.ExternalRemediationRequestAvailableCondition, clusterv1.ExternalRemediationRequestCreationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(m, clusterv1.ExternalRemediationRequestAvailableCondition, clusterv1.ExternalRemediationRequestCreationFailedReason, clusterv1.ConditionSeverityError, err.Error()) v1beta2conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineExternallyRemediatedV1Beta2Condition, @@ -496,8 +497,9 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg logger.Info("Machine has failed health check, marking for remediation", "reason", condition.Reason, "message", condition.Message) // NOTE: MHC is responsible for creating MachineOwnerRemediatedCondition if missing or to trigger another remediation if the previous one is completed; // instead, if a remediation is in already progress, the remediation owner is responsible for completing the process and MHC should not overwrite the condition. - if !conditions.Has(t.Machine, clusterv1.MachineOwnerRemediatedCondition) || conditions.IsTrue(t.Machine, clusterv1.MachineOwnerRemediatedCondition) { - conditions.MarkFalse(t.Machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.Has(t.Machine, clusterv1.MachineOwnerRemediatedCondition) || v1beta1conditions.IsTrue(t.Machine, clusterv1.MachineOwnerRemediatedCondition) { + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") } if ownerRemediatedCondition := v1beta2conditions.Get(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta2Condition); ownerRemediatedCondition == nil || ownerRemediatedCondition.Status == metav1.ConditionTrue { diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 76da975d2131..b738d7a6275c 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -47,7 +47,7 @@ import ( capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -262,7 +262,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { patchHelper, err := patch.NewHelper(cluster, env.Client) g.Expect(err).ToNot(HaveOccurred()) - conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, "SomeReason", clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, "SomeReason", clusterv1.ConditionSeverityError, "") g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) @@ -552,7 +552,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if 
!v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } @@ -571,10 +571,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { + if !v1beta1conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { continue } @@ -662,7 +662,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } @@ -681,10 +681,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { + if !v1beta1conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { continue } @@ -930,7 +930,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { patchHelper, err := patch.NewHelper(cluster, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) - conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) @@ -1430,7 +1430,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { })).To(Succeed()) for i := range machines.Items { - if conditions.Get(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) != nil { + if v1beta1conditions.Get(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) != nil { unhealthyMachine = machines.Items[i].DeepCopy() } } @@ -1937,7 +1937,7 @@ func assertMachinesNotHealthy(g *WithT, mhc *clusterv1.MachineHealthCheck, expec } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } if c := v1beta2conditions.Get(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta2Condition); c == nil || c.Status != metav1.ConditionFalse { @@ -1962,10 +1962,10 @@ func assertMachinesOwnerRemediated(g *WithT, mhc *clusterv1.MachineHealthCheck, } for i := range machines.Items { - if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { + if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { + if !v1beta1conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { continue } @@ -2451,13 +2451,13 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster { // This is required for MHC to perform checks patchHelper, err := patch.NewHelper(cluster, env.Client) g.Expect(err).ToNot(HaveOccurred()) - conditions.MarkTrue(cluster, 
clusterv1.InfrastructureReadyCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) // Wait for cluster in the cached client to be updated post-patch g.Eventually(func(g Gomega) { g.Expect(env.Get(ctx, util.ObjectKey(cluster), cluster)).To(Succeed()) - g.Expect(conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) }, timeout, 100*time.Millisecond).Should(Succeed()) return cluster @@ -2795,7 +2795,7 @@ func TestPatchTargets(t *testing.T) { machine1 := newTestMachine("machine1", namespace, clusterName, "nodeName", labels) machine1.ResourceVersion = "999" - conditions.MarkTrue(machine1, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkTrue(machine1, clusterv1.MachineHealthCheckSucceededCondition) v1beta2conditions.Set(machine1, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionTrue, @@ -2842,7 +2842,7 @@ func TestPatchTargets(t *testing.T) { // Target with wrong patch helper will fail but the other one will be patched. g.Expect(r.patchUnhealthyTargets(context.TODO(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, defaultCluster, mhc)).ToNot(BeEmpty()) g.Expect(cl.Get(ctx, client.ObjectKey{Name: machine2.Name, Namespace: machine2.Namespace}, machine2)).ToNot(HaveOccurred()) - g.Expect(conditions.Get(machine2, clusterv1.MachineOwnerRemediatedCondition).Status).To(Equal(corev1.ConditionFalse)) + g.Expect(v1beta1conditions.Get(machine2, clusterv1.MachineOwnerRemediatedCondition).Status).To(Equal(corev1.ConditionFalse)) g.Expect(v1beta2conditions.Get(machine2, clusterv1.MachineOwnerRemediatedV1Beta2Condition).Status).To(Equal(metav1.ConditionFalse)) // Target with wrong patch helper will fail but the other one will be patched. diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_status_matcher_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_status_matcher_test.go index 2402521d7817..c5e96f10c07d 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_status_matcher_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_status_matcher_test.go @@ -23,7 +23,7 @@ import ( "github.com/onsi/gomega/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // MatchMachineHealthCheckStatus returns a custom matcher to check equality of clusterv1.MachineHealthCheckStatus. 
@@ -67,7 +67,7 @@ func (m machineHealthCheckStatusMatcher) Match(actual interface{}) (success bool if actualStatus.Deprecated != nil && actualStatus.Deprecated.V1Beta1 != nil { actualConditions = actualStatus.Deprecated.V1Beta1.Conditions } - ok, err = conditions.MatchConditions(mConditions).Match(actualConditions) + ok, err = v1beta1conditions.MatchConditions(mConditions).Match(actualConditions) return ok, err } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index b2a833071c54..4a963cfc01fb 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -33,7 +33,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -87,7 +87,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi now := time.Now() if annotations.HasRemediateMachine(t.Machine) { - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.HasRemediateMachineAnnotationReason, clusterv1.ConditionSeverityWarning, "Marked for remediation via remediate-machine annotation") + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.HasRemediateMachineAnnotationReason, clusterv1.ConditionSeverityWarning, "Marked for remediation via remediate-machine annotation") logger.V(3).Info("Target is marked for remediation via remediate-machine annotation") v1beta2conditions.Set(t.Machine, metav1.Condition{ @@ -100,13 +100,13 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi } if t.Machine.Status.Deprecated != nil && t.Machine.Status.Deprecated.V1Beta1 != nil && t.Machine.Status.Deprecated.V1Beta1.FailureReason != nil { - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "FailureReason: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureReason) + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "FailureReason: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureReason) logger.V(3).Info("Target is unhealthy", "failureReason", t.Machine.Status.Deprecated.V1Beta1.FailureReason) return true, time.Duration(0) } if t.Machine.Status.Deprecated != nil && t.Machine.Status.Deprecated.V1Beta1 != nil && t.Machine.Status.Deprecated.V1Beta1.FailureMessage != nil { - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "FailureMessage: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureMessage) + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "FailureMessage: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureMessage) logger.V(3).Info("Target is unhealthy", "failureMessage", t.Machine.Status.Deprecated.V1Beta1.FailureMessage) return true, time.Duration(0) } @@ -114,7 +114,7 @@ func (t *healthCheckTarget) 
needsRemediation(logger logr.Logger, timeoutForMachi // Machine has Status.NodeRef set, although we couldn't find the node in the workload cluster. if t.nodeMissing { logger.V(3).Info("Target is unhealthy: node is missing") - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") v1beta2conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, @@ -127,14 +127,15 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi // Don't penalize any Machine/Node if the control plane has not been initialized // Exception of this rule are control plane machine itself, so the first control plane machine can be remediated. - if !conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedCondition) && !util.IsControlPlaneMachine(t.Machine) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedCondition) && !util.IsControlPlaneMachine(t.Machine) { logger.V(5).Info("Not evaluating target health because the control plane has not yet been initialized") // Return a nextCheck time of 0 because we'll get requeued when the Cluster is updated. return false, 0 } // Don't penalize any Machine/Node if the cluster infrastructure is not ready. - if !conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyCondition) { + if !v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyCondition) { logger.V(5).Info("Not evaluating target health because the cluster infrastructure is not ready") // Return a nextCheck time of 0 because we'll get requeued when the Cluster is updated. return false, 0 @@ -148,9 +149,9 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi return false, 0 } - controlPlaneInitialized := conditions.GetLastTransitionTime(t.Cluster, clusterv1.ControlPlaneInitializedCondition) - clusterInfraReady := conditions.GetLastTransitionTime(t.Cluster, clusterv1.InfrastructureReadyCondition) - machineInfraReady := conditions.GetLastTransitionTime(t.Machine, clusterv1.InfrastructureReadyCondition) + controlPlaneInitialized := v1beta1conditions.GetLastTransitionTime(t.Cluster, clusterv1.ControlPlaneInitializedCondition) + clusterInfraReady := v1beta1conditions.GetLastTransitionTime(t.Cluster, clusterv1.InfrastructureReadyCondition) + machineInfraReady := v1beta1conditions.GetLastTransitionTime(t.Machine, clusterv1.InfrastructureReadyCondition) machineCreationTime := t.Machine.CreationTimestamp.Time // Use the latest of the following timestamps. 
@@ -161,20 +162,20 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi "controlPlaneInitializedTime", controlPlaneInitialized, "machineInfraReadyTime", machineInfraReady, ) - if conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedCondition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedCondition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { comparisonTime = controlPlaneInitialized.Time } - if conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyCondition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyCondition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { comparisonTime = clusterInfraReady.Time } - if conditions.IsTrue(t.Machine, clusterv1.InfrastructureReadyCondition) && machineInfraReady != nil && machineInfraReady.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Machine, clusterv1.InfrastructureReadyCondition) && machineInfraReady != nil && machineInfraReady.Time.After(comparisonTime) { comparisonTime = machineInfraReady.Time } logger.V(5).Info("Using comparison time", "time", comparisonTime) timeoutDuration := timeoutForMachineToHaveNode.Duration if comparisonTime.Add(timeoutForMachineToHaveNode.Duration).Before(now) { - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeStartupTimeoutReason, clusterv1.ConditionSeverityWarning, "Node failed to report startup in %s", timeoutDuration) + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeStartupTimeoutReason, clusterv1.ConditionSeverityWarning, "Node failed to report startup in %s", timeoutDuration) logger.V(3).Info("Target is unhealthy: machine has no node", "duration", timeoutDuration) v1beta2conditions.Set(t.Machine, metav1.Condition{ @@ -205,7 +206,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi // If the condition has been in the unhealthy state for longer than the // timeout, return true with no requeue time. 
if nodeCondition.LastTransitionTime.Add(c.Timeout.Duration).Before(now) { - conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.UnhealthyNodeConditionReason, clusterv1.ConditionSeverityWarning, "Condition %s on node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()) + v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.UnhealthyNodeConditionReason, clusterv1.ConditionSeverityWarning, "Condition %s on node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()) logger.V(3).Info("Target is unhealthy: condition is in state longer than allowed timeout", "condition", c.Type, "state", c.Status, "timeout", c.Timeout.Duration.String()) v1beta2conditions.Set(t.Machine, metav1.Condition{ @@ -346,7 +347,7 @@ func (r *Reconciler) healthCheckTargets(targets []healthCheckTarget, logger logr } if t.Machine.DeletionTimestamp.IsZero() && t.Node != nil { - conditions.MarkTrue(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkTrue(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) v1beta2conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index 3677ddea3f1a..de63d93a190c 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -31,7 +31,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/errors" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -195,8 +195,8 @@ func TestHealthCheckTargets(t *testing.T) { Name: clusterName, }, } - conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) // Ensure the control plane was initialized earlier to prevent it interfering with // NodeStartupTimeout testing. @@ -689,7 +689,7 @@ func newTestUnhealthyNode(name string, condition corev1.NodeConditionType, statu } func newFailedHealthCheckCondition(reason string, messageFormat string, messageArgs ...interface{}) clusterv1.Condition { - return *conditions.FalseCondition(clusterv1.MachineHealthCheckSucceededCondition, reason, clusterv1.ConditionSeverityWarning, messageFormat, messageArgs...) + return *v1beta1conditions.FalseCondition(clusterv1.MachineHealthCheckSucceededCondition, reason, clusterv1.ConditionSeverityWarning, messageFormat, messageArgs...) 
} func newFailedHealthCheckV1Beta2Condition(reason string, messageFormat string, messageArgs ...interface{}) metav1.Condition { diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 9d302a63c3e0..30f57e00ac9f 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -56,7 +56,7 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/finalizers" @@ -306,8 +306,8 @@ func doReconcile(ctx context.Context, s *scope, phases []machineSetReconcileFunc func patchMachineSet(ctx context.Context, patchHelper *patch.Helper, machineSet *clusterv1.MachineSet) error { // Always update the readyCondition by summarizing the state of other conditions. - conditions.SetSummary(machineSet, - conditions.WithConditions( + v1beta1conditions.SetSummary(machineSet, + v1beta1conditions.WithConditions( clusterv1.MachinesCreatedCondition, clusterv1.ResizedCondition, clusterv1.MachinesReadyCondition, @@ -688,7 +688,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, s *scope) (ctrl.Result, e } s.scaleUpPreflightCheckErrMessages = preflightCheckErrMessages - conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.PreflightCheckFailedReason, clusterv1.ConditionSeverityError, strings.Join(preflightCheckErrMessages, "; ")) + v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.PreflightCheckFailedReason, clusterv1.ConditionSeverityError, strings.Join(preflightCheckErrMessages, "; ")) if err != nil { return ctrl.Result{}, err } @@ -731,7 +731,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, s *scope) (ctrl.Result, e }, }) if err != nil { - conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.BootstrapTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.BootstrapTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, errors.Wrapf(err, "failed to clone bootstrap configuration from %s %s while creating a machine", ms.Spec.Template.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(ms.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace, ms.Spec.Template.Spec.Bootstrap.ConfigRef.Name)) @@ -764,7 +764,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, s *scope) (ctrl.Result, e deleteErr = errors.Wrapf(err, "failed to cleanup %s %s after %s creation failed", bootstrapRef.Kind, klog.KRef(bootstrapRef.Namespace, bootstrapRef.Name), (&ms.Spec.Template.Spec.InfrastructureRef).Kind) } } - conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.InfrastructureTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.InfrastructureTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, kerrors.NewAggregate([]error{errors.Wrapf(err, "failed to clone infrastructure machine from %s %s while creating a machine", ms.Spec.Template.Spec.InfrastructureRef.Kind, 
klog.KRef(ms.Spec.Template.Spec.InfrastructureRef.Namespace, ms.Spec.Template.Spec.InfrastructureRef.Name)), deleteErr}) @@ -777,7 +777,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, s *scope) (ctrl.Result, e log.Error(err, "Error while creating a machine") r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create machine: %v", err) errs = append(errs, err) - conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.MachineCreationFailedReason, + v1beta1conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.MachineCreationFailedReason, clusterv1.ConditionSeverityError, err.Error()) // Try to cleanup the external objects if the Machine creation failed. @@ -1253,30 +1253,30 @@ func (r *Reconciler) reconcileStatus(ctx context.Context, s *scope) error { switch { // We are scaling up case newStatus.Replicas < desiredReplicas: - conditions.MarkFalse(ms, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachineSet to %d replicas (actual %d)", desiredReplicas, newStatus.Replicas) + v1beta1conditions.MarkFalse(ms, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachineSet to %d replicas (actual %d)", desiredReplicas, newStatus.Replicas) // We are scaling down case newStatus.Replicas > desiredReplicas: - conditions.MarkFalse(ms, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachineSet to %d replicas (actual %d)", desiredReplicas, newStatus.Replicas) + v1beta1conditions.MarkFalse(ms, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachineSet to %d replicas (actual %d)", desiredReplicas, newStatus.Replicas) // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(ms, clusterv1.MachinesCreatedCondition) + v1beta1conditions.MarkTrue(ms, clusterv1.MachinesCreatedCondition) default: // Make sure last resize operation is marked as completed. // NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). This convention is also used by KCP. // TODO (v1beta2) Use new replica counters if newStatus.Deprecated.V1Beta1.ReadyReplicas == newStatus.Replicas { - if conditions.IsFalse(ms, clusterv1.ResizedCondition) { + if v1beta1conditions.IsFalse(ms, clusterv1.ResizedCondition) { log.Info("All the replicas are ready", "replicas", newStatus.Deprecated.V1Beta1.ReadyReplicas) } - conditions.MarkTrue(ms, clusterv1.ResizedCondition) + v1beta1conditions.MarkTrue(ms, clusterv1.ResizedCondition) } // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(ms, clusterv1.MachinesCreatedCondition) + v1beta1conditions.MarkTrue(ms, clusterv1.MachinesCreatedCondition) } // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. 
- conditions.SetAggregate(ms, clusterv1.MachinesReadyCondition, collections.FromMachines(filteredMachines...).ConditionGetters(), conditions.AddSourceRef()) + v1beta1conditions.SetAggregate(ms, clusterv1.MachinesReadyCondition, collections.FromMachines(filteredMachines...).ConditionGetters(), v1beta1conditions.AddSourceRef()) return nil } @@ -1338,7 +1338,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( continue } - shouldCleanup := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) + shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) shouldCleanupV1Beta2 := v1beta2conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && v1beta2conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) if !(shouldCleanup || shouldCleanupV1Beta2) { @@ -1352,7 +1352,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( } if shouldCleanup { - conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) + v1beta1conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) } if shouldCleanupV1Beta2 { @@ -1418,9 +1418,10 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( // reports that remediation has been completed and the Machine has been deleted. for _, m := range machines { if !m.DeletionTimestamp.IsZero() { + // TODO (v1beta2): test for v1beta2 conditions // TODO: Check for Status: False and Reason: MachineSetMachineRemediationMachineDeletingV1Beta2Reason // instead when starting to use v1beta2 conditions for control flow. - if conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition) { + if v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition) { // Remediation for this Machine has been triggered by this controller but it is still in flight, // i.e. it still goes through the deletion workflow and exists in etcd. 
maxInFlight-- @@ -1546,7 +1547,7 @@ func patchMachineConditions(ctx context.Context, c client.Client, machines []*cl } if condition != nil { - conditions.Set(m, condition) + v1beta1conditions.Set(m, condition) } v1beta2conditions.Set(m, v1beta2Condition) diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index fc9520f35b23..04c84b15e92e 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -47,7 +47,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -430,7 +430,7 @@ func TestMachineSetReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(instance, clusterv1.MachinesCreatedCondition) + return v1beta1conditions.IsTrue(instance, clusterv1.MachinesCreatedCondition) }, timeout).Should(BeTrue()) t.Log("Verifying MachineSet has ResizedCondition") @@ -439,7 +439,7 @@ func TestMachineSetReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(instance, clusterv1.ResizedCondition) + return v1beta1conditions.IsTrue(instance, clusterv1.ResizedCondition) }, timeout).Should(BeTrue()) t.Log("Verifying MachineSet has MachinesReadyCondition") @@ -448,7 +448,7 @@ func TestMachineSetReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(instance, clusterv1.MachinesReadyCondition) + return v1beta1conditions.IsTrue(instance, clusterv1.MachinesReadyCondition) }, timeout).Should(BeTrue()) // Validate that the controller set the cluster name label in selector. 
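The summary and aggregation helpers touched in machineset_controller.go above follow the same rename. A short sketch of those two calls under the new alias, assuming the imports shown in that file; ms and filteredMachines are hypothetical placeholders standing in for the controller's scope:

func patchSummarySketch(ms *clusterv1.MachineSet, filteredMachines []*clusterv1.Machine) {
    // Roll the individual conditions up into the MachineSet's ready condition.
    v1beta1conditions.SetSummary(ms,
        v1beta1conditions.WithConditions(
            clusterv1.MachinesCreatedCondition,
            clusterv1.ResizedCondition,
            clusterv1.MachinesReadyCondition,
        ),
    )
    // Aggregate per-Machine readiness, keeping a reason@machine/name source ref.
    v1beta1conditions.SetAggregate(ms, clusterv1.MachinesReadyCondition,
        collections.FromMachines(filteredMachines...).ConditionGetters(),
        v1beta1conditions.AddSourceRef(),
    )
}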
@@ -995,7 +995,7 @@ func TestMachineSetReconcile_MachinesCreatedConditionFalseOnBadInfraRef(t *testi _, err := msr.Reconcile(ctx, request) g.Expect(err).To(HaveOccurred()) g.Expect(fakeClient.Get(ctx, key, ms)).To(Succeed()) - gotCond := conditions.Get(ms, clusterv1.MachinesCreatedCondition) + gotCond := v1beta1conditions.Get(ms, clusterv1.MachinesCreatedCondition) g.Expect(gotCond).ToNot(BeNil()) g.Expect(gotCond.Status).To(Equal(corev1.ConditionFalse)) g.Expect(gotCond.Reason).To(Equal(clusterv1.InfrastructureTemplateCloningFailedReason)) @@ -1059,7 +1059,7 @@ func TestMachineSetReconciler_updateStatusResizedCondition(t *testing.T) { } setReplicas(ctx, s.machineSet, s.machines, tc.machines != nil) g.Expect(msr.reconcileStatus(ctx, s)).To(Succeed()) - gotCond := conditions.Get(tc.machineSet, clusterv1.ResizedCondition) + gotCond := v1beta1conditions.Get(tc.machineSet, clusterv1.ResizedCondition) g.Expect(gotCond).ToNot(BeNil()) g.Expect(gotCond.Status).To(Equal(corev1.ConditionFalse)) g.Expect(gotCond.Reason).To(Equal(tc.expectedReason)) @@ -1606,7 +1606,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m := &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeFalse()) - g.Expect(conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ @@ -1620,7 +1620,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).Should(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeFalse()) + g.Expect(v1beta1conditions.Has(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeFalse()) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) @@ -1739,9 +1739,9 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m := &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := conditions.Get(m, condition) + machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) g.Expect(machineOwnerRemediatedCondition.Reason). @@ -1759,7 +1759,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). 
To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) @@ -1921,9 +1921,9 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify that no action was taken on the Machine: MachineOwnerRemediated should be false // and the Machine wasn't deleted. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := conditions.Get(m, condition) + machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) g.Expect(unhealthyMachine.DeletionTimestamp).Should(BeZero()) @@ -1940,7 +1940,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) @@ -1958,7 +1958,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify the unhealthy machine has been deleted. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeFalse()) - g.Expect(conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) c = v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ @@ -1972,7 +1972,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) @@ -2143,9 +2143,9 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { if i < total-maxInFlight { // Machines before the maxInFlight should not be deleted. g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := conditions.Get(m, condition) + machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). 
To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) @@ -2166,7 +2166,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { m := &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) @@ -2210,9 +2210,9 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { if i < total-(maxInFlight*2) { // Machines before the maxInFlight*2 should not be deleted, and should have the remediated condition to false. g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := conditions.Get(m, condition) + machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) @@ -2227,9 +2227,9 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { } else if i < total-maxInFlight { // Machines before the maxInFlight should have a deletion timestamp g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := conditions.Get(m, condition) + machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionTrue), "%s condition status should be true", condition) c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) @@ -2257,7 +2257,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify (again) the healthy machine is not deleted and does not have the OwnerRemediated condition. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) @@ -2280,7 +2280,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify (again) the healthy machine is not deleted and does not have the OwnerRemediated condition. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). 
To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) @@ -2306,7 +2306,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify (again) the healthy machine is not deleted and does not have the OwnerRemediated condition. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) - g.Expect(conditions.Has(m, condition)). + g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) @@ -2359,9 +2359,9 @@ func TestMachineSetReconciler_syncReplicas(t *testing.T) { // Verify the proper condition is set on the MachineSet. condition := clusterv1.MachinesCreatedCondition - g.Expect(conditions.Has(machineSet, condition)). + g.Expect(v1beta1conditions.Has(machineSet, condition)). To(BeTrue(), "MachineSet should have the %s condition set", condition) - machinesCreatedCondition := conditions.Get(machineSet, condition) + machinesCreatedCondition := v1beta1conditions.Get(machineSet, condition) g.Expect(machinesCreatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be %s", condition, corev1.ConditionFalse) g.Expect(machinesCreatedCondition.Reason). @@ -2497,9 +2497,9 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { // Verify the proper condition is set on the MachineSet. condition := clusterv1.MachinesCreatedCondition - g.Expect(conditions.Has(machineSet, condition)).To(BeTrue(), "MachineSet should have the %s condition set", condition) + g.Expect(v1beta1conditions.Has(machineSet, condition)).To(BeTrue(), "MachineSet should have the %s condition set", condition) - machinesCreatedCondition := conditions.Get(machineSet, condition) + machinesCreatedCondition := v1beta1conditions.Get(machineSet, condition) g.Expect(machinesCreatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be %s", condition, corev1.ConditionFalse) g.Expect(machinesCreatedCondition.Reason). diff --git a/internal/controllers/machineset/machineset_delete_policy.go b/internal/controllers/machineset/machineset_delete_policy.go index 25dc5ad5da4a..fb6b87cb1ad0 100644 --- a/internal/controllers/machineset/machineset_delete_policy.go +++ b/internal/controllers/machineset/machineset_delete_policy.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) type ( @@ -147,11 +147,12 @@ func isMachineHealthy(machine *clusterv1.Machine) bool { return false } // Note: for the sake of prioritization, we are not making any assumption about Health when ConditionUnknown. 
- nodeHealthyCondition := conditions.Get(machine, clusterv1.MachineNodeHealthyCondition) + // TODO (v1beta2): test for v1beta2 conditions + nodeHealthyCondition := v1beta1conditions.Get(machine, clusterv1.MachineNodeHealthyCondition) if nodeHealthyCondition != nil && nodeHealthyCondition.Status == corev1.ConditionFalse { return false } - healthCheckCondition := conditions.Get(machine, clusterv1.MachineHealthCheckSucceededCondition) + healthCheckCondition := v1beta1conditions.Get(machine, clusterv1.MachineHealthCheckSucceededCondition) if healthCheckCondition != nil && healthCheckCondition.Status == corev1.ConditionFalse { return false } diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index 26169d53f7cb..d64737bd27dd 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -62,7 +62,7 @@ import ( "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -350,8 +350,9 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result // is not up to date. // Note: This doesn't require requeue as a change to ClusterClass observedGeneration will cause an additional reconcile // in the Cluster. - if !conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) || - conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) || + v1beta1conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) { return ctrl.Result{}, errors.Errorf("ClusterClass is not successfully reconciled: status of %s condition on ClusterClass must be \"True\"", clusterv1.ClusterClassVariablesReconciledCondition) } if clusterClass.GetGeneration() != clusterClass.Status.ObservedGeneration { diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 208e0ecee572..9684e6874c59 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -930,7 +930,7 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) } func assertClusterTopologyReconciledCondition(cluster *clusterv1.Cluster) error { - if !conditions.Has(cluster, clusterv1.TopologyReconciledCondition) { + if !v1beta1conditions.Has(cluster, clusterv1.TopologyReconciledCondition) { return fmt.Errorf("cluster should have the TopologyReconciled condition set") } return nil @@ -1314,7 +1314,7 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }). 
- WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), initialCluster: clusterBuilder.DeepCopy(). Build(), @@ -1342,7 +1342,7 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }). - WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), initialCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). @@ -1398,7 +1398,7 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }...). - WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), initialCluster: clusterBuilder.DeepCopy(). WithTopology(topologyBase.DeepCopy(). @@ -1505,7 +1505,7 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), cluster: clusterBuilder.DeepCopy(). Build(), @@ -1523,7 +1523,7 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), cluster: clusterBuilder. Build(), @@ -1541,7 +1541,7 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*conditions.FalseCondition(clusterv1.ClusterClassVariablesReconciledCondition, clusterv1.VariableDiscoveryFailedReason, clusterv1.ConditionSeverityError, "error message")). + WithConditions(*v1beta1conditions.FalseCondition(clusterv1.ClusterClassVariablesReconciledCondition, clusterv1.VariableDiscoveryFailedReason, clusterv1.ConditionSeverityError, "error message")). Build(), cluster: clusterBuilder. Build(), @@ -1560,7 +1560,7 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). + WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)). Build(), cluster: clusterBuilder.WithTopology( builder.ClusterTopology().DeepCopy(). 
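The test-side constructors follow the same pattern: conditions are built with TrueCondition/FalseCondition and attached via the builders or MarkTrue, then read back with Get. A compact sketch assuming the same imports as the tests above; cc and the returned values are hypothetical, not objects from these tests:

func clusterClassConditionSketch(cc *clusterv1.ClusterClass) (clusterv1.Condition, clusterv1.Condition) {
    // Mark the ClusterClass as having its variables reconciled (the state the webhook checks for).
    v1beta1conditions.MarkTrue(cc, clusterv1.ClusterClassVariablesReconciledCondition)
    // Constructors return *clusterv1.Condition; the builders above take dereferenced values.
    reconciled := *v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledCondition)
    failed := *v1beta1conditions.FalseCondition(clusterv1.ClusterClassVariablesReconciledCondition,
        clusterv1.VariableDiscoveryFailedReason, clusterv1.ConditionSeverityError, "error message")
    return reconciled, failed
}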
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go index 11d616074487..44886ecb42ca 100644 --- a/internal/controllers/topology/cluster/conditions.go +++ b/internal/controllers/topology/cluster/conditions.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/cluster-api/exp/topology/scope" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -56,8 +56,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste if annotations.HasPaused(cluster) { messages = append(messages, "Cluster has the cluster.x-k8s.io/paused annotation") } - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, clusterv1.TopologyReconciledPausedReason, clusterv1.ConditionSeverityInfo, @@ -75,8 +75,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste // Mark TopologyReconciled as false due to cluster deletion. if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, @@ -95,8 +95,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste // If an error occurred during reconciliation set the TopologyReconciled condition to false. // Add the error message from the reconcile function to the message of the condition. if reconcileErr != nil { - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, clusterv1.TopologyReconcileFailedReason, clusterv1.ConditionSeverityError, @@ -118,8 +118,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste // is not up to date. if s.Blueprint != nil && s.Blueprint.ClusterClass != nil && s.Blueprint.ClusterClass.GetGeneration() != s.Blueprint.ClusterClass.Status.ObservedGeneration { - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, clusterv1.TopologyReconciledClusterClassNotReconciledReason, clusterv1.ConditionSeverityInfo, @@ -140,8 +140,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste // If any of the lifecycle hooks are blocking any part of the reconciliation then topology // is not considered as fully reconciled. 
if s.HookResponseTracker.AggregateRetryAfter() != 0 { - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, clusterv1.TopologyReconciledHookBlockingReason, clusterv1.ConditionSeverityInfo, @@ -249,8 +249,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste ) } - conditions.Set(cluster, - conditions.FalseCondition( + v1beta1conditions.Set(cluster, + v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledCondition, reason, clusterv1.ConditionSeverityInfo, @@ -269,8 +269,8 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste // If there are no errors while reconciling and if the topology is not holding out changes // we can consider that spec of all the objects is reconciled to match the topology. Set the // TopologyReconciled condition to true. - conditions.Set(cluster, - conditions.TrueCondition(clusterv1.TopologyReconciledCondition), + v1beta1conditions.Set(cluster, + v1beta1conditions.TrueCondition(clusterv1.TopologyReconciledCondition), ) v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index ae4e0f7941f2..4ae3b8ce895b 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -34,7 +34,7 @@ import ( runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/exp/topology/scope" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -978,7 +978,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { } else { g.Expect(err).ToNot(HaveOccurred()) - actualCondition := conditions.Get(tt.cluster, clusterv1.TopologyReconciledCondition) + actualCondition := v1beta1conditions.Get(tt.cluster, clusterv1.TopologyReconciledCondition) g.Expect(actualCondition).ToNot(BeNil()) g.Expect(actualCondition.Status).To(BeEquivalentTo(tt.wantConditionStatus)) g.Expect(actualCondition.Reason).To(BeEquivalentTo(tt.wantConditionReason)) diff --git a/internal/util/tree/tree_test.go b/internal/util/tree/tree_test.go index 4a9d2eee7e22..cf554d9cacea 100644 --- a/internal/util/tree/tree_test.go +++ b/internal/util/tree/tree_test.go @@ -32,7 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/tree" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func Test_getRowName(t *testing.T) { @@ -88,27 +88,27 @@ func Test_newConditionDescriptor_readyColor(t *testing.T) { }{ { name: "True condition should be green", - condition: conditions.TrueCondition("C"), + condition: v1beta1conditions.TrueCondition("C"), expectReadyColor: green, }, { name: "Unknown condition should be white", - condition: conditions.UnknownCondition("C", "", ""), + condition: v1beta1conditions.UnknownCondition("C", "", ""), expectReadyColor: white, }, { name: "False condition, severity error should be red", - condition: conditions.FalseCondition("C", "", clusterv1.ConditionSeverityError, ""), + condition: 
v1beta1conditions.FalseCondition("C", "", clusterv1.ConditionSeverityError, ""), expectReadyColor: red, }, { name: "False condition, severity warning should be yellow", - condition: conditions.FalseCondition("C", "", clusterv1.ConditionSeverityWarning, ""), + condition: v1beta1conditions.FalseCondition("C", "", clusterv1.ConditionSeverityWarning, ""), expectReadyColor: yellow, }, { name: "False condition, severity info should be white", - condition: conditions.FalseCondition("C", "", clusterv1.ConditionSeverityInfo, ""), + condition: v1beta1conditions.FalseCondition("C", "", clusterv1.ConditionSeverityInfo, ""), expectReadyColor: white, }, { @@ -134,12 +134,12 @@ func Test_newConditionDescriptor_truncateMessages(t *testing.T) { }{ { name: "Short messages are not changed", - condition: conditions.UnknownCondition("C", "", "short message"), + condition: v1beta1conditions.UnknownCondition("C", "", "short message"), expectMessage: "short message", }, { name: "Long message are truncated", - condition: conditions.UnknownCondition("C", "", strings.Repeat("s", 150)), + condition: v1beta1conditions.UnknownCondition("C", "", strings.Repeat("s", 150)), expectMessage: fmt.Sprintf("%s ...", strings.Repeat("s", 100)), }, } @@ -215,13 +215,13 @@ func Test_TreePrefix(t *testing.T) { o1 := fakeObject("child1", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(conditions.TrueCondition("C1.1")), - withCondition(conditions.TrueCondition("C1.2")), + withCondition(v1beta1conditions.TrueCondition("C1.1")), + withCondition(v1beta1conditions.TrueCondition("C1.2")), ) o2 := fakeObject("child2", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(conditions.TrueCondition("C2.1")), - withCondition(conditions.TrueCondition("C2.2")), + withCondition(v1beta1conditions.TrueCondition("C2.1")), + withCondition(v1beta1conditions.TrueCondition("C2.2")), ) obectjTree.Add(root, o1) obectjTree.Add(root, o2) @@ -245,15 +245,15 @@ func Test_TreePrefix(t *testing.T) { o1 := fakeObject("child1", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(conditions.TrueCondition("C1.1")), - withCondition(conditions.TrueCondition("C1.2")), + withCondition(v1beta1conditions.TrueCondition("C1.1")), + withCondition(v1beta1conditions.TrueCondition("C1.2")), ) o1_1 := fakeObject("child1.1") o2 := fakeObject("child2", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(conditions.TrueCondition("C2.1")), - withCondition(conditions.TrueCondition("C2.2")), + withCondition(v1beta1conditions.TrueCondition("C2.1")), + withCondition(v1beta1conditions.TrueCondition("C2.2")), ) o2_1 := fakeObject("child2.1") obectjTree.Add(root, o1) @@ -573,8 +573,8 @@ func withAnnotation(name, value string) func(ctrlclient.Object) { func withCondition(c *clusterv1.Condition) func(ctrlclient.Object) { return func(m ctrlclient.Object) { - setter := m.(conditions.Setter) - conditions.Set(setter, c) + setter := m.(v1beta1conditions.Setter) + v1beta1conditions.Set(setter, c) } } diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 63e4c0009e22..02ab5b0b9907 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -44,7 +44,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/topology/check" "sigs.k8s.io/cluster-api/internal/topology/variables" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" 
"sigs.k8s.io/cluster-api/util/version" ) @@ -1000,8 +1000,9 @@ func clusterClassIsReconciled(clusterClass *clusterv1.ClusterClass) error { return errClusterClassNotReconciled } // If the clusterClass does not have ClusterClassVariablesReconciled==True, the ClusterClass has not been successfully reconciled. - if !conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) || - conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) || + v1beta1conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) { return errClusterClassNotReconciled } return nil diff --git a/internal/webhooks/cluster_test.go b/internal/webhooks/cluster_test.go index f698f217d9eb..cd3f13d89cf8 100644 --- a/internal/webhooks/cluster_test.go +++ b/internal/webhooks/cluster_test.go @@ -43,7 +43,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/webhooks/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -100,7 +100,7 @@ func TestClusterTopologyDefaultNamespaces(t *testing.T) { WithControlPlaneInfrastructureMachineTemplate(&unstructured.Unstructured{}). WithWorkerMachineDeploymentClasses(*builder.MachineDeploymentClass("aa").Build()). Build() - conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. This is required because the test uses a Managed Topology. fakeClient := fake.NewClientBuilder(). WithObjects(clusterClass). @@ -1304,7 +1304,7 @@ func TestClusterDefaultAndValidateVariables(t *testing.T) { Build() // Mark this condition to true so the webhook sees the ClusterClass as up to date. - conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) fakeClient := fake.NewClientBuilder(). WithObjects(tt.clusterClass). WithScheme(fakeScheme). @@ -1373,7 +1373,7 @@ func TestClusterDefaultTopologyVersion(t *testing.T) { Build() clusterClass := builder.ClusterClass("fooboo", "foo").Build() - conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. This is required because the test uses a Managed Topology. fakeClient := fake.NewClientBuilder(). WithObjects(clusterClass). @@ -2186,7 +2186,7 @@ func TestClusterTopologyValidation(t *testing.T) { Build() // Mark this condition to true so the webhook sees the ClusterClass as up to date. - conditions.MarkTrue(class, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(class, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). WithObjects(class). @@ -2508,7 +2508,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. 
if tt.classReconciled { - conditions.MarkTrue(tt.class, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(tt.class, clusterv1.ClusterClassVariablesReconciledCondition) } // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). @@ -3040,8 +3040,8 @@ func TestClusterTopologyValidationForTopologyClassChange(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. - conditions.MarkTrue(tt.firstClass, clusterv1.ClusterClassVariablesReconciledCondition) - conditions.MarkTrue(tt.secondClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(tt.firstClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(tt.secondClass, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). @@ -3165,7 +3165,7 @@ func TestMovingBetweenManagedAndUnmanaged(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. - conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). WithObjects(tt.clusterClass, tt.cluster). @@ -3216,7 +3216,7 @@ func TestClusterClassPollingErrors(t *testing.T) { ccFullyReconciled := baseClusterClass.DeepCopy().Build() ccFullyReconciled.Generation = 1 ccFullyReconciled.Status.ObservedGeneration = 1 - conditions.MarkTrue(ccFullyReconciled, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(ccFullyReconciled, clusterv1.ClusterClassVariablesReconciledCondition) // secondFullyReconciled is a second ClusterClass with a matching generation and observed generation, and VariablesReconciled=True. secondFullyReconciled := ccFullyReconciled.DeepCopy() @@ -3226,11 +3226,11 @@ func TestClusterClassPollingErrors(t *testing.T) { ccGenerationMismatch := baseClusterClass.DeepCopy().Build() ccGenerationMismatch.Generation = 999 ccGenerationMismatch.Status.ObservedGeneration = 1 - conditions.MarkTrue(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledCondition) + v1beta1conditions.MarkTrue(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledCondition) // ccVariablesReconciledFalse with VariablesReconciled=False. 
ccVariablesReconciledFalse := baseClusterClass.DeepCopy().Build() - conditions.MarkFalse(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledCondition, "", clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledCondition, "", clusterv1.ConditionSeverityError, "") tests := []struct { name string diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index 86076d6a919b..1f90c64fbd35 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -248,7 +248,8 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl for i := range machineList.Items { machine := &machineList.Items[i] - if !conditions.IsTrue(machine, clusterv1.MachineNodeHealthyCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(machine, clusterv1.MachineNodeHealthyCondition) { return errors.Errorf("machine %q does not have %q condition set to true", machine.GetName(), clusterv1.MachineNodeHealthyCondition) } } @@ -455,8 +456,8 @@ func machineSetPreflightChecksTestHandler(ctx context.Context, c client.Client, MDName: md.Name, Namespace: md.Namespace, }) - g.Expect(conditions.IsFalse(machineSets[0], clusterv1.MachinesCreatedCondition)).To(BeTrue()) - machinesCreatedCondition := conditions.Get(machineSets[0], clusterv1.MachinesCreatedCondition) + g.Expect(v1beta1conditions.IsFalse(machineSets[0], clusterv1.MachinesCreatedCondition)).To(BeTrue()) + machinesCreatedCondition := v1beta1conditions.Get(machineSets[0], clusterv1.MachinesCreatedCondition) g.Expect(machinesCreatedCondition).NotTo(BeNil()) g.Expect(machinesCreatedCondition.Reason).To(Equal(clusterv1.PreflightCheckFailedReason)) g.Expect(machineSets[0].Spec.Replicas).To(Equal(md.Spec.Replicas)) @@ -595,10 +596,10 @@ func beforeClusterUpgradeAnnotationIsBlocking(ctx context.Context, c client.Clie cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ Name: clusterRef.Name, Namespace: clusterRef.Namespace, Getter: c}) - if conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) != clusterv1.TopologyReconciledHookBlockingReason { + if v1beta1conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) != clusterv1.TopologyReconciledHookBlockingReason { return fmt.Errorf("hook %s (via annotation) should lead to LifecycleHookBlocking reason", hookName) } - if !strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), expectedBlockingMessage) { + if !strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), expectedBlockingMessage) { return fmt.Errorf("hook %[1]s (via annotation) should show hook %[1]s is blocking as message with: %[2]s", hookName, expectedBlockingMessage) } @@ -633,7 +634,7 @@ func beforeClusterUpgradeAnnotationIsBlocking(ctx context.Context, c client.Clie cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ Name: clusterRef.Name, Namespace: clusterRef.Namespace, Getter: c}) - if strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), expectedBlockingMessage) { + if 
strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), expectedBlockingMessage) { return fmt.Errorf("hook %s (via annotation %s) should not be blocking anymore with message: %s", hookName, annotation, expectedBlockingMessage) } @@ -751,8 +752,8 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, cluster types. // clusterConditionShowsHookBlocking checks if the TopologyReconciled condition message contains both the hook name and hookFailedMessage. func clusterConditionShowsHookBlocking(cluster *clusterv1.Cluster, hookName string) bool { - return conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconciledHookBlockingReason && - strings.Contains(conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookName) + return v1beta1conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconciledHookBlockingReason && + strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledCondition), hookName) } func dumpAndDeleteCluster(ctx context.Context, proxy framework.ClusterProxy, clusterctlConfigPath, namespace, clusterName, artifactFolder string) { diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index 5f03bb4272c6..56ddccfe0d5b 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -33,7 +33,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/test/e2e/internal/log" "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -116,7 +116,7 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA var upgradedAndHealthy int64 deletingMachines := []clusterv1.Machine{} for _, m := range machines { - if *m.Spec.Version == cluster.Spec.Topology.Version && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { + if *m.Spec.Version == cluster.Spec.Topology.Version && v1beta1conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { upgradedAndHealthy++ } if !m.DeletionTimestamp.IsZero() { diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go index 0c97ececc04f..77defd373fa7 100644 --- a/test/e2e/node_drain.go +++ b/test/e2e/node_drain.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // NodeDrainTimeoutSpecInput is the input for NodeDrainTimeoutSpec. 
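The e2e specs only read conditions back; a small sketch of the Get-and-check pattern used in this file's assertions, assuming the same imports plus corev1 "k8s.io/api/core/v1"; the function and its machine parameter are hypothetical:

// drainBlockedSketch reports whether the Machine's drain is still in progress,
// i.e. the DrainingSucceeded condition is present and False.
func drainBlockedSketch(machine *clusterv1.Machine) bool {
    c := v1beta1conditions.Get(machine, clusterv1.DrainingSucceededCondition)
    return c != nil && c.Status == corev1.ConditionFalse
}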
@@ -338,7 +338,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo }) var condition *clusterv1.Condition for _, machine := range controlPlaneMachines { - condition = conditions.Get(&machine, clusterv1.DrainingSucceededCondition) + condition = v1beta1conditions.Get(&machine, clusterv1.DrainingSucceededCondition) if condition != nil { // We only expect to find the condition on one Machine (as KCP will only try to drain one Machine at a time) drainingCPMachineKey = client.ObjectKeyFromObject(&machine) @@ -467,7 +467,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, drainedCPMachine)).To(Succeed()) - condition := conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // The evictable Pod should be gone now. @@ -480,7 +480,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) - condition := conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // The evictable Pod should be gone now. @@ -504,7 +504,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, drainedCPMachine)).To(Succeed()) - condition := conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // The evictable Pod should be gone now. @@ -517,7 +517,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) - condition := conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // The evictable Pod should be gone now. @@ -547,7 +547,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo waitingCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, waitingCPMachine)).To(Succeed()) - condition := conditions.Get(waitingCPMachine, clusterv1.VolumeDetachSucceededCondition) + condition := v1beta1conditions.Get(waitingCPMachine, clusterv1.VolumeDetachSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // Deletion still not be blocked because of the volume. 
@@ -558,7 +558,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, machineKey, drainedMDMachine)).To(Succeed()) - condition := conditions.Get(drainedMDMachine, clusterv1.VolumeDetachSucceededCondition) + condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.VolumeDetachSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // Deletion still not be blocked because of the volume. g.Expect(condition.Message).To(ContainSubstring("Waiting for node volumes to be detached")) @@ -705,7 +705,7 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, input.DrainedCPMachineKey, drainedCPMachine)).To(Succeed()) // Verify condition on drained CP Machine. - condition := conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) for _, messageSubstring := range input.CPConditionMessageSubstrings { @@ -733,7 +733,7 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, input.DrainedMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) // Verify condition on drained MD Machine. - condition := conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) + condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededCondition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) for _, messageSubstring := range input.MDConditionMessageSubstrings[md.Name] { diff --git a/test/framework/machine_helpers.go b/test/framework/machine_helpers.go index 939654dddff2..9abc33bf025e 100644 --- a/test/framework/machine_helpers.go +++ b/test/framework/machine_helpers.go @@ -29,7 +29,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" . 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" "sigs.k8s.io/cluster-api/test/framework/internal/log" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -170,7 +170,7 @@ func WaitForControlPlaneMachinesToBeUpgraded(ctx context.Context, input WaitForC upgraded := 0 for _, machine := range machines { m := machine - if *m.Spec.Version == input.KubernetesUpgradeVersion && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { + if *m.Spec.Version == input.KubernetesUpgradeVersion && v1beta1conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { upgraded++ } } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index 98d25e009ef6..2ec89150b2ee 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -47,7 +47,7 @@ import ( infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -316,7 +316,7 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust if len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas) && len(dockerMachineList.Items) == int(*machinePool.Spec.Replicas) { dockerMachinePool.Status.Ready = true - conditions.MarkTrue(dockerMachinePool, expv1.ReplicasReadyCondition) + v1beta1conditions.MarkTrue(dockerMachinePool, expv1.ReplicasReadyCondition) return ctrl.Result{}, nil } @@ -407,39 +407,39 @@ func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster switch { // We are scaling up case readyReplicaCount < desiredReplicas: - conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount) + v1beta1conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount) // We are scaling down case readyReplicaCount > desiredReplicas: - conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount) + v1beta1conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount) default: // Make sure last resize operation is marked as completed. // NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). This convention is also used by KCP. 
if len(dockerMachines) == readyReplicaCount { - if conditions.IsFalse(dockerMachinePool, clusterv1.ResizedCondition) { + if v1beta1conditions.IsFalse(dockerMachinePool, clusterv1.ResizedCondition) { log.Info("All the replicas are ready", "replicas", readyReplicaCount) } - conditions.MarkTrue(dockerMachinePool, clusterv1.ResizedCondition) + v1beta1conditions.MarkTrue(dockerMachinePool, clusterv1.ResizedCondition) } // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(dockerMachinePool, clusterv1.MachinesCreatedCondition) + v1beta1conditions.MarkTrue(dockerMachinePool, clusterv1.MachinesCreatedCondition) } - getters := make([]conditions.Getter, 0, len(dockerMachines)) + getters := make([]v1beta1conditions.Getter, 0, len(dockerMachines)) for i := range dockerMachines { getters = append(getters, &dockerMachines[i]) } // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. - conditions.SetAggregate(dockerMachinePool, expv1.ReplicasReadyCondition, getters, conditions.AddSourceRef()) + v1beta1conditions.SetAggregate(dockerMachinePool, expv1.ReplicasReadyCondition, getters, v1beta1conditions.AddSourceRef()) return ctrl.Result{}, nil } func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error { - conditions.SetSummary(dockerMachinePool, - conditions.WithConditions( + v1beta1conditions.SetSummary(dockerMachinePool, + v1beta1conditions.WithConditions( expv1.ReplicasReadyCondition, ), ) diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go index efe3df08c705..b51b645503f1 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" "sigs.k8s.io/cluster-api/test/infrastructure/kind" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/labels/format" ) @@ -201,7 +201,8 @@ func (r *DockerMachinePoolReconciler) reconcileDockerMachines(ctx context.Contex totalReadyMachines := 0 for i := range orderedDockerMachines { dockerMachine := orderedDockerMachines[i] - if dockerMachine.Status.Ready || conditions.IsTrue(&dockerMachine, clusterv1.ReadyCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if dockerMachine.Status.Ready || v1beta1conditions.IsTrue(&dockerMachine, clusterv1.ReadyCondition) { totalReadyMachines++ } } @@ -386,9 +387,10 @@ func (r *DockerMachinePoolReconciler) getDeletionCandidates(ctx context.Context, return nil, nil, errors.Errorf("failed to find externalMachine for DockerMachine %s/%s", dockerMachine.Namespace, dockerMachine.Name) } + // TODO (v1beta2): test for v1beta2 conditions if !isMachineMatchingInfrastructureSpec(ctx, externalMachine, machinePool, dockerMachinePool) { outdatedMachines = append(outdatedMachines, dockerMachine) - } else if dockerMachine.Status.Ready || conditions.IsTrue(&dockerMachine, clusterv1.ReadyCondition) { + } else if dockerMachine.Status.Ready || 
v1beta1conditions.IsTrue(&dockerMachine, clusterv1.ReadyCondition) { readyMatchingMachines = append(readyMatchingMachines, dockerMachine) } } diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go index 470f9c7cc53f..80062e36e28b 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api/test/infrastructure/container" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -61,7 +61,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster dockerCluster.Spec.Backend.Docker.LoadBalancer.ImageTag, strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port)) if err != nil { - conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -73,7 +73,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster // Create the docker container hosting the load balancer. 
if err := externalLoadBalancer.Create(ctx); err != nil { - conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -86,7 +86,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster // Set APIEndpoints with the load balancer IP so the Cluster API Cluster Controller can pull it lbIP, err := externalLoadBalancer.IP(ctx) if err != nil { - conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -104,7 +104,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster // Mark the dockerCluster ready dockerCluster.Status.Ready = true - conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableCondition) + v1beta1conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableCondition) v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionTrue, @@ -126,7 +126,7 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster dockerCluster.Spec.Backend.Docker.LoadBalancer.ImageTag, strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port)) if err != nil { - conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -139,8 +139,9 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster // Set the LoadBalancerAvailableCondition reporting delete is started, and requeue in order to make // this visible to the users. 
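Throughout these backend hunks, every write to a deprecated v1beta1 condition is paired with a write to the corresponding v1beta2 condition. A hedged sketch of that pairing follows; the function name is hypothetical, the *infrav1.DevCluster parameter type is assumed from the surrounding backend code, and the v1beta2 Reason value is a placeholder because the real reason constants sit in context lines not shown in these hunks.

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
	v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
)

// markLoadBalancerProvisioningFailed mirrors the pairing used above: mark the deprecated
// v1beta1 condition first, then set the v1beta2 condition so both APIs stay in sync.
func markLoadBalancerProvisioningFailed(dockerCluster *infrav1.DevCluster, err error) {
	v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition,
		infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
	v1beta2conditions.Set(dockerCluster, metav1.Condition{
		Type:    infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition,
		Status:  metav1.ConditionFalse,
		Reason:  "LoadBalancerNotAvailable", // placeholder reason; the actual constant is elided above
		Message: err.Error(),
	})
}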
- if conditions.GetReason(dockerCluster, infrav1.LoadBalancerAvailableCondition) != clusterv1.DeletingReason { - conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.GetReason(dockerCluster, infrav1.LoadBalancerAvailableCondition) != clusterv1.DeletingReason { + v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -168,11 +169,11 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding it during the deletion process). - conditions.SetSummary(dockerCluster, - conditions.WithConditions( + v1beta1conditions.SetSummary(dockerCluster, + v1beta1conditions.WithConditions( infrav1.LoadBalancerAvailableCondition, ), - conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), ) if err := v1beta2conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index 64b75a524dd1..14b3360f3582 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -40,7 +40,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/labels" "sigs.k8s.io/cluster-api/util/patch" @@ -71,7 +71,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Check if the infrastructure is ready, otherwise return and wait for the cluster object to be updated if !cluster.Status.InfrastructureReady { log.Info("Waiting for DockerCluster Controller to create cluster infrastructure") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -120,7 +120,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster dockerMachine.Status.Ready = true if externalMachine.Exists() { - conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) + v1beta1conditions.MarkTrue(dockerMachine, 
infrav1.ContainerProvisionedCondition) v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, @@ -130,7 +130,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // In this case recover the information from the existing v1beta1 condition, because we do not know if // all commands succeeded. if !v1beta2conditions.Has(dockerMachine, infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition) { - condition := conditions.Get(dockerMachine, infrav1.BootstrapExecSucceededCondition) + condition := v1beta1conditions.Get(dockerMachine, infrav1.BootstrapExecSucceededCondition) if condition == nil || condition.Status == corev1.ConditionTrue { v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, @@ -152,7 +152,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster return ctrl.Result{}, errors.Wrap(err, "failed to set the machine address") } } else { - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.ContainerDeletedReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Container %s does not exist anymore", externalMachine.Name())) + v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.ContainerDeletedReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Container %s does not exist anymore", externalMachine.Name())) v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -165,9 +165,10 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Make sure bootstrap data is available and populated. 
if dataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -177,7 +178,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -225,9 +226,10 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } // Update the ContainerProvisioned and BootstrapExecSucceeded condition if not already in the correct state. + // TODO (v1beta2): test for v1beta2 conditions requeue := false - if !conditions.IsTrue(dockerMachine, infrav1.ContainerProvisionedCondition) { - conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) + if !v1beta1conditions.IsTrue(dockerMachine, infrav1.ContainerProvisionedCondition) { + v1beta1conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, @@ -235,8 +237,8 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster }) requeue = true } - if !conditions.Has(dockerMachine, infrav1.BootstrapExecSucceededCondition) { - conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrappingReason, clusterv1.ConditionSeverityInfo, "") + if !v1beta1conditions.Has(dockerMachine, infrav1.BootstrapExecSucceededCondition) { + v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrappingReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, @@ -294,7 +296,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Run the bootstrap script. Simulates cloud-init/Ignition. 
if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format, version, dockerMachine.Spec.Backend.Docker.CustomImage); err != nil { - conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") + v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, @@ -306,7 +308,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Check for bootstrap success if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, true); err != nil { - conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") + v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, @@ -320,7 +322,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } // Update the BootstrapExecSucceededCondition condition - conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) + v1beta1conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) v1beta2conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionTrue, @@ -338,8 +340,9 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // NOTE: If the Cluster doesn't use a control plane, the ControlPlaneInitialized condition is only // set to true after a control plane machine has a node ref. If we would requeue here in this case, the // Machine will never get a node ref as ProviderID is required to set the node ref, so we would get a deadlock. + // TODO (v1beta2): test for v1beta2 conditions if cluster.Spec.ControlPlaneRef != nil && - !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } @@ -419,8 +422,9 @@ func (r *MachineBackendReconciler) ReconcileDelete(ctx context.Context, cluster // this visible to the users. // NB. 
The operation in docker is fast, so there is the chance the user will not notice the status change; // nevertheless we are issuing a patch so we can test a pattern that will be used by other providers as well - if conditions.GetReason(dockerMachine, infrav1.ContainerProvisionedCondition) != clusterv1.DeletingReason { - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.GetReason(dockerMachine, infrav1.ContainerProvisionedCondition) != clusterv1.DeletingReason { + v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -453,12 +457,12 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding the step counter during the deletion process). - conditions.SetSummary(dockerMachine, - conditions.WithConditions( + v1beta1conditions.SetSummary(dockerMachine, + v1beta1conditions.WithConditions( infrav1.ContainerProvisionedCondition, infrav1.BootstrapExecSucceededCondition, ), - conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), ) if err := v1beta2conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index 73388a05eafa..da9dcff53b92 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -45,7 +45,7 @@ import ( inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" @@ -94,7 +94,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Check if the infrastructure is ready, otherwise return and wait for the cluster object to be updated if !cluster.Status.InfrastructureReady { - conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -109,8 +109,9 @@ func (r 
*MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // NOTE: we are not using bootstrap data, but we wait for it in order to simulate a real machine // provisioning workflow. if machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { - conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "") + // TODO (v1beta2): test for v1beta2 conditions + if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -121,7 +122,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster return ctrl.Result{}, nil } - conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -216,7 +217,7 @@ func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Conte start := cloudMachine.CreationTimestamp now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.VMWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.VMWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -229,7 +230,7 @@ func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Conte inMemoryMachine.Spec.ProviderID = ptr.To(calculateProviderID(inMemoryMachine)) inMemoryMachine.Status.Ready = true - conditions.MarkTrue(inMemoryMachine, infrav1.VMProvisionedCondition) + v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.VMProvisionedCondition) v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, @@ -240,7 +241,8 @@ func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Conte func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, inMemoryMachine *infrav1.DevMachine) (_ ctrl.Result, retErr error) { // No-op if the VM is not provisioned yet - if !conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -277,10 +279,11 @@ func (r *MachineBackendReconciler) 
reconcileNormalNode(ctx context.Context, clus } } - start := conditions.Get(inMemoryMachine, infrav1.VMProvisionedCondition).LastTransitionTime + // TODO (v1beta2): test for v1beta2 conditions + start := v1beta1conditions.Get(inMemoryMachine, infrav1.VMProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(inMemoryMachine, infrav1.NodeProvisionedCondition, infrav1.NodeWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.NodeProvisionedCondition, infrav1.NodeWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -350,7 +353,7 @@ func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, clus } } - conditions.MarkTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) + v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, @@ -370,7 +373,8 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus } // No-op if the VM is not provisioned yet - if !conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -380,7 +384,8 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus } // No-op if the Node is not provisioned yet - if !conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -417,10 +422,11 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus } } - start := conditions.Get(inMemoryMachine, infrav1.NodeProvisionedCondition).LastTransitionTime + // TODO (v1beta2): test for v1beta2 conditions + start := v1beta1conditions.Get(inMemoryMachine, infrav1.NodeProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(inMemoryMachine, infrav1.EtcdProvisionedCondition, infrav1.EtcdWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.EtcdProvisionedCondition, infrav1.EtcdWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -540,7 +546,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus } } - conditions.MarkTrue(inMemoryMachine, infrav1.EtcdProvisionedCondition) + v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.EtcdProvisionedCondition) v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: 
metav1.ConditionTrue, @@ -611,7 +617,8 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, } // No-op if the VM is not provisioned yet - if !conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -621,7 +628,8 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, } // No-op if the Node is not provisioned yet - if !conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -658,10 +666,11 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, } } - start := conditions.Get(inMemoryMachine, infrav1.NodeProvisionedCondition).LastTransitionTime + // TODO (v1beta2): test for v1beta2 conditions + start := v1beta1conditions.Get(inMemoryMachine, infrav1.NodeProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(inMemoryMachine, infrav1.APIServerProvisionedCondition, infrav1.APIServerWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.APIServerProvisionedCondition, infrav1.APIServerWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, @@ -746,7 +755,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, } } - conditions.MarkTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) + v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, @@ -765,7 +774,8 @@ func (r *MachineBackendReconciler) reconcileNormalScheduler(ctx context.Context, // specific behaviour for this component because they are not relevant for stress tests. // As a current approximation, we create the scheduler as soon as the API server is provisioned; // also, the scheduler is immediately marked as ready. - if !conditions.IsTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) { return ctrl.Result{}, nil } @@ -812,7 +822,8 @@ func (r *MachineBackendReconciler) reconcileNormalControllerManager(ctx context. // specific behaviour for this component because they are not relevant for stress tests. // As a current approximation, we create the controller manager as soon as the API server is provisioned; // also, the controller manager is immediately marked as ready. 
- if !conditions.IsTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) { return ctrl.Result{}, nil } @@ -1237,9 +1248,9 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel } // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding the step counter during the deletion process). - conditions.SetSummary(inMemoryMachine, - conditions.WithConditions(inMemoryMachineConditions...), - conditions.WithStepCounterIf(inMemoryMachine.ObjectMeta.DeletionTimestamp.IsZero() && inMemoryMachine.Spec.ProviderID == nil), + v1beta1conditions.SetSummary(inMemoryMachine, + v1beta1conditions.WithConditions(inMemoryMachineConditions...), + v1beta1conditions.WithStepCounterIf(inMemoryMachine.ObjectMeta.DeletionTimestamp.IsZero() && inMemoryMachine.Spec.ProviderID == nil), ) if err := v1beta2conditions.SetSummaryCondition(inMemoryMachine, inMemoryMachine, infrav1.DevMachineReadyV1Beta2Condition, inMemoryMachineV1Beta2Conditions, diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_controller_test.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_controller_test.go index 94849ee50db3..a546195d1948 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_controller_test.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_controller_test.go @@ -44,7 +44,7 @@ import ( inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util/certs" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" secretutil "sigs.k8s.io/cluster-api/util/secret" ) @@ -112,7 +112,7 @@ func TestReconcileNormalCloudMachine(t *testing.T) { res, err := r.reconcileNormalCloudMachine(ctx, cluster, cpMachine, inMemoryMachine) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeFalse()) - g.Expect(conditions.IsFalse(inMemoryMachine, infrav1.VMProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(inMemoryMachine, infrav1.VMProvisionedCondition)).To(BeTrue()) got := &cloudv1.CloudMachine{ ObjectMeta: metav1.ObjectMeta{ @@ -134,8 +134,8 @@ func TestReconcileNormalCloudMachine(t *testing.T) { return res.IsZero() }, inMemoryMachine.Spec.Backend.InMemory.VM.Provisioning.StartupDuration.Duration*2).Should(BeTrue()) - g.Expect(conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition)).To(BeTrue()) - g.Expect(conditions.Get(inMemoryMachine, infrav1.VMProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", inMemoryMachine.CreationTimestamp.Time, inMemoryMachine.Spec.Backend.InMemory.VM.Provisioning.StartupDuration.Duration)) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.Get(inMemoryMachine, infrav1.VMProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", inMemoryMachine.CreationTimestamp.Time, inMemoryMachine.Spec.Backend.InMemory.VM.Provisioning.StartupDuration.Duration)) }) t.Run("no-op after it is provisioned", func(t *testing.T) { @@ -226,7 +226,7 @@ func 
TestReconcileNormalNode(t *testing.T) { res, err := r.reconcileNormalNode(ctx, cluster, cpMachine, inMemoryMachineWithVMProvisioned) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeFalse()) - g.Expect(conditions.IsFalse(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition)).To(BeTrue()) got := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -251,8 +251,8 @@ func TestReconcileNormalNode(t *testing.T) { err = c.Get(ctx, client.ObjectKeyFromObject(got), got) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.IsTrue(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition)).To(BeTrue()) - g.Expect(conditions.Get(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", conditions.Get(inMemoryMachineWithVMProvisioned, infrav1.VMProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithVMProvisioned.Spec.Backend.InMemory.Node.Provisioning.StartupDuration.Duration)) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.Get(inMemoryMachineWithVMProvisioned, infrav1.NodeProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", v1beta1conditions.Get(inMemoryMachineWithVMProvisioned, infrav1.VMProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithVMProvisioned.Spec.Backend.InMemory.Node.Provisioning.StartupDuration.Duration)) }) t.Run("no-op after it is provisioned", func(t *testing.T) { @@ -366,7 +366,7 @@ func TestReconcileNormalEtcd(t *testing.T) { res, err := r.reconcileNormalETCD(ctx, cluster, cpMachine, inMemoryMachineWithNodeProvisioned1) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeFalse()) - g.Expect(conditions.IsFalse(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) got := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -396,8 +396,8 @@ func TestReconcileNormalEtcd(t *testing.T) { g.Expect(got.Annotations).To(HaveKey(cloudv1.EtcdMemberIDAnnotationName)) g.Expect(got.Annotations).To(HaveKey(cloudv1.EtcdLeaderFromAnnotationName)) - g.Expect(conditions.IsTrue(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) - g.Expect(conditions.Get(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", conditions.Get(inMemoryMachineWithNodeProvisioned1, infrav1.NodeProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithNodeProvisioned1.Spec.Backend.InMemory.Etcd.Provisioning.StartupDuration.Duration)) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.Get(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", v1beta1conditions.Get(inMemoryMachineWithNodeProvisioned1, infrav1.NodeProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithNodeProvisioned1.Spec.Backend.InMemory.Etcd.Provisioning.StartupDuration.Duration)) }) t.Run("no-op after it is provisioned", func(t *testing.T) { @@ -451,7 +451,7 @@ func TestReconcileNormalEtcd(t *testing.T) { res, err := r.reconcileNormalETCD(ctx, cluster, cpMachine, 
inMemoryMachineWithNodeProvisioned1) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeTrue()) - g.Expect(conditions.IsTrue(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachineWithNodeProvisioned1, infrav1.EtcdProvisionedCondition)).To(BeTrue()) got1 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -472,7 +472,7 @@ func TestReconcileNormalEtcd(t *testing.T) { res, err = r.reconcileNormalETCD(ctx, cluster, cpMachine, inMemoryMachineWithNodeProvisioned2) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeTrue()) - g.Expect(conditions.IsTrue(inMemoryMachineWithNodeProvisioned2, infrav1.EtcdProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachineWithNodeProvisioned2, infrav1.EtcdProvisionedCondition)).To(BeTrue()) got2 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -593,7 +593,7 @@ func TestReconcileNormalApiServer(t *testing.T) { res, err := r.reconcileNormalAPIServer(ctx, cluster, cpMachine, inMemoryMachineWithNodeProvisioned) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.IsZero()).To(BeFalse()) - g.Expect(conditions.IsFalse(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.IsFalse(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition)).To(BeTrue()) got := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -619,8 +619,8 @@ func TestReconcileNormalApiServer(t *testing.T) { err = c.Get(ctx, client.ObjectKeyFromObject(got), got) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(conditions.IsTrue(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition)).To(BeTrue()) - g.Expect(conditions.Get(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", conditions.Get(inMemoryMachineWithNodeProvisioned, infrav1.NodeProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithNodeProvisioned.Spec.Backend.InMemory.APIServer.Provisioning.StartupDuration.Duration)) + g.Expect(v1beta1conditions.IsTrue(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition)).To(BeTrue()) + g.Expect(v1beta1conditions.Get(inMemoryMachineWithNodeProvisioned, infrav1.APIServerProvisionedCondition).LastTransitionTime.Time).To(BeTemporally(">", v1beta1conditions.Get(inMemoryMachineWithNodeProvisioned, infrav1.NodeProvisionedCondition).LastTransitionTime.Time, inMemoryMachineWithNodeProvisioned.Spec.Backend.InMemory.APIServer.Provisioning.StartupDuration.Duration)) }) t.Run("no-op after it is provisioned", func(t *testing.T) { diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index edab97a2b72a..825381ce70e0 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -34,7 +34,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" @@ -153,11 
+153,11 @@ func (r *DockerClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl func patchDockerCluster(ctx context.Context, patchHelper *patch.Helper, dockerCluster *infrav1.DockerCluster) error { // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding it during the deletion process). - conditions.SetSummary(dockerCluster, - conditions.WithConditions( + v1beta1conditions.SetSummary(dockerCluster, + v1beta1conditions.WithConditions( infrav1.LoadBalancerAvailableCondition, ), - conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), ) if err := v1beta2conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 2744788c6a29..3317bf0a9637 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -36,7 +36,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" @@ -248,12 +248,12 @@ func (r *DockerMachineReconciler) dockerClusterToDockerMachines(ctx context.Cont func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infrav1.DockerMachine) error { // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding the step counter during the deletion process). 
- conditions.SetSummary(dockerMachine, - conditions.WithConditions( + v1beta1conditions.SetSummary(dockerMachine, + v1beta1conditions.WithConditions( infrav1.ContainerProvisionedCondition, infrav1.BootstrapExecSucceededCondition, ), - conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), ) if err := v1beta2conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ diff --git a/util/collections/machine_collection.go b/util/collections/machine_collection.go index a0d174477c56..e512dec32cb3 100644 --- a/util/collections/machine_collection.go +++ b/util/collections/machine_collection.go @@ -33,7 +33,7 @@ import ( "github.com/blang/semver/v4" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/version" ) @@ -241,8 +241,8 @@ func (s Machines) DeepCopy() Machines { } // ConditionGetters returns the slice with machines converted into conditions.Getter. -func (s Machines) ConditionGetters() []conditions.Getter { - res := make([]conditions.Getter, 0, len(s)) +func (s Machines) ConditionGetters() []v1beta1conditions.Getter { + res := make([]v1beta1conditions.Getter, 0, len(s)) for _, v := range s { value := *v res = append(res, &value) diff --git a/util/collections/machine_filters.go b/util/collections/machine_filters.go index 28f7b3e3ab61..27df67a8d671 100644 --- a/util/collections/machine_filters.go +++ b/util/collections/machine_filters.go @@ -28,7 +28,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // Func is the functon definition for a filter. @@ -155,7 +155,8 @@ func IsUnhealthyAndOwnerRemediated(machine *clusterv1.Machine) bool { if machine == nil { return false } - return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) + // TODO (v1beta2): test for v1beta2 conditions + return v1beta1conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) } // IsUnhealthy returns a filter to find all machines that have a MachineHealthCheckSucceeded condition set to False, @@ -164,7 +165,8 @@ func IsUnhealthy(machine *clusterv1.Machine) bool { if machine == nil { return false } - return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) + // TODO (v1beta2): test for v1beta2 conditions + return v1beta1conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) } // HasUnhealthyControlPlaneComponents returns a filter to find all unhealthy control plane machines that @@ -195,7 +197,8 @@ func HasUnhealthyControlPlaneComponents(isEtcdManaged bool) Func { // Do not return true when the condition is not set or is set to Unknown because // it means a transient state and can not be considered as unhealthy. // preflightCheckCondition() can cover these two cases and skip the scaling up/down. 
- if conditions.IsFalse(machine, condition) { + // TODO (v1beta2): test for v1beta2 conditions + if v1beta1conditions.IsFalse(machine, condition) { return true } } @@ -209,7 +212,8 @@ func IsReady() Func { if machine == nil { return false } - return conditions.IsTrue(machine, clusterv1.ReadyCondition) + // TODO (v1beta2): test for v1beta2 conditions + return v1beta1conditions.IsTrue(machine, clusterv1.ReadyCondition) } } diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index 697354e090ae..863baf34dbb1 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -29,7 +29,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func falseFilter(_ *clusterv1.Machine) bool { @@ -89,22 +89,22 @@ func TestUnhealthyFilters(t *testing.T) { t.Run("healthy machine (with HealthCheckSucceeded condition == True) should return false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) + v1beta1conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) g.Expect(collections.IsUnhealthy(m)).To(BeFalse()) g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) }) t.Run("unhealthy machine NOT eligible for KCP remediation (with withHealthCheckSucceeded condition == False but without OwnerRemediated) should return false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) }) t.Run("unhealthy machine eligible for KCP (with HealthCheckSucceeded condition == False and with OwnerRemediated) should return true", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") - conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeTrue()) }) @@ -460,9 +460,9 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - 
*conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), }, }, } @@ -479,9 +479,9 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "", + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "", clusterv1.ConditionSeverityWarning, ""), }, }, @@ -499,12 +499,12 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", clusterv1.ConditionSeverityWarning, ""), - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", clusterv1.ConditionSeverityWarning, ""), }, }, @@ -522,12 +522,12 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", clusterv1.ConditionSeverityWarning, ""), - *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", + *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", clusterv1.ConditionSeverityWarning, ""), }, 
}, @@ -545,11 +545,11 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ - *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), - *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, }, } diff --git a/util/conditions/doc.go b/util/conditions/deprecated/v1beta1/doc.go similarity index 88% rename from util/conditions/doc.go rename to util/conditions/deprecated/v1beta1/doc.go index 019d42d7c0c7..6830163ce72a 100644 --- a/util/conditions/doc.go +++ b/util/conditions/deprecated/v1beta1/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package conditions implements condition utilities. -package conditions +// Package v1beta1 implements condition utilities. +package v1beta1 diff --git a/util/conditions/getter.go b/util/conditions/deprecated/v1beta1/getter.go similarity index 99% rename from util/conditions/getter.go rename to util/conditions/deprecated/v1beta1/getter.go index 79e0dd580b14..5a1b17bfa31d 100644 --- a/util/conditions/getter.go +++ b/util/conditions/deprecated/v1beta1/getter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( corev1 "k8s.io/api/core/v1" diff --git a/util/conditions/getter_test.go b/util/conditions/deprecated/v1beta1/getter_test.go similarity index 99% rename from util/conditions/getter_test.go rename to util/conditions/deprecated/v1beta1/getter_test.go index f80685e542a7..d2c6b65bcc6d 100644 --- a/util/conditions/getter_test.go +++ b/util/conditions/deprecated/v1beta1/getter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/matcher.go b/util/conditions/deprecated/v1beta1/matcher.go similarity index 99% rename from util/conditions/matcher.go rename to util/conditions/deprecated/v1beta1/matcher.go index bfc91fc7cf06..49679245de7e 100644 --- a/util/conditions/matcher.go +++ b/util/conditions/deprecated/v1beta1/matcher.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package conditions +package v1beta1 import ( "fmt" diff --git a/util/conditions/matcher_test.go b/util/conditions/deprecated/v1beta1/matcher_test.go similarity index 99% rename from util/conditions/matcher_test.go rename to util/conditions/deprecated/v1beta1/matcher_test.go index 1976238860c6..e6536ff1dac2 100644 --- a/util/conditions/matcher_test.go +++ b/util/conditions/deprecated/v1beta1/matcher_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/matchers.go b/util/conditions/deprecated/v1beta1/matchers.go similarity index 98% rename from util/conditions/matchers.go rename to util/conditions/deprecated/v1beta1/matchers.go index 42f71fc2727f..ba1392046d33 100644 --- a/util/conditions/matchers.go +++ b/util/conditions/deprecated/v1beta1/matchers.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "errors" diff --git a/util/conditions/merge.go b/util/conditions/deprecated/v1beta1/merge.go similarity index 99% rename from util/conditions/merge.go rename to util/conditions/deprecated/v1beta1/merge.go index 28fce2466217..4f93ad6f836a 100644 --- a/util/conditions/merge.go +++ b/util/conditions/deprecated/v1beta1/merge.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "sort" diff --git a/util/conditions/merge_strategies.go b/util/conditions/deprecated/v1beta1/merge_strategies.go similarity index 99% rename from util/conditions/merge_strategies.go rename to util/conditions/deprecated/v1beta1/merge_strategies.go index 87d477a9c83f..0aca7a3ba2eb 100644 --- a/util/conditions/merge_strategies.go +++ b/util/conditions/deprecated/v1beta1/merge_strategies.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "fmt" diff --git a/util/conditions/merge_strategies_test.go b/util/conditions/deprecated/v1beta1/merge_strategies_test.go similarity index 99% rename from util/conditions/merge_strategies_test.go rename to util/conditions/deprecated/v1beta1/merge_strategies_test.go index 29e8bb6cdcff..3e24e50c095b 100644 --- a/util/conditions/merge_strategies_test.go +++ b/util/conditions/deprecated/v1beta1/merge_strategies_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/merge_test.go b/util/conditions/deprecated/v1beta1/merge_test.go similarity index 99% rename from util/conditions/merge_test.go rename to util/conditions/deprecated/v1beta1/merge_test.go index fc2fd487cb0d..926ad5419e4b 100644 --- a/util/conditions/merge_test.go +++ b/util/conditions/deprecated/v1beta1/merge_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/patch.go b/util/conditions/deprecated/v1beta1/patch.go similarity index 99% rename from util/conditions/patch.go rename to util/conditions/deprecated/v1beta1/patch.go index d3fcb15ccffc..dfc39419cdd8 100644 --- a/util/conditions/patch.go +++ b/util/conditions/deprecated/v1beta1/patch.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "reflect" diff --git a/util/conditions/patch_test.go b/util/conditions/deprecated/v1beta1/patch_test.go similarity index 99% rename from util/conditions/patch_test.go rename to util/conditions/deprecated/v1beta1/patch_test.go index c288d7136b3d..cf8197930cd9 100644 --- a/util/conditions/patch_test.go +++ b/util/conditions/deprecated/v1beta1/patch_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/setter.go b/util/conditions/deprecated/v1beta1/setter.go similarity index 99% rename from util/conditions/setter.go rename to util/conditions/deprecated/v1beta1/setter.go index 0fa3816f5de0..329b5444c1a4 100644 --- a/util/conditions/setter.go +++ b/util/conditions/deprecated/v1beta1/setter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "fmt" diff --git a/util/conditions/setter_test.go b/util/conditions/deprecated/v1beta1/setter_test.go similarity index 99% rename from util/conditions/setter_test.go rename to util/conditions/deprecated/v1beta1/setter_test.go index 7984cf50de87..2549a6c0f857 100644 --- a/util/conditions/setter_test.go +++ b/util/conditions/deprecated/v1beta1/setter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/conditions/suite_test.go b/util/conditions/deprecated/v1beta1/suite_test.go similarity index 97% rename from util/conditions/suite_test.go rename to util/conditions/deprecated/v1beta1/suite_test.go index 87e40a4d305e..57ea7d91f7d3 100644 --- a/util/conditions/suite_test.go +++ b/util/conditions/deprecated/v1beta1/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" diff --git a/util/conditions/unstructured.go b/util/conditions/deprecated/v1beta1/unstructured.go similarity index 99% rename from util/conditions/unstructured.go rename to util/conditions/deprecated/v1beta1/unstructured.go index 9ca22ac2e91d..e51605f578b1 100644 --- a/util/conditions/unstructured.go +++ b/util/conditions/deprecated/v1beta1/unstructured.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package conditions +package v1beta1 import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/util/conditions/unstructured_test.go b/util/conditions/deprecated/v1beta1/unstructured_test.go similarity index 99% rename from util/conditions/unstructured_test.go rename to util/conditions/deprecated/v1beta1/unstructured_test.go index ec2d8d544396..a8b5531a3c2e 100644 --- a/util/conditions/unstructured_test.go +++ b/util/conditions/deprecated/v1beta1/unstructured_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conditions +package v1beta1 import ( "testing" diff --git a/util/patch/patch.go b/util/patch/patch.go index 6067abc76de3..d157704ce2ad 100644 --- a/util/patch/patch.go +++ b/util/patch/patch.go @@ -35,7 +35,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) @@ -124,7 +124,7 @@ func (h *Helper) Patch(ctx context.Context, obj client.Object, opts ...Option) e } // Check if the object satisfies the Cluster API contract setter interfaces; if not, ignore condition field path entirely. - if _, canInterfaceConditions := obj.(conditions.Setter); !canInterfaceConditions { + if _, canInterfaceConditions := obj.(v1beta1conditions.Setter); !canInterfaceConditions { h.clusterv1ConditionsFieldPath = nil } if _, canInterfaceV1Beta2Conditions := obj.(v1beta2conditions.Setter); !canInterfaceV1Beta2Conditions { @@ -237,16 +237,16 @@ func (h *Helper) patchStatusConditions(ctx context.Context, obj client.Object, f // // NOTE: The checks and error below are done so that we don't panic if any of the objects don't satisfy the // interface any longer, although this shouldn't happen because we already check when creating the patcher. 
- before, ok := h.beforeObject.(conditions.Getter) + before, ok := h.beforeObject.(v1beta1conditions.Getter) if !ok { return errors.Errorf("%s %s doesn't satisfy conditions.Getter, cannot patch", h.gvk.Kind, klog.KObj(h.beforeObject)) } - after, ok := obj.(conditions.Getter) + after, ok := obj.(v1beta1conditions.Getter) if !ok { return errors.Errorf("%s %s doesn't satisfy conditions.Getter, cannot compute patch", h.gvk.Kind, klog.KObj(obj)) } - diff, err := conditions.NewPatch( + diff, err := v1beta1conditions.NewPatch( before, after, ) @@ -255,12 +255,12 @@ func (h *Helper) patchStatusConditions(ctx context.Context, obj client.Object, f } if !diff.IsZero() { clusterv1ApplyPatch = func(latest client.Object) error { - latestSetter, ok := latest.(conditions.Setter) + latestSetter, ok := latest.(v1beta1conditions.Setter) if !ok { return errors.Errorf("%s %s doesn't satisfy conditions.Setter, cannot apply patch", h.gvk.Kind, klog.KObj(latest)) } - return diff.Apply(latestSetter, conditions.WithForceOverwrite(forceOverwrite), conditions.WithOwnedConditions(ownedConditions...)) + return diff.Apply(latestSetter, v1beta1conditions.WithForceOverwrite(forceOverwrite), v1beta1conditions.WithOwnedConditions(ownedConditions...)) } } } diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index fe5165500609..94257ea55d60 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -32,7 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -202,7 +202,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -214,7 +214,7 @@ func TestPatchHelper(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Deprecated.V1Beta1.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -238,7 +238,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking TestCondition=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -249,7 +249,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -261,22 +261,22 @@ func TestPatchHelper(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "TestCondition") - testConditionAfter := conditions.Get(objAfter, "TestCondition") + 
testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") + testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -306,7 +306,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking TestCondition=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -321,7 +321,7 @@ func TestPatchHelper(t *testing.T) { obj.Spec.ControlPlaneEndpoint.Host = "test://endpoint" obj.Spec.ControlPlaneEndpoint.Port = 8443 obj.Status.Phase = "Provisioning" - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -333,22 +333,22 @@ func TestPatchHelper(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "TestCondition") - testConditionAfter := conditions.Get(objAfter, "TestCondition") + testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") + testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -380,7 +380,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -391,7 +391,7 @@ func TestPatchHelper(t *testing.T) { 
g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -404,7 +404,7 @@ func TestPatchHelper(t *testing.T) { } for _, afterCondition := range objAfter.Status.Deprecated.V1Beta1.Conditions { - ok, err := conditions.MatchCondition(objCopy.Status.Deprecated.V1Beta1.Conditions[0]).Match(afterCondition) + ok, err := v1beta1conditions.MatchCondition(objCopy.Status.Deprecated.V1Beta1.Conditions[0]).Match(afterCondition) if err == nil && ok { return true } @@ -435,7 +435,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -446,21 +446,21 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *conditions.Get(objAfter, clusterv1.ReadyCondition) - }, timeout).Should(conditions.MatchCondition(*readyBefore)) + return *v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) + }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) t.Run("should not return an error if there is an unresolvable conflict when force overwrite is enabled", func(t *testing.T) { @@ -484,7 +484,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -495,21 +495,21 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *conditions.Get(objAfter, clusterv1.ReadyCondition) - }, timeout).Should(conditions.MatchCondition(*readyBefore)) + return 
*v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) + }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) }) }) @@ -718,7 +718,7 @@ func TestPatchHelper(t *testing.T) { obj.Status.InfrastructureReady = true t.Log("Setting Ready condition") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -731,7 +731,7 @@ func TestPatchHelper(t *testing.T) { } return obj.Status.InfrastructureReady == objAfter.Status.InfrastructureReady && - conditions.IsTrue(objAfter, clusterv1.ReadyCondition) && + v1beta1conditions.IsTrue(objAfter, clusterv1.ReadyCondition) && cmp.Equal(obj.Spec, objAfter.Spec) }, timeout).Should(BeTrue()) }) @@ -1047,7 +1047,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1059,7 +1059,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should mark it ready when passing Clusterv1ConditionsFieldPath", func(t *testing.T) { @@ -1086,7 +1086,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, Clusterv1ConditionsFieldPath{"status", "conditions"})).To(Succeed()) @@ -1098,7 +1098,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -1122,7 +1122,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1133,7 +1133,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1145,22 +1145,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "TestCondition") - testConditionAfter := conditions.Get(objAfter, "TestCondition") + testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") + testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || 
testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1190,7 +1190,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1203,7 +1203,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and adding Ready=True condition") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1215,22 +1215,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "TestCondition") - testConditionAfter := conditions.Get(objAfter, "TestCondition") + testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") + testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1261,7 +1261,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1272,7 +1272,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - 
conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -1285,7 +1285,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { } for _, afterCondition := range objAfter.Status.Conditions { - ok, err := conditions.MatchCondition(objCopy.Status.Conditions[0]).Match(afterCondition) + ok, err := v1beta1conditions.MatchCondition(objCopy.Status.Conditions[0]).Match(afterCondition) if err == nil && ok { return true } @@ -1316,7 +1316,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1327,21 +1327,21 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *conditions.Get(objAfter, clusterv1.ReadyCondition) - }, timeout).Should(conditions.MatchCondition(*readyBefore)) + return *v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) + }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) t.Run("should not return an error if there is an unresolvable conflict when force overwrite is enabled", func(t *testing.T) { @@ -1365,7 +1365,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1376,21 +1376,21 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *conditions.Get(objAfter, clusterv1.ReadyCondition) - }, timeout).Should(conditions.MatchCondition(*readyBefore)) + return *v1beta1conditions.Get(objAfter, 
clusterv1.ReadyCondition) + }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) }) @@ -1427,7 +1427,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1451,7 +1451,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Conditions)) g.Eventually(func() []metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { @@ -1492,7 +1492,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1505,7 +1505,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Conditions)) g.Eventually(func() []metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { @@ -1536,7 +1536,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking clusterv1.conditions and metav1.conditions Test=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1548,7 +1548,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1561,22 +1561,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "Test") - testConditionAfter := conditions.Get(objAfter, "Test") + testConditionCopy := v1beta1conditions.Get(objCopy, "Test") + testConditionAfter := v1beta1conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != 
nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1626,7 +1626,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking clusterv1.conditions and metav1.conditions Test=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1640,7 +1640,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and marking clusterv1.condition and metav1.conditions Ready=True") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1654,22 +1654,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := conditions.Get(objCopy, "Test") - testConditionAfter := conditions.Get(objAfter, "Test") + testConditionCopy := v1beta1conditions.Get(objCopy, "Test") + testConditionAfter := v1beta1conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1720,7 +1720,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1731,7 +1731,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + 
v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -1743,7 +1743,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Conditions - }, timeout).Should(conditions.MatchConditions(objCopy.Status.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(objCopy.Status.Conditions)) }) t.Run("should return an error if there is an unresolvable conflict on v1beta2.conditions", func(t *testing.T) { @@ -1814,7 +1814,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1826,7 +1826,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready clusterv1.condition and metav1.conditions True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1839,12 +1839,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err := conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err := v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1884,7 +1884,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1896,7 +1896,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready clusterv1.condition and metav1.conditions True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1909,12 +1909,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + 
readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBefore == nil || readyAfter == nil { return false } - ok, err := conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err := v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -1967,7 +1967,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1991,7 +1991,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Deprecated.V1Beta1.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) g.Eventually(func() []metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { @@ -2032,7 +2032,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2045,7 +2045,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Deprecated.V1Beta1.Conditions - }, timeout).Should(conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) g.Eventually(func() []metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { @@ -2076,7 +2076,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking condition and back compatibility condition Test=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2088,7 +2088,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2101,22 +2101,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testBackCompatibilityCopy := conditions.Get(objCopy, "Test") - testBackCompatibilityAfter := 
conditions.Get(objAfter, "Test") + testBackCompatibilityCopy := v1beta1conditions.Get(objCopy, "Test") + testBackCompatibilityAfter := v1beta1conditions.Get(objAfter, "Test") if testBackCompatibilityCopy == nil || testBackCompatibilityAfter == nil { return false } - ok, err := conditions.MatchCondition(*testBackCompatibilityCopy).Match(*testBackCompatibilityAfter) + ok, err := v1beta1conditions.MatchCondition(*testBackCompatibilityCopy).Match(*testBackCompatibilityAfter) if err != nil || !ok { return false } - readyBackCompatibilityBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyBackCompatibilityAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } - ok, err = conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) if err != nil || !ok { return false } @@ -2166,7 +2166,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking condition and back compatibility condition Test=False") - conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2180,7 +2180,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and marking condition and back compatibility condition Ready=True") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2194,22 +2194,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testBackCompatibilityCopy := conditions.Get(objCopy, "Test") - testBackCompatibilityAfter := conditions.Get(objAfter, "Test") + testBackCompatibilityCopy := v1beta1conditions.Get(objCopy, "Test") + testBackCompatibilityAfter := v1beta1conditions.Get(objAfter, "Test") if testBackCompatibilityCopy == nil || testBackCompatibilityAfter == nil { return false } - ok, err := conditions.MatchCondition(*testBackCompatibilityCopy).Match(*testBackCompatibilityAfter) + ok, err := v1beta1conditions.MatchCondition(*testBackCompatibilityCopy).Match(*testBackCompatibilityAfter) if err != nil || !ok { return false } - readyBackCompatibilityBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyBackCompatibilityAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } - ok, err = 
conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) + ok, err = v1beta1conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) if err != nil || !ok { return false } @@ -2260,7 +2260,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2271,7 +2271,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -2283,7 +2283,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return clusterv1.Conditions{} } return objAfter.Status.Deprecated.V1Beta1.Conditions - }, timeout).Should(conditions.MatchConditions(objCopy.Status.Deprecated.V1Beta1.Conditions)) + }, timeout).Should(v1beta1conditions.MatchConditions(objCopy.Status.Deprecated.V1Beta1.Conditions)) }) t.Run("should return an error if there is an unresolvable conflict on conditions", func(t *testing.T) { @@ -2354,7 +2354,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition and back compatibility condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2366,7 +2366,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition and back compatibility condition True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2379,12 +2379,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyBackCompatibilityAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } - ok, err := conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) + ok, err := v1beta1conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) if err != nil || !ok { return false } @@ -2424,7 +2424,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready 
condition and back compatibility condition to be false") - conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2436,7 +2436,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition and back compatibility condition True") - conditions.MarkTrue(obj, clusterv1.ReadyCondition) + v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2449,12 +2449,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := conditions.Get(obj, clusterv1.ReadyCondition) - readyBackCompatibilityAfter := conditions.Get(objAfter, clusterv1.ReadyCondition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyCondition) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } - ok, err := conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) + ok, err := v1beta1conditions.MatchCondition(*readyBackCompatibilityBefore).Match(*readyBackCompatibilityAfter) if err != nil || !ok { return false } diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index bbedd0d66904..d70821cc4da1 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // ClusterCreateInfraReady returns a predicate that returns true for a create event when a cluster has Status.InfrastructureReady set as true @@ -244,8 +244,9 @@ func ClusterControlPlaneInitialized(scheme *runtime.Scheme, logger logr.Logger) newCluster := e.ObjectNew.(*clusterv1.Cluster) - if !conditions.IsTrue(oldCluster, clusterv1.ControlPlaneInitializedCondition) && - conditions.IsTrue(newCluster, clusterv1.ControlPlaneInitializedCondition) { + // TODO (v1beta2): test for v1beta2 conditions + if !v1beta1conditions.IsTrue(oldCluster, clusterv1.ControlPlaneInitializedCondition) && + v1beta1conditions.IsTrue(newCluster, clusterv1.ControlPlaneInitializedCondition) { log.V(6).Info("Cluster ControlPlaneInitialized was set, allow further processing") return true } diff --git a/util/predicates/cluster_predicates_test.go b/util/predicates/cluster_predicates_test.go index 5348d4e42d37..1256810cbbf7 100644 --- a/util/predicates/cluster_predicates_test.go +++ b/util/predicates/cluster_predicates_test.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -35,10 +35,10 @@ func 
TestClusterControlplaneInitializedPredicate(t *testing.T) { predicate := predicates.ClusterControlPlaneInitialized(runtime.NewScheme(), logr.New(log.NullLogSink{})) markedFalse := clusterv1.Cluster{} - conditions.MarkFalse(&markedFalse, clusterv1.ControlPlaneInitializedCondition, clusterv1.MissingNodeRefReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(&markedFalse, clusterv1.ControlPlaneInitializedCondition, clusterv1.MissingNodeRefReason, clusterv1.ConditionSeverityWarning, "") markedTrue := clusterv1.Cluster{} - conditions.MarkTrue(&markedTrue, clusterv1.ControlPlaneInitializedCondition) + v1beta1conditions.MarkTrue(&markedTrue, clusterv1.ControlPlaneInitializedCondition) notMarked := clusterv1.Cluster{} From a96781dae66ad6571d68660c326d943bd0e2aeff Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 10 Apr 2025 18:11:29 +0200 Subject: [PATCH 2/5] Move v1beta2 conditions utils top level --- .golangci.yml | 2 - .../controllers/kubeadmconfig_controller.go | 58 ++-- .../kubeadmconfig_controller_test.go | 6 +- cmd/clusterctl/client/tree/tree_test.go | 6 +- cmd/clusterctl/client/tree/util.go | 34 +-- .../internal/controllers/controller.go | 32 +-- .../internal/controllers/controller_test.go | 26 +- .../internal/controllers/remediation.go | 30 +-- .../internal/controllers/remediation_test.go | 14 +- .../kubeadm/internal/controllers/status.go | 118 ++++---- .../internal/controllers/status_test.go | 38 +-- .../internal/workload_cluster_conditions.go | 78 +++--- .../workload_cluster_conditions_test.go | 16 +- .../controllers/extensionconfig_controller.go | 6 +- .../extensionconfig_controller_test.go | 4 +- .../controllers/cluster/cluster_controller.go | 6 +- .../cluster/cluster_controller_status.go | 254 +++++++++--------- .../cluster/cluster_controller_status_test.go | 68 ++--- .../cluster/cluster_controller_test.go | 6 +- .../clusterclass_controller_status.go | 12 +- .../clusterclass_controller_status_test.go | 10 +- .../clusterresourceset_controller.go | 14 +- .../clusterresourceset_controller_test.go | 4 +- .../machine/machine_controller_status.go | 118 ++++---- .../machine/machine_controller_status_test.go | 50 ++-- .../machinedeployment_status.go | 94 +++---- .../machinedeployment_status_test.go | 36 +-- .../machinehealthcheck_controller.go | 16 +- .../machinehealthcheck_controller_test.go | 12 +- .../machinehealthcheck_targets.go | 12 +- .../machineset/machineset_controller.go | 12 +- .../machineset_controller_status.go | 82 +++--- .../machineset_controller_status_test.go | 28 +- .../machineset/machineset_controller_test.go | 62 ++--- .../topology/cluster/conditions.go | 16 +- .../topology/cluster/conditions_test.go | 4 +- test/e2e/clusterctl_upgrade.go | 10 +- .../handler_integration_test.go | 4 +- test/framework/cluster_helpers.go | 4 +- .../backends/docker/dockercluster_backend.go | 24 +- .../backends/docker/dockermachine_backend.go | 40 +-- .../inmemory/inmemorymachine_backend.go | 58 ++-- .../controllers/dockercluster_controller.go | 12 +- .../controllers/dockermachine_controller.go | 12 +- util/conditions/{v1beta2 => }/aggregate.go | 2 +- .../{v1beta2 => }/aggregate_test.go | 2 +- util/conditions/doc.go | 20 ++ util/conditions/{v1beta2 => }/getter.go | 2 +- util/conditions/{v1beta2 => }/getter_test.go | 2 +- util/conditions/{v1beta2 => }/matcher.go | 2 +- util/conditions/{v1beta2 => }/matcher_test.go | 2 +- .../{v1beta2 => }/merge_strategies.go | 2 +- .../{v1beta2 => }/merge_strategies_test.go | 2 +- util/conditions/{v1beta2 => }/mirror.go | 2 +- 
util/conditions/{v1beta2 => }/mirror_test.go | 2 +- util/conditions/{v1beta2 => }/options.go | 2 +- util/conditions/{v1beta2 => }/patch.go | 2 +- util/conditions/{v1beta2 => }/patch_test.go | 2 +- util/conditions/{v1beta2 => }/setter.go | 2 +- util/conditions/{v1beta2 => }/setter_test.go | 2 +- util/conditions/{v1beta2 => }/sort.go | 2 +- util/conditions/{v1beta2 => }/sort_test.go | 2 +- util/conditions/{v1beta2 => }/summary.go | 2 +- util/conditions/{v1beta2 => }/summary_test.go | 2 +- util/conditions/v1beta2/doc.go | 32 --- util/patch/options.go | 4 +- util/patch/patch.go | 20 +- util/patch/patch_test.go | 204 +++++++------- util/paused/paused.go | 14 +- util/paused/paused_test.go | 4 +- 70 files changed, 935 insertions(+), 949 deletions(-) rename util/conditions/{v1beta2 => }/aggregate.go (99%) rename util/conditions/{v1beta2 => }/aggregate_test.go (99%) create mode 100644 util/conditions/doc.go rename util/conditions/{v1beta2 => }/getter.go (99%) rename util/conditions/{v1beta2 => }/getter_test.go (99%) rename util/conditions/{v1beta2 => }/matcher.go (99%) rename util/conditions/{v1beta2 => }/matcher_test.go (99%) rename util/conditions/{v1beta2 => }/merge_strategies.go (99%) rename util/conditions/{v1beta2 => }/merge_strategies_test.go (99%) rename util/conditions/{v1beta2 => }/mirror.go (99%) rename util/conditions/{v1beta2 => }/mirror_test.go (99%) rename util/conditions/{v1beta2 => }/options.go (99%) rename util/conditions/{v1beta2 => }/patch.go (99%) rename util/conditions/{v1beta2 => }/patch_test.go (99%) rename util/conditions/{v1beta2 => }/setter.go (99%) rename util/conditions/{v1beta2 => }/setter_test.go (99%) rename util/conditions/{v1beta2 => }/sort.go (99%) rename util/conditions/{v1beta2 => }/sort_test.go (98%) rename util/conditions/{v1beta2 => }/summary.go (99%) rename util/conditions/{v1beta2 => }/summary_test.go (99%) delete mode 100644 util/conditions/v1beta2/doc.go diff --git a/.golangci.yml b/.golangci.yml index 18d31989dab0..b9777ffc3540 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -185,8 +185,6 @@ linters-settings: # CAPI utils - pkg: sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1 alias: v1beta1conditions - - pkg: sigs.k8s.io/cluster-api/util/conditions/v1beta2 - alias: v1beta2conditions - pkg: sigs.k8s.io/cluster-api/internal/topology/names alias: topologynames # CAPD diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index f3277cd53f85..6d721e24ac66 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -55,8 +55,8 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -233,17 +233,17 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques bootstrapv1.CertificatesAvailableCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(config, config, bootstrapv1.KubeadmConfigReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := conditions.SetSummaryCondition(config, config, bootstrapv1.KubeadmConfigReadyV1Beta2Condition, + 
conditions.ForConditionTypes{ bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge and to ignore some // info message so the ready condition aggregation in other resources is less noisy. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( bootstrapv1.KubeadmConfigNotReadyV1Beta2Reason, bootstrapv1.KubeadmConfigReadyUnknownV1Beta2Reason, bootstrapv1.KubeadmConfigReadyV1Beta2Reason, @@ -295,7 +295,7 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c case !cluster.Status.InfrastructureReady: log.Info("Cluster infrastructure is not ready, waiting") v1beta1conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -308,12 +308,12 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c config.Status.Ready = true config.Status.DataSecretName = configOwner.DataSecretName() v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Reason, }) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, @@ -325,14 +325,14 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c // Based on existing code paths status.Ready is only true if status.dataSecretName is set // So we can assume that the DataSecret is available. v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Reason, }) // Same applies for the CertificatesAvailable, which must have been the case to generate // the DataSecret. 
- v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, @@ -483,7 +483,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // TODO (v1beta2): test for v1beta2 conditions if v1beta1conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -592,7 +592,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex } if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, @@ -602,7 +602,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex } v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, @@ -616,7 +616,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex files, err := r.resolveFiles(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -628,7 +628,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex users, err := r.resolveUsers(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -691,7 +691,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) ) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, 
clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, @@ -701,7 +701,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) } if err := certificates.EnsureAllExist(); err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, @@ -710,7 +710,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) return ctrl.Result{}, err } v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, @@ -758,7 +758,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) files, err := r.resolveFiles(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -770,7 +770,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) users, err := r.resolveUsers(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -783,7 +783,7 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -856,7 +856,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S ) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, 
bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, @@ -866,7 +866,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S } if err := certificates.EnsureAllExist(); err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, @@ -876,7 +876,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S } v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, @@ -911,7 +911,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S files, err := r.resolveFiles(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -923,7 +923,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S users, err := r.resolveUsers(ctx, scope.Config) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -936,7 +936,7 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, @@ -1418,7 +1418,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope scope.Config.Status.DataSecretName = ptr.To(secret.Name) scope.Config.Status.Ready = true 
v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) - v1beta2conditions.Set(scope.Config, metav1.Condition{ + conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Reason, diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index bd0cdb0d9255..c65235ef0906 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -45,8 +45,8 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/cluster-api/util/test/builder" @@ -2827,13 +2827,13 @@ func TestKubeadmConfigReconciler_Reconcile_v1beta2_conditions(t *testing.T) { g.Expect(myclient.Get(ctx, key, newConfig)).To(Succeed()) for _, conditionType := range []string{bootstrapv1.KubeadmConfigReadyV1Beta2Condition, bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition} { - condition := v1beta2conditions.Get(newConfig, conditionType) + condition := conditions.Get(newConfig, conditionType) g.Expect(condition).ToNot(BeNil(), "condition %s is missing", conditionType) g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) g.Expect(condition.Message).To(BeEmpty()) } for _, conditionType := range []string{clusterv1.PausedV1Beta2Condition} { - condition := v1beta2conditions.Get(newConfig, conditionType) + condition := conditions.Get(newConfig, conditionType) g.Expect(condition).ToNot(BeNil(), "condition %s is missing", conditionType) g.Expect(condition.Status).To(Equal(metav1.ConditionFalse)) g.Expect(condition.Message).To(BeEmpty()) diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index 6c8ad30915eb..f5592b37d184 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -28,8 +28,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) func Test_hasSameAvailableReadyUptoDateStatusAndReason(t *testing.T) { @@ -1422,7 +1422,7 @@ func withClusterV1Beta1Condition(c *clusterv1.Condition) func(*clusterv1.Cluster func withClusterV1Beta2Condition(c metav1.Condition) func(*clusterv1.Cluster) { return func(m *clusterv1.Cluster) { - v1beta2conditions.Set(m, c) + conditions.Set(m, c) } } @@ -1453,6 +1453,6 @@ func withMachineCondition(c *clusterv1.Condition) func(*clusterv1.Machine) { func withMachineV1Beta2Condition(c metav1.Condition) func(*clusterv1.Machine) { return func(m *clusterv1.Machine) { - v1beta2conditions.Set(m, c) + conditions.Set(m, c) } } diff --git a/cmd/clusterctl/client/tree/util.go b/cmd/clusterctl/client/tree/util.go index 3a9058a61ad0..076f7a432c5c 100644 --- a/cmd/clusterctl/client/tree/util.go +++ 
b/cmd/clusterctl/client/tree/util.go @@ -28,8 +28,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) // GroupVersionVirtualObject is the group version for VirtualObject. @@ -37,12 +37,12 @@ var GroupVersionVirtualObject = schema.GroupVersion{Group: "virtual.cluster.x-k8 // GetReadyV1Beta2Condition returns the ReadyCondition for an object, if defined. func GetReadyV1Beta2Condition(obj client.Object) *metav1.Condition { - if getter, ok := obj.(v1beta2conditions.Getter); ok { - return v1beta2conditions.Get(getter, clusterv1.ReadyV1Beta2Condition) + if getter, ok := obj.(conditions.Getter); ok { + return conditions.Get(getter, clusterv1.ReadyV1Beta2Condition) } if objUnstructured, ok := obj.(*unstructured.Unstructured); ok { - c, err := v1beta2conditions.UnstructuredGet(objUnstructured, clusterv1.ReadyV1Beta2Condition) + c, err := conditions.UnstructuredGet(objUnstructured, clusterv1.ReadyV1Beta2Condition) if err != nil { return nil } @@ -54,12 +54,12 @@ func GetReadyV1Beta2Condition(obj client.Object) *metav1.Condition { // GetAvailableV1Beta2Condition returns the AvailableCondition for an object, if defined. func GetAvailableV1Beta2Condition(obj client.Object) *metav1.Condition { - if getter, ok := obj.(v1beta2conditions.Getter); ok { - return v1beta2conditions.Get(getter, clusterv1.AvailableV1Beta2Condition) + if getter, ok := obj.(conditions.Getter); ok { + return conditions.Get(getter, clusterv1.AvailableV1Beta2Condition) } if objUnstructured, ok := obj.(*unstructured.Unstructured); ok { - c, err := v1beta2conditions.UnstructuredGet(objUnstructured, clusterv1.AvailableV1Beta2Condition) + c, err := conditions.UnstructuredGet(objUnstructured, clusterv1.AvailableV1Beta2Condition) if err != nil { return nil } @@ -72,8 +72,8 @@ func GetAvailableV1Beta2Condition(obj client.Object) *metav1.Condition { // GetMachineUpToDateV1Beta2Condition returns machine's UpToDate condition, if defined. // Note: The UpToDate condition only exist on machines, so no need to support reading from unstructured. func GetMachineUpToDateV1Beta2Condition(obj client.Object) *metav1.Condition { - if getter, ok := obj.(v1beta2conditions.Getter); ok { - return v1beta2conditions.Get(getter, clusterv1.MachineUpToDateV1Beta2Condition) + if getter, ok := obj.(conditions.Getter); ok { + return conditions.Get(getter, clusterv1.MachineUpToDateV1Beta2Condition) } return nil } @@ -89,12 +89,12 @@ func GetReadyCondition(obj client.Object) *clusterv1.Condition { // GetAllV1Beta2Conditions returns the other conditions (all the conditions except ready) for an object, if defined. 
func GetAllV1Beta2Conditions(obj client.Object) []metav1.Condition { - if getter, ok := obj.(v1beta2conditions.Getter); ok { + if getter, ok := obj.(conditions.Getter); ok { return getter.GetV1Beta2Conditions() } if objUnstructured, ok := obj.(*unstructured.Unstructured); ok { - conditionList, err := v1beta2conditions.UnstructuredGetAll(objUnstructured) + conditionList, err := conditions.UnstructuredGetAll(objUnstructured) if err != nil { return nil } @@ -123,20 +123,20 @@ func GetOtherConditions(obj client.Object) []*clusterv1.Condition { } func setAvailableV1Beta2Condition(obj client.Object, available *metav1.Condition) { - if setter, ok := obj.(v1beta2conditions.Setter); ok { - v1beta2conditions.Set(setter, *available) + if setter, ok := obj.(conditions.Setter); ok { + conditions.Set(setter, *available) } } func setReadyV1Beta2Condition(obj client.Object, ready *metav1.Condition) { - if setter, ok := obj.(v1beta2conditions.Setter); ok { - v1beta2conditions.Set(setter, *ready) + if setter, ok := obj.(conditions.Setter); ok { + conditions.Set(setter, *ready) } } func setUpToDateV1Beta2Condition(obj client.Object, upToDate *metav1.Condition) { - if setter, ok := obj.(v1beta2conditions.Setter); ok { - v1beta2conditions.Set(setter, *upToDate) + if setter, ok := obj.(conditions.Setter); ok { + conditions.Set(setter, *upToDate) } } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 8d430e34036a..c24f651edf8b 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -51,8 +51,8 @@ import ( "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" @@ -370,14 +370,14 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // Wait for the cluster infrastructure to be ready before creating machines if !controlPlane.Cluster.Status.InfrastructureReady { // Note: in future we might want to move this inside reconcileControlPlaneAndMachinesConditions. - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterInspectionFailedV1Beta2Reason, Message: "Waiting for Cluster status.infrastructureReady to be true", }) - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsInspectionFailedV1Beta2Reason, @@ -396,14 +396,14 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl // If ControlPlaneEndpoint is not set, return early if !controlPlane.Cluster.Spec.ControlPlaneEndpoint.IsValid() { // Note: in future we might want to move this inside reconcileControlPlaneAndMachinesConditions. 
- v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterInspectionFailedV1Beta2Reason, Message: "Waiting for Cluster spec.controlPlaneEndpoint to be set", }) - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsInspectionFailedV1Beta2Reason, @@ -551,7 +551,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context if err := certificates.LookupOrGenerateCached(ctx, r.SecretCachingClient, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneCertificatesInternalErrorV1Beta2Reason, @@ -561,7 +561,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context } if err := r.ensureCertificatesOwnerRef(ctx, certificates, *controllerRef); err != nil { - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneCertificatesInternalErrorV1Beta2Reason, @@ -573,7 +573,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition) - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Reason, @@ -982,7 +982,7 @@ func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal message = strings.Join(reasons, "\n") } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotUpToDateV1Beta2Reason, @@ -991,7 +991,7 @@ func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal continue } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineUpToDateV1Beta2Reason, @@ -1013,11 +1013,11 @@ func setConditionsToUnknown(input setConditionsToUnknownInput) { // Note: We are not checking if conditions on the Machines are already set, we just check the KCP conditions instead. // This means if Overwrite is set to false, we only set the EtcdMemberHealthy condition if the EtcdClusterHealthy condition is not set. 
// The same applies to ControlPlaneComponentsHealthy and the control plane component conditions on the Machines. - etcdClusterHealthySet := v1beta2conditions.Has(input.ControlPlane.KCP, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition) - controlPlaneComponentsHealthySet := v1beta2conditions.Has(input.ControlPlane.KCP, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition) + etcdClusterHealthySet := conditions.Has(input.ControlPlane.KCP, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition) + controlPlaneComponentsHealthySet := conditions.Has(input.ControlPlane.KCP, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition) if input.Overwrite || !etcdClusterHealthySet { - v1beta2conditions.Set(input.ControlPlane.KCP, metav1.Condition{ + conditions.Set(input.ControlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: input.EtcdClusterHealthyReason, @@ -1025,7 +1025,7 @@ func setConditionsToUnknown(input setConditionsToUnknownInput) { }) for _, machine := range input.ControlPlane.Machines { if input.ControlPlane.IsEtcdManaged() { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: input.EtcdMemberHealthyReason, @@ -1036,7 +1036,7 @@ func setConditionsToUnknown(input setConditionsToUnknownInput) { } if input.Overwrite || !controlPlaneComponentsHealthySet { - v1beta2conditions.Set(input.ControlPlane.KCP, metav1.Condition{ + conditions.Set(input.ControlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: input.ControlPlaneComponentsHealthyReason, @@ -1053,7 +1053,7 @@ func setConditionsToUnknown(input setConditionsToUnknownInput) { } for _, machine := range input.ControlPlane.Machines { for _, condition := range allMachinePodV1beta2Conditions { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: input.StaticPodReason, diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 1ab570f1ee02..be0b59f7e058 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -62,8 +62,8 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" @@ -1383,7 +1383,7 @@ kubernetesVersion: metav1.16.1 g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) g.Expect(v1beta1conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) - g.Expect(v1beta2conditions.IsFalse(kcp, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition)).To(BeTrue()) + g.Expect(conditions.IsFalse(kcp, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition)).To(BeTrue()) s, err := 
secret.GetFromNamespacedName(ctx, env, client.ObjectKey{Namespace: cluster.Namespace, Name: "foo"}, secret.ClusterCA) g.Expect(err).ToNot(HaveOccurred()) @@ -2161,7 +2161,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition kcp := defaultKCP.DeepCopy() kcp.Status.Initialized = false v1beta1conditions.MarkFalse(kcp, controlplanev1.AvailableCondition, "", clusterv1.ConditionSeverityError, "") - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotInitializedV1Beta2Reason, @@ -2400,12 +2400,12 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition kcp.Status.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) } } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Reason, }) - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason, @@ -2415,7 +2415,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Machines: map[string]*clusterv1.Machine{ defaultMachine1.Name: func() *clusterv1.Machine { m := defaultMachine1.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -2424,7 +2424,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition }(), defaultMachine2.Name: func() *clusterv1.Machine { m := defaultMachine2.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -2577,12 +2577,12 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition kcp.Status.Conditions[i].LastTransitionTime.Time = now.Add(-7 * time.Minute) } } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Reason, }) - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason, @@ -2592,7 +2592,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Machines: map[string]*clusterv1.Machine{ defaultMachine1.Name: func() *clusterv1.Machine { m := defaultMachine1.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: 
metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -2601,7 +2601,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition }(), defaultMachine2.Name: func() *clusterv1.Machine { m := defaultMachine2.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -2817,9 +2817,9 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(tc.controlPlane.KCP.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tc.expectKCPConditions, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(tc.controlPlane.KCP.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectKCPConditions, conditions.IgnoreLastTransitionTime(true))) for _, machine := range tc.controlPlane.Machines { - g.Expect(machine.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tc.expectMachineConditions, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(machine.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectMachineConditions, conditions.IgnoreLastTransitionTime(true))) } }) } diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 100ff4170f7b..212875f4dd76 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -37,8 +37,8 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -57,7 +57,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - shouldCleanupV1Beta2 := v1beta2conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && v1beta2conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + shouldCleanupV1Beta2 := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) if !(shouldCleanup || shouldCleanupV1Beta2) { continue @@ -74,7 +74,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } if shouldCleanupV1Beta2 { - v1beta2conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) } if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ @@ -180,7 +180,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C clusterv1.ConditionSeverityWarning, message) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: 
controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, @@ -213,7 +213,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", controlPlane.Machines.Len()) v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, @@ -227,7 +227,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. Skipping remediation") v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, @@ -241,7 +241,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. Skipping remediation") v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, @@ -257,7 +257,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if err != nil { v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationInternalErrorV1Beta2Reason, @@ -269,7 +269,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C log.Info("A control plane machine needs remediation, but removing this machine could result in etcd quorum loss. 
Skipping remediation") v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, @@ -300,7 +300,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. Skipping remediation") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, @@ -312,7 +312,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C log.Error(err, "Failed to move etcd leadership to candidate machine", "candidate", klog.KObj(etcdLeaderCandidate)) v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationInternalErrorV1Beta2Reason, @@ -329,7 +329,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if err := r.Client.Delete(ctx, machineToBeRemediated); err != nil { v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationInternalErrorV1Beta2Reason, @@ -345,7 +345,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C Info("Deleting Machine (remediating unhealthy Machine)") v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, @@ -532,7 +532,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed in the latest %s. 
Skipping remediation", retryPeriod)) v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, @@ -548,7 +548,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed %d times (MaxRetry %d). Skipping remediation", remediationInProgressData.RetryCount, maxRetry)) v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) - v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ + conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 5f58d19d3116..a9755fc4f7a6 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -38,8 +38,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -173,7 +173,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { if c := v1beta1conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition); c != nil { return errors.Errorf("condition %s still exists", clusterv1.MachineOwnerRemediatedCondition) } - if c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition); c != nil { + if c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition); c != nil { return errors.Errorf("condition %s still exists", clusterv1.MachineOwnerRemediatedV1Beta2Condition) } return nil @@ -2034,12 +2034,12 @@ func withMachineHealthCheckFailed() machineOption { v1beta1conditions.MarkFalse(machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") v1beta1conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineHealthCheckNodeDeletedV1Beta2Reason, }) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: 
clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, @@ -2053,12 +2053,12 @@ func withStuckRemediation() machineOption { v1beta1conditions.MarkTrue(machine, clusterv1.MachineHealthCheckSucceededCondition) v1beta1conditions.MarkFalse(machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineHealthCheckSucceededV1Beta2Reason, }) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, @@ -2199,7 +2199,7 @@ func assertMachineV1beta2Condition(ctx context.Context, g *WithT, m *clusterv1.M if err := env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { return err } - c := v1beta2conditions.Get(m, t) + c := conditions.Get(m, t) if c == nil { return errors.Errorf("condition %q was nil", t) } diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index cbaad73a5830..ac5fc1064f47 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -34,8 +34,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -180,13 +180,13 @@ func (r *KubeadmControlPlaneReconciler) updateV1Beta2Status(ctx context.Context, func setReplicas(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines) { var readyReplicas, availableReplicas, upToDateReplicas int32 for _, machine := range machines { - if v1beta2conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { readyReplicas++ } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { availableReplicas++ } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { upToDateReplicas++ } } @@ -198,7 +198,7 @@ func setReplicas(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, mac func setInitializedCondition(_ context.Context, kcp *controlplanev1.KubeadmControlPlane) { if kcp.Status.Initialized { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneInitializedV1Beta2Reason, @@ -206,7 +206,7 @@ func setInitializedCondition(_ context.Context, kcp *controlplanev1.KubeadmContr return } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: 
controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotInitializedV1Beta2Reason, @@ -222,7 +222,7 @@ func setRollingOutCondition(_ context.Context, kcp *controlplanev1.KubeadmContro rollingOutReplicas := 0 rolloutReasons := sets.Set[string]{} for _, machine := range machines { - upToDateCondition := v1beta2conditions.Get(machine, clusterv1.MachineUpToDateV1Beta2Condition) + upToDateCondition := conditions.Get(machine, clusterv1.MachineUpToDateV1Beta2Condition) if upToDateCondition == nil || upToDateCondition.Status != metav1.ConditionFalse { continue } @@ -234,7 +234,7 @@ func setRollingOutCondition(_ context.Context, kcp *controlplanev1.KubeadmContro if rollingOutReplicas == 0 { var message string - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneRollingOutV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotRollingOutV1Beta2Reason, @@ -259,7 +259,7 @@ func setRollingOutCondition(_ context.Context, kcp *controlplanev1.KubeadmContro }) message += fmt.Sprintf("\n%s", strings.Join(reasons, "\n")) } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneRollingOutV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneRollingOutV1Beta2Reason, @@ -269,7 +269,7 @@ func setRollingOutCondition(_ context.Context, kcp *controlplanev1.KubeadmContro func setScalingUpCondition(_ context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines, infrastructureObjectNotFound bool, preflightChecks internal.PreflightCheckResults) { if kcp.Spec.Replicas == nil { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneScalingUpWaitingForReplicasSetV1Beta2Reason, @@ -291,7 +291,7 @@ func setScalingUpCondition(_ context.Context, cluster *clusterv1.Cluster, kcp *c if missingReferencesMessage != "" { message = fmt.Sprintf("Scaling up would be blocked because %s", missingReferencesMessage) } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotScalingUpV1Beta2Reason, @@ -311,7 +311,7 @@ func setScalingUpCondition(_ context.Context, cluster *clusterv1.Cluster, kcp *c message += fmt.Sprintf(" is blocked because:\n%s", strings.Join(additionalMessages, "\n")) } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Reason, @@ -321,7 +321,7 @@ func setScalingUpCondition(_ context.Context, cluster *clusterv1.Cluster, kcp *c func setScalingDownCondition(_ context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines, preflightChecks internal.PreflightCheckResults) { if kcp.Spec.Replicas == nil { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: 
controlplanev1.KubeadmControlPlaneScalingDownWaitingForReplicasSetV1Beta2Reason, @@ -337,7 +337,7 @@ func setScalingDownCondition(_ context.Context, cluster *clusterv1.Cluster, kcp } if currentReplicas <= desiredReplicas { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotScalingDownV1Beta2Reason, @@ -356,7 +356,7 @@ func setScalingDownCondition(_ context.Context, cluster *clusterv1.Cluster, kcp message += fmt.Sprintf(" is blocked because:\n%s", strings.Join(additionalMessages, "\n")) } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Reason, @@ -366,7 +366,7 @@ func setScalingDownCondition(_ context.Context, cluster *clusterv1.Cluster, kcp func setMachinesReadyCondition(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines) { if len(machines) == 0 { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinesReadyNoReplicasV1Beta2Reason, @@ -374,13 +374,13 @@ func setMachinesReadyCondition(ctx context.Context, kcp *controlplanev1.KubeadmC return } - readyCondition, err := v1beta2conditions.NewAggregateCondition( + readyCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition, - v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition), + conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. 
- v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( controlplanev1.KubeadmControlPlaneMachinesNotReadyV1Beta2Reason, controlplanev1.KubeadmControlPlaneMachinesReadyUnknownV1Beta2Reason, controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Reason, @@ -389,7 +389,7 @@ func setMachinesReadyCondition(ctx context.Context, kcp *controlplanev1.KubeadmC }, ) if err != nil { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinesReadyInternalErrorV1Beta2Reason, @@ -401,7 +401,7 @@ func setMachinesReadyCondition(ctx context.Context, kcp *controlplanev1.KubeadmC return } - v1beta2conditions.Set(kcp, *readyCondition) + conditions.Set(kcp, *readyCondition) } func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines) { @@ -409,11 +409,11 @@ func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.Kubea // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { - return v1beta2conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second + return conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) if len(machines) == 0 { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinesUpToDateNoReplicasV1Beta2Reason, @@ -421,13 +421,13 @@ func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.Kubea return } - upToDateCondition, err := v1beta2conditions.NewAggregateCondition( + upToDateCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, - v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition), + conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. 
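// [Editor's note] The collapsed hunks above are hard to read, so here is a minimal, illustrative
// sketch (not part of the patch) of how the MachinesUpToDate aggregation reads after the rename
// from v1beta2conditions to conditions. All condition/reason identifiers are taken from the hunks
// above; the function name, the controlplanev1 import path, and the simplified error handling are
// assumptions for illustration only, not the exact KCP code.
package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" // import path assumed
	"sigs.k8s.io/cluster-api/util/collections"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setMachinesUpToDateConditionSketch aggregates the Machines' UpToDate condition into a single
// KCP-level condition, using a custom merge strategy so the aggregated condition carries
// KCP-specific reasons instead of the defaults applied during merge.
func setMachinesUpToDateConditionSketch(kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines) {
	upToDateCondition, err := conditions.NewAggregateCondition(
		machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition,
		conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition),
		// Custom merge strategy to override reasons applied during merge.
		conditions.CustomMergeStrategy{
			MergeStrategy: conditions.DefaultMergeStrategy(
				conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
					controlplanev1.KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason,
					controlplanev1.KubeadmControlPlaneMachinesUpToDateUnknownV1Beta2Reason,
					controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Reason,
				)),
			),
		},
	)
	if err != nil {
		// If aggregation fails, surface the condition as Unknown with an internal-error reason.
		conditions.Set(kcp, metav1.Condition{
			Type:   controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition,
			Status: metav1.ConditionUnknown,
			Reason: controlplanev1.KubeadmControlPlaneMachinesUpToDateInternalErrorV1Beta2Reason,
		})
		return
	}
	conditions.Set(kcp, *upToDateCondition)
}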
- v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( controlplanev1.KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason, controlplanev1.KubeadmControlPlaneMachinesUpToDateUnknownV1Beta2Reason, controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Reason, @@ -436,7 +436,7 @@ func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.Kubea }, ) if err != nil { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinesUpToDateInternalErrorV1Beta2Reason, @@ -448,7 +448,7 @@ func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.Kubea return } - v1beta2conditions.Set(kcp, *upToDateCondition) + conditions.Set(kcp, *upToDateCondition) } func calculateMissingReferencesMessage(kcp *controlplanev1.KubeadmControlPlane, infraMachineTemplateNotFound bool) string { @@ -461,7 +461,7 @@ func calculateMissingReferencesMessage(kcp *controlplanev1.KubeadmControlPlane, func setRemediatingCondition(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machinesToBeRemediated, unhealthyMachines collections.Machines) { if len(machinesToBeRemediated) == 0 { message := aggregateUnhealthyMachines(unhealthyMachines) - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotRemediatingV1Beta2Reason, @@ -470,14 +470,14 @@ func setRemediatingCondition(ctx context.Context, kcp *controlplanev1.KubeadmCon return } - remediatingCondition, err := v1beta2conditions.NewAggregateCondition( + remediatingCondition, err := conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, - v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition), + conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). 
) if err != nil { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneRemediatingInternalErrorV1Beta2Reason, @@ -489,7 +489,7 @@ func setRemediatingCondition(ctx context.Context, kcp *controlplanev1.KubeadmCon return } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: remediatingCondition.Type, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Reason, @@ -499,7 +499,7 @@ func setRemediatingCondition(ctx context.Context, kcp *controlplanev1.KubeadmCon func setDeletingCondition(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, deletingReason, deletingMessage string) { if kcp.DeletionTimestamp.IsZero() { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotDeletingV1Beta2Reason, @@ -507,7 +507,7 @@ func setDeletingCondition(_ context.Context, kcp *controlplanev1.KubeadmControlP return } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: deletingReason, @@ -517,7 +517,7 @@ func setDeletingCondition(_ context.Context, kcp *controlplanev1.KubeadmControlP func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, etcdIsManaged bool, etcdMembers []*etcd.Member, etcdMembersAndMachinesAreMatching bool, machines collections.Machines) { if !kcp.Status.Initialized { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotAvailableV1Beta2Reason, @@ -530,10 +530,10 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl if etcdMembers == nil { // In case the control plane just initialized, give some more time before reporting failed to get etcd members. // Note: Two minutes is the time after which we assume that not getting the list of etcd members is an actual problem. 
- if c := v1beta2conditions.Get(kcp, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition); c != nil && + if c := conditions.Get(kcp, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition); c != nil && c.Status == metav1.ConditionTrue && time.Since(c.LastTransitionTime.Time) < 2*time.Minute { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotAvailableV1Beta2Reason, @@ -542,7 +542,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl return } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneAvailableInspectionFailedV1Beta2Reason, @@ -552,7 +552,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl } if !etcdMembersAndMachinesAreMatching { - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotAvailableV1Beta2Reason, @@ -579,9 +579,9 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl // if external etcd, only look at the status of the K8s control plane components on this machine. if !etcdIsManaged { - if v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition) { + if conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition) { k8sControlPlaneHealthy++ } else if shouldSurfaceWhenAvailableTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, @@ -600,11 +600,11 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl // - API server on one machine only connect to the local etcd member // - ControllerManager and scheduler on a machine connect to the local API server (not to the control plane endpoint) // As a consequence, we consider the K8s control plane on this machine healthy only if everything is healthy. 
- if v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition) && - v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition) { + if conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition) && + conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition) { k8sControlPlaneHealthy++ } else if shouldSurfaceWhenAvailableTrue(machine, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, @@ -677,7 +677,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl } // Otherwise read the status of the etcd member from he EtcdMemberHealthy condition. - if v1beta2conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition) { + if conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition) { etcdMembersHealthy++ } else if shouldSurfaceWhenAvailableTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition) { @@ -693,7 +693,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl if kcp.DeletionTimestamp.IsZero() && (!etcdIsManaged || etcdMembersHealthy >= etcdQuorum) && k8sControlPlaneHealthy >= 1 && - v1beta2conditions.IsTrue(kcp, controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition) { + conditions.IsTrue(kcp, controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition) { messages := []string{} if etcdIsManaged && etcdMembersNotHealthy > 0 { @@ -725,7 +725,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl } } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneAvailableV1Beta2Reason, @@ -739,7 +739,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl messages = append(messages, "* Control plane metadata.deletionTimestamp is set") } - if !v1beta2conditions.IsTrue(kcp, controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition) { + if !conditions.IsTrue(kcp, controlplanev1.KubeadmControlPlaneCertificatesAvailableV1Beta2Condition) { messages = append(messages, "* Control plane certificates are not available") } @@ -762,7 +762,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl messages = append(messages, "* There are no Machines with healthy control plane components, at least 1 required") } - v1beta2conditions.Set(kcp, metav1.Condition{ + conditions.Set(kcp, metav1.Condition{ Type: 
controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneNotAvailableV1Beta2Reason, @@ -778,7 +778,7 @@ func shouldSurfaceWhenAvailableTrue(machine *clusterv1.Machine, conditionTypes . // Get the min time when one of the conditions in input transitioned to false or unknown. var t *time.Time for _, conditionType := range conditionTypes { - c := v1beta2conditions.Get(machine, conditionType) + c := conditions.Get(machine, conditionType) if c == nil { continue } @@ -837,7 +837,7 @@ func aggregateStaleMachines(machines collections.Machines) string { if !machine.GetDeletionTimestamp().IsZero() && time.Since(machine.GetDeletionTimestamp().Time) > time.Minute*15 { machineNames = append(machineNames, machine.GetName()) - deletingCondition := v1beta2conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) + deletingCondition := conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) if deletingCondition != nil && deletingCondition.Status == metav1.ConditionTrue && deletingCondition.Reason == clusterv1.MachineDeletingDrainingNodeV1Beta2Reason && diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 4671a5840e00..ec0cac18ab34 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -35,8 +35,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) func TestSetReplicas(t *testing.T) { @@ -114,9 +114,9 @@ func Test_setInitializedCondition(t *testing.T) { setInitializedCondition(ctx, tt.controlPlane.KCP) - condition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition) + condition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -208,9 +208,9 @@ func Test_setRollingOutCondition(t *testing.T) { } setRollingOutCondition(ctx, tt.kcp, machines) - condition := v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneRollingOutV1Beta2Condition) + condition := conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneRollingOutV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -379,9 +379,9 @@ func Test_setScalingUpCondition(t *testing.T) { setScalingUpCondition(ctx, tt.controlPlane.Cluster, tt.controlPlane.KCP, tt.controlPlane.Machines, tt.controlPlane.InfraMachineTemplateIsNotFound, tt.controlPlane.PreflightCheckResults) - condition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Condition) + condition := 
conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneScalingUpV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -573,9 +573,9 @@ After above Pods have been removed from the Node, the following Pods will be evi setScalingDownCondition(ctx, tt.controlPlane.Cluster, tt.controlPlane.KCP, tt.controlPlane.Machines, tt.controlPlane.PreflightCheckResults) - condition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Condition) + condition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneScalingDownV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -644,13 +644,13 @@ func Test_setMachinesReadyAndMachinesUpToDateConditions(t *testing.T) { setMachinesReadyCondition(ctx, tt.controlPlane.KCP, tt.controlPlane.Machines) setMachinesUpToDateCondition(ctx, tt.controlPlane.KCP, tt.controlPlane.Machines) - readyCondition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition) + readyCondition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition) g.Expect(readyCondition).ToNot(BeNil()) - g.Expect(*readyCondition).To(v1beta2conditions.MatchCondition(tt.expectMachinesReadyCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*readyCondition).To(conditions.MatchCondition(tt.expectMachinesReadyCondition, conditions.IgnoreLastTransitionTime(true))) - upToDateCondition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition) + upToDateCondition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition) g.Expect(upToDateCondition).ToNot(BeNil()) - g.Expect(*upToDateCondition).To(v1beta2conditions.MatchCondition(tt.expectMachinesUpToDateCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*upToDateCondition).To(conditions.MatchCondition(tt.expectMachinesUpToDateCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -739,9 +739,9 @@ func Test_setRemediatingCondition(t *testing.T) { setRemediatingCondition(ctx, tt.controlPlane.KCP, tt.controlPlane.MachinesToBeRemediatedByKCP(), tt.controlPlane.UnhealthyMachines()) - condition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition) + condition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -796,9 +796,9 @@ func TestDeletingCondition(t *testing.T) { setDeletingCondition(ctx, tc.kcp, tc.deletingReason, tc.deletingMessage) - deletingCondition := v1beta2conditions.Get(tc.kcp, controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition) 
+ deletingCondition := conditions.Get(tc.kcp, controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition) g.Expect(deletingCondition).ToNot(BeNil()) - g.Expect(*deletingCondition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*deletingCondition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1821,9 +1821,9 @@ func Test_setAvailableCondition(t *testing.T) { setAvailableCondition(ctx, tt.controlPlane.KCP, tt.controlPlane.IsEtcdManaged(), tt.controlPlane.EtcdMembers, tt.controlPlane.EtcdMembersAndMachinesAreMatching, tt.controlPlane.Machines) - availableCondition := v1beta2conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition) + availableCondition := conditions.Get(tt.controlPlane.KCP, controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition) g.Expect(availableCondition).ToNot(BeNil()) - g.Expect(*availableCondition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*availableCondition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 0c047604ab64..b562f84962b9 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -38,8 +38,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -74,7 +74,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane for _, m := range controlPlane.Machines { v1beta1conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member") - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, @@ -84,7 +84,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane v1beta1conditions.MarkUnknown(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members") - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterInspectionFailedV1Beta2Reason, @@ -105,7 +105,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. 
msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, @@ -117,7 +117,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane for _, machine := range controlPlane.Machines.Filter(collections.HasDeletionTimestamp) { v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberDeletingV1Beta2Reason, @@ -139,7 +139,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane if member == nil { v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports the cluster is composed by members %s, but the member hosted on this Machine is not included", etcdutil.MemberNames(currentMembers)) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, @@ -165,7 +165,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane if len(alarmList) > 0 { v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", strings.Join(alarmList, ", ")) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, @@ -177,7 +177,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane // Otherwise consider the member healthy v1beta1conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Reason, @@ -263,7 +263,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines for _, m := range machines { v1beta1conditions.MarkUnknown(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: %s", err) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, @@ -279,7 +279,7 @@ func (w *Workload) 
getCurrentEtcdMembersAndAlarms(ctx context.Context, machines for _, m := range machines { v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint %s reports errors: %s", etcdClient.Endpoint, strings.Join(etcdClient.Errors, ", ")) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, @@ -295,7 +295,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines for _, m := range machines { v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members") - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, @@ -311,7 +311,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines for _, m := range machines { v1beta1conditions.MarkFalse(m, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd alarms") - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, @@ -409,7 +409,7 @@ func compareMachinesAndMembers(controlPlane *ControlPlane, nodes *corev1.NodeLis // The same info will also surface into the EtcdClusterHealthy condition on kcp. 
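// [Editor's note] Throughout this patch the same dual-write pattern repeats: the deprecated
// v1beta1 condition is still marked via the relocated helper package (util/conditions/deprecated/v1beta1),
// while the new metav1-based condition is set via the top-level util/conditions package. Below is a
// minimal, illustrative sketch of that pattern (not part of the patch); the function name, the
// controlplanev1 import path, and the message text on the new condition are assumptions, while the
// condition types and reasons are taken from the hunks above.
package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" // import path assumed
	"sigs.k8s.io/cluster-api/util/conditions"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

// markEtcdMemberMissingSketch shows both condition APIs being updated side by side during the
// deprecation window, as this patch does at every call site.
func markEtcdMemberMissingSketch(machine *clusterv1.Machine) {
	// Deprecated clusterv1.Condition, now served from util/conditions/deprecated/v1beta1.
	v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition,
		controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member")

	// New metav1.Condition, set via the top-level util/conditions package.
	conditions.Set(machine, metav1.Condition{
		Type:    controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition,
		Status:  metav1.ConditionFalse,
		Reason:  controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason,
		Message: "Missing etcd member", // message text assumed for illustration
	})
}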
v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, @@ -500,7 +500,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } for _, condition := range allMachinePodV1beta2Conditions { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -511,7 +511,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * v1beta1conditions.MarkUnknown(controlPlane.KCP, controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: %v", err) - v1beta2conditions.Set(controlPlane.KCP, metav1.Condition{ + conditions.Set(controlPlane.KCP, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsInspectionFailedV1Beta2Reason, @@ -535,7 +535,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -572,7 +572,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } for _, condition := range allMachinePodV1beta2Conditions { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodDeletingV1Beta2Reason, @@ -591,7 +591,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } for _, condition := range allMachinePodV1beta2Conditions { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -629,7 +629,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } for _, condition := range allMachinePodV1beta2Conditions { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -683,7 +683,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if nodeReadyUnknown(node) { v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale") - 
v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -703,7 +703,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if apierrors.IsNotFound(err) { v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod %s is missing", podKey.Name) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodDoesNotExistV1Beta2Reason, @@ -713,7 +713,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -734,7 +734,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if podCondition(pod, corev1.PodScheduled) != corev1.ConditionTrue { v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -748,7 +748,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if podCondition(pod, corev1.PodInitialized) != corev1.ConditionTrue { v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -760,7 +760,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // If there are no error from containers, report provisioning without further details. 
v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -775,7 +775,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if podCondition(pod, corev1.PodReady) == corev1.ConditionTrue { v1beta1conditions.MarkTrue(machine, staticPodCondition) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -800,7 +800,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if terminatedWithError { v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerWaitingMessages, ", ")) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -812,7 +812,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // e.g., "waiting.reason: ErrImagePull" is an error, but since LastTerminationState does not exist, this cannot be differentiated from "PodProvisioningReason" v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, strings.Join(containerWaitingMessages, ", ")) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -831,7 +831,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if len(containerTerminatedMessages) > 0 { v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, strings.Join(containerTerminatedMessages, ", ")) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -844,7 +844,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // Report this as part of the provisioning process because the corresponding control plane component is not ready yet. v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -856,7 +856,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // NOTE: This should never happen for the static pods running control plane components. 
v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -868,7 +868,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // NOTE: This should never happen for the static pods running control plane components. v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -879,7 +879,7 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // to an error in communicating with the host of the pod. v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: staticPodV1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -1000,7 +1000,7 @@ type aggregateV1Beta2ConditionsFromMachinesToKCPInput struct { } // aggregateV1Beta2ConditionsFromMachinesToKCP aggregates a group of conditions from machines to KCP. -// Note: the aggregation is computed in way that is similar to how v1beta2conditions.NewAggregateCondition works, but in this case the +// Note: the aggregation is computed in way that is similar to how conditions.NewAggregateCondition works, but in this case the // implementation is simpler/less flexible and it surfaces only issues & unknown conditions. func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2ConditionsFromMachinesToKCPInput) { // Aggregates machines for condition status. @@ -1016,7 +1016,7 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition conditionCount := 0 conditionMessages := sets.Set[string]{} for _, condition := range input.machineConditions { - if machineCondition := v1beta2conditions.Get(machine, condition); machineCondition != nil { + if machineCondition := conditions.Get(machine, condition); machineCondition != nil { conditionCount++ conditionMessages.Insert(machineCondition.Message) switch machineCondition.Status { @@ -1098,7 +1098,7 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition // In case of at least one machine with errors or KCP level errors (nodes without machines), report false. if len(input.kcpErrors) > 0 || len(kcpMachinesWithErrors) > 0 { - v1beta2conditions.Set(input.controlPlane.KCP, metav1.Condition{ + conditions.Set(input.controlPlane.KCP, metav1.Condition{ Type: input.condition, Status: metav1.ConditionFalse, Reason: input.falseReason, @@ -1109,7 +1109,7 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition // Otherwise, if there is at least one machine with unknown, report unknown. 
if len(kcpMachinesWithUnknown) > 0 { - v1beta2conditions.Set(input.controlPlane.KCP, metav1.Condition{ + conditions.Set(input.controlPlane.KCP, metav1.Condition{ Type: input.condition, Status: metav1.ConditionUnknown, Reason: input.unknownReason, @@ -1120,7 +1120,7 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition // In case of no errors, no unknown, and at least one machine with info, report true. if len(kcpMachinesWithInfo) > 0 { - v1beta2conditions.Set(input.controlPlane.KCP, metav1.Condition{ + conditions.Set(input.controlPlane.KCP, metav1.Condition{ Type: input.condition, Status: metav1.ConditionTrue, Reason: input.trueReason, @@ -1129,7 +1129,7 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition } // This last case should happen only if there are no provisioned machines. - v1beta2conditions.Set(input.controlPlane.KCP, metav1.Condition{ + conditions.Set(input.controlPlane.KCP, metav1.Condition{ Type: input.condition, Status: metav1.ConditionUnknown, Reason: input.unknownReason, diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index a5d03ee17620..e8ca26a902b0 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -40,8 +40,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) func TestUpdateEtcdConditions(t *testing.T) { @@ -619,13 +619,13 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } if tt.expectedKCPV1Beta2Condition != nil { - g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) } for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for Machine %s", m.Name) - g.Expect(m.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], v1beta2conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) + g.Expect(m.GetV1Beta2Conditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) } g.Expect(controlPane.EtcdMembersAndMachinesAreMatching).To(Equal(tt.expectedEtcdMembersAndMachinesAreMatching), "EtcdMembersAndMachinesAreMatching does not match") @@ -680,7 +680,7 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { g.Expect(*v1beta1conditions.Get(tt.kcp, 
controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } if tt.expectedKCPV1Beta2Condition != nil { - g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) } }) } @@ -1104,12 +1104,12 @@ func TestUpdateStaticPodConditions(t *testing.T) { if tt.expectedKCPCondition != nil { g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) } - g.Expect(*v1beta2conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition)).To(v1beta2conditions.MatchCondition(tt.expectedKCPV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition)).To(conditions.MatchCondition(tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) - g.Expect(m.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(m.GetV1Beta2Conditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true))) } }) } @@ -1409,7 +1409,7 @@ func TestUpdateStaticPodCondition(t *testing.T) { w.updateStaticPodCondition(ctx, machine, *tt.node, component, condition, v1beta2Condition) g.Expect(*v1beta1conditions.Get(machine, condition)).To(v1beta1conditions.MatchCondition(tt.expectedCondition)) - g.Expect(*v1beta2conditions.Get(machine, v1beta2Condition)).To(v1beta2conditions.MatchCondition(tt.expectedV1Beta2Condition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(machine, v1beta2Condition)).To(conditions.MatchCondition(tt.expectedV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1745,7 +1745,7 @@ func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) { } aggregateV1Beta2ConditionsFromMachinesToKCP(input) - g.Expect(*v1beta2conditions.Get(input.controlPlane.KCP, conditionType)).To(v1beta2conditions.MatchCondition(tt.expectedCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(input.controlPlane.KCP, conditionType)).To(conditions.MatchCondition(tt.expectedCondition, conditions.IgnoreLastTransitionTime(true))) }) } } diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go index 725154b1ad7b..c1f5d2b107f4 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller.go @@ -39,8 +39,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" + "sigs.k8s.io/cluster-api/util/conditions" 
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" @@ -232,7 +232,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl if err != nil { modifiedExtensionConfig := extensionConfig.DeepCopy() v1beta1conditions.MarkFalse(modifiedExtensionConfig, runtimev1.RuntimeExtensionDiscoveredCondition, runtimev1.DiscoveryFailedReason, clusterv1.ConditionSeverityError, "Error in discovery: %v", err) - v1beta2conditions.Set(modifiedExtensionConfig, metav1.Condition{ + conditions.Set(modifiedExtensionConfig, metav1.Condition{ Type: runtimev1.ExtensionConfigDiscoveredV1Beta2Condition, Status: metav1.ConditionFalse, Reason: runtimev1.ExtensionConfigNotDiscoveredV1Beta2Reason, @@ -242,7 +242,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl } v1beta1conditions.MarkTrue(discoveredExtension, runtimev1.RuntimeExtensionDiscoveredCondition) - v1beta2conditions.Set(discoveredExtension, metav1.Condition{ + conditions.Set(discoveredExtension, metav1.Condition{ Type: runtimev1.ExtensionConfigDiscoveredV1Beta2Condition, Status: metav1.ConditionTrue, Reason: runtimev1.ExtensionConfigDiscoveredV1Beta2Reason, diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go index 842a4a143fcb..26801d8c4f76 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go @@ -46,7 +46,7 @@ import ( runtimeregistry "sigs.k8s.io/cluster-api/internal/runtime/registry" fakev1alpha1 "sigs.k8s.io/cluster-api/internal/runtime/test/v1alpha1" "sigs.k8s.io/cluster-api/util" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) func TestExtensionReconciler_Reconcile(t *testing.T) { @@ -116,7 +116,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) { g.Eventually(func(g Gomega) { conf := &runtimev1.ExtensionConfig{} g.Expect(env.Get(ctx, util.ObjectKey(extensionConfig), conf)).To(Succeed()) - pausedCondition := v1beta2conditions.Get(conf, clusterv1.PausedV1Beta2Condition) + pausedCondition := conditions.Get(conf, clusterv1.PausedV1Beta2Condition) g.Expect(pausedCondition).ToNot(BeNil()) g.Expect(pausedCondition.ObservedGeneration).To(Equal(conf.Generation)) }).WithTimeout(10 * time.Second).WithPolling(100 * time.Millisecond).Should(Succeed()) diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 7e1a9289f127..22210dd51281 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -51,8 +51,8 @@ import ( "sigs.k8s.io/cluster-api/internal/hooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" @@ -210,14 +210,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct } else { msg = fmt.Sprintf("Remote connection probe failed, probe 
last succeeded at %s", lastProbeSuccessTime.Format(time.RFC3339)) } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterRemoteConnectionProbeFailedV1Beta2Reason, Message: msg, }) } else { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterRemoteConnectionProbeSucceededV1Beta2Reason, diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index 0179622053f2..350ef8f0b55f 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" utilconversion "sigs.k8s.io/cluster-api/util/conversion" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -135,13 +135,13 @@ func setControlPlaneReplicas(_ context.Context, cluster *clusterv1.Cluster, cont var replicas, readyReplicas, availableReplicas, upToDateReplicas *int32 for _, machine := range controlPlaneMachines.UnsortedList() { replicas = ptr.To(ptr.Deref(replicas, 0) + 1) - if v1beta2conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { readyReplicas = ptr.To(ptr.Deref(readyReplicas, 0) + 1) } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { availableReplicas = ptr.To(ptr.Deref(availableReplicas, 0) + 1) } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { upToDateReplicas = ptr.To(ptr.Deref(upToDateReplicas, 0) + 1) } } @@ -223,13 +223,13 @@ func setWorkersReplicas(_ context.Context, cluster *clusterv1.Cluster, machinePo continue } currentReplicas = ptr.To(ptr.Deref(currentReplicas, 0) + 1) - if v1beta2conditions.IsTrue(m, clusterv1.MachineReadyV1Beta2Condition) { + if conditions.IsTrue(m, clusterv1.MachineReadyV1Beta2Condition) { readyReplicas = ptr.To(ptr.Deref(readyReplicas, 0) + 1) } - if v1beta2conditions.IsTrue(m, clusterv1.MachineAvailableV1Beta2Condition) { + if conditions.IsTrue(m, clusterv1.MachineAvailableV1Beta2Condition) { availableReplicas = ptr.To(ptr.Deref(availableReplicas, 0) + 1) } - if v1beta2conditions.IsTrue(m, clusterv1.MachineUpToDateV1Beta2Condition) { + if conditions.IsTrue(m, clusterv1.MachineUpToDateV1Beta2Condition) { upToDateReplicas = ptr.To(ptr.Deref(upToDateReplicas, 0) + 1) } @@ -252,7 +252,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust if cluster.DeletionTimestamp.IsZero() { message = "Waiting for cluster topology to be reconciled" //nolint:goconst // Not making this a constant for now } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, @@ -262,17 +262,17 @@ func 
setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust } if infraCluster != nil { - ready, err := v1beta2conditions.NewMirrorConditionFromUnstructured( + ready, err := conditions.NewMirrorConditionFromUnstructured( infraCluster, - contract.InfrastructureCluster().ReadyConditionType(), v1beta2conditions.TargetConditionType(clusterv1.ClusterInfrastructureReadyV1Beta2Condition), - v1beta2conditions.FallbackCondition{ - Status: v1beta2conditions.BoolToStatus(cluster.Status.InfrastructureReady), + contract.InfrastructureCluster().ReadyConditionType(), conditions.TargetConditionType(clusterv1.ClusterInfrastructureReadyV1Beta2Condition), + conditions.FallbackCondition{ + Status: conditions.BoolToStatus(cluster.Status.InfrastructureReady), Reason: fallbackReason(cluster.Status.InfrastructureReady, clusterv1.ClusterInfrastructureReadyV1Beta2Reason, clusterv1.ClusterInfrastructureNotReadyV1Beta2Reason), Message: infrastructureReadyFallBackMessage(cluster.Spec.InfrastructureRef.Kind, cluster.Status.InfrastructureReady), }, ) if err != nil { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterInfrastructureInvalidConditionReportedV1Beta2Reason, @@ -283,16 +283,16 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. - if ready.Reason == v1beta2conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { + if ready.Reason == conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { ready.Reason = clusterv1.ClusterInfrastructureReadyV1Beta2Reason } - v1beta2conditions.Set(cluster, *ready) + conditions.Set(cluster, *ready) return } // If we got errors in reading the infra cluster (this should happen rarely), surface them if !infraClusterIsNotFound { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterInfrastructureInternalErrorV1Beta2Reason, @@ -305,7 +305,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust // Infra cluster missing when the cluster is deleting. if !cluster.DeletionTimestamp.IsZero() { if cluster.Status.InfrastructureReady { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, @@ -314,7 +314,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, @@ -325,7 +325,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust // Report an issue if infra cluster missing after the cluster has been initialized (and the cluster is still running). 
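// Illustrative sketch, not part of the patch: the mirror-with-fallback pattern used by
// setInfrastructureReadyCondition above, written against the renamed package. The "Ready"
// source condition type is an assumption standing in for contract.InfrastructureCluster().ReadyConditionType(),
// and mirrorInfraReady is a hypothetical helper.
package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func mirrorInfraReady(cluster *clusterv1.Cluster, infraCluster *unstructured.Unstructured) error {
	// Mirror the InfrastructureCluster's Ready condition onto the Cluster; if the provider
	// does not report it, fall back to the boolean status.infrastructureReady.
	ready, err := conditions.NewMirrorConditionFromUnstructured(
		infraCluster,
		"Ready", // assumed source condition type (the controller resolves it via the contract package)
		conditions.TargetConditionType(clusterv1.ClusterInfrastructureReadyV1Beta2Condition),
		conditions.FallbackCondition{
			Status:  conditions.BoolToStatus(cluster.Status.InfrastructureReady),
			Reason:  clusterv1.ClusterInfrastructureReadyV1Beta2Reason, // the real code picks the reason based on the boolean
			Message: "",
		},
	)
	if err != nil {
		return err
	}
	conditions.Set(cluster, *ready)
	return nil
}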
if cluster.Status.InfrastructureReady { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, @@ -337,7 +337,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust // If the cluster is not deleting, and infra cluster object does not exist yet, // surface this fact. This could happen when: // - when applying the yaml file with the cluster and all the objects referenced by it (provisioning yet to start/started, but status.InfrastructureReady not yet set). - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, @@ -352,7 +352,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu if cluster.DeletionTimestamp.IsZero() { message = "Waiting for cluster topology to be reconciled" } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, @@ -362,17 +362,17 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu } if controlPlane != nil { - available, err := v1beta2conditions.NewMirrorConditionFromUnstructured( + available, err := conditions.NewMirrorConditionFromUnstructured( controlPlane, - contract.ControlPlane().AvailableConditionType(), v1beta2conditions.TargetConditionType(clusterv1.ClusterControlPlaneAvailableV1Beta2Condition), - v1beta2conditions.FallbackCondition{ - Status: v1beta2conditions.BoolToStatus(cluster.Status.ControlPlaneReady), + contract.ControlPlane().AvailableConditionType(), conditions.TargetConditionType(clusterv1.ClusterControlPlaneAvailableV1Beta2Condition), + conditions.FallbackCondition{ + Status: conditions.BoolToStatus(cluster.Status.ControlPlaneReady), Reason: fallbackReason(cluster.Status.ControlPlaneReady, clusterv1.ClusterControlPlaneAvailableV1Beta2Reason, clusterv1.ClusterControlPlaneNotAvailableV1Beta2Reason), Message: controlPlaneAvailableFallBackMessage(cluster.Spec.ControlPlaneRef.Kind, cluster.Status.ControlPlaneReady), }, ) if err != nil { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneInvalidConditionReportedV1Beta2Reason, @@ -383,16 +383,16 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. 
- if available.Reason == v1beta2conditions.NoReasonReported && available.Status == metav1.ConditionTrue { + if available.Reason == conditions.NoReasonReported && available.Status == metav1.ConditionTrue { available.Reason = clusterv1.ClusterControlPlaneAvailableV1Beta2Reason } - v1beta2conditions.Set(cluster, *available) + conditions.Set(cluster, *available) return } // If we got errors in reading the control plane (this should happen rarely), surface them if !controlPlaneIsNotFound { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneInternalErrorV1Beta2Reason, @@ -405,7 +405,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu // Infra cluster missing when the cluster is deleting. if !cluster.DeletionTimestamp.IsZero() { if cluster.Status.ControlPlaneReady { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, @@ -414,7 +414,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, @@ -425,7 +425,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu // Report an issue if control plane missing after the cluster has been initialized (and the cluster is still running). if cluster.Status.ControlPlaneReady { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, @@ -437,7 +437,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu // If the cluster is not deleting, and control plane object does not exist yet, // surface this fact. This could happen when: // - when applying the yaml file with the cluster and all the objects referenced by it (provisioning yet to start/started, but status.ControlPlaneReady not yet set). - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, @@ -449,7 +449,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 log := ctrl.LoggerFrom(ctx) // No-op if control plane is already initialized. 
- if v1beta2conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedV1Beta2Condition) { + if conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedV1Beta2Condition) { return } @@ -459,7 +459,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 if cluster.DeletionTimestamp.IsZero() { message = "Waiting for cluster topology to be reconciled" } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, @@ -472,7 +472,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 if cluster.Spec.ControlPlaneRef != nil { if controlPlane == nil { if !controlPlaneIsNotFound { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneInitializedInternalErrorV1Beta2Reason, @@ -481,7 +481,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, @@ -493,7 +493,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 initialized, err := external.IsInitialized(controlPlane) if err != nil { log.Error(err, fmt.Sprintf("Failed to get status.initialized from %s", cluster.Spec.ControlPlaneRef.Kind)) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneInitializedInternalErrorV1Beta2Reason, @@ -503,7 +503,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 } if initialized { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedV1Beta2Reason, @@ -511,7 +511,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneNotInitializedV1Beta2Reason, @@ -524,7 +524,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 // when at least one of those machines has a node. 
if !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneInitializedInternalErrorV1Beta2Reason, @@ -534,7 +534,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 } if len(controlPlaneMachines.Filter(collections.HasNode())) > 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedV1Beta2Reason, @@ -542,7 +542,7 @@ func setControlPlaneInitializedCondition(ctx context.Context, cluster *clusterv1 return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneNotInitializedV1Beta2Reason, @@ -555,7 +555,7 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste // If there was some unexpected errors in listing descendants (this should never happen), surface it. if !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterWorkersAvailableInternalErrorV1Beta2Reason, @@ -565,7 +565,7 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste } if len(machinePools.Items)+len(machineDeployments.Items) == 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterWorkersAvailableNoWorkersV1Beta2Reason, @@ -581,13 +581,13 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste ws = append(ws, aggregationWrapper{md: &md}) } - workersAvailableCondition, err := v1beta2conditions.NewAggregateCondition( + workersAvailableCondition, err := conditions.NewAggregateCondition( ws, clusterv1.AvailableV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.ClusterWorkersAvailableV1Beta2Condition), + conditions.TargetConditionType(clusterv1.ClusterWorkersAvailableV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.ClusterWorkersNotAvailableV1Beta2Reason, clusterv1.ClusterWorkersAvailableUnknownV1Beta2Reason, clusterv1.ClusterWorkersAvailableV1Beta2Reason, @@ -597,7 +597,7 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste ) if err != nil { log.Error(err, "Failed to aggregate MachinePool and MachineDeployment's Available conditions") - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterWorkersAvailableInternalErrorV1Beta2Reason, @@ -606,7 
+606,7 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste return } - v1beta2conditions.Set(cluster, *workersAvailableCondition) + conditions.Set(cluster, *workersAvailableCondition) } func setControlPlaneMachinesReadyCondition(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines, getDescendantsSucceeded bool) { @@ -638,7 +638,7 @@ func setControlPlaneMachinesUpToDateCondition(ctx context.Context, cluster *clus // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { - return v1beta2conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second + return conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) machinesConditionSetter{ @@ -657,7 +657,7 @@ func setWorkerMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1. // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { - return v1beta2conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second + return conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) machinesConditionSetter{ @@ -686,7 +686,7 @@ func (s machinesConditionSetter) setMachinesCondition(ctx context.Context, clust // If there was some unexpected errors in listing descendants (this should never happen), surface it. 
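// Illustrative sketch, not part of the patch: the NewAggregateCondition + CustomMergeStrategy
// pattern used by setWorkersAvailableCondition and machinesConditionSetter in the hunks above.
// aggregateMachinesReady and the three placeholder reason strings are hypothetical; the
// controller uses the clusterv1 reason constants instead.
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func aggregateMachinesReady(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error {
	// Roll each Machine's Ready condition up into one Cluster-level condition,
	// overriding the reasons that would otherwise be computed during merge.
	aggregated, err := conditions.NewAggregateCondition(
		machines, clusterv1.MachineReadyV1Beta2Condition,
		conditions.TargetConditionType(clusterv1.ClusterControlPlaneMachinesReadyV1Beta2Condition),
		conditions.CustomMergeStrategy{
			MergeStrategy: conditions.DefaultMergeStrategy(
				conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
					"MachinesNotReady",     // issue reason (placeholder)
					"MachinesReadyUnknown", // unknown reason (placeholder)
					"MachinesReady",        // info reason (placeholder)
				)),
			),
		},
	)
	if err != nil {
		return err
	}
	conditions.Set(cluster, *aggregated)
	return nil
}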
if !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: s.condition, Status: metav1.ConditionUnknown, Reason: s.internalErrorReason, @@ -696,7 +696,7 @@ func (s machinesConditionSetter) setMachinesCondition(ctx context.Context, clust } if len(machines) == 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: s.condition, Status: metav1.ConditionTrue, Reason: s.noReplicasReason, @@ -704,13 +704,13 @@ func (s machinesConditionSetter) setMachinesCondition(ctx context.Context, clust return } - machinesCondition, err := v1beta2conditions.NewAggregateCondition( + machinesCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), s.machineAggregationCondition, - v1beta2conditions.TargetConditionType(s.condition), + conditions.TargetConditionType(s.condition), // Using a custom merge strategy to override reasons applied during merge - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( s.issueReason, s.unknownReason, s.infoReason, @@ -720,7 +720,7 @@ func (s machinesConditionSetter) setMachinesCondition(ctx context.Context, clust ) if err != nil { log.Error(err, fmt.Sprintf("Failed to aggregate Machine's %s conditions", s.machineAggregationCondition)) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: s.condition, Status: metav1.ConditionUnknown, Reason: s.internalErrorReason, @@ -729,12 +729,12 @@ func (s machinesConditionSetter) setMachinesCondition(ctx context.Context, clust return } - v1beta2conditions.Set(cluster, *machinesCondition) + conditions.Set(cluster, *machinesCondition) } func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, machinesToBeRemediated, unhealthyMachines collections.Machines, getMachinesSucceeded bool) { if !getMachinesSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterRemediatingInternalErrorV1Beta2Reason, @@ -745,7 +745,7 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma if len(machinesToBeRemediated) == 0 { message := aggregateUnhealthyMachines(unhealthyMachines) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRemediatingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotRemediatingV1Beta2Reason, @@ -754,14 +754,14 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma return } - remediatingCondition, err := v1beta2conditions.NewAggregateCondition( + remediatingCondition, err := conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.ClusterRemediatingV1Beta2Condition), + conditions.TargetConditionType(clusterv1.ClusterRemediatingV1Beta2Condition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc // because we are considering only machinesToBeRemediated (and we can pin 
the reason when we set the condition). ) if err != nil { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterRemediatingInternalErrorV1Beta2Reason, @@ -773,7 +773,7 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: remediatingCondition.Type, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterRemediatingV1Beta2Reason, @@ -786,7 +786,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con // If there was some unexpected errors in getting control plane or listing descendants (this should never happen), surface it. if (cluster.Spec.ControlPlaneRef != nil && controlPlane == nil && !controlPlaneIsNotFound) || !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRollingOutV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterRollingOutInternalErrorV1Beta2Reason, @@ -799,7 +799,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con if controlPlane != nil { // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions RollingOut not yet reported from ...". - if c, err := v1beta2conditions.UnstructuredGet(controlPlane, clusterv1.RollingOutV1Beta2Condition); err == nil && c != nil { + if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.RollingOutV1Beta2Condition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) } } @@ -811,7 +811,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con } if len(ws) == 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRollingOutV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotRollingOutV1Beta2Reason, @@ -819,28 +819,28 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con return } - rollingOutCondition, err := v1beta2conditions.NewAggregateCondition( + rollingOutCondition, err := conditions.NewAggregateCondition( ws, clusterv1.RollingOutV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.ClusterRollingOutV1Beta2Condition), + conditions.TargetConditionType(clusterv1.ClusterRollingOutV1Beta2Condition), // Instruct aggregate to consider RollingOut condition with negative polarity. - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.RollingOutV1Beta2Condition}, + conditions.NegativePolarityConditionTypes{clusterv1.RollingOutV1Beta2Condition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the RollingOut has negative polarity. 
- v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.TargetConditionHasPositivePolarity(false), - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.TargetConditionHasPositivePolarity(false), + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.ClusterRollingOutV1Beta2Reason, clusterv1.ClusterRollingOutUnknownV1Beta2Reason, clusterv1.ClusterNotRollingOutV1Beta2Reason, )), - v1beta2conditions.GetPriorityFunc(v1beta2conditions.GetDefaultMergePriorityFunc(clusterv1.RollingOutV1Beta2Condition)), + conditions.GetPriorityFunc(conditions.GetDefaultMergePriorityFunc(clusterv1.RollingOutV1Beta2Condition)), ), }, ) if err != nil { log.Error(err, "Failed to aggregate ControlPlane, MachinePool, MachineDeployment's RollingOut conditions") - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterRollingOutV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterRollingOutInternalErrorV1Beta2Reason, @@ -849,7 +849,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con return } - v1beta2conditions.Set(cluster, *rollingOutCondition) + conditions.Set(cluster, *rollingOutCondition) } func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, controlPlane *unstructured.Unstructured, machinePools expv1.MachinePoolList, machineDeployments clusterv1.MachineDeploymentList, machineSets clusterv1.MachineSetList, controlPlaneIsNotFound bool, getDescendantsSucceeded bool) { @@ -857,7 +857,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont // If there was some unexpected errors in getting control plane or listing descendants (this should never happen), surface it. if (cluster.Spec.ControlPlaneRef != nil && controlPlane == nil && !controlPlaneIsNotFound) || !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterScalingUpInternalErrorV1Beta2Reason, @@ -870,7 +870,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont if controlPlane != nil { // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingUp not yet reported from ...". 
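// Illustrative sketch, not part of the patch: aggregating a negative-polarity condition
// (ScalingUp) the way the RollingOut/ScalingUp/ScalingDown hunks above do. The condition
// types and reasons are the ones referenced in those hunks; aggregateScalingUp is hypothetical.
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func aggregateScalingUp(cluster *clusterv1.Cluster, machineDeployments []*clusterv1.MachineDeployment) error {
	scalingUp, err := conditions.NewAggregateCondition(
		machineDeployments, clusterv1.ScalingUpV1Beta2Condition,
		conditions.TargetConditionType(clusterv1.ClusterScalingUpV1Beta2Condition),
		// ScalingUp=True means "something is still happening", so it has negative polarity.
		conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition},
		conditions.CustomMergeStrategy{
			MergeStrategy: conditions.DefaultMergeStrategy(
				conditions.TargetConditionHasPositivePolarity(false),
				conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
					clusterv1.ClusterScalingUpV1Beta2Reason,        // issue
					clusterv1.ClusterScalingUpUnknownV1Beta2Reason, // unknown
					clusterv1.ClusterNotScalingUpV1Beta2Reason,     // info
				)),
				conditions.GetPriorityFunc(conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)),
			),
		},
	)
	if err != nil {
		return err
	}
	conditions.Set(cluster, *scalingUp)
	return nil
}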
- if c, err := v1beta2conditions.UnstructuredGet(controlPlane, clusterv1.ScalingUpV1Beta2Condition); err == nil && c != nil { + if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingUpV1Beta2Condition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) } } @@ -888,7 +888,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont } if len(ws) == 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotScalingUpV1Beta2Reason, @@ -896,28 +896,28 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont return } - scalingUpCondition, err := v1beta2conditions.NewAggregateCondition( + scalingUpCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingUpV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.ClusterScalingUpV1Beta2Condition), + conditions.TargetConditionType(clusterv1.ClusterScalingUpV1Beta2Condition), // Instruct aggregate to consider ScalingUp condition with negative polarity. - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, + conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingUp has negative polarity. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.TargetConditionHasPositivePolarity(false), - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.TargetConditionHasPositivePolarity(false), + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.ClusterScalingUpV1Beta2Reason, clusterv1.ClusterScalingUpUnknownV1Beta2Reason, clusterv1.ClusterNotScalingUpV1Beta2Reason, )), - v1beta2conditions.GetPriorityFunc(v1beta2conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)), + conditions.GetPriorityFunc(conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)), ), }, ) if err != nil { log.Error(err, "Failed to aggregate ControlPlane, MachinePool, MachineDeployment, MachineSet's ScalingUp conditions") - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterScalingUpInternalErrorV1Beta2Reason, @@ -926,7 +926,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont return } - v1beta2conditions.Set(cluster, *scalingUpCondition) + conditions.Set(cluster, *scalingUpCondition) } func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, controlPlane *unstructured.Unstructured, machinePools expv1.MachinePoolList, machineDeployments clusterv1.MachineDeploymentList, machineSets clusterv1.MachineSetList, controlPlaneIsNotFound bool, getDescendantsSucceeded bool) { @@ -934,7 +934,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co // If there was some unexpected errors in getting control plane or listing descendants (this should never happen), surface it. 
if (cluster.Spec.ControlPlaneRef != nil && controlPlane == nil && !controlPlaneIsNotFound) || !getDescendantsSucceeded { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterScalingDownInternalErrorV1Beta2Reason, @@ -947,7 +947,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co if controlPlane != nil { // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingDown not yet reported from ...". - if c, err := v1beta2conditions.UnstructuredGet(controlPlane, clusterv1.ScalingDownV1Beta2Condition); err == nil && c != nil { + if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingDownV1Beta2Condition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) } } @@ -965,7 +965,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co } if len(ws) == 0 { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotScalingDownV1Beta2Reason, @@ -973,28 +973,28 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co return } - scalingDownCondition, err := v1beta2conditions.NewAggregateCondition( + scalingDownCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingDownV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.ClusterScalingDownV1Beta2Condition), + conditions.TargetConditionType(clusterv1.ClusterScalingDownV1Beta2Condition), // Instruct aggregate to consider ScalingDown condition with negative polarity. - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ScalingDownV1Beta2Condition}, + conditions.NegativePolarityConditionTypes{clusterv1.ScalingDownV1Beta2Condition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingDown has negative polarity. 
- v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.TargetConditionHasPositivePolarity(false), - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.TargetConditionHasPositivePolarity(false), + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.ClusterScalingDownV1Beta2Reason, clusterv1.ClusterScalingDownUnknownV1Beta2Reason, clusterv1.ClusterNotScalingDownV1Beta2Reason, )), - v1beta2conditions.GetPriorityFunc(v1beta2conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingDownV1Beta2Condition)), + conditions.GetPriorityFunc(conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingDownV1Beta2Condition)), ), }, ) if err != nil { log.Error(err, "Failed to aggregate ControlPlane, MachinePool, MachineDeployment, MachineSet's ScalingDown conditions") - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterScalingDownInternalErrorV1Beta2Reason, @@ -1003,12 +1003,12 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co return } - v1beta2conditions.Set(cluster, *scalingDownCondition) + conditions.Set(cluster, *scalingDownCondition) } func setDeletingCondition(_ context.Context, cluster *clusterv1.Cluster, deletingReason, deletingMessage string) { if cluster.DeletionTimestamp.IsZero() { - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterDeletingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotDeletingV1Beta2Reason, @@ -1016,7 +1016,7 @@ func setDeletingCondition(_ context.Context, cluster *clusterv1.Cluster, deletin return } - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterDeletingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: deletingReason, @@ -1029,38 +1029,38 @@ type clusterConditionCustomMergeStrategy struct { negativePolarityConditionTypes []string } -func (c clusterConditionCustomMergeStrategy) Merge(operation v1beta2conditions.MergeOperation, conditions []v1beta2conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { - return v1beta2conditions.DefaultMergeStrategy(v1beta2conditions.GetPriorityFunc( - func(condition metav1.Condition) v1beta2conditions.MergePriority { +func (c clusterConditionCustomMergeStrategy) Merge(operation conditions.MergeOperation, mergeConditions []conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { + return conditions.DefaultMergeStrategy(conditions.GetPriorityFunc( + func(condition metav1.Condition) conditions.MergePriority { // While cluster is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
if !c.cluster.DeletionTimestamp.IsZero() { if condition.Type == clusterv1.ClusterInfrastructureReadyV1Beta2Condition && (condition.Reason == clusterv1.ClusterInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } if condition.Type == clusterv1.ClusterControlPlaneAvailableV1Beta2Condition && (condition.Reason == clusterv1.ClusterControlPlaneDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } } // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. if condition.Type == clusterv1.ClusterTopologyReconciledV1Beta2Condition && condition.Status == metav1.ConditionFalse && condition.Reason != clusterv1.ClusterTopologyReconciledFailedV1Beta2Reason && condition.Reason != clusterv1.ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } - return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) + return conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) }), - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.ClusterNotAvailableV1Beta2Reason, clusterv1.ClusterAvailableUnknownV1Beta2Reason, clusterv1.ClusterAvailableV1Beta2Reason, )), - ).Merge(operation, conditions, conditionTypes) + ).Merge(operation, mergeConditions, conditionTypes) } func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) { log := ctrl.LoggerFrom(ctx) - forConditionTypes := v1beta2conditions.ForConditionTypes{ + forConditionTypes := conditions.ForConditionTypes{ clusterv1.ClusterDeletingV1Beta2Condition, clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition, clusterv1.ClusterInfrastructureReadyV1Beta2Condition, @@ -1080,13 +1080,13 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus } } - summaryOpts := []v1beta2conditions.SummaryOption{ + summaryOpts := []conditions.SummaryOption{ forConditionTypes, // Instruct summary to consider Deleting condition with negative polarity. - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingV1Beta2Condition}, + conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingV1Beta2Condition}, // Using a custom merge strategy to override reasons applied during merge and to ignore some // info message so the available condition is less noisy. 
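// Illustrative sketch, not part of the patch: the NewSummaryCondition options that the
// surrounding setAvailableCondition hunks assemble, reduced to their core shape.
// summarizeAvailable is a hypothetical helper and the condition list is abridged.
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func summarizeAvailable(cluster *clusterv1.Cluster) error {
	summaryOpts := []conditions.SummaryOption{
		// Only these conditions contribute to the Available summary (abridged list).
		conditions.ForConditionTypes{
			clusterv1.ClusterDeletingV1Beta2Condition,
			clusterv1.ClusterInfrastructureReadyV1Beta2Condition,
			clusterv1.ClusterControlPlaneAvailableV1Beta2Condition,
			clusterv1.ClusterWorkersAvailableV1Beta2Condition,
			clusterv1.ClusterTopologyReconciledV1Beta2Condition,
		},
		// Deleting=True is an issue, so the summary treats it with negative polarity.
		conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingV1Beta2Condition},
	}
	// Clusters without a topology never report TopologyReconciled, so tolerate it being absent.
	if cluster.Spec.Topology == nil {
		summaryOpts = append(summaryOpts, conditions.IgnoreTypesIfMissing{clusterv1.ClusterTopologyReconciledV1Beta2Condition})
	}

	availableCondition, err := conditions.NewSummaryCondition(cluster, clusterv1.ClusterAvailableV1Beta2Condition, summaryOpts...)
	if err != nil {
		return err
	}
	conditions.Set(cluster, *availableCondition)
	return nil
}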
- v1beta2conditions.CustomMergeStrategy{ + conditions.CustomMergeStrategy{ MergeStrategy: clusterConditionCustomMergeStrategy{ cluster: cluster, // Instruct merge to consider Deleting condition with negative polarity, @@ -1095,10 +1095,10 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus }, } if cluster.Spec.Topology == nil { - summaryOpts = append(summaryOpts, v1beta2conditions.IgnoreTypesIfMissing{clusterv1.ClusterTopologyReconciledV1Beta2Condition}) + summaryOpts = append(summaryOpts, conditions.IgnoreTypesIfMissing{clusterv1.ClusterTopologyReconciledV1Beta2Condition}) } - availableCondition, err := v1beta2conditions.NewSummaryCondition(cluster, clusterv1.ClusterAvailableV1Beta2Condition, summaryOpts...) + availableCondition, err := conditions.NewSummaryCondition(cluster, clusterv1.ClusterAvailableV1Beta2Condition, summaryOpts...) if err != nil { // Note, this could only happen if we hit edge cases in computing the summary, which should not happen due to the fact @@ -1112,7 +1112,7 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus } } - v1beta2conditions.Set(cluster, *availableCondition) + conditions.Set(cluster, *availableCondition) } func fallbackReason(status bool, trueReason, falseReason string) string { @@ -1199,7 +1199,7 @@ func (w aggregationWrapper) DeepCopyObject() runtime.Object { func (w aggregationWrapper) GetV1Beta2Conditions() []metav1.Condition { switch { case w.cp != nil: - if c, err := v1beta2conditions.UnstructuredGetAll(w.cp); err == nil && c != nil { + if c, err := conditions.UnstructuredGetAll(w.cp); err == nil && c != nil { return c } return nil diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index eb5f95ce4ee0..9e232dccaa3b 100644 --- a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -30,7 +30,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) func TestSetControlPlaneReplicas(t *testing.T) { @@ -364,9 +364,9 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { setInfrastructureReadyCondition(ctx, tc.cluster, tc.infraCluster, tc.infraClusterIsNotFound) - condition := v1beta2conditions.Get(tc.cluster, clusterv1.ClusterInfrastructureReadyV1Beta2Condition) + condition := conditions.Get(tc.cluster, clusterv1.ClusterInfrastructureReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -529,9 +529,9 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { setControlPlaneAvailableCondition(ctx, tc.cluster, tc.controlPlane, tc.controlPlaneIsNotFound) - condition := v1beta2conditions.Get(tc.cluster, clusterv1.ClusterControlPlaneAvailableV1Beta2Condition) + condition := conditions.Get(tc.cluster, clusterv1.ClusterControlPlaneAvailableV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + 
g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -666,9 +666,9 @@ func TestSetControlPlaneInitialized(t *testing.T) { setControlPlaneInitializedCondition(ctx, tt.cluster, tt.controlPlane, tt.machines, tt.controlPlaneIsNotFound, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneInitializedV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneInitializedV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -756,9 +756,9 @@ func TestSetWorkersAvailableCondition(t *testing.T) { setWorkersAvailableCondition(ctx, tt.cluster, tt.machinePools, tt.machineDeployments, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterWorkersAvailableV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterWorkersAvailableV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -874,9 +874,9 @@ func TestSetControlPlaneMachinesReadyCondition(t *testing.T) { } setControlPlaneMachinesReadyCondition(ctx, tt.cluster, machines, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneMachinesReadyV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneMachinesReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -992,9 +992,9 @@ func TestSetWorkerMachinesReadyCondition(t *testing.T) { } setWorkerMachinesReadyCondition(ctx, tt.cluster, machines, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterWorkerMachinesReadyV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterWorkerMachinesReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1151,9 +1151,9 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { } setControlPlaneMachinesUpToDateCondition(ctx, tt.cluster, machines, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneMachinesUpToDateV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterControlPlaneMachinesUpToDateV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1309,9 +1309,9 @@ func 
TestSetWorkerMachinesUpToDateCondition(t *testing.T) { } setWorkerMachinesUpToDateCondition(ctx, tt.cluster, machines, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterWorkerMachinesUpToDateV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterWorkerMachinesUpToDateV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1504,9 +1504,9 @@ func TestSetRollingOutCondition(t *testing.T) { setRollingOutCondition(ctx, tt.cluster, tt.controlPlane, tt.machinePools, tt.machineDeployments, tt.controlPlaneIsNotFound, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterRollingOutV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterRollingOutV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1755,9 +1755,9 @@ func TestSetScalingUpCondition(t *testing.T) { setScalingUpCondition(ctx, tt.cluster, tt.controlPlane, tt.machinePools, tt.machineDeployments, tt.machineSets, tt.controlPlaneIsNotFound, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterScalingUpV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterScalingUpV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -2006,9 +2006,9 @@ func TestSetScalingDownCondition(t *testing.T) { setScalingDownCondition(ctx, tt.cluster, tt.controlPlane, tt.machinePools, tt.machineDeployments, tt.machineSets, tt.controlPlaneIsNotFound, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterScalingDownV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterScalingDownV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -2113,9 +2113,9 @@ func TestSetRemediatingCondition(t *testing.T) { } setRemediatingCondition(ctx, tt.cluster, machinesToBeRemediated, unHealthyMachines, tt.getDescendantsSucceeded) - condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterRemediatingV1Beta2Condition) + condition := conditions.Get(tt.cluster, clusterv1.ClusterRemediatingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -2165,9 +2165,9 @@ func TestDeletingCondition(t *testing.T) { setDeletingCondition(ctx, tc.cluster, tc.deletingReason, tc.deletingMessage) - 
deletingCondition := v1beta2conditions.Get(tc.cluster, clusterv1.ClusterDeletingV1Beta2Condition) + deletingCondition := conditions.Get(tc.cluster, clusterv1.ClusterDeletingV1Beta2Condition) g.Expect(deletingCondition).ToNot(BeNil()) - g.Expect(*deletingCondition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*deletingCondition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -2776,9 +2776,9 @@ func TestSetAvailableCondition(t *testing.T) { setAvailableCondition(ctx, tc.cluster, tc.clusterClass) - condition := v1beta2conditions.Get(tc.cluster, clusterv1.ClusterAvailableV1Beta2Condition) + condition := conditions.Get(tc.cluster, clusterv1.ClusterAvailableV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -3032,23 +3032,23 @@ func (s deleted) ApplyToCluster(c *clusterv1.Cluster) { type v1beta2Condition metav1.Condition func (c v1beta2Condition) ApplyToCluster(cluster *clusterv1.Cluster) { - v1beta2conditions.Set(cluster, metav1.Condition(c)) + conditions.Set(cluster, metav1.Condition(c)) } func (c v1beta2Condition) ApplyToMachinePool(mp *expv1.MachinePool) { - v1beta2conditions.Set(mp, metav1.Condition(c)) + conditions.Set(mp, metav1.Condition(c)) } func (c v1beta2Condition) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { - v1beta2conditions.Set(md, metav1.Condition(c)) + conditions.Set(md, metav1.Condition(c)) } func (c v1beta2Condition) ApplyToMachineSet(ms *clusterv1.MachineSet) { - v1beta2conditions.Set(ms, metav1.Condition(c)) + conditions.Set(ms, metav1.Condition(c)) } func (c v1beta2Condition) ApplyToMachine(m *clusterv1.Machine) { - v1beta2conditions.Set(m, metav1.Condition(c)) + conditions.Set(m, metav1.Condition(c)) } type condition clusterv1.Condition diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index e9793ecdd998..841b3db4b5db 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -36,8 +36,8 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -88,7 +88,7 @@ func TestClusterReconciler(t *testing.T) { g.Eventually(func(g Gomega) { g.Expect(env.Get(ctx, key, instance)).To(Succeed()) - condition := v1beta2conditions.Get(instance, clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition) + condition := conditions.Get(instance, clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(metav1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(clusterv1.ClusterRemoteConnectionProbeFailedV1Beta2Reason)) @@ -100,7 +100,7 @@ func TestClusterReconciler(t *testing.T) { g.Eventually(func(g Gomega) { g.Expect(env.Get(ctx, key, instance)).To(Succeed()) - condition := v1beta2conditions.Get(instance, 
clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition) + condition := conditions.Get(instance, clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) g.Expect(condition.Reason).To(Equal(clusterv1.ClusterRemoteConnectionProbeSucceededV1Beta2Reason)) diff --git a/internal/controllers/clusterclass/clusterclass_controller_status.go b/internal/controllers/clusterclass/clusterclass_controller_status.go index 5cbf750d0ae5..dadbbf3cfe69 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_status.go +++ b/internal/controllers/clusterclass/clusterclass_controller_status.go @@ -24,8 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) func updateStatus(ctx context.Context, s *scope) { @@ -40,7 +40,7 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. clusterv1.ClusterClassRefVersionsUpToDateInternalErrorReason, "Please check controller logs for errors", ) - v1beta2conditions.Set(clusterClass, metav1.Condition{ + conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterClassRefVersionsUpToDateInternalErrorV1Beta2Reason, @@ -62,7 +62,7 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. strings.Join(msg, "\n"), ), ) - v1beta2conditions.Set(clusterClass, metav1.Condition{ + conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterClassRefVersionsNotUpToDateV1Beta2Reason, @@ -74,7 +74,7 @@ func setRefVersionsUpToDateCondition(_ context.Context, clusterClass *clusterv1. v1beta1conditions.Set(clusterClass, v1beta1conditions.TrueCondition(clusterv1.ClusterClassRefVersionsUpToDateCondition), ) - v1beta2conditions.Set(clusterClass, metav1.Condition{ + conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Reason, @@ -89,7 +89,7 @@ func setVariablesReconciledCondition(_ context.Context, clusterClass *clusterv1. clusterv1.ConditionSeverityError, variableDiscoveryError.Error(), ) - v1beta2conditions.Set(clusterClass, metav1.Condition{ + conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassVariablesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterClassVariablesReadyVariableDiscoveryFailedV1Beta2Reason, @@ -99,7 +99,7 @@ func setVariablesReconciledCondition(_ context.Context, clusterClass *clusterv1. 
} v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) - v1beta2conditions.Set(clusterClass, metav1.Condition{ + conditions.Set(clusterClass, metav1.Condition{ Type: clusterv1.ClusterClassVariablesReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterClassVariablesReadyV1Beta2Reason, diff --git a/internal/controllers/clusterclass/clusterclass_controller_status_test.go b/internal/controllers/clusterclass/clusterclass_controller_status_test.go index d2eb725404e4..e3f3d0822db4 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_status_test.go +++ b/internal/controllers/clusterclass/clusterclass_controller_status_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) func TestSetRefVersionsUpToDateCondition(t *testing.T) { @@ -106,9 +106,9 @@ func TestSetRefVersionsUpToDateCondition(t *testing.T) { setRefVersionsUpToDateCondition(ctx, cc, tc.outdatedExternalReferences, tc.reconcileExternalReferencesError) - condition := v1beta2conditions.Get(cc, clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition) + condition := conditions.Get(cc, clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -149,9 +149,9 @@ func TestSetVariablesReconciledCondition(t *testing.T) { setVariablesReconciledCondition(ctx, cc, tc.variableDiscoveryError) - condition := v1beta2conditions.Get(cc, clusterv1.ClusterClassVariablesReadyV1Beta2Condition) + condition := conditions.Get(cc, clusterv1.ClusterClassVariablesReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller.go b/internal/controllers/clusterresourceset/clusterresourceset_controller.go index 106b675a76ec..b46bec535743 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller.go @@ -45,8 +45,8 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" resourcepredicates "sigs.k8s.io/cluster-api/internal/controllers/clusterresourceset/predicates" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -173,7 +173,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re if err != nil { log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "ClusterResourceSet", klog.KObj(clusterResourceSet)) v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ClusterMatchFailedReason, 
clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: addonsv1.ResourcesAppliedInternalErrorV1Beta2Reason, @@ -315,7 +315,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust if err != nil { if err == ErrSecretTypeNotSupported { v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.WrongSecretTypeReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: addonsv1.ResourcesAppliedWrongSecretTypeV1Beta2Reason, @@ -323,7 +323,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust }) } else { v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RetrievingResourceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: addonsv1.ResourcesAppliedInternalErrorV1Beta2Reason, @@ -381,7 +381,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.RemoteClusterClientFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.InternalErrorV1Beta2Reason, @@ -438,7 +438,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust isSuccessful = false log.Error(err, "Failed to apply ClusterResourceSet resource", resource.Kind, klog.KRef(clusterResourceSet.Namespace, resource.Name)) v1beta1conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ApplyFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: addonsv1.ResourcesNotAppliedV1Beta2Reason, @@ -459,7 +459,7 @@ func (r *Reconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clust } v1beta1conditions.MarkTrue(clusterResourceSet, addonsv1.ResourcesAppliedCondition) - v1beta2conditions.Set(clusterResourceSet, metav1.Condition{ + conditions.Set(clusterResourceSet, metav1.Condition{ Type: addonsv1.ResourcesAppliedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: addonsv1.ResourcesAppliedV1beta2Reason, diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go b/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go index 7945eadfb55a..01cd02c49cba 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller_test.go @@ -35,8 +35,8 @@ import ( clusterv1 
"sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/internal/test/envtest" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) const ( @@ -899,7 +899,7 @@ metadata: g.Expect(appliedCondition.Reason).To(Equal(addonsv1.ApplyFailedReason)) g.Expect(appliedCondition.Message).To(ContainSubstring("creating object /v1, Kind=ConfigMap %s/cm-missing-namespace", missingNamespace)) - appliedConditionV1Beta2 := v1beta2conditions.Get(crs, addonsv1.ResourcesAppliedV1Beta2Condition) + appliedConditionV1Beta2 := conditions.Get(crs, addonsv1.ResourcesAppliedV1Beta2Condition) g.Expect(appliedConditionV1Beta2).NotTo(BeNil()) g.Expect(appliedConditionV1Beta2.Status).To(BeEquivalentTo(corev1.ConditionFalse)) g.Expect(appliedConditionV1Beta2.Reason).To(Equal(addonsv1.ResourcesNotAppliedV1Beta2Reason)) diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index cb7f8ed2f030..e5364919c2b6 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -34,8 +34,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/internal/contract" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) // updateStatus update Machine's status. @@ -75,7 +75,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) { func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, bootstrapConfig *unstructured.Unstructured, bootstrapConfigIsNotFound bool) { if machine.Spec.Bootstrap.ConfigRef == nil { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineBootstrapDataSecretProvidedV1Beta2Reason, @@ -84,17 +84,17 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b } if bootstrapConfig != nil { - ready, err := v1beta2conditions.NewMirrorConditionFromUnstructured( + ready, err := conditions.NewMirrorConditionFromUnstructured( bootstrapConfig, - contract.Bootstrap().ReadyConditionType(), v1beta2conditions.TargetConditionType(clusterv1.MachineBootstrapConfigReadyV1Beta2Condition), - v1beta2conditions.FallbackCondition{ - Status: v1beta2conditions.BoolToStatus(machine.Status.BootstrapReady), + contract.Bootstrap().ReadyConditionType(), conditions.TargetConditionType(clusterv1.MachineBootstrapConfigReadyV1Beta2Condition), + conditions.FallbackCondition{ + Status: conditions.BoolToStatus(machine.Status.BootstrapReady), Reason: fallbackReason(machine.Status.BootstrapReady, clusterv1.MachineBootstrapConfigReadyV1Beta2Reason, clusterv1.MachineBootstrapConfigNotReadyV1Beta2Reason), Message: bootstrapConfigReadyFallBackMessage(machine.Spec.Bootstrap.ConfigRef.Kind, machine.Status.BootstrapReady), }, ) if err != nil { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineBootstrapConfigInvalidConditionReportedV1Beta2Reason, @@ 
-105,16 +105,16 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. - if ready.Reason == v1beta2conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { + if ready.Reason == conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { ready.Reason = clusterv1.MachineBootstrapConfigReadyV1Beta2Reason } - v1beta2conditions.Set(machine, *ready) + conditions.Set(machine, *ready) return } // If we got unexpected errors in reading the bootstrap config (this should happen rarely), surface them if !bootstrapConfigIsNotFound { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineBootstrapConfigInternalErrorV1Beta2Reason, @@ -126,7 +126,7 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b // Bootstrap config missing when the machine is deleting and we know that the BootstrapConfig actually existed. if !machine.DeletionTimestamp.IsZero() && machine.Status.BootstrapReady { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason, @@ -139,7 +139,7 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b // surface this fact. This could happen when: // - when applying the yaml file with the machine and all the objects referenced by it (provisioning yet to start/started, but status.nodeRef not yet set). 
// - when the machine has been provisioned - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason, @@ -163,17 +163,17 @@ func bootstrapConfigReadyFallBackMessage(kind string, ready bool) string { func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machine, infraMachine *unstructured.Unstructured, infraMachineIsNotFound bool) { if infraMachine != nil { - ready, err := v1beta2conditions.NewMirrorConditionFromUnstructured( + ready, err := conditions.NewMirrorConditionFromUnstructured( infraMachine, - contract.InfrastructureMachine().ReadyConditionType(), v1beta2conditions.TargetConditionType(clusterv1.MachineInfrastructureReadyV1Beta2Condition), - v1beta2conditions.FallbackCondition{ - Status: v1beta2conditions.BoolToStatus(machine.Status.InfrastructureReady), + contract.InfrastructureMachine().ReadyConditionType(), conditions.TargetConditionType(clusterv1.MachineInfrastructureReadyV1Beta2Condition), + conditions.FallbackCondition{ + Status: conditions.BoolToStatus(machine.Status.InfrastructureReady), Reason: fallbackReason(machine.Status.InfrastructureReady, clusterv1.MachineInfrastructureReadyV1Beta2Reason, clusterv1.MachineInfrastructureNotReadyV1Beta2Reason), Message: infrastructureReadyFallBackMessage(machine.Spec.InfrastructureRef.Kind, machine.Status.InfrastructureReady), }, ) if err != nil { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineInfrastructureInvalidConditionReportedV1Beta2Reason, @@ -184,16 +184,16 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. - if ready.Reason == v1beta2conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { + if ready.Reason == conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { ready.Reason = clusterv1.MachineInfrastructureReadyV1Beta2Reason } - v1beta2conditions.Set(machine, *ready) + conditions.Set(machine, *ready) return } // If we got errors in reading the infra machine (this should happen rarely), surface them if !infraMachineIsNotFound { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineInfrastructureInternalErrorV1Beta2Reason, @@ -208,7 +208,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi // will be considered unreachable Machine deletion will complete. 
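The bootstrap and infrastructure hunks above both funnel through the same mirror-with-fallback helper. For orientation, a minimal sketch of that call after the rename, assuming the helper keeps the v1beta2 signature shown in this diff; the "Ready" literal, the fallback message, and the function name are illustrative (the real code resolves the source condition type via the internal contract package and switches to a NotReady reason when the bool is false):

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// mirrorInfraReady copies the provider object's Ready condition onto the Machine,
// falling back to status.infrastructureReady when the provider does not report one.
func mirrorInfraReady(machine *clusterv1.Machine, infraMachine *unstructured.Unstructured) error {
	ready, err := conditions.NewMirrorConditionFromUnstructured(
		infraMachine,
		"Ready", // illustrative; the controller derives this from the InfrastructureMachine contract
		conditions.TargetConditionType(clusterv1.MachineInfrastructureReadyV1Beta2Condition),
		conditions.FallbackCondition{
			Status:  conditions.BoolToStatus(machine.Status.InfrastructureReady),
			Reason:  clusterv1.MachineInfrastructureReadyV1Beta2Reason, // the controller picks a NotReady reason when false
			Message: "Reported from status.infrastructureReady",
		},
	)
	if err != nil {
		return err
	}
	conditions.Set(machine, *ready)
	return nil
}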
if !machine.DeletionTimestamp.IsZero() { if machine.Status.InfrastructureReady { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, @@ -217,7 +217,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi return } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, @@ -228,7 +228,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi // Report an issue if infra machine missing after the machine has been initialized (and the machine is still running). if machine.Status.InfrastructureReady { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, @@ -240,7 +240,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi // If the machine is not deleting, and infra machine object does not exist yet, // surface this fact. This could happen when: // - when applying the yaml file with the machine and all the objects referenced by it (provisioning yet to start/started, but status.InfrastructureReady not yet set). - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, @@ -286,8 +286,8 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // If conditions are not set, set them to ConnectionDown. // Note: This will allow to keep reporting last known status in case there are temporary connection errors. // However, if connection errors persist more than remoteConditionsGracePeriod, conditions will be overridden. 
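The comment above captures the intent; roughly, the pattern with the renamed helpers is the following (a sketch assuming conditions.Has and conditions.Set keep the semantics of the former v1beta2 package; the helper name and message format are illustrative only, and the grace-period override itself is handled elsewhere in this function):

package example

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markNodeConditionsConnectionDown sets both node-related conditions to Unknown with a
// ConnectionDown reason, but only if they are not set yet, so the last known status keeps
// being reported during short connection outages.
func markNodeConditionsConnectionDown(machine *clusterv1.Machine, lastProbeSuccessTime time.Time) {
	if conditions.Has(machine, clusterv1.MachineNodeReadyV1Beta2Condition) &&
		conditions.Has(machine, clusterv1.MachineNodeHealthyV1Beta2Condition) {
		return
	}
	msg := fmt.Sprintf("Last successful probe at %s", lastProbeSuccessTime.Format(time.RFC3339))
	for _, conditionType := range []string{clusterv1.MachineNodeReadyV1Beta2Condition, clusterv1.MachineNodeHealthyV1Beta2Condition} {
		conditions.Set(machine, metav1.Condition{
			Type:    conditionType,
			Status:  metav1.ConditionUnknown,
			Reason:  clusterv1.MachineNodeConnectionDownV1Beta2Reason,
			Message: msg,
		})
	}
}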
- if !v1beta2conditions.Has(machine, clusterv1.MachineNodeReadyV1Beta2Condition) || - !v1beta2conditions.Has(machine, clusterv1.MachineNodeHealthyV1Beta2Condition) { + if !conditions.Has(machine, clusterv1.MachineNodeReadyV1Beta2Condition) || + !conditions.Has(machine, clusterv1.MachineNodeHealthyV1Beta2Condition) { setNodeConditions(machine, metav1.ConditionUnknown, clusterv1.MachineNodeConnectionDownV1Beta2Reason, lastProbeSuccessMessage(lastProbeSuccessTime)) @@ -341,10 +341,10 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl Message: "* Node.Ready: Condition not yet reported", } } - v1beta2conditions.Set(machine, *nodeReady) + conditions.Set(machine, *nodeReady) status, reason, message := summarizeNodeV1Beta2Conditions(ctx, node) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: status, Reason: reason, @@ -398,7 +398,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl func setNodeConditions(machine *clusterv1.Machine, status metav1.ConditionStatus, reason, msg string) { for _, conditionType := range []string{clusterv1.MachineNodeReadyV1Beta2Condition, clusterv1.MachineNodeHealthyV1Beta2Condition} { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: conditionType, Status: status, Reason: reason, @@ -422,7 +422,7 @@ func maxTime(t1, t2 time.Time) time.Time { } // summarizeNodeV1Beta2Conditions summarizes a Node's conditions (NodeReady, NodeMemoryPressure, NodeDiskPressure, NodePIDPressure). -// the summary is computed in way that is similar to how v1beta2conditions.NewSummaryCondition works, but in this case the +// the summary is computed in way that is similar to how conditions.NewSummaryCondition works, but in this case the // implementation is simpler/less flexible and it surfaces only issues & unknown conditions. func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav1.ConditionStatus, string, string) { semanticallyFalseStatus := 0 @@ -497,33 +497,33 @@ type machineConditionCustomMergeStrategy struct { negativePolarityConditionTypes []string } -func (c machineConditionCustomMergeStrategy) Merge(operation v1beta2conditions.MergeOperation, conditions []v1beta2conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { - return v1beta2conditions.DefaultMergeStrategy( +func (c machineConditionCustomMergeStrategy) Merge(operation conditions.MergeOperation, mergeConditions []conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { + return conditions.DefaultMergeStrategy( // While machine is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
- v1beta2conditions.GetPriorityFunc(func(condition metav1.Condition) v1beta2conditions.MergePriority { + conditions.GetPriorityFunc(func(condition metav1.Condition) conditions.MergePriority { if !c.machine.DeletionTimestamp.IsZero() { if condition.Type == clusterv1.MachineBootstrapConfigReadyV1Beta2Condition && (condition.Reason == clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } if condition.Type == clusterv1.MachineInfrastructureReadyV1Beta2Condition && (condition.Reason == clusterv1.MachineInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } if condition.Type == clusterv1.MachineNodeHealthyV1Beta2Condition && (condition.Reason == clusterv1.MachineNodeDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineNodeDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return conditions.InfoMergePriority } // Note: MachineNodeReadyV1Beta2Condition is not relevant for the summary. } - return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) + return conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) }), // Group readiness gates for control plane and etcd conditions when they have the same messages. - v1beta2conditions.SummaryMessageTransformFunc(transformControlPlaneAndEtcdConditions), + conditions.SummaryMessageTransformFunc(transformControlPlaneAndEtcdConditions), // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.MachineNotReadyV1Beta2Reason, clusterv1.MachineReadyUnknownV1Beta2Reason, clusterv1.MachineReadyV1Beta2Reason, )), - ).Merge(operation, conditions, conditionTypes) + ).Merge(operation, mergeConditions, conditionTypes) } // transformControlPlaneAndEtcdConditions Group readiness gates for control plane conditions when they have the same messages. 
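Note that the parameter previously named conditions had to become mergeConditions here: with the package now imported without an alias, the old name would shadow the conditions package inside Merge. For reviewers less familiar with the merge API, a minimal sketch of a custom merge strategy after the rename (assuming the API surface is unchanged apart from the import path; the strategy type and the priority rule below are illustrative, not part of this PR):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// demoMergeStrategy delegates to the default strategy, but downgrades one condition type
// to info priority so it cannot turn the summary into an issue on its own.
type demoMergeStrategy struct{}

func (demoMergeStrategy) Merge(op conditions.MergeOperation, mergeConditions []conditions.ConditionWithOwnerInfo, conditionTypes []string) (metav1.ConditionStatus, string, string, error) {
	return conditions.DefaultMergeStrategy(
		conditions.GetPriorityFunc(func(c metav1.Condition) conditions.MergePriority {
			if c.Type == clusterv1.MachineDeletingV1Beta2Condition {
				return conditions.InfoMergePriority
			}
			return conditions.GetDefaultMergePriorityFunc()(c)
		}),
	).Merge(op, mergeConditions, conditionTypes)
}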
@@ -599,7 +599,7 @@ func transformControlPlaneAndEtcdConditions(messages []string) []string { func setDeletingCondition(_ context.Context, machine *clusterv1.Machine, reconcileDeleteExecuted bool, deletingReason, deletingMessage string) { if machine.DeletionTimestamp.IsZero() { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotDeletingV1Beta2Reason, @@ -613,7 +613,7 @@ func setDeletingCondition(_ context.Context, machine *clusterv1.Machine, reconci return } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: deletingReason, @@ -624,7 +624,7 @@ func setDeletingCondition(_ context.Context, machine *clusterv1.Machine, reconci func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { log := ctrl.LoggerFrom(ctx) - forConditionTypes := v1beta2conditions.ForConditionTypes{ + forConditionTypes := conditions.ForConditionTypes{ clusterv1.MachineDeletingV1Beta2Condition, clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, clusterv1.MachineInfrastructureReadyV1Beta2Condition, @@ -639,15 +639,15 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { } } - summaryOpts := []v1beta2conditions.SummaryOption{ + summaryOpts := []conditions.SummaryOption{ forConditionTypes, // Tolerate HealthCheckSucceeded to not exist. - v1beta2conditions.IgnoreTypesIfMissing{ + conditions.IgnoreTypesIfMissing{ clusterv1.MachineHealthCheckSucceededV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge and to ignore some // info message so the ready condition aggregation in other resources is less noisy. - v1beta2conditions.CustomMergeStrategy{ + conditions.CustomMergeStrategy{ MergeStrategy: machineConditionCustomMergeStrategy{ machine: machine, // Instruct merge to consider Deleting condition with negative polarity, @@ -655,14 +655,14 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { }, }, // Instruct summary to consider Deleting condition with negative polarity. - v1beta2conditions.NegativePolarityConditionTypes{ + conditions.NegativePolarityConditionTypes{ clusterv1.MachineDeletingV1Beta2Condition, }, } // Add overrides for conditions we want to surface in the Ready condition with slightly different messages, // mostly to improve when we will aggregate the Ready condition from many machines on MS, MD etc. - var overrideConditions v1beta2conditions.OverrideConditions + var overrideConditions conditions.OverrideConditions if !machine.DeletionTimestamp.IsZero() { overrideConditions = append(overrideConditions, calculateDeletingConditionForSummary(machine)) } @@ -671,7 +671,7 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { summaryOpts = append(summaryOpts, overrideConditions) } - readyCondition, err := v1beta2conditions.NewSummaryCondition(machine, clusterv1.MachineReadyV1Beta2Condition, summaryOpts...) + readyCondition, err := conditions.NewSummaryCondition(machine, clusterv1.MachineReadyV1Beta2Condition, summaryOpts...) 
if err != nil { // Note, this could only happen if we hit edge cases in computing the summary, which should not happen due to the fact @@ -685,7 +685,7 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { } } - v1beta2conditions.Set(machine, *readyCondition) + conditions.Set(machine, *readyCondition) } // calculateDeletingConditionForSummary calculates a Deleting condition for the calculation of the Ready condition @@ -696,8 +696,8 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { // For the same reason we are only surfacing messages with "more than 15m" instead of using the exact durations. // 15 minutes is a duration after which we assume it makes sense to emphasize that Node drains and waiting for volume // detach are still in progress. -func calculateDeletingConditionForSummary(machine *clusterv1.Machine) v1beta2conditions.ConditionWithOwnerInfo { - deletingCondition := v1beta2conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) +func calculateDeletingConditionForSummary(machine *clusterv1.Machine) conditions.ConditionWithOwnerInfo { + deletingCondition := conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) msg := "Machine deletion in progress" if deletingCondition != nil { @@ -725,8 +725,8 @@ func calculateDeletingConditionForSummary(machine *clusterv1.Machine) v1beta2con } } - return v1beta2conditions.ConditionWithOwnerInfo{ - OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + return conditions.ConditionWithOwnerInfo{ + OwnerResource: conditions.ConditionOwnerInfo{ Kind: "Machine", Name: machine.Name, }, @@ -741,13 +741,13 @@ func calculateDeletingConditionForSummary(machine *clusterv1.Machine) v1beta2con func setAvailableCondition(ctx context.Context, machine *clusterv1.Machine) { log := ctrl.LoggerFrom(ctx) - readyCondition := v1beta2conditions.Get(machine, clusterv1.MachineReadyV1Beta2Condition) + readyCondition := conditions.Get(machine, clusterv1.MachineReadyV1Beta2Condition) if readyCondition == nil { // NOTE: this should never happen given that setReadyCondition is called before this method and // it always add a ready condition. 
log.Error(errors.New("Ready condition must be set before setting the available condition"), "Failed to set Available condition") - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineAvailableInternalErrorV1Beta2Reason, @@ -757,7 +757,7 @@ func setAvailableCondition(ctx context.Context, machine *clusterv1.Machine) { } if readyCondition.Status != metav1.ConditionTrue { - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyV1Beta2Reason, @@ -766,7 +766,7 @@ func setAvailableCondition(ctx context.Context, machine *clusterv1.Machine) { } if time.Since(readyCondition.LastTransitionTime.Time) >= 0*time.Second { // TODO: use MinReadySeconds as soon as it is available (and fix corresponding unit test) - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineAvailableV1Beta2Reason, @@ -774,7 +774,7 @@ func setAvailableCondition(ctx context.Context, machine *clusterv1.Machine) { return } - v1beta2conditions.Set(machine, metav1.Condition{ + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineWaitingForMinReadySecondsV1Beta2Reason, diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 5b3bc1acb895..36399a4e4377 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -32,8 +32,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/kubeconfig" ) @@ -280,9 +280,9 @@ func TestSetBootstrapReadyCondition(t *testing.T) { setBootstrapReadyCondition(ctx, tc.machine, tc.bootstrapConfig, tc.bootstrapConfigIsNotFound) - condition := v1beta2conditions.Get(tc.machine, clusterv1.MachineBootstrapConfigReadyV1Beta2Condition) + condition := conditions.Get(tc.machine, clusterv1.MachineBootstrapConfigReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -524,9 +524,9 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { setInfrastructureReadyCondition(ctx, tc.machine, tc.infraMachine, tc.infraMachineIsNotFound) - condition := v1beta2conditions.Get(tc.machine, clusterv1.MachineInfrastructureReadyV1Beta2Condition) + condition := conditions.Get(tc.machine, clusterv1.MachineInfrastructureReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, 
conditions.IgnoreLastTransitionTime(true))) }) } } @@ -951,12 +951,12 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { }(), machine: func() *clusterv1.Machine { m := defaultMachine.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineNodeReadyV1Beta2Reason, @@ -1034,12 +1034,12 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { }(), machine: func() *clusterv1.Machine { m := defaultMachine.DeepCopy() - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineNodeReadyV1Beta2Reason, @@ -1096,7 +1096,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { g := NewWithT(t) setNodeHealthyAndReadyConditions(ctx, tc.cluster, tc.machine, tc.node, tc.nodeGetErr, tc.lastProbeSuccessTime, 5*time.Minute) - g.Expect(tc.machine.GetV1Beta2Conditions()).To(v1beta2conditions.MatchConditions(tc.expectConditions, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(tc.machine.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectConditions, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1219,9 +1219,9 @@ func TestDeletingCondition(t *testing.T) { setDeletingCondition(ctx, tc.machine, tc.reconcileDeleteExecuted, tc.deletingReason, tc.deletingMessage) - deletingCondition := v1beta2conditions.Get(tc.machine, clusterv1.MachineDeletingV1Beta2Condition) + deletingCondition := conditions.Get(tc.machine, clusterv1.MachineDeletingV1Beta2Condition) g.Expect(deletingCondition).ToNot(BeNil()) - g.Expect(*deletingCondition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*deletingCondition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1666,9 +1666,9 @@ func TestSetReadyCondition(t *testing.T) { setReadyCondition(ctx, tc.machine) - condition := v1beta2conditions.Get(tc.machine, clusterv1.MachineReadyV1Beta2Condition) + condition := conditions.Get(tc.machine, clusterv1.MachineReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1677,7 +1677,7 @@ func TestCalculateDeletingConditionForSummary(t *testing.T) { testCases := []struct { name string machine *clusterv1.Machine - expectCondition v1beta2conditions.ConditionWithOwnerInfo + expectCondition conditions.ConditionWithOwnerInfo }{ { name: "No Deleting condition", @@ -1690,8 +1690,8 @@ func TestCalculateDeletingConditionForSummary(t *testing.T) { Conditions: []metav1.Condition{}, }, }, - expectCondition: v1beta2conditions.ConditionWithOwnerInfo{ - OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + expectCondition: 
conditions.ConditionWithOwnerInfo{ + OwnerResource: conditions.ConditionOwnerInfo{ Kind: "Machine", Name: "machine-test", }, @@ -1730,8 +1730,8 @@ After above Pods have been removed from the Node, the following Pods will be evi }, }, }, - expectCondition: v1beta2conditions.ConditionWithOwnerInfo{ - OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + expectCondition: conditions.ConditionWithOwnerInfo{ + OwnerResource: conditions.ConditionOwnerInfo{ Kind: "Machine", Name: "machine-test", }, @@ -1765,8 +1765,8 @@ After above Pods have been removed from the Node, the following Pods will be evi }, }, }, - expectCondition: v1beta2conditions.ConditionWithOwnerInfo{ - OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + expectCondition: conditions.ConditionWithOwnerInfo{ + OwnerResource: conditions.ConditionOwnerInfo{ Kind: "Machine", Name: "machine-test", }, @@ -1796,8 +1796,8 @@ After above Pods have been removed from the Node, the following Pods will be evi }, }, }, - expectCondition: v1beta2conditions.ConditionWithOwnerInfo{ - OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + expectCondition: conditions.ConditionWithOwnerInfo{ + OwnerResource: conditions.ConditionOwnerInfo{ Kind: "Machine", Name: "machine-test", }, @@ -1905,9 +1905,9 @@ func TestAvailableCondition(t *testing.T) { setAvailableCondition(ctx, tc.machine) - availableCondition := v1beta2conditions.Get(tc.machine, clusterv1.MachineAvailableV1Beta2Condition) + availableCondition := conditions.Get(tc.machine, clusterv1.MachineAvailableV1Beta2Condition) g.Expect(availableCondition).ToNot(BeNil()) - g.Expect(*availableCondition).To(v1beta2conditions.MatchCondition(tc.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*availableCondition).To(conditions.MatchCondition(tc.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } diff --git a/internal/controllers/machinedeployment/machinedeployment_status.go b/internal/controllers/machinedeployment/machinedeployment_status.go index f68f1183113a..cf1d74690f7d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status.go +++ b/internal/controllers/machinedeployment/machinedeployment_status.go @@ -32,7 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -90,7 +90,7 @@ func setReplicas(machineDeployment *clusterv1.MachineDeployment, machineSets []* func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, getAndAdoptMachineSetsForDeploymentSucceeded bool) { // If we got unexpected errors in listing the machine sets (this should never happen), surface them. if !getAndAdoptMachineSetsForDeploymentSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentAvailableInternalErrorV1Beta2Reason, @@ -101,7 +101,7 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi // Surface if .spec.replicas is not yet set (this should never happen). 
if machineDeployment.Spec.Replicas == nil { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentAvailableWaitingForReplicasSetV1Beta2Reason, @@ -112,7 +112,7 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi // Surface if .status.v1beta2.availableReplicas is not yet set. if machineDeployment.Status.AvailableReplicas == nil { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentAvailableWaitingForAvailableReplicasSetV1Beta2Reason, @@ -125,7 +125,7 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi minReplicasNeeded := *(machineDeployment.Spec.Replicas) - mdutil.MaxUnavailable(*machineDeployment) if *machineDeployment.Status.AvailableReplicas >= minReplicasNeeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentAvailableV1Beta2Reason, @@ -141,7 +141,7 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi if !machineDeployment.DeletionTimestamp.IsZero() { message = "Deletion in progress" } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotAvailableV1Beta2Reason, @@ -152,7 +152,7 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi func setRollingOutCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, machines collections.Machines, getMachinesSucceeded bool) { // If we got unexpected errors in listing the machines (this should never happen), surface them. 
if !getMachinesSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRollingOutV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentRollingOutInternalErrorV1Beta2Reason, @@ -169,7 +169,7 @@ func setRollingOutCondition(_ context.Context, machineDeployment *clusterv1.Mach rollingOutReplicas := 0 rolloutReasons := sets.Set[string]{} for _, machine := range machines { - upToDateCondition := v1beta2conditions.Get(machine, clusterv1.MachineUpToDateV1Beta2Condition) + upToDateCondition := conditions.Get(machine, clusterv1.MachineUpToDateV1Beta2Condition) if upToDateCondition == nil || upToDateCondition.Status != metav1.ConditionFalse { continue } @@ -181,7 +181,7 @@ func setRollingOutCondition(_ context.Context, machineDeployment *clusterv1.Mach if rollingOutReplicas == 0 { var message string - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRollingOutV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotRollingOutV1Beta2Reason, @@ -206,7 +206,7 @@ func setRollingOutCondition(_ context.Context, machineDeployment *clusterv1.Mach }) message += fmt.Sprintf("\n%s", strings.Join(reasons, "\n")) } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRollingOutV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRollingOutV1Beta2Reason, @@ -217,7 +217,7 @@ func setRollingOutCondition(_ context.Context, machineDeployment *clusterv1.Mach func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, machineSets []*clusterv1.MachineSet, bootstrapObjectNotFound, infrastructureObjectNotFound, getAndAdoptMachineSetsForDeploymentSucceeded bool) { // If we got unexpected errors in listing the machine sets (this should never happen), surface them. if !getAndAdoptMachineSetsForDeploymentSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentScalingUpInternalErrorV1Beta2Reason, @@ -228,7 +228,7 @@ func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.Machi // Surface if .spec.replicas is not yet set (this should never happen). 
if machineDeployment.Spec.Replicas == nil { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentScalingUpWaitingForReplicasSetV1Beta2Reason, @@ -251,7 +251,7 @@ func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.Machi if machineDeployment.DeletionTimestamp.IsZero() && missingReferencesMessage != "" { message = fmt.Sprintf("Scaling up would be blocked %s", missingReferencesMessage) } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotScalingUpV1Beta2Reason, @@ -265,7 +265,7 @@ func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.Machi if missingReferencesMessage != "" { message += fmt.Sprintf(" is blocked %s", missingReferencesMessage) } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentScalingUpV1Beta2Reason, @@ -276,7 +276,7 @@ func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.Machi func setScalingDownCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, machineSets []*clusterv1.MachineSet, machines collections.Machines, getAndAdoptMachineSetsForDeploymentSucceeded, getMachinesSucceeded bool) { // If we got unexpected errors in listing the machines sets (this should never happen), surface them. if !getAndAdoptMachineSetsForDeploymentSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentScalingDownInternalErrorV1Beta2Reason, @@ -287,7 +287,7 @@ func setScalingDownCondition(_ context.Context, machineDeployment *clusterv1.Mac // Surface if .spec.replicas is not yet set (this should never happen). if machineDeployment.Spec.Replicas == nil { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentScalingDownWaitingForReplicasSetV1Beta2Reason, @@ -311,7 +311,7 @@ func setScalingDownCondition(_ context.Context, machineDeployment *clusterv1.Mac message += fmt.Sprintf("\n* %s", staleMessage) } } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentScalingDownV1Beta2Reason, @@ -321,7 +321,7 @@ func setScalingDownCondition(_ context.Context, machineDeployment *clusterv1.Mac } // Not scaling down. 
- v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentScalingDownV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotScalingDownV1Beta2Reason, @@ -332,7 +332,7 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 log := ctrl.LoggerFrom(ctx) // If we got unexpected errors in listing the machines (this should never happen), surface them. if !getMachinesSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentMachinesReadyInternalErrorV1Beta2Reason, @@ -342,7 +342,7 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 } if len(machines) == 0 { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentMachinesReadyNoReplicasV1Beta2Reason, @@ -350,13 +350,13 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 return } - readyCondition, err := v1beta2conditions.NewAggregateCondition( + readyCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.MachineDeploymentMachinesNotReadyV1Beta2Reason, clusterv1.MachineDeploymentMachinesReadyUnknownV1Beta2Reason, clusterv1.MachineDeploymentMachinesReadyV1Beta2Reason, @@ -366,7 +366,7 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 ) if err != nil { log.Error(err, "Failed to aggregate Machine's Ready conditions") - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentMachinesReadyInternalErrorV1Beta2Reason, @@ -375,14 +375,14 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 return } - v1beta2conditions.Set(machineDeployment, *readyCondition) + conditions.Set(machineDeployment, *readyCondition) } func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *clusterv1.MachineDeployment, machines collections.Machines, getMachinesSucceeded bool) { log := ctrl.LoggerFrom(ctx) // If we got unexpected errors in listing the machines (this should never happen), surface them. 
if !getMachinesSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentMachinesUpToDateInternalErrorV1Beta2Reason, @@ -395,11 +395,11 @@ func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *cluste // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { - return v1beta2conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second + return conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) if len(machines) == 0 { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentMachinesUpToDateNoReplicasV1Beta2Reason, @@ -407,13 +407,13 @@ func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *cluste return } - upToDateCondition, err := v1beta2conditions.NewAggregateCondition( + upToDateCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. 
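The MachinesReady and MachinesUpToDate hunks in this file build on the same aggregation idiom; as a reference, a minimal sketch with the renamed package (the condition types and reason constants are the ones used in this diff, while the function name and the compressed error handling are illustrative):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/collections"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setMachinesReady aggregates the Ready condition of all Machines into a single MachinesReady
// condition on the MachineDeployment, overriding the merge reasons with MachineDeployment-specific constants.
func setMachinesReady(md *clusterv1.MachineDeployment, machines collections.Machines) {
	readyCondition, err := conditions.NewAggregateCondition(
		machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition,
		conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition),
		conditions.CustomMergeStrategy{
			MergeStrategy: conditions.DefaultMergeStrategy(
				conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
					clusterv1.MachineDeploymentMachinesNotReadyV1Beta2Reason,
					clusterv1.MachineDeploymentMachinesReadyUnknownV1Beta2Reason,
					clusterv1.MachineDeploymentMachinesReadyV1Beta2Reason,
				)),
			),
		},
	)
	if err != nil {
		// Surface unexpected aggregation errors as Unknown instead of dropping them.
		conditions.Set(md, metav1.Condition{
			Type:   clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition,
			Status: metav1.ConditionUnknown,
			Reason: clusterv1.MachineDeploymentMachinesReadyInternalErrorV1Beta2Reason,
		})
		return
	}
	conditions.Set(md, *readyCondition)
}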
- v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.MachineDeploymentMachinesNotUpToDateV1Beta2Reason, clusterv1.MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason, clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Reason, @@ -423,7 +423,7 @@ func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *cluste ) if err != nil { log.Error(err, "Failed to aggregate Machine's UpToDate conditions") - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentMachinesUpToDateInternalErrorV1Beta2Reason, @@ -432,12 +432,12 @@ func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *cluste return } - v1beta2conditions.Set(machineDeployment, *upToDateCondition) + conditions.Set(machineDeployment, *upToDateCondition) } func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.MachineDeployment, machinesToBeRemediated, unhealthyMachines collections.Machines, getMachinesSucceeded bool) { if !getMachinesSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentRemediatingInternalErrorV1Beta2Reason, @@ -448,7 +448,7 @@ func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.M if len(machinesToBeRemediated) == 0 { message := aggregateUnhealthyMachines(unhealthyMachines) - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRemediatingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotRemediatingV1Beta2Reason, @@ -457,14 +457,14 @@ func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.M return } - remediatingCondition, err := v1beta2conditions.NewAggregateCondition( + remediatingCondition, err := conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentRemediatingV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineDeploymentRemediatingV1Beta2Condition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). 
) if err != nil { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentRemediatingInternalErrorV1Beta2Reason, @@ -476,7 +476,7 @@ func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.M return } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: remediatingCondition.Type, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRemediatingV1Beta2Reason, @@ -487,7 +487,7 @@ func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.M func setDeletingCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, machineSets []*clusterv1.MachineSet, machines collections.Machines, getAndAdoptMachineSetsForDeploymentSucceeded, getMachinesSucceeded bool) { // If we got unexpected errors in listing the machines sets or machines (this should never happen), surface them. if !getAndAdoptMachineSetsForDeploymentSucceeded || !getMachinesSucceeded { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentDeletingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentDeletingInternalErrorV1Beta2Reason, @@ -497,7 +497,7 @@ func setDeletingCondition(_ context.Context, machineDeployment *clusterv1.Machin } if machineDeployment.DeletionTimestamp.IsZero() { - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentDeletingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeploymentNotDeletingV1Beta2Reason, @@ -524,7 +524,7 @@ func setDeletingCondition(_ context.Context, machineDeployment *clusterv1.Machin if message == "" { message = "Deletion completed" } - v1beta2conditions.Set(machineDeployment, metav1.Condition{ + conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentDeletingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentDeletingV1Beta2Reason, @@ -563,7 +563,7 @@ func aggregateStaleMachines(machines collections.Machines) string { if !machine.GetDeletionTimestamp().IsZero() && time.Since(machine.GetDeletionTimestamp().Time) > time.Minute*15 { machineNames = append(machineNames, machine.GetName()) - deletingCondition := v1beta2conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) + deletingCondition := conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) if deletingCondition != nil && deletingCondition.Status == metav1.ConditionTrue && deletingCondition.Reason == clusterv1.MachineDeletingDrainingNodeV1Beta2Reason && diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go index ccc36223c896..c227c350d6fc 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go @@ -28,7 +28,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) func Test_setReplicas(t *testing.T) { @@ -223,9 +223,9 @@ func 
Test_setAvailableCondition(t *testing.T) { setAvailableCondition(ctx, tt.machineDeployment, tt.getAndAdoptMachineSetsForDeploymentSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentAvailableV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentAvailableV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -327,9 +327,9 @@ func Test_setRollingOutCondition(t *testing.T) { } setRollingOutCondition(ctx, tt.machineDeployment, machines, tt.getMachinesSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentRollingOutV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentRollingOutV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -546,9 +546,9 @@ func Test_setScalingUpCondition(t *testing.T) { setScalingUpCondition(ctx, tt.machineDeployment, tt.machineSets, tt.bootstrapTemplateNotFound, tt.infrastructureTemplateNotFound, tt.getAndAdoptMachineSetsForDeploymentSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentScalingUpV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentScalingUpV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -755,9 +755,9 @@ After above Pods have been removed from the Node, the following Pods will be evi setScalingDownCondition(ctx, tt.machineDeployment, tt.machineSets, collections.FromMachines(tt.machines...), tt.getAndAdoptMachineSetsForDeploymentSucceeded, true) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentScalingDownV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentScalingDownV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -873,9 +873,9 @@ func Test_setMachinesReadyCondition(t *testing.T) { } setMachinesReadyCondition(ctx, tt.machineDeployment, machines, tt.getMachinesSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1032,9 +1032,9 @@ func 
Test_setMachinesUpToDateCondition(t *testing.T) { } setMachinesUpToDateCondition(ctx, tt.machineDeployment, machines, tt.getMachinesSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1139,9 +1139,9 @@ func Test_setRemediatingCondition(t *testing.T) { } setRemediatingCondition(ctx, tt.machineDeployment, machinesToBeRemediated, unHealthyMachines, tt.getMachinesSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentRemediatingV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentRemediatingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1261,9 +1261,9 @@ func Test_setDeletingCondition(t *testing.T) { } setDeletingCondition(ctx, tt.machineDeployment, tt.machineSets, machines, tt.getAndAdoptMachineSetsForDeploymentSucceeded, tt.getMachinesSucceeded) - condition := v1beta2conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentDeletingV1Beta2Condition) + condition := conditions.Get(tt.machineDeployment, clusterv1.MachineDeploymentDeletingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1334,7 +1334,7 @@ func withStaleDeletion() fakeMachinesOption { func withV1Beta2Condition(c metav1.Condition) fakeMachinesOption { return func(m *clusterv1.Machine) { - v1beta2conditions.Set(m, c) + conditions.Set(m, c) } } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 557112ba5da7..29d8b3a79a49 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -51,8 +51,8 @@ import ( "sigs.k8s.io/cluster-api/internal/controllers/machine" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" @@ -284,7 +284,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster Message: message, }) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineHealthCheckRemediationAllowedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineHealthCheckTooManyUnhealthyV1Beta2Reason, @@ -344,7 +344,7 @@ func (r 
*Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster m.Status.RemediationsAllowed = remediationCount v1beta1conditions.MarkTrue(m, clusterv1.RemediationAllowedCondition) - v1beta2conditions.Set(m, metav1.Condition{ + conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineHealthCheckRemediationAllowedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineHealthCheckRemediationAllowedV1Beta2Reason, @@ -442,7 +442,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg if err != nil { v1beta1conditions.MarkFalse(m, clusterv1.ExternalRemediationTemplateAvailableCondition, clusterv1.ExternalRemediationTemplateNotFoundReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineExternallyRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineExternallyRemediatedRemediationTemplateNotFoundV1Beta2Reason, @@ -478,7 +478,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg if err := r.Client.Create(ctx, to); err != nil { v1beta1conditions.MarkFalse(m, clusterv1.ExternalRemediationRequestAvailableCondition, clusterv1.ExternalRemediationRequestCreationFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineExternallyRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineExternallyRemediatedRemediationRequestCreationFailedV1Beta2Reason, @@ -488,7 +488,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg return errList } - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineExternallyRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineExternallyRemediatedWaitingForRemediationV1Beta2Reason, @@ -502,8 +502,8 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") } - if ownerRemediatedCondition := v1beta2conditions.Get(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta2Condition); ownerRemediatedCondition == nil || ownerRemediatedCondition.Status == metav1.ConditionTrue { - v1beta2conditions.Set(t.Machine, metav1.Condition{ + if ownerRemediatedCondition := conditions.Get(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta2Condition); ownerRemediatedCondition == nil || ownerRemediatedCondition.Status == metav1.ConditionTrue { + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index b738d7a6275c..ce410b12a5ce 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -47,8 +47,8 @@ import ( capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" 
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -1940,7 +1940,7 @@ func assertMachinesNotHealthy(g *WithT, mhc *clusterv1.MachineHealthCheck, expec if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if c := v1beta2conditions.Get(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta2Condition); c == nil || c.Status != metav1.ConditionFalse { + if c := conditions.Get(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta2Condition); c == nil || c.Status != metav1.ConditionFalse { continue } @@ -1969,10 +1969,10 @@ func assertMachinesOwnerRemediated(g *WithT, mhc *clusterv1.MachineHealthCheck, continue } - if !v1beta2conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta2Condition) { + if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta2Condition) { continue } - if !v1beta2conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedV1Beta2Condition) { + if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedV1Beta2Condition) { continue } @@ -2796,7 +2796,7 @@ func TestPatchTargets(t *testing.T) { machine1.ResourceVersion = "999" v1beta1conditions.MarkTrue(machine1, clusterv1.MachineHealthCheckSucceededCondition) - v1beta2conditions.Set(machine1, metav1.Condition{ + conditions.Set(machine1, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineHealthCheckSucceededV1Beta2Reason, @@ -2843,7 +2843,7 @@ func TestPatchTargets(t *testing.T) { g.Expect(r.patchUnhealthyTargets(context.TODO(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, defaultCluster, mhc)).ToNot(BeEmpty()) g.Expect(cl.Get(ctx, client.ObjectKey{Name: machine2.Name, Namespace: machine2.Namespace}, machine2)).ToNot(HaveOccurred()) g.Expect(v1beta1conditions.Get(machine2, clusterv1.MachineOwnerRemediatedCondition).Status).To(Equal(corev1.ConditionFalse)) - g.Expect(v1beta2conditions.Get(machine2, clusterv1.MachineOwnerRemediatedV1Beta2Condition).Status).To(Equal(metav1.ConditionFalse)) + g.Expect(conditions.Get(machine2, clusterv1.MachineOwnerRemediatedV1Beta2Condition).Status).To(Equal(metav1.ConditionFalse)) // Target with wrong patch helper will fail but the other one will be patched. 
g.Expect(r.patchHealthyTargets(context.TODO(), logr.New(log.NullLogSink{}), []healthCheckTarget{target1, target3}, mhc)).ToNot(BeEmpty()) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index 4a963cfc01fb..4c508c7ef6e9 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -33,8 +33,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -90,7 +90,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.HasRemediateMachineAnnotationReason, clusterv1.ConditionSeverityWarning, "Marked for remediation via remediate-machine annotation") logger.V(3).Info("Target is marked for remediation via remediate-machine annotation") - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineHealthCheckHasRemediateAnnotationV1Beta2Reason, @@ -116,7 +116,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi logger.V(3).Info("Target is unhealthy: node is missing") v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineHealthCheckNodeDeletedV1Beta2Reason, @@ -178,7 +178,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeStartupTimeoutReason, clusterv1.ConditionSeverityWarning, "Node failed to report startup in %s", timeoutDuration) logger.V(3).Info("Target is unhealthy: machine has no node", "duration", timeoutDuration) - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineHealthCheckNodeStartupTimeoutV1Beta2Reason, @@ -209,7 +209,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.UnhealthyNodeConditionReason, clusterv1.ConditionSeverityWarning, "Condition %s on node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()) logger.V(3).Info("Target is unhealthy: condition is in state longer than allowed timeout", "condition", c.Type, "state", c.Status, "timeout", c.Timeout.Duration.String()) - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: 
clusterv1.MachineHealthCheckUnhealthyNodeV1Beta2Reason, @@ -349,7 +349,7 @@ func (r *Reconciler) healthCheckTargets(targets []healthCheckTarget, logger logr if t.Machine.DeletionTimestamp.IsZero() && t.Node != nil { v1beta1conditions.MarkTrue(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) - v1beta2conditions.Set(t.Machine, metav1.Condition{ + conditions.Set(t.Machine, metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineHealthCheckSucceededV1Beta2Reason, diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 30f57e00ac9f..202451ccca4c 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -56,8 +56,8 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/labels/format" @@ -514,7 +514,7 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e // Set machine's up to date condition if upToDateCondition != nil { - v1beta2conditions.Set(m, *upToDateCondition) + conditions.Set(m, *upToDateCondition) } if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1Beta2Conditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { @@ -531,7 +531,7 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e if err != nil { return ctrl.Result{}, err } - v1beta2conditions.Set(m, *upToDateCondition) + conditions.Set(m, *upToDateCondition) if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1Beta2Conditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { return ctrl.Result{}, err } @@ -1339,7 +1339,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( } shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - shouldCleanupV1Beta2 := v1beta2conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && v1beta2conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + shouldCleanupV1Beta2 := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) if !(shouldCleanup || shouldCleanupV1Beta2) { continue @@ -1356,7 +1356,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( } if shouldCleanupV1Beta2 { - v1beta2conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) } if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ @@ -1549,7 +1549,7 @@ func patchMachineConditions(ctx context.Context, c client.Client, machines []*cl if condition != nil { v1beta1conditions.Set(m, condition) } - v1beta2conditions.Set(m, v1beta2Condition) + conditions.Set(m, v1beta2Condition) if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: 
[]clusterv1.ConditionType{ diff --git a/internal/controllers/machineset/machineset_controller_status.go b/internal/controllers/machineset/machineset_controller_status.go index 87639f9ae303..1a912a9a3531 100644 --- a/internal/controllers/machineset/machineset_controller_status.go +++ b/internal/controllers/machineset/machineset_controller_status.go @@ -30,7 +30,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" clog "sigs.k8s.io/cluster-api/util/log" ) @@ -73,13 +73,13 @@ func setReplicas(_ context.Context, ms *clusterv1.MachineSet, machines []*cluste var readyReplicas, availableReplicas, upToDateReplicas int32 for _, machine := range machines { - if v1beta2conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineReadyV1Beta2Condition) { readyReplicas++ } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineAvailableV1Beta2Condition) { availableReplicas++ } - if v1beta2conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { + if conditions.IsTrue(machine, clusterv1.MachineUpToDateV1Beta2Condition) { upToDateReplicas++ } } @@ -92,7 +92,7 @@ func setReplicas(_ context.Context, ms *clusterv1.MachineSet, machines []*cluste func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines []*clusterv1.Machine, bootstrapObjectNotFound, infrastructureObjectNotFound, getAndAdoptMachinesForMachineSetSucceeded bool, scaleUpPreflightCheckErrMessages []string) { // If we got unexpected errors in listing the machines (this should never happen), surface them. if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetScalingUpInternalErrorV1Beta2Reason, @@ -103,7 +103,7 @@ func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines // Surface if .spec.replicas is not yet set (this should never happen). 
if ms.Spec.Replicas == nil { - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetScalingUpWaitingForReplicasSetV1Beta2Reason, @@ -126,7 +126,7 @@ func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines if ms.DeletionTimestamp.IsZero() && missingReferencesMessage != "" { message = fmt.Sprintf("Scaling up would be blocked because %s", missingReferencesMessage) } - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetNotScalingUpV1Beta2Reason, @@ -147,7 +147,7 @@ func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines } message += fmt.Sprintf(" is blocked because:\n%s", strings.Join(listMessages, "\n")) } - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetScalingUpV1Beta2Reason, @@ -158,7 +158,7 @@ func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines func setScalingDownCondition(_ context.Context, ms *clusterv1.MachineSet, machines []*clusterv1.Machine, getAndAdoptMachinesForMachineSetSucceeded bool) { // If we got unexpected errors in listing the machines (this should never happen), surface them. if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetScalingDownInternalErrorV1Beta2Reason, @@ -169,7 +169,7 @@ func setScalingDownCondition(_ context.Context, ms *clusterv1.MachineSet, machin // Surface if .spec.replicas is not yet set (this should never happen). if ms.Spec.Replicas == nil { - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetScalingDownWaitingForReplicasSetV1Beta2Reason, @@ -191,7 +191,7 @@ func setScalingDownCondition(_ context.Context, ms *clusterv1.MachineSet, machin if staleMessage != "" { message += fmt.Sprintf("\n* %s", staleMessage) } - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetScalingDownV1Beta2Reason, @@ -201,7 +201,7 @@ func setScalingDownCondition(_ context.Context, ms *clusterv1.MachineSet, machin } // Not scaling down. - v1beta2conditions.Set(ms, metav1.Condition{ + conditions.Set(ms, metav1.Condition{ Type: clusterv1.MachineSetScalingDownV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetNotScalingDownV1Beta2Reason, @@ -212,7 +212,7 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin log := ctrl.LoggerFrom(ctx) // If we got unexpected errors in listing the machines (this should never happen), surface them. 
if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetMachinesReadyInternalErrorV1Beta2Reason, @@ -222,7 +222,7 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin } if len(machines) == 0 { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetMachinesReadyNoReplicasV1Beta2Reason, @@ -230,13 +230,13 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin return } - readyCondition, err := v1beta2conditions.NewAggregateCondition( + readyCondition, err := conditions.NewAggregateCondition( machines, clusterv1.MachineReadyV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineSetMachinesReadyV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineSetMachinesReadyV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.MachineSetMachinesNotReadyV1Beta2Reason, clusterv1.MachineSetMachinesReadyUnknownV1Beta2Reason, clusterv1.MachineSetMachinesReadyV1Beta2Reason, @@ -246,7 +246,7 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin ) if err != nil { log.Error(err, "Failed to aggregate Machine's Ready conditions") - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetMachinesReadyInternalErrorV1Beta2Reason, @@ -255,14 +255,14 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin return } - v1beta2conditions.Set(machineSet, *readyCondition) + conditions.Set(machineSet, *readyCondition) } func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.MachineSet, machinesSlice []*clusterv1.Machine, getAndAdoptMachinesForMachineSetSucceeded bool) { log := ctrl.LoggerFrom(ctx) // If we got unexpected errors in listing the machines (this should never happen), surface them. if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetMachinesUpToDateInternalErrorV1Beta2Reason, @@ -275,11 +275,11 @@ func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.Mac // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, // because it can take a bit until the UpToDate condition is set on a new Machine. 
machines := collections.FromMachines(machinesSlice...).Filter(func(machine *clusterv1.Machine) bool { - return v1beta2conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second + return conditions.Has(machine, clusterv1.MachineUpToDateV1Beta2Condition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) if len(machines) == 0 { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetMachinesUpToDateNoReplicasV1Beta2Reason, @@ -287,13 +287,13 @@ func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.Mac return } - upToDateCondition, err := v1beta2conditions.NewAggregateCondition( + upToDateCondition, err := conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineSetMachinesUpToDateV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineSetMachinesUpToDateV1Beta2Condition), // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( clusterv1.MachineSetMachinesNotUpToDateV1Beta2Reason, clusterv1.MachineSetMachinesUpToDateUnknownV1Beta2Reason, clusterv1.MachineSetMachinesUpToDateV1Beta2Reason, @@ -303,7 +303,7 @@ func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.Mac ) if err != nil { log.Error(err, "Failed to aggregate Machine's UpToDate conditions") - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetMachinesUpToDateInternalErrorV1Beta2Reason, @@ -312,12 +312,12 @@ func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.Mac return } - v1beta2conditions.Set(machineSet, *upToDateCondition) + conditions.Set(machineSet, *upToDateCondition) } func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineSet, machinesToBeRemediated, unhealthyMachines collections.Machines, getAndAdoptMachinesForMachineSetSucceeded bool) { if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetRemediatingInternalErrorV1Beta2Reason, @@ -328,7 +328,7 @@ func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineS if len(machinesToBeRemediated) == 0 { message := aggregateUnhealthyMachines(unhealthyMachines) - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetRemediatingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetNotRemediatingV1Beta2Reason, @@ -337,14 +337,14 @@ func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineS return } - remediatingCondition, err := v1beta2conditions.NewAggregateCondition( + 
remediatingCondition, err := conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, - v1beta2conditions.TargetConditionType(clusterv1.MachineSetRemediatingV1Beta2Condition), + conditions.TargetConditionType(clusterv1.MachineSetRemediatingV1Beta2Condition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). ) if err != nil { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetRemediatingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetRemediatingInternalErrorV1Beta2Reason, @@ -356,7 +356,7 @@ func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineS return } - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: remediatingCondition.Type, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetRemediatingV1Beta2Reason, @@ -367,7 +367,7 @@ func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineS func setDeletingCondition(_ context.Context, machineSet *clusterv1.MachineSet, machines []*clusterv1.Machine, getAndAdoptMachinesForMachineSetSucceeded bool) { // If we got unexpected errors in listing the machines (this should never happen), surface them. if !getAndAdoptMachinesForMachineSetSucceeded { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetDeletingV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineSetDeletingInternalErrorV1Beta2Reason, @@ -377,7 +377,7 @@ func setDeletingCondition(_ context.Context, machineSet *clusterv1.MachineSet, m } if machineSet.DeletionTimestamp.IsZero() { - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetDeletingV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetNotDeletingV1Beta2Reason, @@ -400,7 +400,7 @@ func setDeletingCondition(_ context.Context, machineSet *clusterv1.MachineSet, m if message == "" { message = "Deletion completed" } - v1beta2conditions.Set(machineSet, metav1.Condition{ + conditions.Set(machineSet, metav1.Condition{ Type: clusterv1.MachineSetDeletingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetDeletingV1Beta2Reason, @@ -439,7 +439,7 @@ func aggregateStaleMachines(machines []*clusterv1.Machine) string { if !machine.GetDeletionTimestamp().IsZero() && time.Since(machine.GetDeletionTimestamp().Time) > time.Minute*15 { machineNames = append(machineNames, machine.GetName()) - deletingCondition := v1beta2conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) + deletingCondition := conditions.Get(machine, clusterv1.MachineDeletingV1Beta2Condition) if deletingCondition != nil && deletingCondition.Status == metav1.ConditionTrue && deletingCondition.Reason == clusterv1.MachineDeletingDrainingNodeV1Beta2Reason && diff --git a/internal/controllers/machineset/machineset_controller_status_test.go b/internal/controllers/machineset/machineset_controller_status_test.go index 3e93336b6191..38086df2ec8a 100644 --- a/internal/controllers/machineset/machineset_controller_status_test.go +++ b/internal/controllers/machineset/machineset_controller_status_test.go @@ -28,7 +28,7 @@ 
import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) func Test_setReplicas(t *testing.T) { @@ -341,9 +341,9 @@ func Test_setScalingUpCondition(t *testing.T) { setScalingUpCondition(ctx, tt.ms, tt.machines, tt.bootstrapObjectNotFound, tt.infrastructureObjectNotFound, tt.getAndAdoptMachinesForMachineSetSucceeded, tt.scaleUpPreflightCheckErrMessages) - condition := v1beta2conditions.Get(tt.ms, clusterv1.MachineSetScalingUpV1Beta2Condition) + condition := conditions.Get(tt.ms, clusterv1.MachineSetScalingUpV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -522,9 +522,9 @@ After above Pods have been removed from the Node, the following Pods will be evi setScalingDownCondition(ctx, tt.ms, tt.machines, tt.getAndAdoptMachinesForMachineSetSucceeded) - condition := v1beta2conditions.Get(tt.ms, clusterv1.MachineSetScalingDownV1Beta2Condition) + condition := conditions.Get(tt.ms, clusterv1.MachineSetScalingDownV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -651,9 +651,9 @@ func Test_setMachinesReadyCondition(t *testing.T) { setMachinesReadyCondition(ctx, tt.machineSet, tt.machines, tt.getAndAdoptMachinesForMachineSetSucceeded) - condition := v1beta2conditions.Get(tt.machineSet, clusterv1.MachineSetMachinesReadyV1Beta2Condition) + condition := conditions.Get(tt.machineSet, clusterv1.MachineSetMachinesReadyV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -808,9 +808,9 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { setMachinesUpToDateCondition(ctx, tt.machineSet, tt.machines, tt.getAndAdoptMachinesForMachineSetSucceeded) - condition := v1beta2conditions.Get(tt.machineSet, clusterv1.MachineSetMachinesUpToDateV1Beta2Condition) + condition := conditions.Get(tt.machineSet, clusterv1.MachineSetMachinesUpToDateV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -935,9 +935,9 @@ func Test_setRemediatingCondition(t *testing.T) { } setRemediatingCondition(ctx, tt.machineSet, machinesToBeRemediated, unHealthyMachines, tt.getAndAdoptMachinesForMachineSetSucceeded) - condition := v1beta2conditions.Get(tt.machineSet, clusterv1.MachineSetRemediatingV1Beta2Condition) + condition := conditions.Get(tt.machineSet, clusterv1.MachineSetRemediatingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + 
g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1010,9 +1010,9 @@ func Test_setDeletingCondition(t *testing.T) { setDeletingCondition(ctx, tt.machineSet, tt.machines, tt.getAndAdoptMachinesForMachineSetSucceeded) - condition := v1beta2conditions.Get(tt.machineSet, clusterv1.MachineSetDeletingV1Beta2Condition) + condition := conditions.Get(tt.machineSet, clusterv1.MachineSetDeletingV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1168,7 +1168,7 @@ func withStaleDrain() fakeMachinesOption { func withV1Beta2Condition(c metav1.Condition) fakeMachinesOption { return func(m *clusterv1.Machine) { - v1beta2conditions.Set(m, c) + conditions.Set(m, c) } } diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 04c84b15e92e..f8adb08bc4a1 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -47,8 +47,8 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -1607,21 +1607,21 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeFalse()) g.Expect(v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) // Verify the healthy machine is not deleted and does not have the OwnerRemediated condition. 
m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(healthyMachine), m)).Should(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeFalse()) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) t.Run("should update the unhealthy machine MachineOwnerRemediated condition if preflight checks did not pass", func(t *testing.T) { @@ -1746,14 +1746,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) g.Expect(machineOwnerRemediatedCondition.Reason). To(Equal(clusterv1.WaitingForRemediationReason), "%s condition should have reason %s", condition, clusterv1.WaitingForRemediationReason) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationDeferredV1Beta2Reason, Message: "* GenericControlPlane default/cp1 is upgrading (\"ControlPlaneIsStable\" preflight check failed)", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) // Verify the healthy machine is not deleted and does not have the OwnerRemediated condition. m = &clusterv1.Machine{} @@ -1761,7 +1761,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) t.Run("should only try to remediate MachineOwnerRemediated if MachineSet is current", func(t *testing.T) { @@ -1927,14 +1927,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) g.Expect(unhealthyMachine.DeletionTimestamp).Should(BeZero()) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineCannotBeRemediatedV1Beta2Reason, Message: "Machine won't be remediated because it is pending removal due to rollout", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) // Verify the healthy machine is not deleted and does not have the OwnerRemediated condition. 
m = &clusterv1.Machine{} @@ -1942,7 +1942,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) // Test with the current MachineSet. s = &scope{ @@ -1959,14 +1959,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) g.Expect(m.DeletionTimestamp.IsZero()).To(BeFalse()) g.Expect(v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition)).To(BeTrue()) - c = v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c = conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) // Verify (again) the healthy machine is not deleted and does not have the OwnerRemediated condition. m = &clusterv1.Machine{} @@ -1974,7 +1974,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) t.Run("should only try to remediate up to MaxInFlight unhealthy", func(t *testing.T) { @@ -2148,14 +2148,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationDeferredV1Beta2Reason, Message: "Waiting because there are already too many remediations in progress (spec.strategy.remediation.maxInFlight is 3)", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) } else { // Machines after maxInFlight, should be deleted. g.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "expected machine %d to be deleted", i) @@ -2168,7 +2168,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). 
To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) // // Second pass. @@ -2215,14 +2215,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationDeferredV1Beta2Reason, Message: "Waiting because there are already too many remediations in progress (spec.strategy.remediation.maxInFlight is 3)", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) g.Expect(m.DeletionTimestamp).To(BeZero()) } else if i < total-maxInFlight { // Machines before the maxInFlight should have a deletion timestamp @@ -2232,14 +2232,14 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) g.Expect(machineOwnerRemediatedCondition.Status). To(Equal(corev1.ConditionTrue), "%s condition status should be true", condition) - c := v1beta2conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + c := conditions.Get(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) g.Expect(c).ToNot(BeNil()) - g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{ + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting", - }, v1beta2conditions.IgnoreLastTransitionTime(true))) + }, conditions.IgnoreLastTransitionTime(true))) g.Expect(m.DeletionTimestamp).ToNot(BeZero()) if cleanFinalizer { @@ -2259,7 +2259,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) // Perform another pass with the same exact configuration. // This is testing that, given that we have Machines that are being deleted and are in flight, @@ -2282,7 +2282,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). 
To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) // Call again to verify that the remaining unhealthy machines are deleted, // at this point all unhealthy machines should be deleted given the max in flight @@ -2308,7 +2308,7 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { g.Expect(m.DeletionTimestamp.IsZero()).To(BeTrue()) g.Expect(v1beta1conditions.Has(m, condition)). To(BeFalse(), "Machine should not have the %s condition set", condition) - g.Expect(v1beta2conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) + g.Expect(conditions.Has(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)).To(BeFalse()) }) } @@ -3153,7 +3153,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { condition := newMachineUpToDateCondition(s) if tt.expectCondition != nil { g.Expect(condition).ToNot(BeNil()) - g.Expect(*condition).To(v1beta2conditions.MatchCondition(*tt.expectCondition, v1beta2conditions.IgnoreLastTransitionTime(true))) + g.Expect(*condition).To(conditions.MatchCondition(*tt.expectCondition, conditions.IgnoreLastTransitionTime(true))) } else { g.Expect(condition).To(BeNil()) } diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go index 44886ecb42ca..53afe29151d2 100644 --- a/internal/controllers/topology/cluster/conditions.go +++ b/internal/controllers/topology/cluster/conditions.go @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/cluster-api/exp/topology/scope" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error { @@ -64,7 +64,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste strings.Join(messages, ", "), ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconcilePausedV1Beta2Reason, @@ -83,7 +83,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste "", ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledDeletingV1Beta2Reason, @@ -104,7 +104,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste reconcileErr.Error(), ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledFailedV1Beta2Reason, @@ -127,7 +127,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste ".status.observedGeneration == .metadata.generation is true. 
If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason, @@ -149,7 +149,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste s.HookResponseTracker.AggregateMessage(), ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledHookBlockingV1Beta2Reason, @@ -257,7 +257,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste msgBuilder.String(), ), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: v1beta2Reason, @@ -272,7 +272,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste v1beta1conditions.Set(cluster, v1beta1conditions.TrueCondition(clusterv1.TopologyReconciledCondition), ) - v1beta2conditions.Set(cluster, metav1.Condition{ + conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterTopologyReconciledV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterTopologyReconcileSucceededV1Beta2Reason, diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index 4ae3b8ce895b..2716dbd82b47 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -34,8 +34,8 @@ import ( runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/exp/topology/scope" "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -984,7 +984,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { g.Expect(actualCondition.Reason).To(BeEquivalentTo(tt.wantConditionReason)) g.Expect(actualCondition.Message).To(BeEquivalentTo(tt.wantConditionMessage)) - actualV1Beta2Condition := v1beta2conditions.Get(tt.cluster, clusterv1.ClusterTopologyReconciledV1Beta2Condition) + actualV1Beta2Condition := conditions.Get(tt.cluster, clusterv1.ClusterTopologyReconciledV1Beta2Condition) g.Expect(actualV1Beta2Condition).ToNot(BeNil()) g.Expect(actualV1Beta2Condition.Status).To(BeEquivalentTo(tt.wantV1Beta2ConditionStatus)) g.Expect(actualV1Beta2Condition.Reason).To(BeEquivalentTo(tt.wantV1Beta2ConditionReason)) diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index 7ef4d2a62410..dd189df9b04d 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -55,7 +55,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" ) // ClusterctlUpgradeSpecInput is the input for ClusterctlUpgradeSpec. 
@@ -815,8 +815,8 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to get Cluster object %s", klog.KRef(clusterNamespace, clusterName)) for _, conditionType := range v1beta2conditionTypes { - if v1beta2conditions.Has(cluster, conditionType) { - condition := v1beta2conditions.Get(cluster, conditionType) + if conditions.Has(cluster, conditionType) { + condition := conditions.Get(cluster, conditionType) Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Cluster should be set to true", conditionType) Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Cluster should have an empty message", conditionType) } @@ -831,8 +831,8 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(cluster)) for _, machine := range machineList.Items { for _, conditionType := range v1beta2conditionTypes { - if v1beta2conditions.Has(&machine, conditionType) { - condition := v1beta2conditions.Get(&machine, conditionType) + if conditions.Has(&machine, conditionType) { + condition := conditions.Get(&machine, conditionType) Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Machine %q should be set to true", conditionType, machine.Name) Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Machine %q should have an empty message", conditionType, machine.Name) } diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go index 98ef495dcc4c..5a81068069bd 100644 --- a/test/extension/handlers/topologymutation/handler_integration_test.go +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -55,7 +55,7 @@ import ( "sigs.k8s.io/cluster-api/exp/topology/desiredstate" "sigs.k8s.io/cluster-api/exp/topology/scope" "sigs.k8s.io/cluster-api/feature" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/contract" "sigs.k8s.io/cluster-api/webhooks" ) @@ -250,7 +250,7 @@ func getScope(cluster *clusterv1.Cluster, clusterClassFile string) (*scope.Scope Kind: "ClusterClass", })) // Set paused condition for ClusterClass - v1beta2conditions.Set(s.Blueprint.ClusterClass, metav1.Condition{ + conditions.Set(s.Blueprint.ClusterClass, metav1.Condition{ Type: clusterv1.PausedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.NotPausedV1Beta2Reason, diff --git a/test/framework/cluster_helpers.go b/test/framework/cluster_helpers.go index 545a312a616b..15bfb6dcfaca 100644 --- a/test/framework/cluster_helpers.go +++ b/test/framework/cluster_helpers.go @@ -35,7 +35,7 @@ import ( cmdtree "sigs.k8s.io/cluster-api/internal/util/tree" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" "sigs.k8s.io/cluster-api/test/framework/internal/log" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -208,7 +208,7 @@ func dumpArtifactsOnDeletionTimeout(ctx context.Context, clusterProxy ClusterPro // Try to get more details about why Cluster deletion timed out. 
if err := clusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(cluster), cluster); err == nil { - if c := v1beta2conditions.Get(cluster, clusterv1.MachineDeletingV1Beta2Condition); c != nil { + if c := conditions.Get(cluster, clusterv1.MachineDeletingV1Beta2Condition); c != nil { return fmt.Sprintf("waiting for cluster deletion timed out:\ncondition: %s\nmessage: %s", c.Type, c.Message) } } diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go index 80062e36e28b..47cbd664d8e4 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go @@ -33,8 +33,8 @@ import ( "sigs.k8s.io/cluster-api/test/infrastructure/container" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -62,7 +62,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port)) if err != nil { v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason, @@ -74,7 +74,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster // Create the docker container hosting the load balancer. 
if err := externalLoadBalancer.Create(ctx); err != nil { v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason, @@ -87,7 +87,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster lbIP, err := externalLoadBalancer.IP(ctx) if err != nil { v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason, @@ -105,7 +105,7 @@ func (r *ClusterBackEndReconciler) ReconcileNormal(ctx context.Context, cluster // Mark the dockerCluster ready dockerCluster.Status.Ready = true v1beta1conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableCondition) - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Reason, @@ -127,7 +127,7 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port)) if err != nil { v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevClusterDockerLoadBalancerNotAvailableV1Beta2Reason, @@ -142,7 +142,7 @@ func (r *ClusterBackEndReconciler) ReconcileDelete(ctx context.Context, cluster // TODO (v1beta2): test for v1beta2 conditions if v1beta1conditions.GetReason(dockerCluster, infrav1.LoadBalancerAvailableCondition) != clusterv1.DeletingReason { v1beta1conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevClusterDockerLoadBalancerDeletingV1Beta2Reason, @@ -175,15 +175,15 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel ), v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), ) - if err := v1beta2conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, + conditions.ForConditionTypes{ infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, }, // Using a custom 
merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( infrav1.DevClusterNotReadyV1Beta2Reason, infrav1.DevClusterReadyUnknownV1Beta2Reason, infrav1.DevClusterReadyV1Beta2Reason, diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index 14b3360f3582..cc838543e74c 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -40,8 +40,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/labels" "sigs.k8s.io/cluster-api/util/patch" ) @@ -72,7 +72,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster if !cluster.Status.InfrastructureReady { log.Info("Waiting for DockerCluster Controller to create cluster infrastructure") v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerWaitingForClusterInfrastructureReadyV1Beta2Reason, @@ -121,7 +121,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster if externalMachine.Exists() { v1beta1conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineDockerContainerProvisionedV1Beta2Reason, @@ -129,16 +129,16 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // In case of upgrades the v1beta2 condition for BootstrapExecSucceeded does not exist. // In this case recover the information from the existing v1beta1 condition, because we do not know if // all commands succeeded. 
- if !v1beta2conditions.Has(dockerMachine, infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition) { + if !conditions.Has(dockerMachine, infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition) { condition := v1beta1conditions.Get(dockerMachine, infrav1.BootstrapExecSucceededCondition) if condition == nil || condition.Status == corev1.ConditionTrue { - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Reason, }) } else { - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Message: condition.Message, @@ -153,7 +153,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } } else { v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.ContainerDeletedReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Container %s does not exist anymore", externalMachine.Name())) - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerNotProvisionedV1Beta2Reason, @@ -169,7 +169,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized") v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerWaitingForControlPlaneInitializedV1Beta2Reason, @@ -179,7 +179,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerWaitingForBootstrapDataV1Beta2Reason, @@ -230,7 +230,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster requeue := false if !v1beta1conditions.IsTrue(dockerMachine, infrav1.ContainerProvisionedCondition) { v1beta1conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineDockerContainerProvisionedV1Beta2Reason, @@ -239,7 +239,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx 
context.Context, cluster } if !v1beta1conditions.Has(dockerMachine, infrav1.BootstrapExecSucceededCondition) { v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrappingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerBootstrapExecNotSucceededV1Beta2Reason, @@ -297,7 +297,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Run the bootstrap script. Simulates cloud-init/Ignition. if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format, version, dockerMachine.Spec.Backend.Docker.CustomImage); err != nil { v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerBootstrapExecNotSucceededV1Beta2Reason, @@ -309,7 +309,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Check for bootstrap success if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, true); err != nil { v1beta1conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap") - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerBootstrapExecNotSucceededV1Beta2Reason, @@ -323,7 +323,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Update the BootstrapExecSucceededCondition condition v1beta1conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) - v1beta2conditions.Set(dockerMachine, metav1.Condition{ + conditions.Set(dockerMachine, metav1.Condition{ Type: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Reason, @@ -425,7 +425,7 @@ func (r *MachineBackendReconciler) ReconcileDelete(ctx context.Context, cluster // TODO (v1beta2): test for v1beta2 conditions if v1beta1conditions.GetReason(dockerMachine, infrav1.ContainerProvisionedCondition) != clusterv1.DeletingReason { v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(dockerCluster, metav1.Condition{ + conditions.Set(dockerCluster, metav1.Condition{ Type: infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineDockerContainerDeletingV1Beta2Reason, @@ -464,16 +464,16 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel ), v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), ) - if err := v1beta2conditions.SetSummaryCondition(dockerMachine, dockerMachine, 
infrav1.DevMachineReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition, + conditions.ForConditionTypes{ infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( infrav1.DevMachineNotReadyV1Beta2Reason, infrav1.DevMachineReadyUnknownV1Beta2Reason, infrav1.DevMachineReadyV1Beta2Reason, diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index da9dcff53b92..006358575be2 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -45,8 +45,8 @@ import ( inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" ) @@ -69,7 +69,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster log := ctrl.LoggerFrom(ctx) setOtherWaitingConditions := func() { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryNodeWaitingForVMProvisionedV1Beta2Reason, @@ -79,13 +79,13 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster return } - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryEtcdWaitingForVMProvisionedV1Beta2Reason, }) - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryAPIServerWaitingForVMProvisionedV1Beta2Reason, @@ -95,7 +95,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Check if the infrastructure is ready, otherwise return and wait for the cluster object to be updated if !cluster.Status.InfrastructureReady { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: 
metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryVMWaitingForClusterInfrastructureV1Beta2Reason, @@ -112,7 +112,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // TODO (v1beta2): test for v1beta2 conditions if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryVMWaitingForControlPlaneInitializedV1Beta2Reason, @@ -123,7 +123,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryVMWaitingForBootstrapDataV1Beta2Reason, @@ -167,7 +167,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Context, cluster *clusterv1.Cluster, _ *clusterv1.Machine, inMemoryMachine *infrav1.DevMachine) (_ ctrl.Result, retErr error) { defer func() { if retErr != nil { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryVMInternalErrorV1Beta2Reason, @@ -218,7 +218,7 @@ func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Conte now := time.Now() if now.Before(start.Add(provisioningDuration)) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.VMWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryVMWaitingForStartupTimeoutV1Beta2Reason, @@ -231,7 +231,7 @@ func (r *MachineBackendReconciler) reconcileNormalCloudMachine(ctx context.Conte inMemoryMachine.Spec.ProviderID = ptr.To(calculateProviderID(inMemoryMachine)) inMemoryMachine.Status.Ready = true v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.VMProvisionedCondition) - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Reason, @@ -243,7 +243,7 @@ func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, clus // No-op if the VM is not provisioned yet // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: 
infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryNodeWaitingForVMProvisionedV1Beta2Reason, @@ -253,7 +253,7 @@ func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, clus defer func() { if retErr != nil { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryNodeInternalErrorV1Beta2Reason, @@ -284,7 +284,7 @@ func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, clus now := time.Now() if now.Before(start.Add(provisioningDuration)) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.NodeProvisionedCondition, infrav1.NodeWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryNodeWaitingForStartupTimeoutV1Beta2Reason, @@ -354,7 +354,7 @@ func (r *MachineBackendReconciler) reconcileNormalNode(ctx context.Context, clus } v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Reason, @@ -375,7 +375,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus // No-op if the VM is not provisioned yet // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryEtcdWaitingForVMProvisionedV1Beta2Reason, @@ -386,7 +386,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus // No-op if the Node is not provisioned yet // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryEtcdWaitingForNodeProvisionedV1Beta2Reason, @@ -396,7 +396,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus defer func() { if retErr != nil { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryEtcdInternalErrorV1Beta2Reason, @@ -427,7 +427,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus now := time.Now() if now.Before(start.Add(provisioningDuration)) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.EtcdProvisionedCondition, infrav1.EtcdWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + 
conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryEtcdWaitingForStartupTimeoutV1Beta2Reason, @@ -547,7 +547,7 @@ func (r *MachineBackendReconciler) reconcileNormalETCD(ctx context.Context, clus } v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.EtcdProvisionedCondition) - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineInMemoryEtcdProvisionedV1Beta2Reason, @@ -619,7 +619,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, // No-op if the VM is not provisioned yet // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.VMProvisionedCondition) { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryAPIServerWaitingForVMProvisionedV1Beta2Reason, @@ -630,7 +630,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, // No-op if the Node is not provisioned yet // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.IsTrue(inMemoryMachine, infrav1.NodeProvisionedCondition) { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryAPIServerWaitingForNodeProvisionedV1Beta2Reason, @@ -640,7 +640,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, defer func() { if retErr != nil { - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryAPIServerInternalErrorV1Beta2Reason, @@ -671,7 +671,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, now := time.Now() if now.Before(start.Add(provisioningDuration)) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.APIServerProvisionedCondition, infrav1.APIServerWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.DevMachineInMemoryAPIServerWaitingForStartupTimeoutV1Beta2Reason, @@ -756,7 +756,7 @@ func (r *MachineBackendReconciler) reconcileNormalAPIServer(ctx context.Context, } v1beta1conditions.MarkTrue(inMemoryMachine, infrav1.APIServerProvisionedCondition) - v1beta2conditions.Set(inMemoryMachine, metav1.Condition{ + conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.DevMachineInMemoryAPIServerProvisionedV1Beta2Reason, @@ -1232,7 +1232,7 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel infrav1.VMProvisionedCondition, infrav1.NodeProvisionedCondition, } - inMemoryMachineV1Beta2Conditions := 
v1beta2conditions.ForConditionTypes{ + inMemoryMachineV1Beta2Conditions := conditions.ForConditionTypes{ infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, infrav1.DevMachineInMemoryNodeProvisionedV1Beta2Condition, } @@ -1252,13 +1252,13 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel v1beta1conditions.WithConditions(inMemoryMachineConditions...), v1beta1conditions.WithStepCounterIf(inMemoryMachine.ObjectMeta.DeletionTimestamp.IsZero() && inMemoryMachine.Spec.ProviderID == nil), ) - if err := v1beta2conditions.SetSummaryCondition(inMemoryMachine, inMemoryMachine, infrav1.DevMachineReadyV1Beta2Condition, + if err := conditions.SetSummaryCondition(inMemoryMachine, inMemoryMachine, infrav1.DevMachineReadyV1Beta2Condition, inMemoryMachineV1Beta2Conditions, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( infrav1.DevMachineNotReadyV1Beta2Reason, infrav1.DevMachineReadyUnknownV1Beta2Reason, infrav1.DevMachineReadyV1Beta2Reason, diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index 825381ce70e0..10d96db4d6b3 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -34,8 +34,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -159,15 +159,15 @@ func patchDockerCluster(ctx context.Context, patchHelper *patch.Helper, dockerCl ), v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), ) - if err := v1beta2conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, + conditions.ForConditionTypes{ infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. 
- v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( infrav1.DevClusterNotReadyV1Beta2Reason, infrav1.DevClusterReadyUnknownV1Beta2Reason, infrav1.DevClusterReadyV1Beta2Reason, diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 3317bf0a9637..d61e5d3ac01a 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -36,8 +36,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" @@ -255,16 +255,16 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa ), v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), ) - if err := v1beta2conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition, + conditions.ForConditionTypes{ infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, infrav1.DevMachineDockerContainerBootstrapExecSucceededV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( infrav1.DevMachineNotReadyV1Beta2Reason, infrav1.DevMachineReadyUnknownV1Beta2Reason, infrav1.DevMachineReadyV1Beta2Reason, diff --git a/util/conditions/v1beta2/aggregate.go b/util/conditions/aggregate.go similarity index 99% rename from util/conditions/v1beta2/aggregate.go rename to util/conditions/aggregate.go index 892fd9a9ce38..9b9b7e7c186a 100644 --- a/util/conditions/v1beta2/aggregate.go +++ b/util/conditions/aggregate.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "fmt" diff --git a/util/conditions/v1beta2/aggregate_test.go b/util/conditions/aggregate_test.go similarity index 99% rename from util/conditions/v1beta2/aggregate_test.go rename to util/conditions/aggregate_test.go index 87c7a3a1d373..04476371c43e 100644 --- a/util/conditions/v1beta2/aggregate_test.go +++ b/util/conditions/aggregate_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta2 +package conditions import ( "fmt" diff --git a/util/conditions/doc.go b/util/conditions/doc.go new file mode 100644 index 000000000000..54f17ac79528 --- /dev/null +++ b/util/conditions/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conditions implements utils for metav1.Conditions that will be used starting with the v1beta2 API. +// +// Please see the proposal https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. +package conditions diff --git a/util/conditions/v1beta2/getter.go b/util/conditions/getter.go similarity index 99% rename from util/conditions/v1beta2/getter.go rename to util/conditions/getter.go index f76b9c0880e1..c48a31cdda6b 100644 --- a/util/conditions/v1beta2/getter.go +++ b/util/conditions/getter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "github.com/pkg/errors" diff --git a/util/conditions/v1beta2/getter_test.go b/util/conditions/getter_test.go similarity index 99% rename from util/conditions/v1beta2/getter_test.go rename to util/conditions/getter_test.go index ab1fef3f0907..4fe3ee873771 100644 --- a/util/conditions/v1beta2/getter_test.go +++ b/util/conditions/getter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/matcher.go b/util/conditions/matcher.go similarity index 99% rename from util/conditions/v1beta2/matcher.go rename to util/conditions/matcher.go index d842b2136e19..f2efd3484c0e 100644 --- a/util/conditions/v1beta2/matcher.go +++ b/util/conditions/matcher.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "fmt" diff --git a/util/conditions/v1beta2/matcher_test.go b/util/conditions/matcher_test.go similarity index 99% rename from util/conditions/v1beta2/matcher_test.go rename to util/conditions/matcher_test.go index d51496ae9300..128543b5bb45 100644 --- a/util/conditions/v1beta2/matcher_test.go +++ b/util/conditions/matcher_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/merge_strategies.go b/util/conditions/merge_strategies.go similarity index 99% rename from util/conditions/v1beta2/merge_strategies.go rename to util/conditions/merge_strategies.go index aa850c28376b..85863eab4cc5 100644 --- a/util/conditions/v1beta2/merge_strategies.go +++ b/util/conditions/merge_strategies.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta2 +package conditions import ( "fmt" diff --git a/util/conditions/v1beta2/merge_strategies_test.go b/util/conditions/merge_strategies_test.go similarity index 99% rename from util/conditions/v1beta2/merge_strategies_test.go rename to util/conditions/merge_strategies_test.go index 7b70270b6b2f..cc8c0ac238bf 100644 --- a/util/conditions/v1beta2/merge_strategies_test.go +++ b/util/conditions/merge_strategies_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/mirror.go b/util/conditions/mirror.go similarity index 99% rename from util/conditions/v1beta2/mirror.go rename to util/conditions/mirror.go index bbf5986fe079..ca756cfd2915 100644 --- a/util/conditions/v1beta2/mirror.go +++ b/util/conditions/mirror.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "fmt" diff --git a/util/conditions/v1beta2/mirror_test.go b/util/conditions/mirror_test.go similarity index 99% rename from util/conditions/v1beta2/mirror_test.go rename to util/conditions/mirror_test.go index bb6b14bc2c64..c3d59d8f15b8 100644 --- a/util/conditions/v1beta2/mirror_test.go +++ b/util/conditions/mirror_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/options.go b/util/conditions/options.go similarity index 99% rename from util/conditions/v1beta2/options.go rename to util/conditions/options.go index c212fb90089b..2e099031d7bd 100644 --- a/util/conditions/v1beta2/options.go +++ b/util/conditions/options.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/util/conditions/v1beta2/patch.go b/util/conditions/patch.go similarity index 99% rename from util/conditions/v1beta2/patch.go rename to util/conditions/patch.go index 939ff41ff01b..b7d8b331d3d5 100644 --- a/util/conditions/v1beta2/patch.go +++ b/util/conditions/patch.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "reflect" diff --git a/util/conditions/v1beta2/patch_test.go b/util/conditions/patch_test.go similarity index 99% rename from util/conditions/v1beta2/patch_test.go rename to util/conditions/patch_test.go index 64cb94d366e6..1e6897c061a9 100644 --- a/util/conditions/v1beta2/patch_test.go +++ b/util/conditions/patch_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/setter.go b/util/conditions/setter.go similarity index 99% rename from util/conditions/v1beta2/setter.go rename to util/conditions/setter.go index 75b1042b94e7..c26ef8693b7c 100644 --- a/util/conditions/v1beta2/setter.go +++ b/util/conditions/setter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta2 +package conditions import ( "sort" diff --git a/util/conditions/v1beta2/setter_test.go b/util/conditions/setter_test.go similarity index 99% rename from util/conditions/v1beta2/setter_test.go rename to util/conditions/setter_test.go index dc7b78b0d1fb..d3dd21d7d186 100644 --- a/util/conditions/v1beta2/setter_test.go +++ b/util/conditions/setter_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "testing" diff --git a/util/conditions/v1beta2/sort.go b/util/conditions/sort.go similarity index 99% rename from util/conditions/v1beta2/sort.go rename to util/conditions/sort.go index f8a8bba184e4..2abfb8846b75 100644 --- a/util/conditions/v1beta2/sort.go +++ b/util/conditions/sort.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/util/conditions/v1beta2/sort_test.go b/util/conditions/sort_test.go similarity index 98% rename from util/conditions/v1beta2/sort_test.go rename to util/conditions/sort_test.go index f1f9cf4deb9e..887a922f287f 100644 --- a/util/conditions/v1beta2/sort_test.go +++ b/util/conditions/sort_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "sort" diff --git a/util/conditions/v1beta2/summary.go b/util/conditions/summary.go similarity index 99% rename from util/conditions/v1beta2/summary.go rename to util/conditions/summary.go index beb5fd461a71..78ea1ab0c4e9 100644 --- a/util/conditions/v1beta2/summary.go +++ b/util/conditions/summary.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "github.com/pkg/errors" diff --git a/util/conditions/v1beta2/summary_test.go b/util/conditions/summary_test.go similarity index 99% rename from util/conditions/v1beta2/summary_test.go rename to util/conditions/summary_test.go index ea720dc0b7a8..3472d8feebe7 100644 --- a/util/conditions/v1beta2/summary_test.go +++ b/util/conditions/summary_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package conditions import ( "strings" diff --git a/util/conditions/v1beta2/doc.go b/util/conditions/v1beta2/doc.go deleted file mode 100644 index ba311f8aa18c..000000000000 --- a/util/conditions/v1beta2/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1beta2 implements utils for metav1.Conditions that will be used starting with the v1beta2 API. 
-// -// Please note that in order to make this change while respecting API deprecation rules, it is required -// to go through a phased approach: -// - Phase 1. metav1.Conditions will be added into v1beta1 API types under the Status.V1Beta2.Conditions struct (clusterv1.Conditions will remain in Status.Conditions) -// - Phase 2. when introducing v1beta2 API types: -// - clusterv1.Conditions will be moved from Status.Conditions to Status.Deprecated.V1Beta1.Conditions -// - metav1.Conditions will be moved from Status.V1Beta2.Conditions to Status.Conditions -// -// - Phase 3. when removing v1beta1 API types, Status.Deprecated will be dropped. -// -// Please see the proposal https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. -// -// In order to make this transition easier both for CAPI and other projects using this package, -// utils automatically adapt to handle objects at different stage of the transition. -package v1beta2 diff --git a/util/patch/options.go b/util/patch/options.go index f7fed1c252f8..9dee0e8cd8f5 100644 --- a/util/patch/options.go +++ b/util/patch/options.go @@ -46,7 +46,7 @@ type HelperOptions struct { // Please note that the default value for this option is inferred from the object struct. // This means, that if the correct path cannot be detected, this option has to be specified. One example // is if you pass a wrapper to unstructured. - // The override for this option is considered only if the object implements the v1beta2conditions.Setter interface. + // The override for this option is considered only if the object implements the conditions.Setter interface. Metav1ConditionsFieldPath []string // Clusterv1ConditionsFieldPath allows to override the path for the field hosting clusterv1.Conditions. @@ -99,7 +99,7 @@ func (w WithOwnedV1Beta2Conditions) ApplyToHelper(in *HelperOptions) { // Metav1ConditionsFieldPath allows to override the path for the field hosting []metav1.Condition. // Please note that the default value for this option is inferred from the object struct. -// The override for this option is considered only if the object implements the v1beta2conditions.Setter interface. +// The override for this option is considered only if the object implements the conditions.Setter interface. type Metav1ConditionsFieldPath []string // ApplyToHelper applies this configuration to the given HelperOptions. diff --git a/util/patch/patch.go b/util/patch/patch.go index d157704ce2ad..cecd9bd90570 100644 --- a/util/patch/patch.go +++ b/util/patch/patch.go @@ -35,8 +35,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ) // Helper is a utility for ensuring the proper patching of objects. @@ -56,7 +56,7 @@ type Helper struct { // obj. After changing obj use Helper.Patch to persist your changes. // // Please note that patch helper implements a custom handling for objects implementing -// the condition.Setter interface or the v1beta2conditions.Setter interface. +// the condition.Setter interface or the conditions.Setter interface. // // It is also possible to implement wrappers for object not implementing those interfaces; // in case those objects have custom conditions types the wrapper should take care of conversions. 
@@ -127,7 +127,7 @@ func (h *Helper) Patch(ctx context.Context, obj client.Object, opts ...Option) e if _, canInterfaceConditions := obj.(v1beta1conditions.Setter); !canInterfaceConditions { h.clusterv1ConditionsFieldPath = nil } - if _, canInterfaceV1Beta2Conditions := obj.(v1beta2conditions.Setter); !canInterfaceV1Beta2Conditions { + if _, canInterfaceV1Beta2Conditions := obj.(conditions.Setter); !canInterfaceV1Beta2Conditions { h.metav1ConditionsFieldPath = nil } @@ -272,16 +272,16 @@ func (h *Helper) patchStatusConditions(ctx context.Context, obj client.Object, f // // NOTE: The checks and error below are done so that we don't panic if any of the objects don't satisfy the // interface any longer, although this shouldn't happen because we already check when creating the patcher. - before, ok := h.beforeObject.(v1beta2conditions.Getter) + before, ok := h.beforeObject.(conditions.Getter) if !ok { - return errors.Errorf("%s %s doesn't satisfy v1beta2conditions.Getter, cannot patch", h.gvk.Kind, klog.KObj(h.beforeObject)) + return errors.Errorf("%s %s doesn't satisfy conditions.Getter, cannot patch", h.gvk.Kind, klog.KObj(h.beforeObject)) } - after, ok := obj.(v1beta2conditions.Getter) + after, ok := obj.(conditions.Getter) if !ok { - return errors.Errorf("%s %s doesn't satisfy v1beta2conditions.Getter, cannot compute patch", h.gvk.Kind, klog.KObj(obj)) + return errors.Errorf("%s %s doesn't satisfy conditions.Getter, cannot compute patch", h.gvk.Kind, klog.KObj(obj)) } - diff, err := v1beta2conditions.NewPatch( + diff, err := conditions.NewPatch( before, after, ) @@ -291,12 +291,12 @@ func (h *Helper) patchStatusConditions(ctx context.Context, obj client.Object, f if !diff.IsZero() { metav1ApplyPatch = func(latest client.Object) error { - latestSetter, ok := latest.(v1beta2conditions.Setter) + latestSetter, ok := latest.(conditions.Setter) if !ok { return errors.Errorf("%s %s doesn't satisfy conditions.Setter, cannot apply patch", h.gvk.Kind, klog.KObj(latest)) } - return diff.Apply(latestSetter, v1beta2conditions.ForceOverwrite(forceOverwrite), v1beta2conditions.OwnedConditionTypes(ownedV1beta2Conditions)) + return diff.Apply(latestSetter, conditions.ForceOverwrite(forceOverwrite), conditions.OwnedConditionTypes(ownedV1beta2Conditions)) } } } diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index 94257ea55d60..6fa2700fa6c5 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -32,8 +32,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -1428,7 +1428,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1439,7 +1439,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking metav1.conditions 
Available=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1465,7 +1465,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { } return objAfter.Status.V1Beta2.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.V1Beta2.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.V1Beta2.Conditions)) }) t.Run("should mark it ready when passing Clusterv1ConditionsFieldPath and Metav1ConditionsFieldPath", func(t *testing.T) { @@ -1493,7 +1493,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, Clusterv1ConditionsFieldPath{"status", "conditions"}, Metav1ConditionsFieldPath{"status", "v1beta2", "conditions"})).To(Succeed()) @@ -1512,7 +1512,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return nil } return objAfter.Status.V1Beta2.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.V1Beta2.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.V1Beta2.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -1537,7 +1537,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking clusterv1.conditions and metav1.conditions Test=False") v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1549,7 +1549,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1581,22 +1581,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testV1Beta2ConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testV1Beta2ConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testV1Beta2ConditionCopy := conditions.Get(objCopy, "Test") + testV1Beta2ConditionAfter := conditions.Get(objAfter, "Test") if testV1Beta2ConditionCopy == nil || testV1Beta2ConditionAfter == nil { return false } - ok, 
err = v1beta2conditions.MatchCondition(*testV1Beta2ConditionCopy).Match(*testV1Beta2ConditionAfter) + ok, err = conditions.MatchCondition(*testV1Beta2ConditionCopy).Match(*testV1Beta2ConditionAfter) if err != nil || !ok { return false } - readyV1Beta2Before := v1beta2conditions.Get(obj, "Ready") - readyV1Beta2After := v1beta2conditions.Get(objAfter, "Ready") + readyV1Beta2Before := conditions.Get(obj, "Ready") + readyV1Beta2After := conditions.Get(objAfter, "Ready") if readyV1Beta2Before == nil || readyV1Beta2After == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) + ok, err = conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) if err != nil || !ok { return false } @@ -1627,7 +1627,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking clusterv1.conditions and metav1.conditions Test=False") v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1641,7 +1641,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { obj.Spec.Foo = "foo" obj.Status.Bar = "bat" v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1674,22 +1674,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testV1Beta2ConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testV1Beta2ConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testV1Beta2ConditionCopy := conditions.Get(objCopy, "Test") + testV1Beta2ConditionAfter := conditions.Get(objAfter, "Test") if testV1Beta2ConditionCopy == nil || testV1Beta2ConditionAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*testV1Beta2ConditionCopy).Match(*testV1Beta2ConditionAfter) + ok, err = conditions.MatchCondition(*testV1Beta2ConditionCopy).Match(*testV1Beta2ConditionAfter) if err != nil || !ok { return false } - readyV1Beta2Before := v1beta2conditions.Get(obj, "Ready") - readyV1Beta2After := v1beta2conditions.Get(objAfter, "Ready") + readyV1Beta2Before := conditions.Get(obj, "Ready") + readyV1Beta2After := conditions.Get(objAfter, "Ready") if readyV1Beta2Before == nil || readyV1Beta2After == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) + ok, err = conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) if err != nil || !ok { return false } @@ -1767,7 +1767,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: 
"Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1778,7 +1778,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -1815,7 +1815,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1827,7 +1827,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking Ready clusterv1.condition and metav1.conditions True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed()) @@ -1849,12 +1849,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyV1Beta2Before := v1beta2conditions.Get(obj, "Ready") - readyV1Beta2After := v1beta2conditions.Get(objAfter, "Ready") + readyV1Beta2Before := conditions.Get(obj, "Ready") + readyV1Beta2After := conditions.Get(objAfter, "Ready") if readyV1Beta2Before == nil || readyV1Beta2After == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) + ok, err = conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) if err != nil || !ok { return false } @@ -1885,7 +1885,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1897,7 +1897,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking Ready clusterv1.condition and metav1.conditions True") 
v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) @@ -1919,12 +1919,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyV1Beta2Before := v1beta2conditions.Get(obj, "Ready") - readyV1Beta2After := v1beta2conditions.Get(objAfter, "Ready") + readyV1Beta2Before := conditions.Get(obj, "Ready") + readyV1Beta2After := conditions.Get(objAfter, "Ready") if readyV1Beta2Before == nil || readyV1Beta2After == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) + ok, err = conditions.MatchCondition(*readyV1Beta2Before).Match(*readyV1Beta2After) if err != nil || !ok { return false } @@ -1968,7 +1968,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking condition and back compatibility condition Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1979,7 +1979,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Available=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2005,7 +2005,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should mark it ready when passing Clusterv1ConditionsFieldPath and Metav1ConditionsFieldPath", func(t *testing.T) { @@ -2033,7 +2033,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking condition and back compatibility condition Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, Clusterv1ConditionsFieldPath{"status", "deprecated", "v1beta1", "conditions"}, Metav1ConditionsFieldPath{"status", "conditions"})).To(Succeed()) @@ -2052,7 +2052,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return nil } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -2077,7 
+2077,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking condition and back compatibility condition Test=False") v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2089,7 +2089,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking condition and back compatibility condition Ready=True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2121,22 +2121,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testConditionCopy := conditions.Get(objCopy, "Test") + testConditionAfter := conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err = conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2167,7 +2167,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking condition and back compatibility condition Test=False") v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Test"), "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2181,7 +2181,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { obj.Spec.Foo = "foo" obj.Status.Bar = "bat" v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, 
obj)).To(Succeed()) @@ -2214,22 +2214,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testConditionCopy := conditions.Get(objCopy, "Test") + testConditionAfter := conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err = conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2307,7 +2307,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2318,7 +2318,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -2330,7 +2330,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return nil } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(objCopy.Status.Conditions)) + }, timeout).Should(conditions.MatchConditions(objCopy.Status.Conditions)) }) t.Run("should not return an error if there is an unresolvable conflict but the conditions is owned by the controller", func(t *testing.T) { @@ -2355,7 +2355,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking a Ready condition and back compatibility condition to be false") v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2367,7 +2367,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking Ready condition and back compatibility condition True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, 
Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed()) @@ -2389,12 +2389,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2425,7 +2425,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking a Ready condition and back compatibility condition to be false") v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2437,7 +2437,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Marking Ready condition and back compatibility condition True") v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) @@ -2459,12 +2459,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2507,7 +2507,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2518,7 +2518,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Available=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + 
conditions.Set(obj, metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2537,7 +2537,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should mark it ready when passing Metav1ConditionsFieldPath", func(t *testing.T) { @@ -2564,7 +2564,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, Metav1ConditionsFieldPath{"status", "conditions"})).To(Succeed()) @@ -2576,7 +2576,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return nil } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(obj.Status.Conditions)) + }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -2600,7 +2600,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking condition Test=False") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2611,7 +2611,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2623,22 +2623,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testConditionCopy := conditions.Get(objCopy, "Test") + testConditionAfter := conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := v1beta2conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = 
conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2668,7 +2668,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking condition Test=False") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Test", Status: metav1.ConditionFalse, Reason: "reason", Message: "message", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2681,7 +2681,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and marking condition Ready=True") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -2694,22 +2694,22 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - testConditionCopy := v1beta2conditions.Get(objCopy, "Test") - testConditionAfter := v1beta2conditions.Get(objAfter, "Test") + testConditionCopy := conditions.Get(objCopy, "Test") + testConditionAfter := conditions.Get(objAfter, "Test") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := v1beta2conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2740,7 +2740,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2751,7 +2751,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition Ready=True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -2763,7 +2763,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return nil } return objAfter.Status.Conditions - }, timeout).Should(v1beta2conditions.MatchConditions(objCopy.Status.Conditions)) 
+ }, timeout).Should(conditions.MatchConditions(objCopy.Status.Conditions)) }) t.Run("should not return an error if there is an unresolvable conflict but the conditions is owned by the controller", func(t *testing.T) { @@ -2787,7 +2787,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2798,7 +2798,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed()) @@ -2810,12 +2810,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err := v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err := conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -2845,7 +2845,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta2conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2856,7 +2856,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition True") - v1beta2conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) @@ -2868,12 +2868,12 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta2conditions.Get(obj, "Ready") - readyAfter := v1beta2conditions.Get(objAfter, "Ready") + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err := v1beta2conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err := conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } diff --git a/util/paused/paused.go 
b/util/paused/paused.go index b16de39e2ba5..9cafa6d3f81e 100644 --- a/util/paused/paused.go +++ b/util/paused/paused.go @@ -30,19 +30,19 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/annotations" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) // ConditionSetter combines the client.Object and Setter interface. type ConditionSetter interface { - v1beta2conditions.Setter + conditions.Setter client.Object } // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused. func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, requeue bool, err error) { - oldCondition := v1beta2conditions.Get(obj, clusterv1.PausedV1Beta2Condition) + oldCondition := conditions.Get(obj, clusterv1.PausedV1Beta2Condition) newCondition := pausedCondition(c.Scheme(), cluster, obj, clusterv1.PausedV1Beta2Condition) isPaused = newCondition.Status == metav1.ConditionTrue @@ -61,14 +61,14 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste if oldCondition != nil { // Return early if the paused condition did not change at all. - if v1beta2conditions.HasSameState(oldCondition, &newCondition) { + if conditions.HasSameState(oldCondition, &newCondition) { return isPaused, false, nil } // Set condition and return early if only observed generation changed and obj is not paused. // In this case we want to avoid the additional reconcile that we would get by requeueing. - if v1beta2conditions.HasSameStateExceptObservedGeneration(oldCondition, &newCondition) && !isPaused { - v1beta2conditions.Set(obj, newCondition) + if conditions.HasSameStateExceptObservedGeneration(oldCondition, &newCondition) && !isPaused { + conditions.Set(obj, newCondition) return isPaused, false, nil } } @@ -78,7 +78,7 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste return isPaused, false, err } - v1beta2conditions.Set(obj, newCondition) + conditions.Set(obj, newCondition) if err := patchHelper.Patch(ctx, obj, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go index 263e1ff27b56..08fee059da6f 100644 --- a/util/paused/paused_test.go +++ b/util/paused/paused_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -135,7 +135,7 @@ func TestEnsurePausedCondition(t *testing.T) { } func assertCondition(g Gomega, object ConditionSetter, wantIsPaused bool) { - condition := v1beta2conditions.Get(object, clusterv1.PausedV1Beta2Condition) + condition := conditions.Get(object, clusterv1.PausedV1Beta2Condition) g.Expect(condition.ObservedGeneration).To(Equal(object.GetGeneration())) if wantIsPaused { g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) From 6b71d7934031fe30493a2f8097463335b64abb69 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 10 Apr 2025 21:10:24 +0200 Subject: [PATCH 3/5] Rename methods for conditions Getter and Setter --- .../v1beta2/clusterresourceset_types.go | 16 +++---- api/v1beta2/cluster_types.go | 16 +++---- api/v1beta2/clusterclass_types.go | 16 +++---- 
api/v1beta2/machine_types.go | 16 +++---- api/v1beta2/machinedeployment_types.go | 16 +++---- api/v1beta2/machinehealthcheck_types.go | 16 +++---- api/v1beta2/machineset_types.go | 16 +++---- .../api/v1beta2/kubeadmconfig_types.go | 16 +++---- cmd/clusterctl/client/tree/node_object.go | 16 +++---- cmd/clusterctl/client/tree/util.go | 4 +- .../v1beta2/kubeadm_control_plane_types.go | 16 +++---- .../internal/controllers/controller_test.go | 4 +- .../internal/controllers/helpers_test.go | 2 +- .../workload_cluster_conditions_test.go | 8 ++-- exp/api/v1beta2/machinepool_types.go | 16 +++---- .../machinepool_controller_test.go | 8 ++-- exp/ipam/api/v1beta1/ipaddressclaim_types.go | 16 +++---- exp/ipam/api/v1beta2/ipaddressclaim_types.go | 16 +++---- .../api/v1alpha1/extensionconfig_types.go | 16 +++---- .../extensionconfig_controller_test.go | 16 +++---- .../internal/controllers/warmup_test.go | 4 +- .../cluster/cluster_controller_status.go | 8 ++-- .../machine/machine_controller_status_test.go | 2 +- .../machine/machine_controller_test.go | 4 +- .../machinehealthcheck_targets_test.go | 10 ++-- .../topology/cluster/reconcile_state.go | 2 +- internal/util/tree/tree_test.go | 4 +- .../docker/api/v1beta1/devcluster_types.go | 16 +++---- .../docker/api/v1beta1/devmachine_types.go | 16 +++---- .../docker/api/v1beta1/dockercluster_types.go | 16 +++---- .../docker/api/v1beta1/dockermachine_types.go | 16 +++---- .../api/v1beta1/dockermachinepool_types.go | 8 ++-- util/conditions/deprecated/v1beta1/getter.go | 6 +-- .../deprecated/v1beta1/getter_test.go | 4 +- .../deprecated/v1beta1/merge_test.go | 2 +- util/conditions/deprecated/v1beta1/patch.go | 4 +- .../deprecated/v1beta1/patch_test.go | 4 +- util/conditions/deprecated/v1beta1/setter.go | 14 +++--- .../deprecated/v1beta1/setter_test.go | 4 +- .../deprecated/v1beta1/unstructured.go | 4 +- .../deprecated/v1beta1/unstructured_test.go | 20 ++++---- util/conditions/getter.go | 7 ++- util/conditions/getter_test.go | 4 +- util/conditions/merge_strategies.go | 2 +- util/conditions/patch.go | 8 ++-- util/conditions/patch_test.go | 2 +- util/conditions/setter.go | 13 +++-- util/conditions/setter_test.go | 2 +- util/test/builder/v1beta2_transition.go | 48 +++++++++---------- 49 files changed, 259 insertions(+), 261 deletions(-) diff --git a/api/addons/v1beta2/clusterresourceset_types.go b/api/addons/v1beta2/clusterresourceset_types.go index a337958cbd52..7117ea737165 100644 --- a/api/addons/v1beta2/clusterresourceset_types.go +++ b/api/addons/v1beta2/clusterresourceset_types.go @@ -157,16 +157,16 @@ type ClusterResourceSetV1Beta1DeprecatedStatus struct { // ANCHOR_END: ClusterResourceSetStatus -// GetConditions returns the set of conditions for this object. -func (m *ClusterResourceSet) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *ClusterResourceSet) GetV1Beta1Conditions() clusterv1.Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (m *ClusterResourceSet) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. 
+func (m *ClusterResourceSet) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &ClusterResourceSetDeprecatedStatus{} } @@ -176,13 +176,13 @@ func (m *ClusterResourceSet) SetConditions(conditions clusterv1.Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *ClusterResourceSet) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *ClusterResourceSet) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *ClusterResourceSet) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *ClusterResourceSet) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/api/v1beta2/cluster_types.go b/api/v1beta2/cluster_types.go index 18d3dde6ea6f..69c4deb519f5 100644 --- a/api/v1beta2/cluster_types.go +++ b/api/v1beta2/cluster_types.go @@ -1180,16 +1180,16 @@ func (c *Cluster) GetClassKey() types.NamespacedName { return types.NamespacedName{Namespace: namespace, Name: c.Spec.Topology.Class} } -// GetConditions returns the set of conditions for this object. -func (c *Cluster) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *Cluster) GetV1Beta1Conditions() Conditions { if c.Status.Deprecated == nil || c.Status.Deprecated.V1Beta1 == nil { return nil } return c.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (c *Cluster) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *Cluster) SetV1Beta1Conditions(conditions Conditions) { if c.Status.Deprecated == nil { c.Status.Deprecated = &ClusterDeprecatedStatus{} } @@ -1199,13 +1199,13 @@ func (c *Cluster) SetConditions(conditions Conditions) { c.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *Cluster) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *Cluster) GetConditions() []metav1.Condition { return c.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *Cluster) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *Cluster) SetConditions(conditions []metav1.Condition) { c.Status.Conditions = conditions } diff --git a/api/v1beta2/clusterclass_types.go b/api/v1beta2/clusterclass_types.go index ec3321f57796..822d1a9fb788 100644 --- a/api/v1beta2/clusterclass_types.go +++ b/api/v1beta2/clusterclass_types.go @@ -1230,16 +1230,16 @@ type ClusterClassStatusVariableDefinition struct { Schema VariableSchema `json:"schema"` } -// GetConditions returns the set of conditions for this object. -func (c *ClusterClass) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *ClusterClass) GetV1Beta1Conditions() Conditions { if c.Status.Deprecated == nil || c.Status.Deprecated.V1Beta1 == nil { return nil } return c.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. 
-func (c *ClusterClass) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *ClusterClass) SetV1Beta1Conditions(conditions Conditions) { if c.Status.Deprecated == nil { c.Status.Deprecated = &ClusterClassDeprecatedStatus{} } @@ -1249,13 +1249,13 @@ func (c *ClusterClass) SetConditions(conditions Conditions) { c.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *ClusterClass) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *ClusterClass) GetConditions() []metav1.Condition { return c.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *ClusterClass) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *ClusterClass) SetConditions(conditions []metav1.Condition) { c.Status.Conditions = conditions } diff --git a/api/v1beta2/machine_types.go b/api/v1beta2/machine_types.go index 497440bb57c7..d86765c0b925 100644 --- a/api/v1beta2/machine_types.go +++ b/api/v1beta2/machine_types.go @@ -717,16 +717,16 @@ type Machine struct { Status MachineStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (m *Machine) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *Machine) GetV1Beta1Conditions() Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (m *Machine) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (m *Machine) SetV1Beta1Conditions(conditions Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &MachineDeprecatedStatus{} } @@ -736,13 +736,13 @@ func (m *Machine) SetConditions(conditions Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *Machine) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *Machine) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *Machine) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *Machine) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/api/v1beta2/machinedeployment_types.go b/api/v1beta2/machinedeployment_types.go index 538af62d6c30..4c4ca0266545 100644 --- a/api/v1beta2/machinedeployment_types.go +++ b/api/v1beta2/machinedeployment_types.go @@ -641,16 +641,16 @@ func init() { objectTypes = append(objectTypes, &MachineDeployment{}, &MachineDeploymentList{}) } -// GetConditions returns the set of conditions for the machinedeployment. -func (m *MachineDeployment) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for the machinedeployment. +func (m *MachineDeployment) GetV1Beta1Conditions() Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions updates the set of conditions on the machinedeployment. 
-func (m *MachineDeployment) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions updates the set of conditions on the machinedeployment. +func (m *MachineDeployment) SetV1Beta1Conditions(conditions Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &MachineDeploymentDeprecatedStatus{} } @@ -660,12 +660,12 @@ func (m *MachineDeployment) SetConditions(conditions Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *MachineDeployment) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *MachineDeployment) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *MachineDeployment) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *MachineDeployment) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/api/v1beta2/machinehealthcheck_types.go b/api/v1beta2/machinehealthcheck_types.go index c06537c9ebe6..185fcddc8fde 100644 --- a/api/v1beta2/machinehealthcheck_types.go +++ b/api/v1beta2/machinehealthcheck_types.go @@ -240,16 +240,16 @@ type MachineHealthCheck struct { Status MachineHealthCheckStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (m *MachineHealthCheck) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *MachineHealthCheck) GetV1Beta1Conditions() Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (m *MachineHealthCheck) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (m *MachineHealthCheck) SetV1Beta1Conditions(conditions Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &MachineHealthCheckDeprecatedStatus{} } @@ -259,13 +259,13 @@ func (m *MachineHealthCheck) SetConditions(conditions Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *MachineHealthCheck) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *MachineHealthCheck) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *MachineHealthCheck) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *MachineHealthCheck) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/api/v1beta2/machineset_types.go b/api/v1beta2/machineset_types.go index ede9c790a42e..0d06fc39f7bd 100644 --- a/api/v1beta2/machineset_types.go +++ b/api/v1beta2/machineset_types.go @@ -457,16 +457,16 @@ type MachineSet struct { Status MachineSetStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for the MachineSet. -func (m *MachineSet) GetConditions() Conditions { +// GetV1Beta1Conditions returns the set of conditions for the MachineSet. 
+func (m *MachineSet) GetV1Beta1Conditions() Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions updates the set of conditions on the MachineSet. -func (m *MachineSet) SetConditions(conditions Conditions) { +// SetV1Beta1Conditions updates the set of conditions on the MachineSet. +func (m *MachineSet) SetV1Beta1Conditions(conditions Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &MachineSetDeprecatedStatus{} } @@ -476,13 +476,13 @@ func (m *MachineSet) SetConditions(conditions Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *MachineSet) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *MachineSet) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *MachineSet) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *MachineSet) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/bootstrap/kubeadm/api/v1beta2/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1beta2/kubeadmconfig_types.go index 294770db43d8..80b7625c705e 100644 --- a/bootstrap/kubeadm/api/v1beta2/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1beta2/kubeadmconfig_types.go @@ -539,16 +539,16 @@ type KubeadmConfig struct { Status KubeadmConfigStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (c *KubeadmConfig) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *KubeadmConfig) GetV1Beta1Conditions() clusterv1.Conditions { if c.Status.Deprecated == nil || c.Status.Deprecated.V1Beta1 == nil { return nil } return c.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (c *KubeadmConfig) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *KubeadmConfig) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if c.Status.Deprecated == nil { c.Status.Deprecated = &KubeadmConfigDeprecatedStatus{} } @@ -558,13 +558,13 @@ func (c *KubeadmConfig) SetConditions(conditions clusterv1.Conditions) { c.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *KubeadmConfig) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *KubeadmConfig) GetConditions() []metav1.Condition { return c.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *KubeadmConfig) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *KubeadmConfig) SetConditions(conditions []metav1.Condition) { c.Status.Conditions = conditions } diff --git a/cmd/clusterctl/client/tree/node_object.go b/cmd/clusterctl/client/tree/node_object.go index 91b1f242fd3f..a82b850c9c39 100644 --- a/cmd/clusterctl/client/tree/node_object.go +++ b/cmd/clusterctl/client/tree/node_object.go @@ -45,26 +45,26 @@ type NodeObjectV1Beta2Status struct { Conditions []metav1.Condition } -// GetConditions returns the set of conditions for this object. 
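Note: every API type touched in these hunks follows the same accessor split, so the pattern carries over unchanged to provider types outside this repository. The sketch below is illustrative only; WidgetMachine is a made-up type and the clusterv1 import path is assumed to match the core API used elsewhere in this series.

package widgetapi

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" // assumed core API import path
)

// WidgetMachine is a hypothetical provider object used only to illustrate the accessor pattern.
type WidgetMachine struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Status WidgetMachineStatus `json:"status,omitempty"`
}

// WidgetMachineStatus keeps the new metav1.Condition list at the top level and parks the
// old clusterv1.Conditions under a Deprecated.V1Beta1 group, mirroring the types above.
type WidgetMachineStatus struct {
	Conditions []metav1.Condition             `json:"conditions,omitempty"`
	Deprecated *WidgetMachineDeprecatedStatus `json:"deprecated,omitempty"`
}

type WidgetMachineDeprecatedStatus struct {
	V1Beta1 *WidgetMachineV1Beta1DeprecatedStatus `json:"v1beta1,omitempty"`
}

type WidgetMachineV1Beta1DeprecatedStatus struct {
	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

// GetV1Beta1Conditions returns the deprecated conditions, tolerating nil intermediate structs.
func (w *WidgetMachine) GetV1Beta1Conditions() clusterv1.Conditions {
	if w.Status.Deprecated == nil || w.Status.Deprecated.V1Beta1 == nil {
		return nil
	}
	return w.Status.Deprecated.V1Beta1.Conditions
}

// SetV1Beta1Conditions stores the deprecated conditions, allocating the nested structs on demand.
func (w *WidgetMachine) SetV1Beta1Conditions(conditions clusterv1.Conditions) {
	if w.Status.Deprecated == nil {
		w.Status.Deprecated = &WidgetMachineDeprecatedStatus{}
	}
	if w.Status.Deprecated.V1Beta1 == nil {
		w.Status.Deprecated.V1Beta1 = &WidgetMachineV1Beta1DeprecatedStatus{}
	}
	w.Status.Deprecated.V1Beta1.Conditions = conditions
}

// GetConditions returns the metav1.Condition based conditions.
func (w *WidgetMachine) GetConditions() []metav1.Condition {
	return w.Status.Conditions
}

// SetConditions sets the metav1.Condition based conditions.
func (w *WidgetMachine) SetConditions(conditions []metav1.Condition) {
	w.Status.Conditions = conditions
}

With both method sets in place, such an object satisfies the relocated v1beta1 condition helpers and the new util/conditions helpers at the same time.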
-func (o *NodeObject) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (o *NodeObject) GetV1Beta1Conditions() clusterv1.Conditions { return o.Status.Conditions } -// SetConditions sets the conditions on this object. -func (o *NodeObject) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (o *NodeObject) SetV1Beta1Conditions(conditions clusterv1.Conditions) { o.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (o *NodeObject) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (o *NodeObject) GetConditions() []metav1.Condition { if o.Status.V1Beta2 == nil { return nil } return o.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (o *NodeObject) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (o *NodeObject) SetConditions(conditions []metav1.Condition) { if o.Status.V1Beta2 == nil && conditions != nil { o.Status.V1Beta2 = &NodeObjectV1Beta2Status{} } diff --git a/cmd/clusterctl/client/tree/util.go b/cmd/clusterctl/client/tree/util.go index 076f7a432c5c..c66f5a8f9fb6 100644 --- a/cmd/clusterctl/client/tree/util.go +++ b/cmd/clusterctl/client/tree/util.go @@ -90,7 +90,7 @@ func GetReadyCondition(obj client.Object) *clusterv1.Condition { // GetAllV1Beta2Conditions returns the other conditions (all the conditions except ready) for an object, if defined. func GetAllV1Beta2Conditions(obj client.Object) []metav1.Condition { if getter, ok := obj.(conditions.Getter); ok { - return getter.GetV1Beta2Conditions() + return getter.GetConditions() } if objUnstructured, ok := obj.(*unstructured.Unstructured); ok { @@ -111,7 +111,7 @@ func GetOtherConditions(obj client.Object) []*clusterv1.Condition { return nil } var conditions []*clusterv1.Condition - for _, c := range getter.GetConditions() { + for _, c := range getter.GetV1Beta1Conditions() { if c.Type != clusterv1.ReadyCondition { conditions = append(conditions, &c) } diff --git a/controlplane/kubeadm/api/v1beta2/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1beta2/kubeadm_control_plane_types.go index 5dbc94f67563..7d7a7275f7ef 100644 --- a/controlplane/kubeadm/api/v1beta2/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1beta2/kubeadm_control_plane_types.go @@ -470,16 +470,16 @@ type KubeadmControlPlane struct { Status KubeadmControlPlaneStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (in *KubeadmControlPlane) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (in *KubeadmControlPlane) GetV1Beta1Conditions() clusterv1.Conditions { if in.Status.Deprecated == nil || in.Status.Deprecated.V1Beta1 == nil { return nil } return in.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (in *KubeadmControlPlane) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. 
+func (in *KubeadmControlPlane) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if in.Status.Deprecated == nil { in.Status.Deprecated = &KubeadmControlPlaneDeprecatedStatus{} } @@ -489,13 +489,13 @@ func (in *KubeadmControlPlane) SetConditions(conditions clusterv1.Conditions) { in.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (in *KubeadmControlPlane) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (in *KubeadmControlPlane) GetConditions() []metav1.Condition { return in.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (in *KubeadmControlPlane) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (in *KubeadmControlPlane) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index be0b59f7e058..432df3a4bdac 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -2817,9 +2817,9 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(tc.controlPlane.KCP.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectKCPConditions, conditions.IgnoreLastTransitionTime(true))) + g.Expect(tc.controlPlane.KCP.GetConditions()).To(conditions.MatchConditions(tc.expectKCPConditions, conditions.IgnoreLastTransitionTime(true))) for _, machine := range tc.controlPlane.Machines { - g.Expect(machine.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectMachineConditions, conditions.IgnoreLastTransitionTime(true))) + g.Expect(machine.GetConditions()).To(conditions.MatchConditions(tc.expectMachineConditions, conditions.IgnoreLastTransitionTime(true))) } }) } diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index c3e570ab861e..64ce1e6bc734 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -466,7 +466,7 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { kcp.Spec.MachineTemplate.InfrastructureRef.Name = "something_invalid" _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, nil) g.Expect(err).To(HaveOccurred()) - g.Expect(&kcp.GetConditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ + g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ Type: controlplanev1.MachinesCreatedCondition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityError, diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index e8ca26a902b0..13f60c5e9b07 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -624,8 +624,8 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), 
"unexpected conditions for Machine %s", m.Name) - g.Expect(m.GetV1Beta2Conditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) + g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for Machine %s", m.Name) + g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) } g.Expect(controlPane.EtcdMembersAndMachinesAreMatching).To(Equal(tt.expectedEtcdMembersAndMachinesAreMatching), "EtcdMembersAndMachinesAreMatching does not match") @@ -1108,8 +1108,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { for _, m := range tt.machines { g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - g.Expect(m.GetConditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) - g.Expect(m.GetV1Beta2Conditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true))) + g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) + g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true))) } }) } diff --git a/exp/api/v1beta2/machinepool_types.go b/exp/api/v1beta2/machinepool_types.go index 754da82d1ab7..a22ba14c6f59 100644 --- a/exp/api/v1beta2/machinepool_types.go +++ b/exp/api/v1beta2/machinepool_types.go @@ -302,16 +302,16 @@ type MachinePool struct { Status MachinePoolStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (m *MachinePool) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *MachinePool) GetV1Beta1Conditions() clusterv1.Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (m *MachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (m *MachinePool) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &MachinePoolDeprecatedStatus{} } @@ -321,13 +321,13 @@ func (m *MachinePool) SetConditions(conditions clusterv1.Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *MachinePool) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *MachinePool) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *MachinePool) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. 
+func (m *MachinePool) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go index 54110dc35cf8..a286f9670428 100644 --- a/exp/internal/controllers/machinepool_controller_test.go +++ b/exp/internal/controllers/machinepool_controller_test.go @@ -1047,8 +1047,8 @@ func TestMachinePoolConditions(t *testing.T) { t.Helper() g := NewWithT(t) - g.Expect(getter.GetConditions()).NotTo(BeEmpty()) - for _, c := range getter.GetConditions() { + g.Expect(getter.GetV1Beta1Conditions()).NotTo(BeEmpty()) + for _, c := range getter.GetV1Beta1Conditions() { g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) } }, @@ -1211,9 +1211,9 @@ func TestMachinePoolConditions(t *testing.T) { // adds a condition list to an external object. func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { existingConditions := clusterv1.Conditions{} - if cs := v1beta1conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { + if cs := v1beta1conditions.UnstructuredGetter(u).GetV1Beta1Conditions(); len(cs) != 0 { existingConditions = cs } existingConditions = append(existingConditions, newConditions...) - v1beta1conditions.UnstructuredSetter(u).SetConditions(existingConditions) + v1beta1conditions.UnstructuredSetter(u).SetV1Beta1Conditions(existingConditions) } diff --git a/exp/ipam/api/v1beta1/ipaddressclaim_types.go b/exp/ipam/api/v1beta1/ipaddressclaim_types.go index bfc066a5ae06..01d5f6f96be1 100644 --- a/exp/ipam/api/v1beta1/ipaddressclaim_types.go +++ b/exp/ipam/api/v1beta1/ipaddressclaim_types.go @@ -86,26 +86,26 @@ type IPAddressClaim struct { Status IPAddressClaimStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (m *IPAddressClaim) GetConditions() clusterv1beta1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *IPAddressClaim) GetV1Beta1Conditions() clusterv1beta1.Conditions { return m.Status.Conditions } -// SetConditions sets the conditions on this object. -func (m *IPAddressClaim) SetConditions(conditions clusterv1beta1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (m *IPAddressClaim) SetV1Beta1Conditions(conditions clusterv1beta1.Conditions) { m.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *IPAddressClaim) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *IPAddressClaim) GetConditions() []metav1.Condition { if m.Status.V1Beta2 == nil { return nil } return m.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *IPAddressClaim) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *IPAddressClaim) SetConditions(conditions []metav1.Condition) { if m.Status.V1Beta2 == nil { m.Status.V1Beta2 = &IPAddressClaimV1Beta2Status{} } diff --git a/exp/ipam/api/v1beta2/ipaddressclaim_types.go b/exp/ipam/api/v1beta2/ipaddressclaim_types.go index 4ce3c49ca3c2..324ffe3fed93 100644 --- a/exp/ipam/api/v1beta2/ipaddressclaim_types.go +++ b/exp/ipam/api/v1beta2/ipaddressclaim_types.go @@ -97,16 +97,16 @@ type IPAddressClaim struct { Status IPAddressClaimStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. 
-func (m *IPAddressClaim) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (m *IPAddressClaim) GetV1Beta1Conditions() clusterv1.Conditions { if m.Status.Deprecated == nil || m.Status.Deprecated.V1Beta1 == nil { return nil } return m.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (m *IPAddressClaim) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (m *IPAddressClaim) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if m.Status.Deprecated == nil { m.Status.Deprecated = &IPAddressClaimDeprecatedStatus{} } @@ -116,13 +116,13 @@ func (m *IPAddressClaim) SetConditions(conditions clusterv1.Conditions) { m.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (m *IPAddressClaim) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (m *IPAddressClaim) GetConditions() []metav1.Condition { return m.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (m *IPAddressClaim) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (m *IPAddressClaim) SetConditions(conditions []metav1.Condition) { m.Status.Conditions = conditions } diff --git a/exp/runtime/api/v1alpha1/extensionconfig_types.go b/exp/runtime/api/v1alpha1/extensionconfig_types.go index 2a226fe1c23f..4f71244f00d2 100644 --- a/exp/runtime/api/v1alpha1/extensionconfig_types.go +++ b/exp/runtime/api/v1alpha1/extensionconfig_types.go @@ -223,26 +223,26 @@ type ExtensionConfig struct { Status ExtensionConfigStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (e *ExtensionConfig) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (e *ExtensionConfig) GetV1Beta1Conditions() clusterv1.Conditions { return e.Status.Conditions } -// SetConditions sets the conditions on this object. -func (e *ExtensionConfig) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (e *ExtensionConfig) SetV1Beta1Conditions(conditions clusterv1.Conditions) { e.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (e *ExtensionConfig) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (e *ExtensionConfig) GetConditions() []metav1.Condition { if e.Status.V1Beta2 == nil { return nil } return e.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (e *ExtensionConfig) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. 
+func (e *ExtensionConfig) SetConditions(conditions []metav1.Condition) { if e.Status.V1Beta2 == nil { e.Status.V1Beta2 = &ExtensionConfigV1Beta2Status{} } diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go index 26801d8c4f76..637e7e9b91d7 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go @@ -134,12 +134,12 @@ func TestExtensionReconciler_Reconcile(t *testing.T) { g.Expect(handlers[1].Name).To(Equal("second.ext1")) g.Expect(handlers[2].Name).To(Equal("third.ext1")) - conditions := config.GetConditions() + conditions := config.GetV1Beta1Conditions() g.Expect(conditions).To(HaveLen(1)) g.Expect(conditions[0].Status).To(Equal(corev1.ConditionTrue)) g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition)) - v1beta2Conditions := config.GetV1Beta2Conditions() + v1beta2Conditions := config.GetConditions() g.Expect(v1beta2Conditions).To(HaveLen(2)) // Second condition is paused. g.Expect(v1beta2Conditions[0].Type).To(Equal(runtimev1.ExtensionConfigDiscoveredV1Beta2Condition)) g.Expect(v1beta2Conditions[0].Status).To(Equal(metav1.ConditionTrue)) @@ -192,12 +192,12 @@ func TestExtensionReconciler_Reconcile(t *testing.T) { g.Expect(handlers).To(HaveLen(2)) g.Expect(handlers[0].Name).To(Equal("first.ext1")) g.Expect(handlers[1].Name).To(Equal("third.ext1")) - conditions := config.GetConditions() + conditions := config.GetV1Beta1Conditions() g.Expect(conditions).To(HaveLen(1)) g.Expect(conditions[0].Status).To(Equal(corev1.ConditionTrue)) g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition)) - v1beta2Conditions := config.GetV1Beta2Conditions() + v1beta2Conditions := config.GetConditions() g.Expect(v1beta2Conditions).To(HaveLen(2)) // Second condition is paused. g.Expect(v1beta2Conditions[0].Type).To(Equal(runtimev1.ExtensionConfigDiscoveredV1Beta2Condition)) g.Expect(v1beta2Conditions[0].Status).To(Equal(metav1.ConditionTrue)) @@ -259,12 +259,12 @@ func TestExtensionReconciler_discoverExtensionConfig(t *testing.T) { // Expect exactly one condition and expect the condition to have type RuntimeExtensionDiscoveredCondition and // Status true. - conditions := discoveredExtensionConfig.GetConditions() + conditions := discoveredExtensionConfig.GetV1Beta1Conditions() g.Expect(conditions).To(HaveLen(1)) g.Expect(conditions[0].Status).To(Equal(corev1.ConditionTrue)) g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition)) - v1beta2Conditions := discoveredExtensionConfig.GetV1Beta2Conditions() + v1beta2Conditions := discoveredExtensionConfig.GetConditions() g.Expect(v1beta2Conditions).To(HaveLen(1)) g.Expect(v1beta2Conditions[0].Type).To(Equal(runtimev1.ExtensionConfigDiscoveredV1Beta2Condition)) g.Expect(v1beta2Conditions[0].Status).To(Equal(metav1.ConditionTrue)) @@ -298,12 +298,12 @@ func TestExtensionReconciler_discoverExtensionConfig(t *testing.T) { // Expect exactly one condition and expect the condition to have type RuntimeExtensionDiscoveredCondition and // Status false. 
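For test code, the renamed getters pair with the condition matchers already used throughout this series. A minimal, self-contained sketch of that assertion style follows; the test, the "Ready" condition type, and the import paths are assumptions for illustration, not code from this patch.

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" // assumed core API import path
	"sigs.k8s.io/cluster-api/util/conditions"
)

// TestMachineReadyCondition is a hypothetical test, not part of this series.
func TestMachineReadyCondition(t *testing.T) {
	g := NewWithT(t)

	m := &clusterv1.Machine{}
	conditions.Set(m, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood"})

	// GetConditions now returns []metav1.Condition; LastTransitionTime is filled in by Set,
	// so the comparison ignores it.
	g.Expect(m.GetConditions()).To(conditions.MatchConditions([]metav1.Condition{
		{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood"},
	}, conditions.IgnoreLastTransitionTime(true)))
}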
- conditions := discoveredExtensionConfig.GetConditions() + conditions := discoveredExtensionConfig.GetV1Beta1Conditions() g.Expect(conditions).To(HaveLen(1)) g.Expect(conditions[0].Status).To(Equal(corev1.ConditionFalse)) g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition)) - v1beta2Conditions := discoveredExtensionConfig.GetV1Beta2Conditions() + v1beta2Conditions := discoveredExtensionConfig.GetConditions() g.Expect(v1beta2Conditions).To(HaveLen(1)) g.Expect(v1beta2Conditions[0].Type).To(Equal(runtimev1.ExtensionConfigDiscoveredV1Beta2Condition)) g.Expect(v1beta2Conditions[0].Status).To(Equal(metav1.ConditionFalse)) diff --git a/exp/runtime/internal/controllers/warmup_test.go b/exp/runtime/internal/controllers/warmup_test.go index 6cf0e22fa2da..2b8c18315686 100644 --- a/exp/runtime/internal/controllers/warmup_test.go +++ b/exp/runtime/internal/controllers/warmup_test.go @@ -95,7 +95,7 @@ func Test_warmupRunnable_Start(t *testing.T) { g.Expect(handlers[1].Name).To(Equal(fmt.Sprintf("second.ext%d", i+1))) g.Expect(handlers[2].Name).To(Equal(fmt.Sprintf("third.ext%d", i+1))) - conditions := config.GetConditions() + conditions := config.GetV1Beta1Conditions() g.Expect(conditions).To(HaveLen(1)) g.Expect(conditions[0].Status).To(Equal(corev1.ConditionTrue)) g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition)) @@ -157,7 +157,7 @@ func Test_warmupRunnable_Start(t *testing.T) { for i, config := range list.Items { handlers := config.Status.Handlers - conditions := config.GetConditions() + conditions := config.GetV1Beta1Conditions() // Expect no handlers and a failed condition for the broken extension. if config.Name == brokenExtension { diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index 350ef8f0b55f..1fd98f4be32e 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -1196,7 +1196,7 @@ func (w aggregationWrapper) DeepCopyObject() runtime.Object { panic("not supported") } -func (w aggregationWrapper) GetV1Beta2Conditions() []metav1.Condition { +func (w aggregationWrapper) GetConditions() []metav1.Condition { switch { case w.cp != nil: if c, err := conditions.UnstructuredGetAll(w.cp); err == nil && c != nil { @@ -1204,11 +1204,11 @@ func (w aggregationWrapper) GetV1Beta2Conditions() []metav1.Condition { } return nil case w.mp != nil: - return w.mp.GetV1Beta2Conditions() + return w.mp.GetConditions() case w.md != nil: - return w.md.GetV1Beta2Conditions() + return w.md.GetConditions() case w.ms != nil: - return w.ms.GetV1Beta2Conditions() + return w.ms.GetConditions() } panic("not supported") } diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 36399a4e4377..f4dcb25b0c24 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -1096,7 +1096,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { g := NewWithT(t) setNodeHealthyAndReadyConditions(ctx, tc.cluster, tc.machine, tc.node, tc.nodeGetErr, tc.lastProbeSuccessTime, 5*time.Minute) - g.Expect(tc.machine.GetV1Beta2Conditions()).To(conditions.MatchConditions(tc.expectConditions, conditions.IgnoreLastTransitionTime(true))) + g.Expect(tc.machine.GetConditions()).To(conditions.MatchConditions(tc.expectConditions, 
conditions.IgnoreLastTransitionTime(true))) }) } } diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index de2cccc3aeb3..118e64f5bd1e 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -3498,11 +3498,11 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { // adds a condition list to an external object. func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { existingConditions := clusterv1.Conditions{} - if cs := v1beta1conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { + if cs := v1beta1conditions.UnstructuredGetter(u).GetV1Beta1Conditions(); len(cs) != 0 { existingConditions = cs } existingConditions = append(existingConditions, newConditions...) - v1beta1conditions.UnstructuredSetter(u).SetConditions(existingConditions) + v1beta1conditions.UnstructuredSetter(u).SetV1Beta1Conditions(existingConditions) } // asserts the conditions set on the Getter object. diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index de63d93a190c..d5437035d033 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -201,11 +201,11 @@ func TestHealthCheckTargets(t *testing.T) { // Ensure the control plane was initialized earlier to prevent it interfering with // NodeStartupTimeout testing. conds := clusterv1.Conditions{} - for _, condition := range cluster.GetConditions() { + for _, condition := range cluster.GetV1Beta1Conditions() { condition.LastTransitionTime = metav1.NewTime(condition.LastTransitionTime.Add(-1 * time.Hour)) conds = append(conds, condition) } - cluster.SetConditions(conds) + cluster.SetV1Beta1Conditions(conds) mhcSelector := map[string]string{"cluster": clusterName, "machine-group": "foo"} @@ -242,7 +242,7 @@ func TestHealthCheckTargets(t *testing.T) { testMachine := newTestMachine("machine1", namespace, clusterName, "node1", mhcSelector) testMachineWithInfraReady := testMachine.DeepCopy() testMachineWithInfraReady.CreationTimestamp = metav1.NewTime(time.Now().Add(-100 * time.Second)) - testMachineWithInfraReady.SetConditions(clusterv1.Conditions{ + testMachineWithInfraReady.SetV1Beta1Conditions(clusterv1.Conditions{ { Type: clusterv1.InfrastructureReadyCondition, Status: corev1.ConditionTrue, @@ -609,12 +609,12 @@ func TestHealthCheckTargets(t *testing.T) { gs.Expect(unhealthy).To(ConsistOf(tc.expectedNeedsRemediation)) gs.Expect(nextCheckTimes).To(WithTransform(roundDurations, ConsistOf(tc.expectedNextCheckTimes))) for i, expectedMachineCondition := range tc.expectedNeedsRemediationCondition { - actualConditions := unhealthy[i].Machine.GetConditions() + actualConditions := unhealthy[i].Machine.GetV1Beta1Conditions() conditionsMatcher := WithTransform(removeLastTransitionTimes, ContainElements(expectedMachineCondition)) gs.Expect(actualConditions).To(conditionsMatcher) } for i, expectedMachineCondition := range tc.expectedNeedsRemediationV1Beta2Condition { - actualConditions := unhealthy[i].Machine.GetV1Beta2Conditions() + actualConditions := unhealthy[i].Machine.GetConditions() conditionsMatcher := WithTransform(removeLastTransitionTimesV1Beta2, ContainElements(expectedMachineCondition)) gs.Expect(actualConditions).To(conditionsMatcher) } diff 
--git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index e2e6989f2b74..308c78ccdb2d 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -220,7 +220,7 @@ func (r *Reconciler) callAfterControlPlaneInitialized(ctx context.Context, s *sc } func isControlPlaneInitialized(cluster *clusterv1.Cluster) bool { - for _, condition := range cluster.GetConditions() { + for _, condition := range cluster.GetV1Beta1Conditions() { if condition.Type == clusterv1.ControlPlaneInitializedCondition { if condition.Status == corev1.ConditionTrue { return true diff --git a/internal/util/tree/tree_test.go b/internal/util/tree/tree_test.go index cf554d9cacea..cda1e5e95ae9 100644 --- a/internal/util/tree/tree_test.go +++ b/internal/util/tree/tree_test.go @@ -581,9 +581,9 @@ func withCondition(c *clusterv1.Condition) func(ctrlclient.Object) { func withV1Beta2Condition(c metav1.Condition) func(ctrlclient.Object) { return func(m ctrlclient.Object) { cluster := m.(*clusterv1.Cluster) - conds := cluster.GetV1Beta2Conditions() + conds := cluster.GetConditions() conds = append(conds, c) - cluster.SetV1Beta2Conditions(conds) + cluster.SetConditions(conds) } } diff --git a/test/infrastructure/docker/api/v1beta1/devcluster_types.go b/test/infrastructure/docker/api/v1beta1/devcluster_types.go index 601a0b84981a..1d02ac68e6d0 100644 --- a/test/infrastructure/docker/api/v1beta1/devcluster_types.go +++ b/test/infrastructure/docker/api/v1beta1/devcluster_types.go @@ -155,26 +155,26 @@ type DevCluster struct { Status DevClusterStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (c *DevCluster) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *DevCluster) GetV1Beta1Conditions() clusterv1.Conditions { return c.Status.Conditions } -// SetConditions sets the conditions on this object. -func (c *DevCluster) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *DevCluster) SetV1Beta1Conditions(conditions clusterv1.Conditions) { c.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *DevCluster) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *DevCluster) GetConditions() []metav1.Condition { if c.Status.V1Beta2 == nil { return nil } return c.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *DevCluster) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *DevCluster) SetConditions(conditions []metav1.Condition) { if c.Status.V1Beta2 == nil { c.Status.V1Beta2 = &DevClusterV1Beta2Status{} } diff --git a/test/infrastructure/docker/api/v1beta1/devmachine_types.go b/test/infrastructure/docker/api/v1beta1/devmachine_types.go index c7648b1237fd..15fc158928e6 100644 --- a/test/infrastructure/docker/api/v1beta1/devmachine_types.go +++ b/test/infrastructure/docker/api/v1beta1/devmachine_types.go @@ -414,26 +414,26 @@ type DevMachine struct { Status DevMachineStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. 
-func (c *DevMachine) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *DevMachine) GetV1Beta1Conditions() clusterv1.Conditions { return c.Status.Conditions } -// SetConditions sets the conditions on this object. -func (c *DevMachine) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *DevMachine) SetV1Beta1Conditions(conditions clusterv1.Conditions) { c.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *DevMachine) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *DevMachine) GetConditions() []metav1.Condition { if c.Status.V1Beta2 == nil { return nil } return c.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *DevMachine) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *DevMachine) SetConditions(conditions []metav1.Condition) { if c.Status.V1Beta2 == nil { c.Status.V1Beta2 = &DevMachineV1Beta2Status{} } diff --git a/test/infrastructure/docker/api/v1beta1/dockercluster_types.go b/test/infrastructure/docker/api/v1beta1/dockercluster_types.go index a4a2981d2d49..96bd2f3b39c9 100644 --- a/test/infrastructure/docker/api/v1beta1/dockercluster_types.go +++ b/test/infrastructure/docker/api/v1beta1/dockercluster_types.go @@ -140,26 +140,26 @@ type DockerCluster struct { Status DockerClusterStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (c *DockerCluster) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (c *DockerCluster) GetV1Beta1Conditions() clusterv1.Conditions { return c.Status.Conditions } -// SetConditions sets the conditions on this object. -func (c *DockerCluster) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *DockerCluster) SetV1Beta1Conditions(conditions clusterv1.Conditions) { c.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *DockerCluster) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *DockerCluster) GetConditions() []metav1.Condition { if c.Status.V1Beta2 == nil { return nil } return c.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *DockerCluster) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *DockerCluster) SetConditions(conditions []metav1.Condition) { if c.Status.V1Beta2 == nil { c.Status.V1Beta2 = &DockerClusterV1Beta2Status{} } diff --git a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go index a0b5b4e40279..cd21f5cf2499 100644 --- a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go +++ b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go @@ -134,26 +134,26 @@ type DockerMachine struct { Status DockerMachineStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (c *DockerMachine) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. 
+func (c *DockerMachine) GetV1Beta1Conditions() clusterv1.Conditions { return c.Status.Conditions } -// SetConditions sets the conditions on this object. -func (c *DockerMachine) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (c *DockerMachine) SetV1Beta1Conditions(conditions clusterv1.Conditions) { c.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (c *DockerMachine) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (c *DockerMachine) GetConditions() []metav1.Condition { if c.Status.V1Beta2 == nil { return nil } return c.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (c *DockerMachine) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (c *DockerMachine) SetConditions(conditions []metav1.Condition) { if c.Status.V1Beta2 == nil { c.Status.V1Beta2 = &DockerMachineV1Beta2Status{} } diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go index e5b66fa03dea..2fa3ee1c28ab 100644 --- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go +++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go @@ -133,13 +133,13 @@ type DockerMachinePool struct { Status DockerMachinePoolStatus `json:"status,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (d *DockerMachinePool) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (d *DockerMachinePool) GetV1Beta1Conditions() clusterv1.Conditions { return d.Status.Conditions } -// SetConditions sets the conditions on this object. -func (d *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (d *DockerMachinePool) SetV1Beta1Conditions(conditions clusterv1.Conditions) { d.Status.Conditions = conditions } diff --git a/util/conditions/deprecated/v1beta1/getter.go b/util/conditions/deprecated/v1beta1/getter.go index 5a1b17bfa31d..e4c15ef140b2 100644 --- a/util/conditions/deprecated/v1beta1/getter.go +++ b/util/conditions/deprecated/v1beta1/getter.go @@ -30,13 +30,13 @@ type Getter interface { client.Object // GetConditions returns the list of conditions for a cluster API object. - GetConditions() clusterv1.Conditions + GetV1Beta1Conditions() clusterv1.Conditions } // Get returns the condition with the given type, if the condition does not exist, // it returns nil. func Get(from Getter, t clusterv1.ConditionType) *clusterv1.Condition { - conditions := from.GetConditions() + conditions := from.GetV1Beta1Conditions() if conditions == nil { return nil } @@ -119,7 +119,7 @@ func GetLastTransitionTime(from Getter, t clusterv1.ConditionType) *metav1.Time // on an object. If the object does not have other conditions, no summary condition is generated. // NOTE: The resulting Ready condition will have positive polarity; the conditions we are starting from might have positive or negative polarity. 
func summary(from Getter, options ...MergeOption) *clusterv1.Condition { - conditions := from.GetConditions() + conditions := from.GetV1Beta1Conditions() mergeOpt := &mergeOptions{} for _, o := range options { diff --git a/util/conditions/deprecated/v1beta1/getter_test.go b/util/conditions/deprecated/v1beta1/getter_test.go index d2c6b65bcc6d..c40c4e55c820 100644 --- a/util/conditions/deprecated/v1beta1/getter_test.go +++ b/util/conditions/deprecated/v1beta1/getter_test.go @@ -49,7 +49,7 @@ func TestGetAndHas(t *testing.T) { g.Expect(Has(cluster, "conditionBaz")).To(BeFalse()) g.Expect(Get(cluster, "conditionBaz")).To(BeNil()) - cluster.SetConditions(conditionList(TrueCondition("conditionBaz"))) + cluster.SetV1Beta1Conditions(conditionList(TrueCondition("conditionBaz"))) g.Expect(Has(cluster, "conditionBaz")).To(BeTrue()) g.Expect(Get(cluster, "conditionBaz")).To(HaveSameStateOf(TrueCondition("conditionBaz"))) @@ -330,7 +330,7 @@ func TestAggregate(t *testing.T) { func getterWithConditions(conditions ...*clusterv1.Condition) Getter { obj := &clusterv1.Cluster{} - obj.SetConditions(conditionList(conditions...)) + obj.SetV1Beta1Conditions(conditionList(conditions...)) return obj } diff --git a/util/conditions/deprecated/v1beta1/merge_test.go b/util/conditions/deprecated/v1beta1/merge_test.go index 926ad5419e4b..5ce7c6bae44e 100644 --- a/util/conditions/deprecated/v1beta1/merge_test.go +++ b/util/conditions/deprecated/v1beta1/merge_test.go @@ -265,7 +265,7 @@ func TestMergeRespectPriority(t *testing.T) { } func conditionsWithSource(obj Setter, conditions ...*clusterv1.Condition) []localizedCondition { - obj.SetConditions(conditionList(conditions...)) + obj.SetV1Beta1Conditions(conditionList(conditions...)) ret := []localizedCondition{} for i := range conditions { diff --git a/util/conditions/deprecated/v1beta1/patch.go b/util/conditions/deprecated/v1beta1/patch.go index dfc39419cdd8..8fd2f6989bc0 100644 --- a/util/conditions/deprecated/v1beta1/patch.go +++ b/util/conditions/deprecated/v1beta1/patch.go @@ -62,7 +62,7 @@ func NewPatch(before Getter, after Getter) (Patch, error) { } // Identify AddCondition and ModifyCondition changes. - targetConditions := after.GetConditions() + targetConditions := after.GetV1Beta1Conditions() for i := range targetConditions { targetCondition := targetConditions[i] currentCondition := Get(before, targetCondition.Type) @@ -77,7 +77,7 @@ func NewPatch(before Getter, after Getter) (Patch, error) { } // Identify RemoveCondition changes. 
- baseConditions := before.GetConditions() + baseConditions := before.GetV1Beta1Conditions() for i := range baseConditions { baseCondition := baseConditions[i] targetCondition := Get(after, baseCondition.Type) diff --git a/util/conditions/deprecated/v1beta1/patch_test.go b/util/conditions/deprecated/v1beta1/patch_test.go index cf8197930cd9..0e5d17aeb403 100644 --- a/util/conditions/deprecated/v1beta1/patch_test.go +++ b/util/conditions/deprecated/v1beta1/patch_test.go @@ -307,7 +307,7 @@ func TestApply(t *testing.T) { } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(tt.latest.GetConditions()).To(haveSameConditionsOf(tt.want)) + g.Expect(tt.latest.GetV1Beta1Conditions()).To(haveSameConditionsOf(tt.want)) }) } } @@ -342,5 +342,5 @@ func TestApplyDoesNotAlterLastTransitionTime(t *testing.T) { err = diff.Apply(latest) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(latest.GetConditions()).To(BeComparableTo(after.GetConditions())) + g.Expect(latest.GetV1Beta1Conditions()).To(BeComparableTo(after.GetV1Beta1Conditions())) } diff --git a/util/conditions/deprecated/v1beta1/setter.go b/util/conditions/deprecated/v1beta1/setter.go index 329b5444c1a4..c5f9042c8e9e 100644 --- a/util/conditions/deprecated/v1beta1/setter.go +++ b/util/conditions/deprecated/v1beta1/setter.go @@ -31,7 +31,7 @@ import ( // use the conditions package for setting conditions. type Setter interface { Getter - SetConditions(clusterv1.Conditions) + SetV1Beta1Conditions(clusterv1.Conditions) } // Set sets the given condition. @@ -45,7 +45,7 @@ func Set(to Setter, condition *clusterv1.Condition) { // Check if the new conditions already exists, and change it only if there is a status // transition (otherwise we should preserve the current last transition time)- - conditions := to.GetConditions() + conditions := to.GetV1Beta1Conditions() exists := false for i := range conditions { existingCondition := conditions[i] @@ -74,7 +74,7 @@ func Set(to Setter, condition *clusterv1.Condition) { return lexicographicLess(&conditions[i], &conditions[j]) }) - to.SetConditions(conditions) + to.SetV1Beta1Conditions(conditions) } // SetWithCustomLastTransitionTime is similar to Set function which sets the given condition but following changes for LastTransitionTime. @@ -89,7 +89,7 @@ func SetWithCustomLastTransitionTime(to Setter, condition *clusterv1.Condition) // Check if the new conditions already exists, and change it only if there is a status // transition (otherwise we should preserve the current last transition time)- - conditions := to.GetConditions() + conditions := to.GetV1Beta1Conditions() exists := false for i := range conditions { existingCondition := conditions[i] @@ -122,7 +122,7 @@ func SetWithCustomLastTransitionTime(to Setter, condition *clusterv1.Condition) return lexicographicLess(&conditions[i], &conditions[j]) }) - to.SetConditions(conditions) + to.SetV1Beta1Conditions(conditions) } // TrueCondition returns a condition with Status=True and the given type. 
@@ -224,14 +224,14 @@ func Delete(to Setter, t clusterv1.ConditionType) { return } - conditions := to.GetConditions() + conditions := to.GetV1Beta1Conditions() newConditions := make(clusterv1.Conditions, 0, len(conditions)) for _, condition := range conditions { if condition.Type != t { newConditions = append(newConditions, condition) } } - to.SetConditions(newConditions) + to.SetV1Beta1Conditions(newConditions) } // lexicographicLess returns true if a condition is less than another in regard to diff --git a/util/conditions/deprecated/v1beta1/setter_test.go b/util/conditions/deprecated/v1beta1/setter_test.go index 2549a6c0f857..2afb1b880fa9 100644 --- a/util/conditions/deprecated/v1beta1/setter_test.go +++ b/util/conditions/deprecated/v1beta1/setter_test.go @@ -145,7 +145,7 @@ func TestSet(t *testing.T) { Set(tt.to, tt.condition) - g.Expect(tt.to.GetConditions()).To(haveSameConditionsOf(tt.want)) + g.Expect(tt.to.GetV1Beta1Conditions()).To(haveSameConditionsOf(tt.want)) }) } } @@ -380,7 +380,7 @@ func TestSetAggregate(t *testing.T) { func setterWithConditions(conditions ...*clusterv1.Condition) Setter { obj := &clusterv1.Cluster{} - obj.SetConditions(conditionList(conditions...)) + obj.SetV1Beta1Conditions(conditionList(conditions...)) return obj } diff --git a/util/conditions/deprecated/v1beta1/unstructured.go b/util/conditions/deprecated/v1beta1/unstructured.go index e51605f578b1..3abfd02fc717 100644 --- a/util/conditions/deprecated/v1beta1/unstructured.go +++ b/util/conditions/deprecated/v1beta1/unstructured.go @@ -50,7 +50,7 @@ type unstructuredWrapper struct { // in both cases the operation returns an empty slice is returned. // - If the object doesn't implement conditions on under status as defined in Cluster API, // JSON-unmarshal matches incoming object keys to the keys; this can lead to conditions values partially set. -func (c *unstructuredWrapper) GetConditions() clusterv1.Conditions { +func (c *unstructuredWrapper) GetV1Beta1Conditions() clusterv1.Conditions { conditions := clusterv1.Conditions{} if err := util.UnstructuredUnmarshalField(c.Unstructured, &conditions, "status", "conditions"); err != nil { return nil @@ -65,7 +65,7 @@ func (c *unstructuredWrapper) GetConditions() clusterv1.Conditions { // - Errors during JSON-unmarshal are ignored and a empty collection list is returned. // - It's not possible to detect if the object has an empty condition list or if it does not implement conditions; // in both cases the operation returns an empty slice is returned. 
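Putting the two helper packages side by side: callers address the deprecated clusterv1 conditions through the relocated package and the metav1 conditions through util/conditions. A rough, hypothetical sketch (import paths assumed, condition type names purely illustrative):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" // assumed core API import path
	"sigs.k8s.io/cluster-api/util/conditions"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

func main() {
	m := &clusterv1.Machine{}

	// Legacy helpers go through the renamed GetV1Beta1Conditions/SetV1Beta1Conditions accessors.
	v1beta1conditions.Set(m, v1beta1conditions.TrueCondition("LegacyReady"))

	// The new helpers go through GetConditions/SetConditions and metav1.Condition.
	conditions.Set(m, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood"})

	fmt.Println(len(m.GetV1Beta1Conditions()), len(m.GetConditions())) // 1 1
}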
-func (c *unstructuredWrapper) SetConditions(conditions clusterv1.Conditions) { +func (c *unstructuredWrapper) SetV1Beta1Conditions(conditions clusterv1.Conditions) { v := make([]interface{}, 0, len(conditions)) for i := range conditions { m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&conditions[i]) diff --git a/util/conditions/deprecated/v1beta1/unstructured_test.go b/util/conditions/deprecated/v1beta1/unstructured_test.go index a8b5531a3c2e..820037d871ad 100644 --- a/util/conditions/deprecated/v1beta1/unstructured_test.go +++ b/util/conditions/deprecated/v1beta1/unstructured_test.go @@ -31,7 +31,7 @@ import ( func TestUnstructuredGetConditions(t *testing.T) { g := NewWithT(t) - // GetConditions should return conditions from an unstructured object + // GetV1Beta1Conditions should return conditions from an unstructured object u := &unstructured.Unstructured{ Object: map[string]interface{}{ "status": map[string]interface{}{ @@ -45,21 +45,21 @@ func TestUnstructuredGetConditions(t *testing.T) { }, } - g.Expect(UnstructuredGetter(u).GetConditions()).To(haveSameConditionsOf(conditionList(true1))) + g.Expect(UnstructuredGetter(u).GetV1Beta1Conditions()).To(haveSameConditionsOf(conditionList(true1))) - // GetConditions should return nil for an unstructured object with empty conditions + // GetV1Beta1Conditions should return nil for an unstructured object with empty conditions u = &unstructured.Unstructured{} - g.Expect(UnstructuredGetter(u).GetConditions()).To(BeNil()) + g.Expect(UnstructuredGetter(u).GetV1Beta1Conditions()).To(BeNil()) - // GetConditions should return nil for an unstructured object without conditions + // GetV1Beta1Conditions should return nil for an unstructured object without conditions e := &corev1.Endpoints{} u = &unstructured.Unstructured{} g.Expect(scheme.Scheme.Convert(e, u, nil)).To(Succeed()) - g.Expect(UnstructuredGetter(u).GetConditions()).To(BeNil()) + g.Expect(UnstructuredGetter(u).GetV1Beta1Conditions()).To(BeNil()) - // GetConditions should return conditions from an unstructured object with a different type of conditions. + // GetV1Beta1Conditions should return conditions from an unstructured object with a different type of conditions. p := &corev1.Pod{Status: corev1.PodStatus{ Conditions: []corev1.PodCondition{ { @@ -75,7 +75,7 @@ func TestUnstructuredGetConditions(t *testing.T) { u = &unstructured.Unstructured{} g.Expect(scheme.Scheme.Convert(p, u, nil)).To(Succeed()) - g.Expect(UnstructuredGetter(u).GetConditions()).To(HaveLen(1)) + g.Expect(UnstructuredGetter(u).GetV1Beta1Conditions()).To(HaveLen(1)) } func TestUnstructuredSetConditions(t *testing.T) { @@ -89,6 +89,6 @@ func TestUnstructuredSetConditions(t *testing.T) { conditions := conditionList(true1, falseInfo1) s := UnstructuredSetter(u) - s.SetConditions(conditions) - g.Expect(s.GetConditions()).To(BeComparableTo(conditions)) + s.SetV1Beta1Conditions(conditions) + g.Expect(s.GetV1Beta1Conditions()).To(BeComparableTo(conditions)) } diff --git a/util/conditions/getter.go b/util/conditions/getter.go index c48a31cdda6b..81689fb94783 100644 --- a/util/conditions/getter.go +++ b/util/conditions/getter.go @@ -35,9 +35,8 @@ const ( // Getter interface defines methods that an API object should implement in order to // use the conditions package for getting conditions. type Getter interface { - // GetV1Beta2Conditions returns the list of conditions for a cluster API object. - // Note: GetV1Beta2Conditions will be renamed to GetConditions in a later stage of the transition to V1Beta2. 
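The unstructured wrappers follow the same renaming, which matters for callers that only see provider objects as unstructured data. A small, hypothetical sketch; the group, kind, and import paths are placeholders:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" // assumed core API import path
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

func main() {
	// A provider object known only as unstructured, e.g. read through a dynamic client.
	u := &unstructured.Unstructured{}
	u.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1beta1")
	u.SetKind("FooMachine")
	u.SetName("foo-0")

	// Write legacy conditions into status.conditions through the wrapper...
	v1beta1conditions.UnstructuredSetter(u).SetV1Beta1Conditions(clusterv1.Conditions{
		*v1beta1conditions.TrueCondition("Ready"),
	})

	// ...and read them back; as documented above, unmarshal problems yield an empty result
	// rather than an error.
	fmt.Println(v1beta1conditions.UnstructuredGetter(u).GetV1Beta1Conditions())
}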
- GetV1Beta2Conditions() []metav1.Condition + // GetConditions returns the list of conditions for a cluster API object. + GetConditions() []metav1.Condition } // Get returns a condition from the object implementing the Getter interface. @@ -52,7 +51,7 @@ func Get(sourceObj Getter, sourceConditionType string) *metav1.Condition { } // Otherwise get the requested condition. - return meta.FindStatusCondition(sourceObj.GetV1Beta2Conditions(), sourceConditionType) + return meta.FindStatusCondition(sourceObj.GetConditions(), sourceConditionType) } // Has returns true if a condition with the given type exists. diff --git a/util/conditions/getter_test.go b/util/conditions/getter_test.go index 4fe3ee873771..001a1437dc2f 100644 --- a/util/conditions/getter_test.go +++ b/util/conditions/getter_test.go @@ -637,10 +637,10 @@ type objectWithValueGetterStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } -func (o objectWithValueGetter) GetV1Beta2Conditions() []metav1.Condition { +func (o objectWithValueGetter) GetConditions() []metav1.Condition { return o.Status.Conditions } -func (o *objectWithValueGetter) SetV1Beta2Conditions(conditions []metav1.Condition) { +func (o *objectWithValueGetter) SetConditions(conditions []metav1.Condition) { o.Status.Conditions = conditions } diff --git a/util/conditions/merge_strategies.go b/util/conditions/merge_strategies.go index 85863eab4cc5..06eb7af9709c 100644 --- a/util/conditions/merge_strategies.go +++ b/util/conditions/merge_strategies.go @@ -598,7 +598,7 @@ func indentIfMultiline(m string) string { // getConditionsWithOwnerInfo return all the conditions from an object each one with the corresponding ConditionOwnerInfo. func getConditionsWithOwnerInfo(obj Getter) []ConditionWithOwnerInfo { ret := make([]ConditionWithOwnerInfo, 0, 10) - conditions := obj.GetV1Beta2Conditions() + conditions := obj.GetConditions() ownerInfo := getConditionOwnerInfo(obj) for _, condition := range conditions { ret = append(ret, ConditionWithOwnerInfo{ diff --git a/util/conditions/patch.go b/util/conditions/patch.go index b7d8b331d3d5..867f8f302a27 100644 --- a/util/conditions/patch.go +++ b/util/conditions/patch.go @@ -59,12 +59,12 @@ func NewPatch(before, after Getter) (Patch, error) { if util.IsNil(before) { return nil, errors.New("error creating patch: before object is nil") } - beforeConditions := before.GetV1Beta2Conditions() + beforeConditions := before.GetConditions() if util.IsNil(after) { return nil, errors.New("error creating patch: after object is nil") } - afterConditions := after.GetV1Beta2Conditions() + afterConditions := after.GetConditions() // Identify AddCondition and ModifyCondition changes. for i := range afterConditions { @@ -135,7 +135,7 @@ func (p Patch) Apply(latest Setter, opts ...PatchApplyOption) error { if util.IsNil(latest) { return errors.New("error patching conditions: latest object is nil") } - latestConditions := latest.GetV1Beta2Conditions() + latestConditions := latest.GetConditions() applyOpt := &PatchApplyOptions{ // By default, sort conditions by the default condition order: available and ready always first, deleting and paused always last, all the other conditions in alphabetical order. 
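The NewPatch/Apply pair shown here keeps the same three-way semantics as its v1beta1 counterpart, only on the renamed accessors. A hypothetical usage sketch, using Machine merely as a convenient implementer of the Getter and Setter interfaces:

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" // assumed core API import path
	"sigs.k8s.io/cluster-api/util/conditions"
)

// replayConditionChanges computes the condition changes made locally (before -> after)
// and replays them onto the most recently fetched copy of the object.
// Hypothetical helper, not part of this series.
func replayConditionChanges(before, after, latest *clusterv1.Machine) error {
	diff, err := conditions.NewPatch(before, after)
	if err != nil {
		return err
	}
	return diff.Apply(latest)
}

Apply also accepts PatchApplyOption values; as the hunk above notes, the default ordering keeps Available and Ready first, Deleting and Paused last, and sorts the rest alphabetically.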
@@ -222,7 +222,7 @@ func (p Patch) Apply(latest Setter, opts ...PatchApplyOption) error { }) } - latest.SetV1Beta2Conditions(latestConditions) + latest.SetConditions(latestConditions) return nil } diff --git a/util/conditions/patch_test.go b/util/conditions/patch_test.go index 1e6897c061a9..98e6c83e41b1 100644 --- a/util/conditions/patch_test.go +++ b/util/conditions/patch_test.go @@ -350,7 +350,7 @@ func TestApply(t *testing.T) { } g.Expect(err).ToNot(HaveOccurred()) - gotConditions := tt.latest.GetV1Beta2Conditions() + gotConditions := tt.latest.GetConditions() g.Expect(gotConditions).To(MatchConditions(tt.want)) }) } diff --git a/util/conditions/setter.go b/util/conditions/setter.go index c26ef8693b7c..fcea28476a73 100644 --- a/util/conditions/setter.go +++ b/util/conditions/setter.go @@ -31,9 +31,8 @@ import ( type Setter interface { Getter - // SetV1Beta2Conditions sets conditions for an API object. - // Note: SetV1Beta2Conditions will be renamed to SetConditions in a later stage of the transition to V1Beta2. - SetV1Beta2Conditions([]metav1.Condition) + // SetConditions sets conditions for an API object. + SetConditions([]metav1.Condition) } // SetOption is some configuration that modifies options for a Set request. @@ -85,7 +84,7 @@ func Set(targetObj Setter, condition metav1.Condition, opts ...SetOption) { condition.ObservedGeneration = objMeta.GetGeneration() } - conditions := targetObj.GetV1Beta2Conditions() + conditions := targetObj.GetConditions() if changed := setStatusCondition(&conditions, condition); !changed { return } @@ -96,7 +95,7 @@ func Set(targetObj Setter, condition metav1.Condition, opts ...SetOption) { }) } - targetObj.SetV1Beta2Conditions(conditions) + targetObj.SetConditions(conditions) } func setStatusCondition(conditions *[]metav1.Condition, condition metav1.Condition) bool { @@ -116,12 +115,12 @@ func Delete(to Setter, conditionType string) { return } - conditions := to.GetV1Beta2Conditions() + conditions := to.GetConditions() newConditions := make([]metav1.Condition, 0, len(conditions)-1) for _, condition := range conditions { if condition.Type != conditionType { newConditions = append(newConditions, condition) } } - to.SetV1Beta2Conditions(newConditions) + to.SetConditions(newConditions) } diff --git a/util/conditions/setter_test.go b/util/conditions/setter_test.go index d3dd21d7d186..a9be74137e9e 100644 --- a/util/conditions/setter_test.go +++ b/util/conditions/setter_test.go @@ -231,5 +231,5 @@ func TestDelete(t *testing.T) { Delete(obj, "trueCondition") Delete(obj, "trueCondition") // no-op - g.Expect(obj.GetV1Beta2Conditions()).To(MatchConditions([]metav1.Condition{{Type: "falseCondition", Status: metav1.ConditionFalse}}, IgnoreLastTransitionTime(true))) + g.Expect(obj.GetConditions()).To(MatchConditions([]metav1.Condition{{Type: "falseCondition", Status: metav1.ConditionFalse}}, IgnoreLastTransitionTime(true))) } diff --git a/util/test/builder/v1beta2_transition.go b/util/test/builder/v1beta2_transition.go index b5dd9b5385c6..14b4c400ca46 100644 --- a/util/test/builder/v1beta2_transition.go +++ b/util/test/builder/v1beta2_transition.go @@ -84,13 +84,13 @@ type Phase0ObjStatus struct { Conditions clusterv1.Conditions `json:"conditions,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (o *Phase0Obj) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. 
+func (o *Phase0Obj) GetV1Beta1Conditions() clusterv1.Conditions { return o.Status.Conditions } -// SetConditions sets the conditions on this object. -func (o *Phase0Obj) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (o *Phase0Obj) SetV1Beta1Conditions(conditions clusterv1.Conditions) { o.Status.Conditions = conditions } @@ -142,26 +142,26 @@ type Phase1ObjV1Beta2Status struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (o *Phase1Obj) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (o *Phase1Obj) GetV1Beta1Conditions() clusterv1.Conditions { return o.Status.Conditions } -// SetConditions sets the conditions on this object. -func (o *Phase1Obj) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (o *Phase1Obj) SetV1Beta1Conditions(conditions clusterv1.Conditions) { o.Status.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (o *Phase1Obj) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (o *Phase1Obj) GetConditions() []metav1.Condition { if o.Status.V1Beta2 == nil { return nil } return o.Status.V1Beta2.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (o *Phase1Obj) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (o *Phase1Obj) SetConditions(conditions []metav1.Condition) { if o.Status.V1Beta2 == nil { o.Status.V1Beta2 = &Phase1ObjV1Beta2Status{} } @@ -223,16 +223,16 @@ type Phase2ObjDeprecatedV1Beta1Status struct { Conditions clusterv1.Conditions `json:"conditions,omitempty"` } -// GetConditions returns the set of conditions for this object. -func (o *Phase2Obj) GetConditions() clusterv1.Conditions { +// GetV1Beta1Conditions returns the set of conditions for this object. +func (o *Phase2Obj) GetV1Beta1Conditions() clusterv1.Conditions { if o.Status.Deprecated == nil || o.Status.Deprecated.V1Beta1 == nil { return nil } return o.Status.Deprecated.V1Beta1.Conditions } -// SetConditions sets the conditions on this object. -func (o *Phase2Obj) SetConditions(conditions clusterv1.Conditions) { +// SetV1Beta1Conditions sets the conditions on this object. +func (o *Phase2Obj) SetV1Beta1Conditions(conditions clusterv1.Conditions) { if o.Status.Deprecated == nil { o.Status.Deprecated = &Phase2ObjDeprecatedStatus{V1Beta1: &Phase2ObjDeprecatedV1Beta1Status{}} } @@ -242,13 +242,13 @@ func (o *Phase2Obj) SetConditions(conditions clusterv1.Conditions) { o.Status.Deprecated.V1Beta1.Conditions = conditions } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (o *Phase2Obj) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (o *Phase2Obj) GetConditions() []metav1.Condition { return o.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (o *Phase2Obj) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. 
+func (o *Phase2Obj) SetConditions(conditions []metav1.Condition) { o.Status.Conditions = conditions } @@ -290,12 +290,12 @@ type Phase3ObjStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } -// GetV1Beta2Conditions returns the set of conditions for this object. -func (o *Phase3Obj) GetV1Beta2Conditions() []metav1.Condition { +// GetConditions returns the set of conditions for this object. +func (o *Phase3Obj) GetConditions() []metav1.Condition { return o.Status.Conditions } -// SetV1Beta2Conditions sets conditions for an API object. -func (o *Phase3Obj) SetV1Beta2Conditions(conditions []metav1.Condition) { +// SetConditions sets conditions for an API object. +func (o *Phase3Obj) SetConditions(conditions []metav1.Condition) { o.Status.Conditions = conditions } From 722f6c16fb01adea4472f24fab942a0594e73d48 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 11 Apr 2025 10:49:19 +0200 Subject: [PATCH 4/5] Adjust variables and function names --- .../controllers/kubeadmconfig_controller.go | 4 +- cmd/clusterctl/client/tree/tree.go | 32 +- cmd/clusterctl/client/tree/tree_test.go | 84 ++--- cmd/clusterctl/cmd/describe_cluster.go | 4 +- .../kubeadm/internal/control_plane.go | 4 +- .../internal/controllers/controller.go | 4 +- .../internal/controllers/remediation.go | 18 +- .../internal/workload_cluster_conditions.go | 20 +- .../workload_cluster_conditions_test.go | 316 +++++++++--------- .../controllers/machinepool_controller.go | 4 +- .../controllers/extensionconfig_controller.go | 4 +- .../controllers/cluster/cluster_controller.go | 4 +- .../clusterclass/clusterclass_controller.go | 4 +- .../clusterresourceset_controller.go | 2 +- .../controllers/machine/machine_controller.go | 4 +- .../machinedeployment_controller.go | 4 +- .../machinehealthcheck_controller.go | 16 +- .../machineset/machineset_controller.go | 26 +- .../topology/cluster/cluster_controller.go | 4 +- internal/util/tree/tree.go | 86 ++--- internal/util/tree/tree_test.go | 86 ++--- test/e2e/clusterctl_upgrade.go | 13 +- test/framework/cluster_helpers.go | 4 +- .../dockermachinepool_controller.go | 2 +- .../backends/docker/dockercluster_backend.go | 4 +- .../backends/docker/dockermachine_backend.go | 4 +- .../inmemory/inmemorycluster_backend.go | 2 +- .../inmemory/inmemorymachine_backend.go | 4 +- .../controllers/dockercluster_controller.go | 4 +- .../controllers/dockermachine_controller.go | 4 +- util/patch/options.go | 12 +- util/patch/patch_test.go | 10 +- util/paused/paused.go | 2 +- 33 files changed, 399 insertions(+), 396 deletions(-) diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 6d721e24ac66..681ff2c0e580 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -255,12 +255,12 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.CertificatesAvailableCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, 
bootstrapv1.KubeadmConfigReadyV1Beta2Condition, bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, diff --git a/cmd/clusterctl/client/tree/tree.go b/cmd/clusterctl/client/tree/tree.go index f0eed2699020..6d080a0556eb 100644 --- a/cmd/clusterctl/client/tree/tree.go +++ b/cmd/clusterctl/client/tree/tree.go @@ -183,9 +183,9 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad if s.GetObjectKind().GroupVersionKind().Kind == obj.GetObjectKind().GroupVersionKind().Kind+"Group" { switch od.options.V1Beta2 { case true: - updateV1Beta2GroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) + updateGroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) default: - updateGroupNode(s, sReady, obj, objReady) + updateV1Beta1GroupNode(s, sReady, obj, objReady) } return true, false @@ -201,9 +201,9 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad var groupNode *NodeObject switch od.options.V1Beta2 { case true: - groupNode = createV1Beta2GroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) + groupNode = createGroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) default: - groupNode = createGroupNode(s, sReady, obj, objReady) + groupNode = createV1Beta1GroupNode(s, sReady, obj, objReady) } // By default, grouping objects should be sorted last. @@ -313,13 +313,13 @@ func hasSameReadyStatusSeverityAndReason(a, b *clusterv1.Condition) bool { a.Reason == b.Reason } -func createV1Beta2GroupNode(sibling client.Object, siblingReady *metav1.Condition, obj client.Object, objAvailable, objReady, objUpToDate *metav1.Condition) *NodeObject { +func createGroupNode(sibling client.Object, siblingReady *metav1.Condition, obj client.Object, objAvailable, objReady, objUpToDate *metav1.Condition) *NodeObject { kind := fmt.Sprintf("%sGroup", obj.GetObjectKind().GroupVersionKind().Kind) // Create a new group node and add the GroupObjectAnnotation to signal // this to the presentation layer. // NB. The group nodes gets a unique ID to avoid conflicts. - groupNode := VirtualObject(obj.GetNamespace(), kind, readyStatusReasonUIDV1Beta2(obj)) + groupNode := VirtualObject(obj.GetNamespace(), kind, readyStatusReasonUID(obj)) addAnnotation(groupNode, GroupObjectAnnotation, "True") // Update the list of items included in the group and store it in the GroupItemsAnnotation. @@ -343,7 +343,7 @@ func createV1Beta2GroupNode(sibling client.Object, siblingReady *metav1.Conditio // Update the group's ready condition and counter. 
addAnnotation(groupNode, GroupItemsReadyCounter, "0") if objReady != nil { - objReady.LastTransitionTime = minLastTransitionTimeV1Beta2(objReady, siblingReady) + objReady.LastTransitionTime = minLastTransitionTime(objReady, siblingReady) objReady.Message = "" setReadyV1Beta2Condition(groupNode, objReady) if objReady.Status == metav1.ConditionTrue { @@ -369,7 +369,7 @@ func createV1Beta2GroupNode(sibling client.Object, siblingReady *metav1.Conditio return groupNode } -func readyStatusReasonUIDV1Beta2(obj client.Object) string { +func readyStatusReasonUID(obj client.Object) string { ready := GetReadyV1Beta2Condition(obj) if ready == nil { return fmt.Sprintf("zzz_%s", util.RandomString(6)) @@ -377,7 +377,7 @@ func readyStatusReasonUIDV1Beta2(obj client.Object) string { return fmt.Sprintf("zz_%s_%s_%s", ready.Status, ready.Reason, util.RandomString(6)) } -func minLastTransitionTimeV1Beta2(a, b *metav1.Condition) metav1.Time { +func minLastTransitionTime(a, b *metav1.Condition) metav1.Time { if a == nil && b == nil { return metav1.Time{} } @@ -393,7 +393,7 @@ func minLastTransitionTimeV1Beta2(a, b *metav1.Condition) metav1.Time { return a.LastTransitionTime } -func createGroupNode(sibling client.Object, siblingReady *clusterv1.Condition, obj client.Object, objReady *clusterv1.Condition) *NodeObject { +func createV1Beta1GroupNode(sibling client.Object, siblingReady *clusterv1.Condition, obj client.Object, objReady *clusterv1.Condition) *NodeObject { kind := fmt.Sprintf("%sGroup", obj.GetObjectKind().GroupVersionKind().Kind) // Create a new group node and add the GroupObjectAnnotation to signal @@ -409,7 +409,7 @@ func createGroupNode(sibling client.Object, siblingReady *clusterv1.Condition, o // Update the group's ready condition. if objReady != nil { - objReady.LastTransitionTime = minLastTransitionTime(objReady, siblingReady) + objReady.LastTransitionTime = minLastTransitionTimeV1Beta1(objReady, siblingReady) objReady.Message = "" setReadyV1Beta1Condition(groupNode, objReady) } @@ -424,7 +424,7 @@ func readyStatusSeverityAndReasonUID(obj client.Object) string { return fmt.Sprintf("zz_%s_%s_%s_%s", ready.Status, ready.Severity, ready.Reason, util.RandomString(6)) } -func minLastTransitionTime(a, b *clusterv1.Condition) metav1.Time { +func minLastTransitionTimeV1Beta1(a, b *clusterv1.Condition) metav1.Time { if a == nil && b == nil { return metav1.Time{} } @@ -440,7 +440,7 @@ func minLastTransitionTime(a, b *clusterv1.Condition) metav1.Time { return a.LastTransitionTime } -func updateV1Beta2GroupNode(groupObj client.Object, groupReady *metav1.Condition, obj client.Object, objAvailable, objReady, objUpToDate *metav1.Condition) { +func updateGroupNode(groupObj client.Object, groupReady *metav1.Condition, obj client.Object, objAvailable, objReady, objUpToDate *metav1.Condition) { // Update the list of items included in the group and store it in the GroupItemsAnnotation. items := strings.Split(GetGroupItems(groupObj), GroupItemsSeparator) items = append(items, obj.GetName()) @@ -457,7 +457,7 @@ func updateV1Beta2GroupNode(groupObj client.Object, groupReady *metav1.Condition // Update the group's ready condition and ready counter. 
if groupReady != nil { - groupReady.LastTransitionTime = minLastTransitionTimeV1Beta2(objReady, groupReady) + groupReady.LastTransitionTime = minLastTransitionTime(objReady, groupReady) groupReady.Message = "" setReadyV1Beta2Condition(groupObj, groupReady) } @@ -476,7 +476,7 @@ func updateV1Beta2GroupNode(groupObj client.Object, groupReady *metav1.Condition } } -func updateGroupNode(groupObj client.Object, groupReady *clusterv1.Condition, obj client.Object, objReady *clusterv1.Condition) { +func updateV1Beta1GroupNode(groupObj client.Object, groupReady *clusterv1.Condition, obj client.Object, objReady *clusterv1.Condition) { // Update the list of items included in the group and store it in the GroupItemsAnnotation. items := strings.Split(GetGroupItems(groupObj), GroupItemsSeparator) items = append(items, obj.GetName()) @@ -485,7 +485,7 @@ func updateGroupNode(groupObj client.Object, groupReady *clusterv1.Condition, ob // Update the group's ready condition. if groupReady != nil { - groupReady.LastTransitionTime = minLastTransitionTime(objReady, groupReady) + groupReady.LastTransitionTime = minLastTransitionTimeV1Beta1(objReady, groupReady) groupReady.Message = "" setReadyV1Beta1Condition(groupObj, groupReady) } diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index f5592b37d184..9df99aaf22e4 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -312,7 +312,7 @@ func Test_hasSameReadyStatusSeverityAndReason(t *testing.T) { } } -func Test_minLastTransitionTimeV1Beta2(t *testing.T) { +func Test_minLastTransitionTime(t *testing.T) { now := &metav1.Condition{Type: "now", LastTransitionTime: metav1.Now()} beforeNow := &metav1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Time.Add(-1 * time.Hour)}} type args struct { @@ -369,13 +369,13 @@ func Test_minLastTransitionTimeV1Beta2(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := minLastTransitionTimeV1Beta2(tt.args.a, tt.args.b) + got := minLastTransitionTime(tt.args.a, tt.args.b) g.Expect(got.Time).To(BeTemporally("~", tt.want.Time)) }) } } -func Test_minLastTransitionTime(t *testing.T) { +func Test_minLastTransitionTimeV1Beta1(t *testing.T) { now := &clusterv1.Condition{Type: "now", LastTransitionTime: metav1.Now()} beforeNow := &clusterv1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Time.Add(-1 * time.Hour)}} type args struct { @@ -432,7 +432,7 @@ func Test_minLastTransitionTime(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := minLastTransitionTime(tt.args.a, tt.args.b) + got := minLastTransitionTimeV1Beta1(tt.args.a, tt.args.b) g.Expect(got.Time).To(BeTemporally("~", tt.want.Time)) }) } @@ -501,7 +501,7 @@ func Test_isObjDebug(t *testing.T) { } } -func Test_createV1Beta2GroupNode(t *testing.T) { +func Test_createGroupNode(t *testing.T) { now := metav1.Now() beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)}.Rfc3339Copy() @@ -567,7 +567,7 @@ func Test_createV1Beta2GroupNode(t *testing.T) { } g := NewWithT(t) - got := createV1Beta2GroupNode(sibling, GetReadyV1Beta2Condition(sibling), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) + got := createGroupNode(sibling, GetReadyV1Beta2Condition(sibling), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) // Some values are generated 
randomly, so pick up them. want.SetName(got.GetName()) @@ -582,7 +582,7 @@ func Test_createV1Beta2GroupNode(t *testing.T) { g.Expect(got).To(BeComparableTo(want)) } -func Test_createGroupNode(t *testing.T) { +func Test_createV1Beta1GroupNode(t *testing.T) { now := metav1.Now() beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} @@ -651,7 +651,7 @@ func Test_createGroupNode(t *testing.T) { } g := NewWithT(t) - got := createGroupNode(sibling, GetReadyCondition(sibling), obj, GetReadyCondition(obj)) + got := createV1Beta1GroupNode(sibling, GetReadyCondition(sibling), obj, GetReadyCondition(obj)) // Some values are generated randomly, so pick up them. want.SetName(got.GetName()) @@ -660,7 +660,7 @@ func Test_createGroupNode(t *testing.T) { g.Expect(got).To(BeComparableTo(want)) } -func Test_updateV1Beta2GroupNode(t *testing.T) { +func Test_updateGroupNode(t *testing.T) { now := metav1.Now() beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} @@ -740,12 +740,12 @@ func Test_updateV1Beta2GroupNode(t *testing.T) { } g := NewWithT(t) - updateV1Beta2GroupNode(group, GetReadyV1Beta2Condition(group), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) + updateGroupNode(group, GetReadyV1Beta2Condition(group), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) g.Expect(group).To(BeComparableTo(want)) } -func Test_updateGroupNode(t *testing.T) { +func Test_updateV1Beta1GroupNode(t *testing.T) { now := metav1.Now() beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} @@ -821,7 +821,7 @@ func Test_updateGroupNode(t *testing.T) { } g := NewWithT(t) - updateGroupNode(group, GetReadyCondition(group), obj, GetReadyCondition(obj)) + updateV1Beta1GroupNode(group, GetReadyCondition(group), obj, GetReadyCondition(obj)) g.Expect(group).To(BeComparableTo(want)) } @@ -999,9 +999,9 @@ func Test_Add_setsObjectMetaNameAnnotation(t *testing.T) { } } -func Test_Add_NoEcho_v1Beta2(t *testing.T) { +func Test_Add_NoEcho(t *testing.T) { parent := fakeCluster("parent", - withClusterV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withClusterCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ) type args struct { @@ -1020,7 +1020,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: nil, obj: fakeMachine("my-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, wantNode: true, @@ -1031,7 +1031,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, wantNode: false, @@ -1042,7 +1042,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionFalse}), + withMachineCondition(metav1.Condition{Type: 
clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionFalse}), ), }, wantNode: true, @@ -1053,7 +1053,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { treeOptions: ObjectTreeOptions{Echo: true}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, wantNode: true, @@ -1081,7 +1081,7 @@ func Test_Add_NoEcho_v1Beta2(t *testing.T) { } } -func Test_Add_NoEcho(t *testing.T) { +func Test_Add_NoEcho_V1Beta1(t *testing.T) { parent := fakeCluster("parent", withClusterV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ) @@ -1102,7 +1102,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: nil, obj: fakeMachine("my-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: true, @@ -1113,7 +1113,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: false, @@ -1124,7 +1124,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "", clusterv1.ConditionSeverityInfo, "")), + withMachineV1Beta1Condition(v1beta1conditions.FalseCondition(clusterv1.ReadyCondition, "", clusterv1.ConditionSeverityInfo, "")), ), }, wantNode: true, @@ -1135,7 +1135,7 @@ func Test_Add_NoEcho(t *testing.T) { treeOptions: ObjectTreeOptions{Echo: true}, addOptions: []AddObjectOption{NoEcho(true)}, obj: fakeMachine("my-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNode: true, @@ -1162,7 +1162,7 @@ func Test_Add_NoEcho(t *testing.T) { } } -func Test_Add_Grouping_v1Beta2(t *testing.T) { +func Test_Add_Grouping(t *testing.T) { parent := fakeCluster("parent", withClusterAnnotation(GroupingObjectAnnotation, "True"), ) @@ -1192,11 +1192,11 @@ func Test_Add_Grouping_v1Beta2(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, obj: fakeMachine("second-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1208,14 +1208,14 @@ func Test_Add_Grouping_v1Beta2(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: 
clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), fakeMachine("second-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, obj: fakeMachine("third-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1227,10 +1227,10 @@ func Test_Add_Grouping_v1Beta2(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), fakeMachine("second-machine", - withMachineV1Beta2Condition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), + withMachineCondition(metav1.Condition{Type: clusterv1.ReadyV1Beta2Condition, Status: metav1.ConditionTrue}), ), }, obj: VirtualObject("ns", "NotAMachine", "other-object"), @@ -1274,7 +1274,7 @@ func Test_Add_Grouping_v1Beta2(t *testing.T) { } } -func Test_Add_Grouping(t *testing.T) { +func Test_Add_Grouping_V1Beta1(t *testing.T) { parent := fakeCluster("parent", withClusterAnnotation(GroupingObjectAnnotation, "True"), ) @@ -1304,11 +1304,11 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: fakeMachine("second-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1320,14 +1320,14 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), fakeMachine("second-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: fakeMachine("third-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, wantNodesPrefix: []string{"zz_True"}, @@ -1339,10 +1339,10 @@ func Test_Add_Grouping(t *testing.T) { args: args{ siblings: []client.Object{ fakeMachine("first-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), fakeMachine("second-machine", - withMachineCondition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), + withMachineV1Beta1Condition(v1beta1conditions.TrueCondition(clusterv1.ReadyCondition)), ), }, obj: VirtualObject("ns", "NotAMachine", "other-object"), @@ -1420,7 +1420,7 @@ func withClusterV1Beta1Condition(c 
*clusterv1.Condition) func(*clusterv1.Cluster } } -func withClusterV1Beta2Condition(c metav1.Condition) func(*clusterv1.Cluster) { +func withClusterCondition(c metav1.Condition) func(*clusterv1.Cluster) { return func(m *clusterv1.Cluster) { conditions.Set(m, c) } @@ -1445,13 +1445,13 @@ func fakeMachine(name string, options ...machineOption) *clusterv1.Machine { return m } -func withMachineCondition(c *clusterv1.Condition) func(*clusterv1.Machine) { +func withMachineV1Beta1Condition(c *clusterv1.Condition) func(*clusterv1.Machine) { return func(m *clusterv1.Machine) { v1beta1conditions.Set(m, c) } } -func withMachineV1Beta2Condition(c metav1.Condition) func(*clusterv1.Machine) { +func withMachineCondition(c metav1.Condition) func(*clusterv1.Machine) { return func(m *clusterv1.Machine) { conditions.Set(m, c) } diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go index 511dde707c32..629575e892ad 100644 --- a/cmd/clusterctl/cmd/describe_cluster.go +++ b/cmd/clusterctl/cmd/describe_cluster.go @@ -156,9 +156,9 @@ func runDescribeCluster(cmd *cobra.Command, name string) error { switch dc.v1beta2 { case true: - cmdtree.PrintObjectTreeV1Beta2(tree, os.Stdout) + cmdtree.PrintObjectTree(tree, os.Stdout) default: - cmdtree.PrintObjectTree(tree) + cmdtree.PrintObjectTreeV1Beta1(tree) } return nil diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index ed5ab5d45845..b276f2f945ac 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -330,13 +330,13 @@ func (c *ControlPlane) PatchMachines(ctx context.Context) error { for i := range c.Machines { machine := c.Machines[i] if helper, ok := c.machinesPatchHelpers[machine.Name]; ok { - if err := helper.Patch(ctx, machine, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + if err := helper.Patch(ctx, machine, patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.MachineEtcdMemberHealthyCondition, - }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + }}, patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineUpToDateV1Beta2Condition, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index c24f651edf8b..612143f8291e 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -328,7 +328,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc // Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the // patch at the end of the reconcile loop. 
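
A minimal sketch of how the renamed patch options are used from a controller, assuming a *patch.Helper for the object being reconciled (the function name and the chosen condition types are illustrative); condition types declared as owned are always updated by the patch helper on behalf of the controller.

package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
)

// patchWithOwnedConditions declares both flavors of owned conditions, the
// deprecated clusterv1 ones and the new metav1 ones, during the transition.
func patchWithOwnedConditions(ctx context.Context, patchHelper *patch.Helper, m *clusterv1.Machine) error {
	return patchHelper.Patch(ctx, m,
		// Deprecated clusterv1.Condition types owned by this controller.
		patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{
			clusterv1.ReadyCondition,
		}},
		// New metav1.Condition types owned by this controller.
		patch.WithOwnedConditions{Conditions: []string{
			clusterv1.PausedV1Beta2Condition,
		}},
	)
}
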
options = append(options, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ controlplanev1.MachinesCreatedCondition, clusterv1.ReadyCondition, controlplanev1.MachinesSpecUpToDateCondition, @@ -337,7 +337,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc controlplanev1.AvailableCondition, controlplanev1.CertificatesAvailableCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, controlplanev1.KubeadmControlPlaneAvailableV1Beta2Condition, controlplanev1.KubeadmControlPlaneInitializedV1Beta2Condition, diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 212875f4dd76..d2fc432414ec 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -56,10 +56,10 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C continue } - shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - shouldCleanupV1Beta2 := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + shouldCleanupV1Beta1 := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) + shouldCleanup := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) - if !(shouldCleanup || shouldCleanupV1Beta2) { + if !(shouldCleanupV1Beta1 || shouldCleanup) { continue } @@ -69,17 +69,17 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C continue } - if shouldCleanup { + if shouldCleanupV1Beta1 { v1beta1conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) } - if shouldCleanupV1Beta2 { + if shouldCleanup { conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) } - if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, - }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + }}, patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineOwnerRemediatedV1Beta2Condition, }}); err != nil { errList = append(errList, err) @@ -152,10 +152,10 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C defer func() { // Always attempt to Patch the Machine conditions after each reconcileUnhealthyMachines. 
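
The cleanup logic earlier in this file checks and deletes both condition flavors before patching; a standalone sketch of that pattern, with the import path for the deprecated helpers as introduced by this series and an illustrative function name.

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
)

// cleanupRemediatedConditions mirrors the transition pattern: the Machine carries
// both the deprecated clusterv1.Conditions and the new []metav1.Condition list,
// so a stale MachineOwnerRemediated condition is checked and removed in both.
func cleanupRemediatedConditions(m *clusterv1.Machine) {
	if v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) &&
		v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) {
		v1beta1conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition)
	}
	if conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) &&
		conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) {
		conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition)
	}
}
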
if err := patchHelper.Patch(ctx, machineToBeRemediated, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineOwnerRemediatedV1Beta2Condition, }}, ); err != nil { diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index b562f84962b9..3edaca487ab8 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -213,7 +213,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane } // Aggregate components error from machines at KCP level - aggregateConditionsFromMachinesToKCP(aggregateConditionsFromMachinesToKCPInput{ + aggregateV1Beta1ConditionsFromMachinesToKCP(aggregateV1Beta1ConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, machineConditions: []clusterv1.ConditionType{controlplanev1.MachineEtcdMemberHealthyCondition}, kcpErrors: kcpErrors, @@ -223,7 +223,7 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane note: "etcd member", }) - aggregateV1Beta2ConditionsFromMachinesToKCP(aggregateV1Beta2ConditionsFromMachinesToKCPInput{ + aggregateConditionsFromMachinesToKCP(aggregateConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, machineConditions: []string{controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition}, kcpErrors: kcpErrors, @@ -640,7 +640,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } // Aggregate components error from machines at KCP level. - aggregateConditionsFromMachinesToKCP(aggregateConditionsFromMachinesToKCPInput{ + aggregateV1Beta1ConditionsFromMachinesToKCP(aggregateV1Beta1ConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, machineConditions: allMachinePodConditions, kcpErrors: kcpErrors, @@ -650,7 +650,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * note: "control plane", }) - aggregateV1Beta2ConditionsFromMachinesToKCP(aggregateV1Beta2ConditionsFromMachinesToKCPInput{ + aggregateConditionsFromMachinesToKCP(aggregateConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, machineConditions: allMachinePodV1beta2Conditions, kcpErrors: kcpErrors, @@ -906,7 +906,7 @@ func podCondition(pod corev1.Pod, condition corev1.PodConditionType) corev1.Cond return corev1.ConditionUnknown } -type aggregateConditionsFromMachinesToKCPInput struct { +type aggregateV1Beta1ConditionsFromMachinesToKCPInput struct { controlPlane *ControlPlane machineConditions []clusterv1.ConditionType kcpErrors []string @@ -916,10 +916,10 @@ type aggregateConditionsFromMachinesToKCPInput struct { note string } -// aggregateConditionsFromMachinesToKCP aggregates a group of conditions from machines to KCP. +// aggregateV1Beta1ConditionsFromMachinesToKCP aggregates a group of conditions from machines to KCP. // NOTE: this func follows the same aggregation rules used by conditions.Merge thus giving priority to // errors, then warning, info down to unknown. -func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesToKCPInput) { +func aggregateV1Beta1ConditionsFromMachinesToKCP(input aggregateV1Beta1ConditionsFromMachinesToKCPInput) { // Aggregates machines for condition status. // NB. 
A machine could be assigned to many groups, but only the group with the highest severity will be reported. kcpMachinesWithErrors := sets.Set[string]{} @@ -988,7 +988,7 @@ func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesT // So there will be no condition at KCP level too. } -type aggregateV1Beta2ConditionsFromMachinesToKCPInput struct { +type aggregateConditionsFromMachinesToKCPInput struct { controlPlane *ControlPlane machineConditions []string kcpErrors []string @@ -999,10 +999,10 @@ type aggregateV1Beta2ConditionsFromMachinesToKCPInput struct { note string } -// aggregateV1Beta2ConditionsFromMachinesToKCP aggregates a group of conditions from machines to KCP. +// aggregateConditionsFromMachinesToKCP aggregates a group of conditions from machines to KCP. // Note: the aggregation is computed in way that is similar to how conditions.NewAggregateCondition works, but in this case the // implementation is simpler/less flexible and it surfaces only issues & unknown conditions. -func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2ConditionsFromMachinesToKCPInput) { +func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesToKCPInput) { // Aggregates machines for condition status. // NB. A machine could be assigned to many groups, but only the group with the highest severity will be reported. kcpMachinesWithErrors := sets.Set[string]{} diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index 13f60c5e9b07..b9c8e09a0060 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -172,10 +172,10 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { machines []*clusterv1.Machine injectClient client.Client // This test is injecting a fake client because it is required to create nodes with a controlled Status or to fail with a specific error. injectEtcdClientGenerator etcdClientFor // This test is injecting a fake etcdClientGenerator because it is required to nodes with a controlled Status or to fail with a specific error. 
- expectedKCPCondition *clusterv1.Condition - expectedKCPV1Beta2Condition *metav1.Condition - expectedMachineConditions map[string]clusterv1.Conditions - expectedMachineV1Beta2Conditions map[string][]metav1.Condition + expectedKCPV1Beta1Condition *clusterv1.Condition + expectedKCPCondition *metav1.Condition + expectedMachineV1Beta1Conditions map[string]clusterv1.Conditions + expectedMachineConditions map[string][]metav1.Condition expectedEtcdMembers []string expectedEtcdMembersAndMachinesAreMatching bool }{ @@ -187,19 +187,19 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { injectClient: &fakeClient{ listErr: errors.New("something went wrong"), }, - expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list Nodes which are hosting the etcd members"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the Node which is hosting the etcd member"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterInspectionFailedV1Beta2Reason, Message: "Failed to get Nodes hosting the etcd cluster", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Failed to get the Node hosting the etcd member"}, }, @@ -216,18 +216,18 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: nil, - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: nil, + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": {}, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Waiting for a Node with spec.providerID n1 to exist", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID n1 to exist"}, }, @@ -244,18 +244,18 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: nil, - expectedMachineConditions: map[string]clusterv1.Conditions{ + 
expectedKCPV1Beta1Condition: nil, + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": {}, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, }, @@ -271,8 +271,8 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterNotHealthyV1Beta2Reason, @@ -293,20 +293,20 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { injectEtcdClientGenerator: &fakeEtcdClientGenerator{ forNodesErr: errors.New("something went wrong"), }, - expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnknownReason, "Following Machines are reporting unknown etcd member status: m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnknownReason, "Following Machines are reporting unknown etcd member status: m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to etcd: something went wrong"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Failed to connect to etcd: something went wrong", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Failed to connect to etcd: 
something went wrong"}, }, @@ -332,20 +332,20 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { Errors: []string{"something went wrong"}, }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd endpoint n1 reports errors: %s", "something went wrong"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterNotHealthyV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Etcd endpoint n1 reports errors: something went wrong", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, Message: "Etcd endpoint n1 reports errors: something went wrong"}, }, @@ -370,20 +370,20 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get etcd members"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Failed to get etcd members: something went wrong", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Failed to get etcd members: something went wrong"}, }, @@ -417,20 +417,20 @@ func 
TestUpdateManagedEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", "NOSPACE"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterNotHealthyV1Beta2Reason, Message: "* Machine m1:\n" + " * EtcdMemberHealthy: Etcd reports alarms: NOSPACE", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyV1Beta2Reason, Message: "Etcd reports alarms: NOSPACE"}, }, @@ -482,8 +482,8 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { return nil, errors.Wrapf(kerrors.NewAggregate(errs), "could not establish a connection to etcd members hosted on %s", strings.Join(nodeNames, ",")) }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m2"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting etcd member errors: %s", "m2"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, @@ -491,14 +491,14 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member"), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterNotHealthyV1Beta2Reason, Message: "* Machine m2:\n" + " * EtcdMemberHealthy: Etcd doesn't have an etcd member for Node n2", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: 
controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Reason, Message: ""}, }, @@ -571,8 +571,8 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { return nil, errors.Wrapf(kerrors.NewAggregate(errs), "could not establish a connection to etcd members hosted on %s", strings.Join(nodeNames, ",")) }, }, - expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, @@ -580,12 +580,12 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), }, }, - expectedKCPV1Beta2Condition: &metav1.Condition{ + expectedKCPCondition: &metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Reason, }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Reason, Message: ""}, }, @@ -615,17 +615,17 @@ func TestUpdateManagedEtcdConditions(t *testing.T) { w.updateManagedEtcdConditions(ctx, controlPane) - if tt.expectedKCPCondition != nil { - g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) + if tt.expectedKCPV1Beta1Condition != nil { + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPV1Beta1Condition)) } - if tt.expectedKCPV1Beta2Condition != nil { - g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) + if tt.expectedKCPCondition != nil { + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPCondition, conditions.IgnoreLastTransitionTime(true))) } for _, m := range tt.machines { - g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for Machine %s", m.Name) - g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) + g.Expect(tt.expectedMachineV1Beta1Conditions).To(HaveKey(m.Name)) + g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineV1Beta1Conditions[m.Name]), "unexpected conditions for Machine %s", m.Name) + g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name], conditions.IgnoreLastTransitionTime(true)), "unexpected conditions for Machine %s", m.Name) } g.Expect(controlPane.EtcdMembersAndMachinesAreMatching).To(Equal(tt.expectedEtcdMembersAndMachinesAreMatching), 
"EtcdMembersAndMachinesAreMatching does not match") @@ -643,8 +643,8 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { tests := []struct { name string kcp *controlplanev1.KubeadmControlPlane - expectedKCPCondition *clusterv1.Condition - expectedKCPV1Beta2Condition *metav1.Condition + expectedKCPV1Beta1Condition *clusterv1.Condition + expectedKCPCondition *metav1.Condition }{ { name: "External etcd should set a condition at KCP level for v1beta1, not for v1beta2", @@ -659,8 +659,8 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { }, }, }, - expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), - expectedKCPV1Beta2Condition: nil, + expectedKCPV1Beta1Condition: v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition), + expectedKCPCondition: nil, }, } for _, tt := range tests { @@ -676,11 +676,11 @@ func TestUpdateExternalEtcdConditions(t *testing.T) { } w.updateExternalEtcdConditions(ctx, controlPane) - if tt.expectedKCPCondition != nil { - g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) + if tt.expectedKCPV1Beta1Condition != nil { + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPV1Beta1Condition)) } - if tt.expectedKCPV1Beta2Condition != nil { - g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) + if tt.expectedKCPCondition != nil { + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition)).To(conditions.MatchCondition(*tt.expectedKCPCondition, conditions.IgnoreLastTransitionTime(true))) } }) } @@ -712,10 +712,10 @@ func TestUpdateStaticPodConditions(t *testing.T) { kcp *controlplanev1.KubeadmControlPlane machines []*clusterv1.Machine injectClient client.Client // This test is injecting a fake client because it is required to create nodes with a controlled Status or to fail with a specific error. 
- expectedKCPCondition *clusterv1.Condition - expectedKCPV1Beta2Condition metav1.Condition - expectedMachineV1Beta2Conditions map[string][]metav1.Condition - expectedMachineConditions map[string]clusterv1.Conditions + expectedKCPV1Beta1Condition *clusterv1.Condition + expectedKCPCondition metav1.Condition + expectedMachineConditions map[string][]metav1.Condition + expectedMachineV1Beta1Conditions map[string]clusterv1.Conditions }{ { name: "if list nodes return an error, it should report all the conditions Unknown", @@ -725,8 +725,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { injectClient: &fakeClient{ listErr: errors.New("failed to list Nodes"), }, - expectedKCPCondition: v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: failed to list Nodes"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list Nodes which are hosting control plane components: failed to list Nodes"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), *v1beta1conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), @@ -734,13 +734,13 @@ func TestUpdateStaticPodConditions(t *testing.T) { *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the Node which is hosting this component: failed to list Nodes"), }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsInspectionFailedV1Beta2Reason, Message: "Failed to get Nodes hosting control plane components: failed to list Nodes", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Failed to get the Node hosting the Pod: failed to list Nodes"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Failed to get the Node hosting the Pod: failed to list Nodes"}, @@ -759,17 +759,17 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: nil, - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: nil, + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": {}, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: 
metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason, Message: "", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, @@ -788,18 +788,18 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: nil, - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: nil, + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": {}, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * Control plane components: Waiting for a Node with spec.providerID dummy-provider-id to exist", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, @@ -816,8 +816,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1")}, }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane Node %s does not have a corresponding Machine", "n1"), + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsNotHealthyV1Beta2Reason, @@ -834,8 +834,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { Items: []corev1.Node{*fakeNode("n1", withUnreachableTaint())}, }, }, - expectedKCPCondition: 
v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnknownReason, "Following Machines are reporting unknown control plane status: m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnknownReason, "Following Machines are reporting unknown control plane status: m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), *v1beta1conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), @@ -843,14 +843,14 @@ func TestUpdateStaticPodConditions(t *testing.T) { *v1beta1conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"), }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * Control plane components: Node n1 is unreachable", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node n1 is unreachable"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node n1 is unreachable"}, @@ -867,18 +867,18 @@ func TestUpdateStaticPodConditions(t *testing.T) { injectClient: &fakeClient{ list: &corev1.NodeList{}, }, - expectedKCPCondition: nil, - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: nil, + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": {}, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * Control plane components: Waiting for a Node with spec.providerID n1 to exist", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID n1 to exist"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with 
spec.providerID n1 to exist"}, @@ -895,8 +895,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { injectClient: &fakeClient{ list: &corev1.NodeList{}, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), *v1beta1conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), @@ -904,14 +904,14 @@ func TestUpdateStaticPodConditions(t *testing.T) { *v1beta1conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing Node"), }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + " * Control plane components: Node n1 does not exist", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node n1 does not exist"}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node n1 does not exist"}, @@ -946,8 +946,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { ), }, }, - expectedKCPCondition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following Machines are reporting control plane errors: %s", "m1"), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), *v1beta1conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), @@ -955,7 +955,7 @@ func TestUpdateStaticPodConditions(t *testing.T) { 
*v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsNotHealthyV1Beta2Reason, @@ -964,7 +964,7 @@ func TestUpdateStaticPodConditions(t *testing.T) { " * SchedulerPodHealthy: All the containers have been terminated\n" + " * EtcdPodHealthy: All the containers have been terminated", }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, Message: ""}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, Message: "Waiting to be scheduled"}, @@ -1001,8 +1001,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { ), }, }, - expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), @@ -1010,12 +1010,12 @@ func TestUpdateStaticPodConditions(t *testing.T) { *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason, }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, Message: ""}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, Message: ""}, @@ -1060,8 +1060,8 @@ func TestUpdateStaticPodConditions(t *testing.T) { // no static pod for etcd }, }, - expectedKCPCondition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), - expectedMachineConditions: map[string]clusterv1.Conditions{ + expectedKCPV1Beta1Condition: v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition), + expectedMachineV1Beta1Conditions: map[string]clusterv1.Conditions{ "m1": { *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), 
*v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), @@ -1069,12 +1069,12 @@ func TestUpdateStaticPodConditions(t *testing.T) { // no condition for etcd Pod }, }, - expectedKCPV1Beta2Condition: metav1.Condition{ + expectedKCPCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason, }, - expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + expectedMachineConditions: map[string][]metav1.Condition{ "m1": { {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, Message: ""}, {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, Message: ""}, @@ -1101,15 +1101,15 @@ func TestUpdateStaticPodConditions(t *testing.T) { } w.UpdateStaticPodConditions(ctx, controlPane) - if tt.expectedKCPCondition != nil { - g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPCondition)) + if tt.expectedKCPV1Beta1Condition != nil { + g.Expect(*v1beta1conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(v1beta1conditions.MatchCondition(*tt.expectedKCPV1Beta1Condition)) } - g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition)).To(conditions.MatchCondition(tt.expectedKCPV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) + g.Expect(*conditions.Get(tt.kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition)).To(conditions.MatchCondition(tt.expectedKCPCondition, conditions.IgnoreLastTransitionTime(true))) for _, m := range tt.machines { - g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name)) - g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineConditions[m.Name])) - g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineV1Beta2Conditions[m.Name], conditions.IgnoreLastTransitionTime(true))) + g.Expect(tt.expectedMachineV1Beta1Conditions).To(HaveKey(m.Name)) + g.Expect(m.GetV1Beta1Conditions()).To(v1beta1conditions.MatchConditions(tt.expectedMachineV1Beta1Conditions[m.Name])) + g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name], conditions.IgnoreLastTransitionTime(true))) } }) } @@ -1131,14 +1131,14 @@ func TestUpdateStaticPodCondition(t *testing.T) { name string injectClient client.Client // This test is injecting a fake client because it is required to create pods with a controlled Status or to fail with a specific error. 
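The production code exercised by these tests keeps writing both condition flavors during the transition. A minimal sketch of that dual-write, assuming the moved deprecated package still exposes the pre-move Set/TrueCondition helpers and using the condition types and reasons that appear in the tables above; the function name is hypothetical.

package controllers // hypothetical placement for this sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" // assumed new location of the deprecated helpers
	conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
)

// markControlPlaneComponentsHealthy records the ControlPlaneComponentsHealthy result
// in both condition flavors on a KubeadmControlPlane.
func markControlPlaneComponentsHealthy(kcp *controlplanev1.KubeadmControlPlane) {
	// Deprecated v1beta1 flavor (assumes Set/TrueCondition survived the package move unchanged).
	v1beta1conditions.Set(kcp, v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition))

	// Current metav1.Condition flavor.
	conditions.Set(kcp, metav1.Condition{
		Type:   controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition,
		Status: metav1.ConditionTrue,
		Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Reason,
	})
}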
node *corev1.Node - expectedCondition clusterv1.Condition - expectedV1Beta2Condition metav1.Condition + expectedV1Beta1Condition clusterv1.Condition + expectedCondition metav1.Condition }{ { - name: "if node Ready is unknown, assume pod status is stale", - node: fakeNode(nodeName, withReadyCondition(corev1.ConditionUnknown)), - expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale"), - expectedV1Beta2Condition: metav1.Condition{ + name: "if node Ready is unknown, assume pod status is stale", + node: fakeNode(nodeName, withReadyCondition(corev1.ConditionUnknown)), + expectedV1Beta1Condition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is Unknown, Pod data might be stale"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -1150,9 +1150,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { injectClient: &fakeClient{ getErr: apierrors.NewNotFound(schema.ParseGroupResource("Pod"), component), }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod kube-component-node is missing"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod kube-component-node is missing"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodDoesNotExistV1Beta2Reason, @@ -1164,9 +1164,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { injectClient: &fakeClient{ getErr: errors.New("get failure"), }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Failed to get Pod status"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -1183,9 +1183,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -1203,9 +1203,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers"), - expectedV1Beta2Condition: 
metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -1223,9 +1223,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, ""), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, ""), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -1242,9 +1242,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.TrueCondition(condition), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.TrueCondition(condition), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningV1Beta2Reason, @@ -1265,9 +1265,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting something"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting something"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -1293,9 +1293,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Waiting something"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Waiting something"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -1316,9 +1316,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Something failed"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Something failed"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -1334,9 +1334,9 
@@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningV1Beta2Reason, @@ -1352,9 +1352,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -1370,9 +1370,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedV1Beta2Reason, @@ -1388,9 +1388,9 @@ func TestUpdateStaticPodCondition(t *testing.T) { ), }, }, - node: fakeNode(nodeName), - expectedCondition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status"), - expectedV1Beta2Condition: metav1.Condition{ + node: fakeNode(nodeName), + expectedV1Beta1Condition: *v1beta1conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Pod is reporting Unknown status"), + expectedCondition: metav1.Condition{ Type: v1beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, @@ -1408,8 +1408,8 @@ func TestUpdateStaticPodCondition(t *testing.T) { } w.updateStaticPodCondition(ctx, machine, *tt.node, component, condition, v1beta2Condition) - g.Expect(*v1beta1conditions.Get(machine, condition)).To(v1beta1conditions.MatchCondition(tt.expectedCondition)) - g.Expect(*conditions.Get(machine, v1beta2Condition)).To(conditions.MatchCondition(tt.expectedV1Beta2Condition, conditions.IgnoreLastTransitionTime(true))) + g.Expect(*v1beta1conditions.Get(machine, condition)).To(v1beta1conditions.MatchCondition(tt.expectedV1Beta1Condition)) + g.Expect(*conditions.Get(machine, v1beta2Condition)).To(conditions.MatchCondition(tt.expectedCondition, conditions.IgnoreLastTransitionTime(true))) }) } } @@ -1548,7 +1548,7 @@ func withCondition(condition 
corev1.PodConditionType, status corev1.ConditionSta } } -func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { +func TestAggregateV1Beta1ConditionsFromMachinesToKCP(t *testing.T) { conditionType := controlplanev1.ControlPlaneComponentsHealthyCondition unhealthyReason := "unhealthy reason" unknownReason := "unknown reason" @@ -1609,7 +1609,7 @@ func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - input := aggregateConditionsFromMachinesToKCPInput{ + input := aggregateV1Beta1ConditionsFromMachinesToKCPInput{ controlPlane: &ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines(tt.machines...), @@ -1621,14 +1621,14 @@ func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { unknownReason: unknownReason, note: note, } - aggregateConditionsFromMachinesToKCP(input) + aggregateV1Beta1ConditionsFromMachinesToKCP(input) g.Expect(*v1beta1conditions.Get(input.controlPlane.KCP, conditionType)).To(v1beta1conditions.MatchCondition(tt.expectedCondition)) }) } } -func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) { +func TestAggregateConditionsFromMachinesToKCP(t *testing.T) { conditionType := controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition trueReason := "true reason" unknownReason := "unknown reason" @@ -1730,7 +1730,7 @@ func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - input := aggregateV1Beta2ConditionsFromMachinesToKCPInput{ + input := aggregateConditionsFromMachinesToKCPInput{ controlPlane: &ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines(tt.machines...), @@ -1743,7 +1743,7 @@ func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) { falseReason: falseReason, note: note, } - aggregateV1Beta2ConditionsFromMachinesToKCP(input) + aggregateConditionsFromMachinesToKCP(input) g.Expect(*conditions.Get(input.controlPlane.KCP, conditionType)).To(conditions.MatchCondition(tt.expectedCondition, conditions.IgnoreLastTransitionTime(true))) }) diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index c66719196392..3eb720d14e69 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -205,13 +205,13 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Always attempt to patch the object and status after each reconciliation. 
// Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, clusterv1.BootstrapReadyCondition, clusterv1.InfrastructureReadyCondition, expv1.ReplicasReadyCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, }}, } diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go index c1f5d2b107f4..157b4cdafcf4 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller.go @@ -179,10 +179,10 @@ func patchExtensionConfig(ctx context.Context, client client.Client, original, m } options = append(options, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ runtimev1.RuntimeExtensionDiscoveredCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, runtimev1.ExtensionConfigDiscoveredV1Beta2Condition, }}, diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 22210dd51281..5b4a9686d41a 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -270,12 +270,12 @@ func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clust // Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the // patch at the end of the reconcile loop. 
options = append(options, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, clusterv1.ControlPlaneReadyCondition, clusterv1.InfrastructureReadyCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.ClusterInfrastructureReadyV1Beta2Condition, clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 9493c570e366..17ec3ff931df 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -136,11 +136,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retres ct updateStatus(ctx, s) patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ClusterClassRefVersionsUpToDateCondition, clusterv1.ClusterClassVariablesReconciledCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.ClusterClassRefVersionsUpToDateV1Beta2Condition, clusterv1.ClusterClassVariablesReadyV1Beta2Condition, diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller.go b/internal/controllers/clusterresourceset/clusterresourceset_controller.go index b46bec535743..df5d4373dc71 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller.go @@ -156,7 +156,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re // Always attempt to patch the object and status after each reconciliation. // Patch ObservedGeneration only if the reconciliation completed successfully. patchOpts := []patch.Option{ - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, addonsv1.ResourcesAppliedV1Beta2Condition, }}, diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 7eb5439e31cb..d959fc316a3e 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -309,13 +309,13 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust // Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the // patch at the end of the reconcile loop. 
options = append(options, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, clusterv1.BootstrapReadyCondition, clusterv1.InfrastructureReadyCondition, clusterv1.DrainingSucceededCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.MachineAvailableV1Beta2Condition, clusterv1.MachineReadyV1Beta2Condition, diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 31a84bfdcc1a..1cd34370c646 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -210,11 +210,11 @@ func patchMachineDeployment(ctx context.Context, patchHelper *patch.Helper, md * // Patch the object, ignoring conflicts on the conditions owned by this controller. options = append(options, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, clusterv1.MachineDeploymentAvailableCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.MachineDeploymentAvailableV1Beta2Condition, clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 29d8b3a79a49..750a0c4e0df0 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -167,10 +167,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re // Always attempt to patch the object and status after each reconciliation. // Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.RemediationAllowedCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.MachineHealthCheckRemediationAllowedV1Beta2Condition, }}, @@ -303,11 +303,11 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster errList := []error{} for _, t := range append(healthy, unhealthy...) { patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineHealthCheckSucceededCondition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineHealthCheckSucceededV1Beta2Condition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. 
// (Same for ExternallyRemediated condition) @@ -394,11 +394,11 @@ func (r *Reconciler) patchHealthyTargets(ctx context.Context, logger logr.Logger } patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineHealthCheckSucceededCondition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineHealthCheckSucceededV1Beta2Condition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. // (Same for ExternallyRemediated condition) @@ -514,11 +514,11 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg } patchOpts := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineHealthCheckSucceededCondition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineHealthCheckSucceededV1Beta2Condition, // Note: intentionally leaving out OwnerRemediated condition which is mostly controlled by the owner. // (Same for ExternallyRemediated condition) diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 202451ccca4c..c4dd9555699a 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -316,13 +316,13 @@ func patchMachineSet(ctx context.Context, patchHelper *patch.Helper, machineSet // Patch the object, ignoring conflicts on the conditions owned by this controller. 
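A minimal sketch of the renamed patch option pair used throughout the controllers above: the deprecated clusterv1.ConditionType list now goes to WithOwnedV1beta1Conditions, while WithOwnedConditions takes the string-typed condition names. The wrapper function and the specific conditions listed are illustrative only.

package controllers // hypothetical placement for this sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
)

// patchWithOwnedConditions patches obj while claiming ownership of one condition
// of each flavor, mirroring the option renames applied in the controllers above.
func patchWithOwnedConditions(ctx context.Context, patchHelper *patch.Helper, obj client.Object) error {
	return patchHelper.Patch(ctx, obj,
		// Deprecated clusterv1.ConditionType conditions owned by this controller.
		patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{
			clusterv1.ReadyCondition,
		}},
		// New string-typed conditions owned by this controller.
		patch.WithOwnedConditions{Conditions: []string{
			clusterv1.PausedV1Beta2Condition,
		}},
	)
}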
options := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, clusterv1.MachinesCreatedCondition, clusterv1.ResizedCondition, clusterv1.MachinesReadyCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, clusterv1.MachineSetScalingUpV1Beta2Condition, clusterv1.MachineSetScalingDownV1Beta2Condition, @@ -517,7 +517,7 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e conditions.Set(m, *upToDateCondition) } - if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1Beta2Conditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { + if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { return ctrl.Result{}, err } continue @@ -532,7 +532,7 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e return ctrl.Result{}, err } conditions.Set(m, *upToDateCondition) - if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1Beta2Conditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { + if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []string{clusterv1.MachineUpToDateV1Beta2Condition}}); err != nil { return ctrl.Result{}, err } } @@ -1338,10 +1338,10 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( continue } - shouldCleanup := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - shouldCleanupV1Beta2 := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) + shouldCleanupV1Beta1 := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) + shouldCleanup := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta2Condition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) - if !(shouldCleanup || shouldCleanupV1Beta2) { + if !(shouldCleanupV1Beta1 || shouldCleanup) { continue } @@ -1351,17 +1351,17 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( continue } - if shouldCleanup { + if shouldCleanupV1Beta1 { v1beta1conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition) } - if shouldCleanupV1Beta2 { + if shouldCleanup { conditions.Delete(m, clusterv1.MachineOwnerRemediatedV1Beta2Condition) } - if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, - }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + }}, patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineOwnerRemediatedV1Beta2Condition, }}); err != nil { errList = append(errList, err) @@ -1552,9 +1552,9 @@ func patchMachineConditions(ctx context.Context, c client.Client, machines []*cl conditions.Set(m, v1beta2Condition) if err := patchHelper.Patch(ctx, m, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: 
[]clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, - }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + }}, patch.WithOwnedConditions{Conditions: []string{ clusterv1.MachineOwnerRemediatedV1Beta2Condition, }}); err != nil { errs = append(errs, err) diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index d64737bd27dd..30365ddcffce 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -306,10 +306,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return } options := []patch.Option{ - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.TopologyReconciledCondition, }}, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ClusterTopologyReconciledV1Beta2Condition, }}, } diff --git a/internal/util/tree/tree.go b/internal/util/tree/tree.go index b80879eacb9e..be41aea6fbbb 100644 --- a/internal/util/tree/tree.go +++ b/internal/util/tree/tree.go @@ -61,38 +61,38 @@ var ( cyan = color.New(color.FgCyan) ) -// PrintObjectTreeV1Beta2 prints the cluster status to stdout. +// PrintObjectTree prints the cluster status to stdout. // Note: this function is exposed only for usage in clusterctl and Cluster API E2E tests. -func PrintObjectTreeV1Beta2(tree *tree.ObjectTree, w io.Writer) { +func PrintObjectTree(tree *tree.ObjectTree, w io.Writer) { // Creates the output table tbl := tablewriter.NewWriter(w) tbl.SetHeader([]string{"NAME", "REPLICAS", "AVAILABLE", "READY", "UP TO DATE", "STATUS", "REASON", "SINCE", "MESSAGE"}) - formatTableTreeV1Beta2(tbl) + formatTableTree(tbl) // Add row for the root object, the cluster, and recursively for all the nodes representing the cluster status. - addObjectRowV1Beta2("", tbl, tree, tree.GetRoot()) + addObjectRow("", tbl, tree, tree.GetRoot()) // Prints the output table tbl.Render() } -// PrintObjectTree prints the cluster status to stdout. +// PrintObjectTreeV1Beta1 prints the cluster status to stdout. // Note: this function is exposed only for usage in clusterctl and Cluster API E2E tests. -func PrintObjectTree(tree *tree.ObjectTree) { +func PrintObjectTreeV1Beta1(tree *tree.ObjectTree) { // Creates the output table tbl := tablewriter.NewWriter(os.Stdout) tbl.SetHeader([]string{"NAME", "READY", "SEVERITY", "REASON", "SINCE", "MESSAGE"}) - formatTableTree(tbl) + formatTableTreeV1Beta1(tbl) // Add row for the root object, the cluster, and recursively for all the nodes representing the cluster status. - addObjectRow("", tbl, tree, tree.GetRoot()) + addObjectRowV1Beta1("", tbl, tree, tree.GetRoot()) // Prints the output table tbl.Render() } // formats the table with required attributes. -func formatTableTreeV1Beta2(tbl *tablewriter.Table) { +func formatTableTree(tbl *tablewriter.Table) { tbl.SetAutoWrapText(false) tbl.SetHeaderAlignment(tablewriter.ALIGN_LEFT) tbl.SetAlignment(tablewriter.ALIGN_LEFT) @@ -106,7 +106,7 @@ func formatTableTreeV1Beta2(tbl *tablewriter.Table) { } // formats the table with required attributes. 
-func formatTableTree(tbl *tablewriter.Table) { +func formatTableTreeV1Beta1(tbl *tablewriter.Table) { tbl.SetAutoWrapText(false) tbl.SetHeaderAlignment(tablewriter.ALIGN_LEFT) tbl.SetAlignment(tablewriter.ALIGN_LEFT) @@ -121,12 +121,12 @@ func formatTableTree(tbl *tablewriter.Table) { tbl.SetNoWhiteSpace(true) } -// addObjectRowV1Beta2 add a row for a given object, and recursively for all the object's children. +// addObjectRow add a row for a given object, and recursively for all the object's children. // NOTE: each row name gets a prefix, that generates a tree view like representation. -func addObjectRowV1Beta2(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { +func addObjectRow(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { // Get a row descriptor for a given object. // With v1beta2, the return value of this func adapt to the object represented in the line. - rowDescriptor := newV1beta2RowDescriptor(obj) + rowDescriptor := newRowDescriptor(obj) // If the object is a group object, override the condition message with the list of objects in the group. e.g machine-1, machine-2, ... if tree.IsGroupObject(obj) { @@ -188,7 +188,7 @@ func addObjectRowV1Beta2(prefix string, tbl *tablewriter.Table, objectTree *tree // If it is required to show all the conditions for the object, add a row for each object's conditions. if tree.IsShowConditionsObject(obj) { - addOtherConditionsV1Beta2(prefix, tbl, objectTree, obj) + addOtherConditions(prefix, tbl, objectTree, obj) } // Add a row for each object's children, taking care of updating the tree view prefix. @@ -196,7 +196,7 @@ func addObjectRowV1Beta2(prefix string, tbl *tablewriter.Table, objectTree *tree childrenObj = orderChildrenObjects(childrenObj) for i, child := range childrenObj { - addObjectRowV1Beta2(getChildPrefix(prefix, i, len(childrenObj)), tbl, objectTree, child) + addObjectRow(getChildPrefix(prefix, i, len(childrenObj)), tbl, objectTree, child) } } @@ -215,13 +215,13 @@ func orderChildrenObjects(childrenObj []ctrlclient.Object) []ctrlclient.Object { return childrenObj } -// addObjectRow add a row for a given object, and recursively for all the object's children. +// addObjectRowV1Beta1 add a row for a given object, and recursively for all the object's children. // NOTE: each row name gets a prefix, that generates a tree view like representation. -func addObjectRow(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { +func addObjectRowV1Beta1(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { // Gets the descriptor for the object's ready condition, if any. - readyDescriptor := conditionDescriptor{readyColor: gray} + readyDescriptor := v1beta1ConditionDescriptor{readyColor: gray} if ready := tree.GetReadyCondition(obj); ready != nil { - readyDescriptor = newConditionDescriptor(ready) + readyDescriptor = newV1Beta1ConditionDescriptor(ready) } // If the object is a group object, override the condition message with the list of objects in the group. e.g machine-1, machine-2, ... @@ -251,7 +251,7 @@ func addObjectRow(prefix string, tbl *tablewriter.Table, objectTree *tree.Object // If it is required to show all the conditions for the object, add a row for each object's conditions. 
if tree.IsShowConditionsObject(obj) { - addOtherConditions(prefix, tbl, objectTree, obj) + addOtherConditionsV1Beta1(prefix, tbl, objectTree, obj) } // Add a row for each object's children, taking care of updating the tree view prefix. @@ -270,12 +270,12 @@ func addObjectRow(prefix string, tbl *tablewriter.Table, objectTree *tree.Object sort.Slice(childrenObj, printBefore) for i, child := range childrenObj { - addObjectRow(getChildPrefix(prefix, i, len(childrenObj)), tbl, objectTree, child) + addObjectRowV1Beta1(getChildPrefix(prefix, i, len(childrenObj)), tbl, objectTree, child) } } -// addAllConditionsV1Beta2 adds a row for each object condition. -func addOtherConditionsV1Beta2(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { +// addOtherConditions adds a row for each object condition. +func addOtherConditions(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { // Add a row for each other condition, taking care of updating the tree view prefix. // In this case the tree prefix get a filler, to indent conditions from objects, and eventually a // and additional pipe if the object has children that should be presented after the conditions. @@ -303,7 +303,7 @@ func addOtherConditionsV1Beta2(prefix string, tbl *tablewriter.Table, objectTree } childPrefix := getChildPrefix(prefix+childrenPipe+filler, i, len(conditions)) - c, status, age, reason, message := v1Beta2ConditionInfo(condition, positivePolarity) + c, status, age, reason, message := conditionInfo(condition, positivePolarity) // Add the row representing each condition. // Note: if the condition has a multiline message, also add additional rows for each line. @@ -338,9 +338,9 @@ func addOtherConditionsV1Beta2(prefix string, tbl *tablewriter.Table, objectTree } } -// addOtherConditions adds a row for each object condition except the ready condition, +// addOtherConditionsV1Beta1 adds a row for each object condition except the ready condition, // which is already represented on the object's main row. -func addOtherConditions(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { +func addOtherConditionsV1Beta1(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { // Add a row for each other condition, taking care of updating the tree view prefix. // In this case the tree prefix get a filler, to indent conditions from objects, and eventually a // and additional pipe if the object has children that should be presented after the conditions. @@ -353,7 +353,7 @@ func addOtherConditions(prefix string, tbl *tablewriter.Table, objectTree *tree. otherConditions := tree.GetOtherConditions(obj) for i := range otherConditions { otherCondition := otherConditions[i] - otherDescriptor := newConditionDescriptor(otherCondition) + otherDescriptor := newV1Beta1ConditionDescriptor(otherCondition) otherConditionPrefix := getChildPrefix(prefix+childrenPipe+filler, i, len(otherConditions)) tbl.Append([]string{ fmt.Sprintf("%s%s", gray.Sprint(otherConditionPrefix), cyan.Sprint(otherCondition.Type)), @@ -508,8 +508,8 @@ func getRowName(obj ctrlclient.Object) string { return name } -// v1beta2RowDescriptor contains all the info for representing a condition. -type v1beta2RowDescriptor struct { +// rowDescriptor contains all the info for representing a condition. 
+type rowDescriptor struct { age string replicas string availableCounters string @@ -520,10 +520,10 @@ type v1beta2RowDescriptor struct { message string } -// newV1beta2RowDescriptor returns a v1beta2ConditionDescriptor for the given condition. +// newRowDescriptor returns a v1beta2ConditionDescriptor for the given condition. // Note: the return value of this func adapt to the object represented in the line. -func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { - v := v1beta2RowDescriptor{} +func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { + v := rowDescriptor{} switch obj := obj.(type) { case *clusterv1.Cluster: // If the object is a cluster, returns all the replica counters (CP and worker replicas are summed for sake of simplicity); @@ -550,7 +550,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { } if available := tree.GetAvailableV1Beta2Condition(obj); available != nil { - availableColor, availableStatus, availableAge, availableReason, availableMessage := v1Beta2ConditionInfo(*available, true) + availableColor, availableStatus, availableAge, availableReason, availableMessage := conditionInfo(*available, true) v.status = availableColor.Sprintf("Available: %s", availableStatus) v.reason = availableReason v.age = availableAge @@ -573,7 +573,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { } if available := tree.GetAvailableV1Beta2Condition(obj); available != nil { - availableColor, availableStatus, availableAge, availableReason, availableMessage := v1Beta2ConditionInfo(*available, true) + availableColor, availableStatus, availableAge, availableReason, availableMessage := conditionInfo(*available, true) v.status = availableColor.Sprintf("Available: %s", availableStatus) v.reason = availableReason v.age = availableAge @@ -610,7 +610,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { v.readyCounters = "0" if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { - readyColor, readyStatus, readyAge, readyReason, readyMessage := v1Beta2ConditionInfo(*ready, true) + readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason v.age = readyAge @@ -633,7 +633,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { // Also, if the Unstructured object implements the Cluster API control plane contract, surface // corresponding replica counters. 
if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { - readyColor, readyStatus, readyAge, readyReason, readyMessage := v1Beta2ConditionInfo(*ready, true) + readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason v.age = readyAge @@ -670,7 +670,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { } if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { - readyColor, readyStatus, readyAge, readyReason, readyMessage := v1Beta2ConditionInfo(*ready, true) + readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason v.age = readyAge @@ -681,7 +681,7 @@ func newV1beta2RowDescriptor(obj ctrlclient.Object) v1beta2RowDescriptor { return v } -func v1Beta2ConditionInfo(c metav1.Condition, positivePolarity bool) (color *color.Color, status, age, reason, message string) { +func conditionInfo(c metav1.Condition, positivePolarity bool) (color *color.Color, status, age, reason, message string) { switch c.Status { case metav1.ConditionFalse: if positivePolarity { @@ -744,8 +744,8 @@ func formatParagraph(text string, maxWidth int) string { return strings.Join(lines, "\n") } -// conditionDescriptor contains all the info for representing a condition. -type conditionDescriptor struct { +// v1beta1ConditionDescriptor contains all the info for representing a condition. +type v1beta1ConditionDescriptor struct { readyColor *color.Color age string status string @@ -754,9 +754,9 @@ type conditionDescriptor struct { message string } -// newConditionDescriptor returns a conditionDescriptor for the given condition. -func newConditionDescriptor(c *clusterv1.Condition) conditionDescriptor { - v := conditionDescriptor{} +// newV1Beta1ConditionDescriptor returns a v1beta1ConditionDescriptor for the given condition. 
+func newV1Beta1ConditionDescriptor(c *clusterv1.Condition) v1beta1ConditionDescriptor { + v := v1beta1ConditionDescriptor{} v.status = string(c.Status) v.severity = string(c.Severity) diff --git a/internal/util/tree/tree_test.go b/internal/util/tree/tree_test.go index cda1e5e95ae9..d86e6b805931 100644 --- a/internal/util/tree/tree_test.go +++ b/internal/util/tree/tree_test.go @@ -120,7 +120,7 @@ func Test_newConditionDescriptor_readyColor(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := newConditionDescriptor(tt.condition) + got := newV1Beta1ConditionDescriptor(tt.condition) g.Expect(got.readyColor).To(Equal(tt.expectReadyColor)) }) } @@ -146,13 +146,13 @@ func Test_newConditionDescriptor_truncateMessages(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := newConditionDescriptor(tt.condition) + got := newV1Beta1ConditionDescriptor(tt.condition) g.Expect(got.message).To(Equal(tt.expectMessage)) }) } } -func Test_TreePrefix(t *testing.T) { +func Test_V1Beta1TreePrefix(t *testing.T) { tests := []struct { name string objectTree *tree.ObjectTree @@ -215,13 +215,13 @@ func Test_TreePrefix(t *testing.T) { o1 := fakeObject("child1", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(v1beta1conditions.TrueCondition("C1.1")), - withCondition(v1beta1conditions.TrueCondition("C1.2")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C1.1")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C1.2")), ) o2 := fakeObject("child2", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(v1beta1conditions.TrueCondition("C2.1")), - withCondition(v1beta1conditions.TrueCondition("C2.2")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C2.1")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C2.2")), ) obectjTree.Add(root, o1) obectjTree.Add(root, o2) @@ -245,15 +245,15 @@ func Test_TreePrefix(t *testing.T) { o1 := fakeObject("child1", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(v1beta1conditions.TrueCondition("C1.1")), - withCondition(v1beta1conditions.TrueCondition("C1.2")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C1.1")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C1.2")), ) o1_1 := fakeObject("child1.1") o2 := fakeObject("child2", withAnnotation(tree.ShowObjectConditionsAnnotation, "True"), - withCondition(v1beta1conditions.TrueCondition("C2.1")), - withCondition(v1beta1conditions.TrueCondition("C2.2")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C2.1")), + withV1Beta1Condition(v1beta1conditions.TrueCondition("C2.2")), ) o2_1 := fakeObject("child2.1") obectjTree.Add(root, o1) @@ -283,10 +283,10 @@ func Test_TreePrefix(t *testing.T) { // Creates the output table tbl := tablewriter.NewWriter(&output) - formatTableTree(tbl) + formatTableTreeV1Beta1(tbl) // Add row for the root object, the cluster, and recursively for all the nodes representing the cluster status. - addObjectRow("", tbl, tt.objectTree, tt.objectTree.GetRoot()) + addObjectRowV1Beta1("", tbl, tt.objectTree, tt.objectTree.GetRoot()) tbl.Render() // Compare the output with the expected prefix. 
@@ -297,7 +297,7 @@ func Test_TreePrefix(t *testing.T) { } } -func Test_V1Beta2TreePrefix(t *testing.T) { +func Test_TreePrefix(t *testing.T) { tests := []struct { name string objectTree *tree.ObjectTree @@ -307,7 +307,7 @@ func Test_V1Beta2TreePrefix(t *testing.T) { name: "Conditions should get the right prefix with multiline message", objectTree: func() *tree.ObjectTree { root := fakeObject("root", - withV1Beta2Condition(metav1.Condition{ + withCondition(metav1.Condition{ Type: "Available", Status: metav1.ConditionFalse, Reason: "NotAvailable", @@ -319,11 +319,11 @@ func Test_V1Beta2TreePrefix(t *testing.T) { }) o1 := fakeObject("child1", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o2 := fakeObject("child2", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) objectTree.Add(root, o1) objectTree.Add(root, o2) @@ -342,7 +342,7 @@ func Test_V1Beta2TreePrefix(t *testing.T) { name: "Conditions should get the right prefix with multiline message and a child", objectTree: func() *tree.ObjectTree { root := fakeObject("root", - withV1Beta2Condition(metav1.Condition{ + withCondition(metav1.Condition{ Type: "Available", Status: metav1.ConditionTrue, Reason: "Available", @@ -353,11 +353,11 @@ func Test_V1Beta2TreePrefix(t *testing.T) { }) o1 := fakeObject("child1", - withV1Beta2Condition(trueV1Beta2Condition()), + withCondition(trueCondition()), ) o2 := fakeObject("child2", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o2_1 := fakeObject("child2.1") obectjTree.Add(root, o1) @@ -377,7 +377,7 @@ func Test_V1Beta2TreePrefix(t *testing.T) { name: "Multiple nested childs should get the right multiline prefix", objectTree: func() *tree.ObjectTree { root := fakeObject("root", - withV1Beta2Condition(metav1.Condition{ + withCondition(metav1.Condition{ Type: "Available", Status: metav1.ConditionTrue, Reason: "Available", @@ -388,19 +388,19 @@ func Test_V1Beta2TreePrefix(t *testing.T) { }) o1 := fakeObject("child1", - withV1Beta2Condition(trueV1Beta2Condition()), + withCondition(trueCondition()), ) o2 := fakeObject("child2", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o3 := fakeObject("child3", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o4 := fakeObject("child4", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o5 := fakeObject("child5", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be 
validated in the test\nsecond line")), ) obectjTree.Add(root, o1) obectjTree.Add(o1, o2) @@ -426,7 +426,7 @@ func Test_V1Beta2TreePrefix(t *testing.T) { name: "Nested childs should get the right prefix with multiline message", objectTree: func() *tree.ObjectTree { root := fakeObject("root", - withV1Beta2Condition(metav1.Condition{ + withCondition(metav1.Condition{ Type: "Available", Status: metav1.ConditionTrue, Reason: "Available", @@ -437,16 +437,16 @@ func Test_V1Beta2TreePrefix(t *testing.T) { }) o1 := fakeObject("child1", - withV1Beta2Condition(trueV1Beta2Condition()), + withCondition(trueCondition()), ) o2 := fakeObject("child2", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o3 := fakeObject("child3", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o4 := fakeObject("child4", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) obectjTree.Add(root, o1) obectjTree.Add(o1, o2) @@ -469,7 +469,7 @@ func Test_V1Beta2TreePrefix(t *testing.T) { name: "Conditions should get the right prefix with childs", objectTree: func() *tree.ObjectTree { root := fakeObject("root", - withV1Beta2Condition(metav1.Condition{ + withCondition(metav1.Condition{ Type: "Available", Status: metav1.ConditionTrue, Reason: "Available", @@ -480,20 +480,20 @@ func Test_V1Beta2TreePrefix(t *testing.T) { }) o1 := fakeObject("child1", - withV1Beta2Condition(trueV1Beta2Condition()), + withCondition(trueCondition()), ) o2 := fakeObject("child2", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o2_1 := fakeObject("child2.1", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o3 := fakeObject("child3", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) o3_1 := fakeObject("child3.1", - withV1Beta2Condition(falseV1Beta2Condition("Available", "first line must not be validated in the test\nsecond line")), + withCondition(falseCondition("Available", "first line must not be validated in the test\nsecond line")), ) obectjTree.Add(root, o1) obectjTree.Add(root, o2) @@ -524,10 +524,10 @@ func Test_V1Beta2TreePrefix(t *testing.T) { // Creates the output table tbl := tablewriter.NewWriter(&output) - formatTableTreeV1Beta2(tbl) + formatTableTree(tbl) // Add row for the root object, the cluster, and recursively for all the nodes representing the cluster status. - addObjectRowV1Beta2("", tbl, tt.objectTree, tt.objectTree.GetRoot()) + addObjectRow("", tbl, tt.objectTree, tt.objectTree.GetRoot()) tbl.Render() // Remove empty lines from the output. 
We need this because v1beta2 adds lines at the beginning and end. @@ -571,14 +571,14 @@ func withAnnotation(name, value string) func(ctrlclient.Object) { } } -func withCondition(c *clusterv1.Condition) func(ctrlclient.Object) { +func withV1Beta1Condition(c *clusterv1.Condition) func(ctrlclient.Object) { return func(m ctrlclient.Object) { setter := m.(v1beta1conditions.Setter) v1beta1conditions.Set(setter, c) } } -func withV1Beta2Condition(c metav1.Condition) func(ctrlclient.Object) { +func withCondition(c metav1.Condition) func(ctrlclient.Object) { return func(m ctrlclient.Object) { cluster := m.(*clusterv1.Cluster) conds := cluster.GetConditions() @@ -587,7 +587,7 @@ func withV1Beta2Condition(c metav1.Condition) func(ctrlclient.Object) { } } -func trueV1Beta2Condition() metav1.Condition { +func trueCondition() metav1.Condition { return metav1.Condition{ Type: "Available", Status: metav1.ConditionTrue, @@ -595,7 +595,7 @@ func trueV1Beta2Condition() metav1.Condition { } } -func falseV1Beta2Condition(t, m string) metav1.Condition { +func falseCondition(t, m string) metav1.Condition { return metav1.Condition{ Type: t, Status: metav1.ConditionFalse, diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index dd189df9b04d..eff999eb1463 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -55,7 +55,6 @@ import ( "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" ) // ClusterctlUpgradeSpecInput is the input for ClusterctlUpgradeSpec. @@ -815,8 +814,10 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to get Cluster object %s", klog.KRef(clusterNamespace, clusterName)) for _, conditionType := range v1beta2conditionTypes { - if conditions.Has(cluster, conditionType) { - condition := conditions.Get(cluster, conditionType) + for _, condition := range cluster.Status.V1Beta2.Conditions { + if condition.Type != conditionType { + continue + } Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Cluster should be set to true", conditionType) Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Cluster should have an empty message", conditionType) } @@ -831,8 +832,10 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(cluster)) for _, machine := range machineList.Items { for _, conditionType := range v1beta2conditionTypes { - if conditions.Has(&machine, conditionType) { - condition := conditions.Get(&machine, conditionType) + for _, condition := range machine.Status.V1Beta2.Conditions { + if condition.Type != conditionType { + continue + } Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Machine %q should be set to true", conditionType, machine.Name) Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Machine %q should have an empty message", conditionType, machine.Name) } diff --git a/test/framework/cluster_helpers.go b/test/framework/cluster_helpers.go index 15bfb6dcfaca..a26e53d5fa62 100644 --- a/test/framework/cluster_helpers.go +++ b/test/framework/cluster_helpers.go @@ -380,9 +380,9 @@ func DescribeCluster(ctx context.Context, input DescribeClusterInput) { defer f.Close() w := 
bufio.NewWriter(f) - cmdtree.PrintObjectTreeV1Beta2(tree, w) + cmdtree.PrintObjectTree(tree, w) if CurrentSpecReport().Failed() { - cmdtree.PrintObjectTreeV1Beta2(tree, GinkgoWriter) + cmdtree.PrintObjectTree(tree, GinkgoWriter) } Expect(w.Flush()).To(Succeed(), "Failed to save clusterctl describe output") } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index 2ec89150b2ee..df46b0060453 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -448,7 +448,7 @@ func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dock return patchHelper.Patch( ctx, dockerMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, expv1.ReplicasReadyCondition, }}, diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go index 47cbd664d8e4..d2512e52e20d 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go @@ -198,11 +198,11 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel return patchHelper.Patch( ctx, dockerCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, infrav1.LoadBalancerAvailableCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, infrav1.DevClusterReadyV1Beta2Condition, infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index cc838543e74c..88fa0c32b3bf 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -488,12 +488,12 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel return patchHelper.Patch( ctx, dockerMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, infrav1.ContainerProvisionedCondition, infrav1.BootstrapExecSucceededCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, infrav1.DevMachineReadyV1Beta2Condition, infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go index 2ff49d40aa6c..b08709caddb2 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go +++ 
b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go @@ -163,7 +163,7 @@ func (r *ClusterBackendReconciler) PatchDevCluster(ctx context.Context, patchHel return patchHelper.Patch( ctx, inMemoryCluster, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, }}, ) diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index 006358575be2..229c3e0a06b5 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -1270,14 +1270,14 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel } return patchHelper.Patch(ctx, inMemoryMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, infrav1.VMProvisionedCondition, infrav1.NodeProvisionedCondition, infrav1.EtcdProvisionedCondition, infrav1.APIServerProvisionedCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, infrav1.DevMachineReadyV1Beta2Condition, infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index 10d96db4d6b3..3439098e37da 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -182,11 +182,11 @@ func patchDockerCluster(ctx context.Context, patchHelper *patch.Helper, dockerCl return patchHelper.Patch( ctx, dockerCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, infrav1.LoadBalancerAvailableCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, infrav1.DevClusterReadyV1Beta2Condition, infrav1.DevClusterDockerLoadBalancerAvailableV1Beta2Condition, diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index d61e5d3ac01a..c09ec34ad3ce 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -279,12 +279,12 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa return patchHelper.Patch( ctx, dockerMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, infrav1.ContainerProvisionedCondition, infrav1.BootstrapExecSucceededCondition, }}, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + patch.WithOwnedConditions{Conditions: []string{ clusterv1.PausedV1Beta2Condition, infrav1.DevMachineReadyV1Beta2Condition, infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition, diff --git 
a/util/patch/options.go b/util/patch/options.go index 9dee0e8cd8f5..0912b3099b2f 100644 --- a/util/patch/options.go +++ b/util/patch/options.go @@ -75,25 +75,25 @@ func (w WithStatusObservedGeneration) ApplyToHelper(in *HelperOptions) { in.IncludeStatusObservedGeneration = true } -// WithOwnedConditions allows to define condition types owned by the controller. +// WithOwnedV1beta1Conditions allows to define condition types owned by the controller. // In case of conflicts for the owned conditions, the patch helper will always use the value provided by the controller. -type WithOwnedConditions struct { +type WithOwnedV1beta1Conditions struct { Conditions []clusterv1.ConditionType } // ApplyToHelper applies this configuration to the given HelperOptions. -func (w WithOwnedConditions) ApplyToHelper(in *HelperOptions) { +func (w WithOwnedV1beta1Conditions) ApplyToHelper(in *HelperOptions) { in.OwnedConditions = w.Conditions } -// WithOwnedV1Beta2Conditions allows to define condition types owned by the controller. +// WithOwnedConditions allows to define condition types owned by the controller. // In case of conflicts for the owned conditions, the patch helper will always use the value provided by the controller. -type WithOwnedV1Beta2Conditions struct { +type WithOwnedConditions struct { Conditions []string } // ApplyToHelper applies this configuration to the given HelperOptions. -func (w WithOwnedV1Beta2Conditions) ApplyToHelper(in *HelperOptions) { +func (w WithOwnedConditions) ApplyToHelper(in *HelperOptions) { in.OwnedV1Beta2Conditions = w.Conditions } diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index 6fa2700fa6c5..5eb128527556 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -449,7 +449,7 @@ func TestPatchHelper(t *testing.T) { v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) t.Log("Validating the object has been updated") readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) @@ -1330,7 +1330,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { v1beta1conditions.MarkTrue(obj, clusterv1.ReadyCondition) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}})).To(Succeed()) t.Log("Validating the object has been updated") readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyCondition) @@ -1830,7 +1830,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) t.Log("Validating the object has been updated") g.Eventually(func() bool { @@ -2370,7 
+2370,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) {
 conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now})
 t.Log("Patching the object")
- g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed())
+ g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyCondition}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed())
 t.Log("Validating the object has been updated")
 g.Eventually(func() bool {
@@ -2801,7 +2801,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) {
 conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now})
 t.Log("Patching the object")
- g.Expect(patcher.Patch(ctx, obj, WithOwnedV1Beta2Conditions{Conditions: []string{"Ready"}})).To(Succeed())
+ g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed())
 t.Log("Validating the object has been updated")
 g.Eventually(func() bool {
diff --git a/util/paused/paused.go b/util/paused/paused.go
index 9cafa6d3f81e..c363fa683d0f 100644
--- a/util/paused/paused.go
+++ b/util/paused/paused.go
@@ -80,7 +80,7 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 conditions.Set(obj, newCondition)
- if err := patchHelper.Patch(ctx, obj, patch.WithOwnedV1Beta2Conditions{Conditions: []string{
+ if err := patchHelper.Patch(ctx, obj, patch.WithOwnedConditions{Conditions: []string{
 clusterv1.PausedV1Beta2Condition,
 }}); err != nil {
 return isPaused, false, err

From ad69e7c188f5f4b125f2f1f87bb99fb65d1d667d Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Fri, 11 Apr 2025 15:38:59 +0200
Subject: [PATCH 5/5] Fix E2E

---
 test/e2e/clusterctl_upgrade.go | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index eff999eb1463..18ac8d4bc3fe 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -813,13 +813,15 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl
 return c.Get(ctx, key, cluster)
 }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to get Cluster object %s", klog.KRef(clusterNamespace, clusterName))
- for _, conditionType := range v1beta2conditionTypes {
- for _, condition := range cluster.Status.V1Beta2.Conditions {
- if condition.Type != conditionType {
- continue
+ if cluster.Status.V1Beta2 != nil && cluster.Status.V1Beta2.Conditions != nil {
+ for _, conditionType := range v1beta2conditionTypes {
+ for _, condition := range cluster.Status.V1Beta2.Conditions {
+ if condition.Type != conditionType {
+ continue
+ }
+ Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Cluster should be set to true", conditionType)
+ Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Cluster should have an empty message", conditionType)
 }
- Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Cluster should be set to true", conditionType)
- Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Cluster should have an empty message", conditionType)
 }
 }
@@ -830,14 +832,16 @@ func verifyV1Beta2ConditionsTrueV1Beta1(ctx context.Context, c client.Client, cl
 clusterv1.ClusterNameLabel: clusterName,
 })
 }, 3*time.Minute, 3*time.Second).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(cluster))
- for _, machine := range machineList.Items {
- for _, conditionType := range v1beta2conditionTypes {
- for _, condition := range machine.Status.V1Beta2.Conditions {
- if condition.Type != conditionType {
- continue
+ if cluster.Status.V1Beta2 != nil && cluster.Status.V1Beta2.Conditions != nil {
+ for _, machine := range machineList.Items {
+ for _, conditionType := range v1beta2conditionTypes {
+ for _, condition := range machine.Status.V1Beta2.Conditions {
+ if condition.Type != conditionType {
+ continue
+ }
+ Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Machine %q should be set to true", conditionType, machine.Name)
+ Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Machine %q should have an empty message", conditionType, machine.Name)
 }
- Expect(condition.Status).To(Equal(metav1.ConditionTrue), "The v1beta2 condition %q on the Machine %q should be set to true", conditionType, machine.Name)
- Expect(condition.Message).To(BeEmpty(), "The v1beta2 condition %q on the Machine %q should have an empty message", conditionType, machine.Name)
 }
 }
 }
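
A minimal sketch of how a provider controller calls the renamed patch options after this series; it mirrors the dockermachine_controller.go hunk above rather than adding new behavior. The function name patchExampleDevMachine, the package name, and the surrounding wiring are illustrative assumptions; the option types, condition constants, and patch.Helper API are the ones shown in the diffs.

// Illustrative sketch only (not part of the patch series): after the rename,
// WithOwnedV1beta1Conditions carries the deprecated clusterv1.ConditionType list,
// while WithOwnedConditions carries the metav1.Condition (formerly "v1beta2") types.
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
)

// patchExampleDevMachine is a hypothetical helper; it patches a DockerMachine while
// declaring which conditions this controller owns, using both option types.
func patchExampleDevMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infrav1.DockerMachine) error {
	return patchHelper.Patch(
		ctx,
		dockerMachine,
		// Deprecated v1beta1 conditions go through the renamed option.
		patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{
			clusterv1.ReadyCondition,
			infrav1.ContainerProvisionedCondition,
			infrav1.BootstrapExecSucceededCondition,
		}},
		// New metav1.Condition types use the plain WithOwnedConditions name.
		patch.WithOwnedConditions{Conditions: []string{
			clusterv1.PausedV1Beta2Condition,
			infrav1.DevMachineReadyV1Beta2Condition,
			infrav1.DevMachineDockerContainerProvisionedV1Beta2Condition,
		}},
	)
}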