From 2afc2f589c1ad392de48126871b98d30ba00d9a7 Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Wed, 31 Mar 2021 23:35:43 +0300 Subject: [PATCH 01/12] Convert configure to scale --- cli/cmd/cluster.go | 138 ++++++++++++++++++-------- cli/cmd/errors.go | 157 ++++++++++++------------------ cli/cmd/lib_cluster_config.go | 78 --------------- cli/cmd/lib_manager.go | 22 +++-- dev/generate_cli_md.sh | 2 +- docs/clients/cli.md | 16 +-- manager/install.sh | 72 +++++--------- pkg/types/clusterconfig/errors.go | 8 -- 8 files changed, 209 insertions(+), 284 deletions(-) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index cc572fd715..5c7d05a679 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -51,15 +51,17 @@ import ( ) var ( - _flagClusterUpEnv string - _flagClusterInfoEnv string - _flagClusterConfigureEnv string - _flagClusterConfig string - _flagClusterName string - _flagClusterRegion string - _flagClusterInfoDebug bool - _flagClusterDisallowPrompt bool - _flagClusterDownKeepVolumes bool + _flagClusterUpEnv string + _flagClusterInfoEnv string + _flagClusterScaleNodeGroup string + _flagClusterScaleMinInstances int64 + _flagClusterScaleMaxInstances int64 + _flagClusterConfig string + _flagClusterName string + _flagClusterRegion string + _flagClusterInfoDebug bool + _flagClusterDisallowPrompt bool + _flagClusterDownKeepVolumes bool ) var _eksctlPrefixRegex = regexp.MustCompile(`^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} \[.+] {2}`) @@ -79,10 +81,12 @@ func clusterInit() { _clusterInfoCmd.Flags().BoolVarP(&_flagClusterDisallowPrompt, "yes", "y", false, "skip prompts") _clusterCmd.AddCommand(_clusterInfoCmd) - _clusterConfigureCmd.Flags().SortFlags = false - _clusterConfigureCmd.Flags().StringVarP(&_flagClusterConfigureEnv, "configure-env", "e", "", "name of environment to configure") - _clusterConfigureCmd.Flags().BoolVarP(&_flagClusterDisallowPrompt, "yes", "y", false, "skip prompts") - _clusterCmd.AddCommand(_clusterConfigureCmd) + _clusterScaleCmd.Flags().SortFlags = false + addClusterNameFlag(_clusterScaleCmd) + addClusterRegionFlag(_clusterScaleCmd) + addClusterScaleFlags(_clusterScaleCmd) + _clusterScaleCmd.Flags().BoolVarP(&_flagClusterDisallowPrompt, "yes", "y", false, "skip prompts") + _clusterCmd.AddCommand(_clusterScaleCmd) _clusterDownCmd.Flags().SortFlags = false addClusterConfigFlag(_clusterDownCmd) @@ -112,6 +116,16 @@ func addClusterRegionFlag(cmd *cobra.Command) { cmd.Flags().StringVarP(&_flagClusterRegion, "region", "r", "", "aws region of the cluster") } +func addClusterScaleFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&_flagClusterScaleNodeGroup, "node-group", "", "name of the node group to scale") + cmd.Flags().Int64Var(&_flagClusterScaleMinInstances, "min-instances", 0, "minimum number of instances for the given node group") + cmd.Flags().Int64Var(&_flagClusterScaleMaxInstances, "max-instances", 0, "maximum number of instances for the given node group") + + cmd.MarkFlagRequired("node-group") + cmd.MarkFlagRequired("min-instances") + cmd.MarkFlagRequired("max-instances") +} + var _clusterCmd = &cobra.Command{ Use: "cluster", Short: "manage cortex clusters (contains subcommands)", @@ -193,7 +207,7 @@ var _clusterUpCmd = &cobra.Command{ exit.Error(err) } - out, exitCode, err := runManagerWithClusterConfig("/root/install.sh", clusterConfig, awsClient, nil, nil) + out, exitCode, err := runManagerWithClusterConfig("/root/install.sh", clusterConfig, awsClient, nil, nil, nil) if err != nil { exit.Error(err) } @@ -288,20 +302,18 @@ var 
_clusterUpCmd = &cobra.Command{ }, } -var _clusterConfigureCmd = &cobra.Command{ - Use: "configure [CLUSTER_CONFIG_FILE]", - Short: "update a cluster's configuration", - Args: cobra.ExactArgs(1), +var _clusterScaleCmd = &cobra.Command{ + Use: "scale [flags]", + Short: "update the min/max instances for a nodegroup", + Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.configure") - clusterConfigFile := args[0] - if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } - accessConfig, err := getNewClusterAccessConfig(clusterConfigFile) + accessConfig, err := getClusterAccessConfigWithCache() if err != nil { exit.Error(err) } @@ -321,14 +333,17 @@ var _clusterConfigureCmd = &cobra.Command{ exit.Error(err) } - cachedClusterConfig := refreshCachedClusterConfig(*awsClient, accessConfig) - - clusterConfig, err := getConfigureClusterConfig(cachedClusterConfig, clusterConfigFile, _flagClusterDisallowPrompt) + clusterConfig := refreshCachedClusterConfig(*awsClient, accessConfig) + clusterConfig, err = scaleNodeGroup(clusterConfig) if err != nil { exit.Error(err) } - out, exitCode, err := runManagerWithClusterConfig("/root/install.sh --update", clusterConfig, awsClient, nil, nil) + out, exitCode, err := runManagerWithClusterConfig("/root/install.sh --update", &clusterConfig, awsClient, nil, nil, []string{ + "CORTEX_SCALING_NODEGROUP=" + _flagClusterScaleNodeGroup, + "CORTEX_SCALING_MIN_INSTANCES=" + s.Int64(_flagClusterScaleMinInstances), + "CORTEX_SCALING_MAX_INSTANCES=" + s.Int64(_flagClusterScaleMaxInstances), + }) if err != nil { exit.Error(err) } @@ -336,19 +351,7 @@ var _clusterConfigureCmd = &cobra.Command{ helpStr := "\ndebugging tips (may or may not apply to this error):" helpStr += fmt.Sprintf("\n* if your cluster was unable to provision instances, additional error information may be found in the activity history of your cluster's autoscaling groups (select each autoscaling group and click the \"Activity\" or \"Activity History\" tab): https://console.aws.amazon.com/ec2/autoscaling/home?region=%s#AutoScalingGroups:", clusterConfig.Region) fmt.Println(helpStr) - exit.Error(ErrorClusterConfigure(out + helpStr)) - } - - if _flagClusterConfigureEnv != "" { - loadBalancer, err := getAWSOperatorLoadBalancer(clusterConfig.ClusterName, awsClient) - if err != nil { - exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", _flagClusterConfigureEnv))) - } - operatorEndpoint := "https://" + *loadBalancer.DNSName - err = updateAWSCLIEnv(_flagClusterConfigureEnv, operatorEndpoint, _flagClusterDisallowPrompt) - if err != nil { - exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", _flagClusterConfigureEnv))) - } + exit.Error(ErrorClusterScale(out + helpStr)) } }, } @@ -659,7 +662,7 @@ func cmdInfo(awsClient *aws.Client, accessConfig *clusterconfig.AccessConfig, di clusterConfig := refreshCachedClusterConfig(*awsClient, accessConfig) - out, exitCode, err := runManagerWithClusterConfig("/root/info.sh", &clusterConfig, awsClient, nil, nil) + out, exitCode, err := runManagerWithClusterConfig("/root/info.sh", &clusterConfig, awsClient, nil, nil, nil) if err != nil { exit.Error(err) } @@ -970,6 +973,63 @@ func refreshCachedClusterConfig(awsClient aws.Client, accessConfig *clusterconfi return *refreshedClusterConfig } 
+func scaleNodeGroup(clusterConfig clusterconfig.Config) (clusterconfig.Config, error) { + if _flagClusterScaleMinInstances < 0 { + return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) + } + if _flagClusterScaleMaxInstances < 0 { + return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) + } + if _flagClusterScaleMinInstances > _flagClusterScaleMaxInstances { + return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances() + } + + clusterName := clusterConfig.ClusterName + region := clusterConfig.Region + + ngFound := false + availableNodeGroups := []string{} + for idx, ng := range clusterConfig.NodeGroups { + if ng == nil { + continue + } + availableNodeGroups = append(availableNodeGroups, ng.Name) + if ng.Name == _flagClusterScaleNodeGroup { + if ng.MinInstances == _flagClusterScaleMinInstances && ng.MaxInstances == _flagClusterScaleMaxInstances { + fmt.Printf("no changes to the %s nodegroup required in cluster %s from region %s\n", ng.Name, clusterName, region) + exit.Ok() + } + + if !_flagClusterDisallowPrompt { + promptMessage := "" + if ng.MinInstances != _flagClusterScaleMinInstances && ng.MaxInstances != _flagClusterScaleMaxInstances { + promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d and field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, _flagClusterScaleMinInstances, clusterconfig.MaxInstancesKey, ng.MaxInstances, _flagClusterScaleMaxInstances) + } + if ng.MinInstances == _flagClusterScaleMinInstances && ng.MaxInstances != _flagClusterScaleMaxInstances { + promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, ng.MaxInstances, _flagClusterScaleMaxInstances) + } + if ng.MinInstances != _flagClusterScaleMinInstances && ng.MaxInstances == _flagClusterScaleMaxInstances { + promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, _flagClusterScaleMinInstances) + } + if !prompt.YesOrNo(promptMessage, "", "") { + exit.Ok() + } + } + + clusterConfig.NodeGroups[idx].MinInstances = _flagClusterScaleMinInstances + clusterConfig.NodeGroups[idx].MaxInstances = _flagClusterScaleMaxInstances + ngFound = true + break + } + } + + if !ngFound { + return clusterconfig.Config{}, ErrorNodeGroupNotFound(_flagClusterScaleNodeGroup, clusterName, region, availableNodeGroups) + } + + return clusterConfig, nil +} + func createS3BucketIfNotFound(awsClient *aws.Client, bucket string, tags map[string]string) error { bucketFound, err := awsClient.DoesBucketExist(bucket) if err != nil { diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go index f777f4dcc8..507e1e3f41 100644 --- a/cli/cmd/errors.go +++ b/cli/cmd/errors.go @@ -19,14 +19,13 @@ package cmd import ( "fmt" "net/url" - "runtime" "strings" "github.com/cortexlabs/cortex/pkg/consts" "github.com/cortexlabs/cortex/pkg/lib/errors" + s "github.com/cortexlabs/cortex/pkg/lib/strings" "github.com/cortexlabs/cortex/pkg/lib/urls" "github.com/cortexlabs/cortex/pkg/types/clusterconfig" - "github.com/cortexlabs/cortex/pkg/types/userconfig" ) const ( @@ -39,38 +38,35 @@ func errStrFailedToConnect(u url.URL) string { } const ( - ErrInvalidProvider = "cli.invalid_provider" - ErrInvalidLegacyProvider = "cli.invalid_legacy_provider" - ErrCommandNotSupportedForKind = 
"cli.command_not_supported_for_kind" - ErrNoAvailableEnvironment = "cli.no_available_environment" - ErrEnvironmentNotSet = "cli.environment_not_set" - ErrEnvironmentNotFound = "cli.environment_not_found" - ErrFieldNotFoundInEnvironment = "cli.field_not_found_in_environment" - ErrInvalidOperatorEndpoint = "cli.invalid_operator_endpoint" - ErrNoOperatorLoadBalancer = "cli.no_operator_load_balancer" - ErrCortexYAMLNotFound = "cli.cortex_yaml_not_found" - ErrConnectToDockerDaemon = "cli.connect_to_docker_daemon" - ErrDockerPermissions = "cli.docker_permissions" - ErrDockerCtrlC = "cli.docker_ctrl_c" - ErrResponseUnknown = "cli.response_unknown" - ErrAPINotReady = "cli.api_not_ready" - ErrOneAWSEnvVarSet = "cli.one_aws_env_var_set" - ErrOneAWSFlagSet = "cli.one_aws_flag_set" - ErrOnlyAWSClusterEnvVarSet = "cli.only_aws_cluster_env_var_set" - ErrOnlyAWSClusterFlagSet = "cli.only_aws_cluster_flag_set" - ErrMissingAWSCredentials = "cli.missing_aws_credentials" - ErrCredentialsInClusterConfig = "cli.credentials_in_cluster_config" - ErrClusterUp = "cli.cluster_up" - ErrClusterConfigure = "cli.cluster_configure" - ErrClusterInfo = "cli.cluster_info" - ErrClusterDebug = "cli.cluster_debug" - ErrClusterRefresh = "cli.cluster_refresh" - ErrClusterDown = "cli.cluster_down" - ErrDuplicateCLIEnvNames = "cli.duplicate_cli_env_names" - ErrClusterAccessConfigRequired = "cli.cluster_access_config_or_prompts_required" - ErrShellCompletionNotSupported = "cli.shell_completion_not_supported" - ErrNoTerminalWidth = "cli.no_terminal_width" - ErrDeployFromTopLevelDir = "cli.deploy_from_top_level_dir" + ErrInvalidProvider = "cli.invalid_provider" + ErrInvalidLegacyProvider = "cli.invalid_legacy_provider" + ErrNoAvailableEnvironment = "cli.no_available_environment" + ErrEnvironmentNotSet = "cli.environment_not_set" + ErrEnvironmentNotFound = "cli.environment_not_found" + ErrFieldNotFoundInEnvironment = "cli.field_not_found_in_environment" + ErrInvalidOperatorEndpoint = "cli.invalid_operator_endpoint" + ErrNoOperatorLoadBalancer = "cli.no_operator_load_balancer" + ErrCortexYAMLNotFound = "cli.cortex_yaml_not_found" + ErrDockerCtrlC = "cli.docker_ctrl_c" + ErrResponseUnknown = "cli.response_unknown" + ErrOnlyAWSClusterFlagSet = "cli.only_aws_cluster_flag_set" + ErrMissingAWSCredentials = "cli.missing_aws_credentials" + ErrCredentialsInClusterConfig = "cli.credentials_in_cluster_config" + ErrClusterUp = "cli.cluster_up" + ErrClusterScale = "cli.cluster_scale" + ErrClusterInfo = "cli.cluster_info" + ErrClusterDebug = "cli.cluster_debug" + ErrClusterRefresh = "cli.cluster_refresh" + ErrClusterDown = "cli.cluster_down" + ErrMinInstancesLowerThan = "cli.min_instances_lower_than" + ErrMaxInstancesLowerThan = "cli.max_instances_lower_than" + ErrMinInstancesGreaterThanMaxInstances = "cli.min_instances_greater_than_max_instances" + ErrNodeGroupNotFound = "cli.nodegroup_not_found" + ErrDuplicateCLIEnvNames = "cli.duplicate_cli_env_names" + ErrClusterAccessConfigRequired = "cli.cluster_access_config_or_prompts_required" + ErrShellCompletionNotSupported = "cli.shell_completion_not_supported" + ErrNoTerminalWidth = "cli.no_terminal_width" + ErrDeployFromTopLevelDir = "cli.deploy_from_top_level_dir" ) func ErrorInvalidProvider(providerStr, cliConfigPath string) error { @@ -87,13 +83,6 @@ func ErrorInvalidLegacyProvider(providerStr, cliConfigPath string) error { }) } -func ErrorCommandNotSupportedForKind(kind userconfig.Kind, command string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrCommandNotSupportedForKind, - 
Message: fmt.Sprintf("the `%s` command is not supported for %s kind", command, kind), - }) -} - func ErrorNoAvailableEnvironment() error { return errors.WithStack(&errors.Error{ Kind: ErrNoAvailableEnvironment, @@ -143,32 +132,6 @@ func ErrorCortexYAMLNotFound() error { }) } -func ErrorConnectToDockerDaemon() error { - installMsg := "install it by following the instructions for your operating system: https://docs.docker.com/install" - if strings.HasPrefix(runtime.GOOS, "darwin") { - installMsg = "install it here: https://docs.docker.com/docker-for-mac/install" - } - - return errors.WithStack(&errors.Error{ - Kind: ErrConnectToDockerDaemon, - Message: fmt.Sprintf("unable to connect to the Docker daemon\n\nplease confirm Docker is running, or if Docker is not installed, %s", installMsg), - }) -} - -func ErrorDockerPermissions(err error) error { - errStr := errors.Message(err) - - var groupAddStr string - if strings.HasPrefix(runtime.GOOS, "linux") { - groupAddStr = " (e.g. by running `sudo groupadd docker && sudo gpasswd -a $USER docker`)" - } - - return errors.WithStack(&errors.Error{ - Kind: ErrDockerPermissions, - Message: errStr + "\n\nyou can re-run this command with `sudo`, or grant your current user access to docker" + groupAddStr, - }) -} - func ErrorDockerCtrlC() error { return errors.WithStack(&errors.Error{ Kind: ErrDockerCtrlC, @@ -189,34 +152,6 @@ func ErrorResponseUnknown(body string, statusCode int) error { }) } -func ErrorAPINotReady(apiName string, status string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrAPINotReady, - Message: fmt.Sprintf("%s is %s", apiName, status), - }) -} - -func ErrorOneAWSEnvVarSet(setEnvVar string, missingEnvVar string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrOneAWSEnvVarSet, - Message: fmt.Sprintf("only $%s is set; please run `export %s=***`", setEnvVar, missingEnvVar), - }) -} - -func ErrorOneAWSFlagSet(setFlag string, missingFlag string) error { - return errors.WithStack(&errors.Error{ - Kind: ErrOneAWSFlagSet, - Message: fmt.Sprintf("only flag %s was provided; please provide %s as well", setFlag, missingFlag), - }) -} - -func ErrorOnlyAWSClusterEnvVarSet() error { - return errors.WithStack(&errors.Error{ - Kind: ErrOnlyAWSClusterEnvVarSet, - Message: "when specifying $CLUSTER_AWS_ACCESS_KEY_ID and $CLUSTER_AWS_SECRET_ACCESS_KEY, please also specify $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY", - }) -} - func ErrorClusterUp(out string) error { return errors.WithStack(&errors.Error{ Kind: ErrClusterUp, @@ -225,9 +160,9 @@ func ErrorClusterUp(out string) error { }) } -func ErrorClusterConfigure(out string) error { +func ErrorClusterScale(out string) error { return errors.WithStack(&errors.Error{ - Kind: ErrClusterConfigure, + Kind: ErrClusterScale, Message: out, NoPrint: true, }) @@ -265,6 +200,34 @@ func ErrorClusterDown(out string) error { }) } +func ErrorMinInstancesLowerThan(minValue int64) error { + return errors.WithStack(&errors.Error{ + Kind: ErrMinInstancesLowerThan, + Message: fmt.Sprintf("flag --min-instances cannot be set to a value lower than %d", minValue), + }) +} + +func ErrorMaxInstancesLowerThan(minValue int64) error { + return errors.WithStack(&errors.Error{ + Kind: ErrMaxInstancesLowerThan, + Message: fmt.Sprintf("flag --max-instances cannot be set to a value lower than %d", minValue), + }) +} + +func ErrorMinInstancesGreaterThanMaxInstances() error { + return errors.WithStack(&errors.Error{ + Kind: ErrMinInstancesGreaterThanMaxInstances, + Message: "flag --min-instances cannot be set to a 
value higher than that of --max-instances flag", + }) +} + +func ErrorNodeGroupNotFound(scalingNodeGroupName, clusterName, clusterRegion string, availableNodeGroups []string) error { + return errors.WithStack(&errors.Error{ + Kind: ErrNodeGroupNotFound, + Message: fmt.Sprintf("nodegroup %s couldn't be found in the cluster named %s from region %s; the available nodegroups for this cluster are %s", scalingNodeGroupName, clusterName, clusterRegion, s.StrsAnd(availableNodeGroups)), + }) +} + func ErrorClusterAccessConfigRequired() error { return errors.WithStack(&errors.Error{ Kind: ErrClusterAccessConfigRequired, diff --git a/cli/cmd/lib_cluster_config.go b/cli/cmd/lib_cluster_config.go index 225487e9da..40ca71001c 100644 --- a/cli/cmd/lib_cluster_config.go +++ b/cli/cmd/lib_cluster_config.go @@ -25,7 +25,6 @@ import ( "github.com/cortexlabs/cortex/pkg/consts" "github.com/cortexlabs/cortex/pkg/lib/aws" cr "github.com/cortexlabs/cortex/pkg/lib/configreader" - "github.com/cortexlabs/cortex/pkg/lib/console" "github.com/cortexlabs/cortex/pkg/lib/errors" "github.com/cortexlabs/cortex/pkg/lib/files" "github.com/cortexlabs/cortex/pkg/lib/maps" @@ -34,7 +33,6 @@ import ( s "github.com/cortexlabs/cortex/pkg/lib/strings" "github.com/cortexlabs/cortex/pkg/lib/table" "github.com/cortexlabs/cortex/pkg/types/clusterconfig" - "github.com/cortexlabs/yaml" ) var _cachedClusterConfigRegex = regexp.MustCompile(`^cluster_\S+\.yaml$`) @@ -147,82 +145,6 @@ func getInstallClusterConfig(awsClient *aws.Client, clusterConfigFile string, di return clusterConfig, nil } -func getConfigureClusterConfig(cachedClusterConfig clusterconfig.Config, clusterConfigFile string, disallowPrompt bool) (*clusterconfig.Config, error) { - userClusterConfig := &clusterconfig.Config{} - var awsClient *aws.Client - - err := readUserClusterConfigFile(userClusterConfig, clusterConfigFile) - if err != nil { - return nil, err - } - - userClusterConfig.ClusterName = cachedClusterConfig.ClusterName - userClusterConfig.Region = cachedClusterConfig.Region - awsClient, err = newAWSClient(userClusterConfig.Region) - if err != nil { - return nil, err - } - promptIfNotAdmin(awsClient, disallowPrompt) - - userClusterConfig.Telemetry, err = readTelemetryConfig() - if err != nil { - return nil, err - } - - err = userClusterConfig.Validate(awsClient, true) - if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) - return nil, errors.Wrap(err, clusterConfigFile) - } - - clusterConfigCopy, err := userClusterConfig.DeepCopy() - if err != nil { - return nil, err - } - - cachedConfigCopy, err := cachedClusterConfig.DeepCopy() - if err != nil { - return nil, err - } - - for idx := range clusterConfigCopy.NodeGroups { - clusterConfigCopy.NodeGroups[idx].MinInstances = 0 - clusterConfigCopy.NodeGroups[idx].MaxInstances = 0 - } - for idx := range cachedConfigCopy.NodeGroups { - cachedConfigCopy.NodeGroups[idx].MinInstances = 0 - cachedConfigCopy.NodeGroups[idx].MaxInstances = 0 - } - - h1, err := clusterConfigCopy.Hash() - if err != nil { - return nil, err - } - h2, err := cachedConfigCopy.Hash() - if err != nil { - return nil, err - } - if h1 != h2 { - return nil, clusterconfig.ErrorConfigCannotBeChangedOnUpdate() - } - - yamlBytes, err := yaml.Marshal(userClusterConfig) - if err != nil { - return nil, err - } - yamlString := string(yamlBytes) - - fmt.Println(console.Bold("cluster config:")) - fmt.Println(yamlString) - - if !disallowPrompt { - exitMessage 
:= fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/ for more information", consts.CortexVersionMinor) - prompt.YesOrExit(fmt.Sprintf("your cluster named \"%s\" in %s will be updated according to the configuration above, are you sure you want to continue?", userClusterConfig.ClusterName, userClusterConfig.Region), "", exitMessage) - } - - return userClusterConfig, nil -} - func confirmInstallClusterConfig(clusterConfig *clusterconfig.Config, awsClient *aws.Client, disallowPrompt bool) { eksPrice := aws.EKSPrices[clusterConfig.Region] operatorInstancePrice := aws.InstanceMetadatas[clusterConfig.Region]["t3.medium"].Price diff --git a/cli/cmd/lib_manager.go b/cli/cmd/lib_manager.go index 6e7297abec..ec3de0ee14 100644 --- a/cli/cmd/lib_manager.go +++ b/cli/cmd/lib_manager.go @@ -160,7 +160,7 @@ func runManager(containerConfig *container.Config, addNewLineAfterPull bool, cop return output, &info.State.ExitCode, nil } -func runManagerWithClusterConfig(entrypoint string, clusterConfig *clusterconfig.Config, awsClient *aws.Client, copyToPaths []dockerCopyToPath, copyFromPaths []dockerCopyFromPath) (string, *int, error) { +func runManagerWithClusterConfig(entrypoint string, clusterConfig *clusterconfig.Config, awsClient *aws.Client, copyToPaths []dockerCopyToPath, copyFromPaths []dockerCopyFromPath, extraEnvs []string) (string, *int, error) { clusterConfigBytes, err := yaml.Marshal(clusterConfig) if err != nil { return "", nil, errors.WithStack(err) @@ -184,6 +184,16 @@ func runManagerWithClusterConfig(entrypoint string, clusterConfig *clusterconfig containerPath: "/", }) + envs := []string{ + "AWS_ACCESS_KEY_ID=" + *awsClient.AccessKeyID(), + "AWS_SECRET_ACCESS_KEY=" + *awsClient.SecretAccessKey(), + "CORTEX_TELEMETRY_DISABLE=" + os.Getenv("CORTEX_TELEMETRY_DISABLE"), + "CORTEX_TELEMETRY_SENTRY_DSN=" + os.Getenv("CORTEX_TELEMETRY_SENTRY_DSN"), + "CORTEX_TELEMETRY_SEGMENT_WRITE_KEY=" + os.Getenv("CORTEX_TELEMETRY_SEGMENT_WRITE_KEY"), + "CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY=" + os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"), + "CORTEX_CLUSTER_CONFIG_FILE=" + containerClusterConfigPath, + } + envs = append(envs, extraEnvs...) 
containerConfig := &container.Config{ Image: clusterConfig.ImageManager, Entrypoint: []string{"/bin/bash", "-c"}, @@ -191,15 +201,7 @@ func runManagerWithClusterConfig(entrypoint string, clusterConfig *clusterconfig Tty: true, AttachStdout: true, AttachStderr: true, - Env: []string{ - "AWS_ACCESS_KEY_ID=" + *awsClient.AccessKeyID(), - "AWS_SECRET_ACCESS_KEY=" + *awsClient.SecretAccessKey(), - "CORTEX_TELEMETRY_DISABLE=" + os.Getenv("CORTEX_TELEMETRY_DISABLE"), - "CORTEX_TELEMETRY_SENTRY_DSN=" + os.Getenv("CORTEX_TELEMETRY_SENTRY_DSN"), - "CORTEX_TELEMETRY_SEGMENT_WRITE_KEY=" + os.Getenv("CORTEX_TELEMETRY_SEGMENT_WRITE_KEY"), - "CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY=" + os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"), - "CORTEX_CLUSTER_CONFIG_FILE=" + containerClusterConfigPath, - }, + Env: envs, } if sessionToken := awsClient.SessionToken(); sessionToken != nil { diff --git a/dev/generate_cli_md.sh b/dev/generate_cli_md.sh index da462d92f7..297c7f1d97 100755 --- a/dev/generate_cli_md.sh +++ b/dev/generate_cli_md.sh @@ -39,7 +39,7 @@ commands=( "delete" "cluster up" "cluster info" - "cluster configure" + "cluster scale" "cluster down" "cluster export" "env configure" diff --git a/docs/clients/cli.md b/docs/clients/cli.md index 92a4ab4038..0de5272b40 100644 --- a/docs/clients/cli.md +++ b/docs/clients/cli.md @@ -124,18 +124,22 @@ Flags: -h, --help help for info ``` -## cluster configure +## cluster scale ```text -update a cluster's configuration +update a node group's min/max instances Usage: - cortex cluster configure [CLUSTER_CONFIG_FILE] [flags] + cortex cluster scale [flags] Flags: - -e, --configure-env string name of environment to configure - -y, --yes skip prompts - -h, --help help for configure + -n, --name string name of the cluster + -r, --region string aws region of the cluster + --node-group string name of the node group to scale + --min-instances int minimum number of instances for the given node group + --max-instances int maximum number of instances for the given node group + -y, --yes skip prompts + -h, --help help for scale ``` ## cluster down diff --git a/manager/install.sh b/manager/install.sh index 5dc13eec8e..08cee095db 100755 --- a/manager/install.sh +++ b/manager/install.sh @@ -257,57 +257,39 @@ function restart_operator() { function resize_nodegroup() { eksctl get nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION -o json > nodegroups.json ng_len=$(cat nodegroups.json | jq -r length) - cluster_config_ng_len=$(cat /in/cluster_${CORTEX_CLUSTER_NAME}_${CORTEX_REGION}.yaml | yq -r .node_groups | yq -r length) - num_resizes=0 + config_ng="$CORTEX_SCALING_NODEGROUP" - for idx in $(seq 0 $(($cluster_config_ng_len-1))); do - config_ng=$(cat /in/cluster_${CORTEX_CLUSTER_NAME}_${CORTEX_REGION}.yaml | yq -r .node_groups[$idx].name) - - for eks_idx in $(seq 0 $(($ng_len-1))); do - stack_ng=$(cat nodegroups.json | jq -r .[$eks_idx].Name) - if [ "$stack_ng" = "cx-operator" ]; then - continue - fi - if [[ "$stack_ng" == *"$config_ng" ]]; then - break - fi - done - - desired=$(cat nodegroups.json | jq -r .[$eks_idx].DesiredCapacity) - existing_min=$(cat nodegroups.json | jq -r .[$eks_idx].MinSize) - existing_max=$(cat nodegroups.json | jq -r .[$eks_idx].MaxSize) - updating_min=$(cat /in/cluster_${CORTEX_CLUSTER_NAME}_${CORTEX_REGION}.yaml | yq -r .node_groups[$idx].min_instances) - updating_max=$(cat /in/cluster_${CORTEX_CLUSTER_NAME}_${CORTEX_REGION}.yaml | yq -r .node_groups[$idx].max_instances) - if [ $updating_min = "null" ]; then - updating_min=1 + 
for eks_idx in $(seq 0 $(($ng_len-1))); do + stack_ng=$(cat nodegroups.json | jq -r .[$eks_idx].Name) + if [ "$stack_ng" = "cx-operator" ]; then + continue fi - if [ $updating_max = "null" ]; then - updating_max=5 - fi - - if [ "$existing_min" != "$updating_min" ] && [ "$existing_max" != "$updating_max" ]; then - echo "○ nodegroup $idx ($config_ng): updating min instances to $updating_min and max instances to $updating_max " - eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min --nodes-max $updating_max - num_resizes=$(($num_resizes+1)) - echo - elif [ "$existing_min" != "$updating_min" ]; then - echo "○ nodegroup $idx ($config_ng): updating min instances to $updating_min " - eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min - num_resizes=$(($num_resizes+1)) - echo - elif [ "$existing_max" != "$updating_max" ]; then - echo "○ nodegroup $idx ($config_ng): updating max instances to $updating_max " - eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-max $updating_max - num_resizes=$(($num_resizes+1)) - echo + if [[ "$stack_ng" == *"$config_ng" ]]; then + break fi done - rm nodegroups.json - if [ "$num_resizes" -eq "0" ]; then - echo "no changes to node group sizes detected in the cluster config" - exit 0 + desired=$(cat nodegroups.json | jq -r .[$eks_idx].DesiredCapacity) + existing_min=$(cat nodegroups.json | jq -r .[$eks_idx].MinSize) + existing_max=$(cat nodegroups.json | jq -r .[$eks_idx].MaxSize) + updating_min="$CORTEX_SCALING_MIN_INSTANCES" + updating_max="$CORTEX_SCALING_MAX_INSTANCES" + + if [ "$existing_min" != "$updating_min" ] && [ "$existing_max" != "$updating_max" ]; then + echo "○ nodegroup $config_ng: updating min instances to $updating_min and max instances to $updating_max " + eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min --nodes-max $updating_max + echo + elif [ "$existing_min" != "$updating_min" ]; then + echo "○ nodegroup $config_ng: updating min instances to $updating_min " + eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min + echo + elif [ "$existing_max" != "$updating_max" ]; then + echo "○ nodegroup $config_ng: updating max instances to $updating_max " + eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-max $updating_max + echo fi + + rm nodegroups.json } function suspend_az_rebalance() { diff --git a/pkg/types/clusterconfig/errors.go b/pkg/types/clusterconfig/errors.go index 0ed6bd4612..5ac429f01a 100644 --- a/pkg/types/clusterconfig/errors.go +++ b/pkg/types/clusterconfig/errors.go @@ -49,7 +49,6 @@ const ( ErrNoCompatibleSpotInstanceFound = "clusterconfig.no_compatible_spot_instance_found" ErrConfiguredWhenSpotIsNotEnabled = "clusterconfig.configured_when_spot_is_not_enabled" ErrOnDemandBaseCapacityGreaterThanMax = "clusterconfig.on_demand_base_capacity_greater_than_max" - ErrConfigCannotBeChangedOnUpdate = "clusterconfig.config_cannot_be_changed_on_update" ErrInvalidAvailabilityZone = "clusterconfig.invalid_availability_zone" ErrAvailabilityZoneSpecifiedTwice = "clusterconfig.availability_zone_specified_twice" ErrUnsupportedAvailabilityZone = "clusterconfig.unsupported_availability_zone" @@ -202,13 +201,6 @@ func 
ErrorOnDemandBaseCapacityGreaterThanMax(onDemandBaseCapacity int64, max int }) } -func ErrorConfigCannotBeChangedOnUpdate() error { - return errors.WithStack(&errors.Error{ - Kind: ErrConfigCannotBeChangedOnUpdate, - Message: fmt.Sprintf("in a running cluster, only the %s and %s fields in the %s section can be modified", MinInstancesKey, MaxInstancesKey, NodeGroupsKey), - }) -} - func ErrorInvalidAvailabilityZone(userZone string, allZones strset.Set, region string) error { return errors.WithStack(&errors.Error{ Kind: ErrInvalidAvailabilityZone, From 253e1880a8285b3f8fd8617be636ca597083a68f Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Wed, 31 Mar 2021 23:54:05 +0300 Subject: [PATCH 02/12] Docs/misc changes --- Makefile | 12 ------------ docs/clusters/management/environments.md | 2 +- docs/clusters/management/update.md | 4 ++-- docs/workloads/async/autoscaling.md | 4 ++-- docs/workloads/realtime/autoscaling.md | 2 +- docs/workloads/realtime/troubleshooting.md | 6 +++--- manager/check_cortex_version.sh | 2 +- pkg/operator/operator/errors.go | 2 +- 8 files changed, 11 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index bb19c74c85..8eb70fa8f4 100644 --- a/Makefile +++ b/Makefile @@ -84,18 +84,6 @@ cluster-info: @$(MAKE) cli @eval $$(python3 ./manager/cluster_config_env.py ./dev/config/cluster.yaml) && ./bin/cortex cluster info --config=./dev/config/cluster.yaml --configure-env="$$CORTEX_CLUSTER_NAME" --yes -cluster-configure: - @$(MAKE) images-all - @$(MAKE) cli - @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true - @eval $$(python3 ./manager/cluster_config_env.py ./dev/config/cluster.yaml) && ./bin/cortex cluster configure ./dev/config/cluster.yaml --configure-env="$$CORTEX_CLUSTER_NAME" - -cluster-configure-y: - @$(MAKE) images-all - @$(MAKE) cli - @kill $(shell pgrep -f rerun) >/dev/null 2>&1 || true - @eval $$(python3 ./manager/cluster_config_env.py ./dev/config/cluster.yaml) && ./bin/cortex cluster configure ./dev/config/cluster.yaml --configure-env="$$CORTEX_CLUSTER_NAME" --yes - # stop the in-cluster operator operator-stop: @$(MAKE) kubectl diff --git a/docs/clusters/management/environments.md b/docs/clusters/management/environments.md index 437b821037..73c779bdb7 100644 --- a/docs/clusters/management/environments.md +++ b/docs/clusters/management/environments.md @@ -1,6 +1,6 @@ # Environments -When you create a cluster with `cortex cluster up`, an environment named `aws` is automatically created to point to your cluster and is configured to be the default environment. You can name the environment something else via the `--configure-env` flag, e.g. `cortex cluster up --configure-env prod`. You can also use the `--configure-env` flag with `cortex cluster info` and `cortex cluster configure` to create / update the specified environment. +When you create a cluster with `cortex cluster up`, an environment named `aws` is automatically created to point to your cluster and is configured to be the default environment. You can name the environment something else via the `--configure-env` flag, e.g. `cortex cluster up --configure-env prod`. You can also use the `--configure-env` flag with `cortex cluster info` to create / update the specified environment. You can list your environments with `cortex env list`, change the default environment with `cortex env default`, delete an environment with `cortex env delete`, and create/update an environment with `cortex env configure`. 
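For reference, the `cortex cluster scale` command that the following docs hunks describe takes the cluster access flags plus the node group bounds. A hypothetical invocation (the cluster, region, and node group names here are examples, not values from this PR):

```bash
# hypothetical values, for illustration only
cortex cluster scale \
  --name my-cluster \
  --region us-east-1 \
  --node-group ng-cpu \
  --min-instances 1 \
  --max-instances 10
```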
diff --git a/docs/clusters/management/update.md b/docs/clusters/management/update.md
index 93566682e5..915dc20aa7 100644
--- a/docs/clusters/management/update.md
+++ b/docs/clusters/management/update.md
@@ -1,9 +1,9 @@
 # Update
 
-## Update configuration
+## Update node group size
 
 ```bash
-cortex cluster configure cluster.yaml
+cortex cluster scale --node-group <node-group-name> --min-instances <min-instances> --max-instances <max-instances>
 ```
 
 ## Upgrade to a newer version
diff --git a/docs/workloads/async/autoscaling.md b/docs/workloads/async/autoscaling.md
index c03ac058cb..5256ab56f3 100644
--- a/docs/workloads/async/autoscaling.md
+++ b/docs/workloads/async/autoscaling.md
@@ -88,8 +88,8 @@ this value will prevent thrashing, but setting it too high will prevent the clus
 ## Autoscaling instances
 
 Cortex spins up and down instances based on the aggregate resource requests of all APIs. The number of instances will be
-at least `min_instances` and no more than `max_instances` (configured during installation and modifiable
-via `cortex cluster configure`).
+at least `min_instances` and no more than `max_instances` for each node group (configured during installation and modifiable
+via `cortex cluster scale`).
 
 ## Autoscaling responsiveness
 
diff --git a/docs/workloads/realtime/autoscaling.md b/docs/workloads/realtime/autoscaling.md
index 1d1338d488..a94ec6d3bd 100644
--- a/docs/workloads/realtime/autoscaling.md
+++ b/docs/workloads/realtime/autoscaling.md
@@ -58,7 +58,7 @@ For example, setting `target_replica_concurrency` to `processes_per_replica` * `
 
 ## Autoscaling instances
 
-Cortex spins up and down instances based on the aggregate resource requests of all APIs. The number of instances will be at least `min_instances` and no more than `max_instances` (configured during installation and modifiable via `cortex cluster configure`).
+Cortex spins up and down instances based on the aggregate resource requests of all APIs. The number of instances will be at least `min_instances` and no more than `max_instances` for each node group (configured during installation and modifiable via `cortex cluster scale`).
 
 ## Overprovisioning
 
diff --git a/docs/workloads/realtime/troubleshooting.md b/docs/workloads/realtime/troubleshooting.md
index 527122f433..223e139c0b 100644
--- a/docs/workloads/realtime/troubleshooting.md
+++ b/docs/workloads/realtime/troubleshooting.md
@@ -19,11 +19,11 @@ If no logs appear (e.g. it just says "fetching logs..."), continue down this lis
 
 ### Check `max_instances` for your cluster
 
-When you created your Cortex cluster, you configured `max_instances` (either from the command prompts or via a cluster configuration file, e.g. `cluster.yaml`). If your cluster already has `min_instances` running instances, additional instances cannot be created and APIs may not be able to deploy, scale, or update.
+When you created your Cortex cluster, you configured `max_instances` for each node group that you specified (via the cluster configuration file, e.g. `cluster.yaml`). If your cluster already has `min_instances` running instances for a given node group, additional instances cannot be created and APIs may not be able to deploy, scale, or update.
 
-You can check the current value of `max_instances` by running `cortex cluster info --config cluster.yaml` (or `cortex cluster info --name <cluster-name> --region <region>` if you have the name and region of the cluster).
+You can check the current value of `max_instances` for the selected node group by running `cortex cluster info --config cluster.yaml` (or `cortex cluster info --name <cluster-name> --region <region>` if you have the name and region of the cluster).
 
-You can update `max_instances` by modifying `max_instances` in your cluster configuration file and running `cortex cluster configure --config cluster.yaml` (or `cortex cluster configure --name <cluster-name> --region <region>` if you have the name and region of the cluster).
+Once you have the name and region of the cluster, you can update `max_instances` for your node group by running `cortex cluster scale --name <cluster-name> --region <region> --node-group <node-group-name> --min-instances <min-instances> --max-instances <max-instances>`.
 
 ## Check your AWS auto scaling group activity history
 
diff --git a/manager/check_cortex_version.sh b/manager/check_cortex_version.sh
index 41db9ca9ef..690491f16f 100755
--- a/manager/check_cortex_version.sh
+++ b/manager/check_cortex_version.sh
@@ -19,6 +19,6 @@ set -e
 CORTEX_VERSION=master
 
 if [ "$CORTEX_VERSION" != "$CORTEX_CLI_VERSION" ]; then
-  echo "error: your CLI version ($CORTEX_CLI_VERSION) doesn't match your Cortex manager image version ($CORTEX_VERSION); please update your CLI (pip install cortex==$CORTEX_VERSION), or update your Cortex manager image by modifying the value for \`image_manager\` in your cluster configuration file and running \`cortex cluster configure --config cluster.yaml\` (update other image paths in cluster.yaml as well if necessary)"
+  echo "error: your CLI version ($CORTEX_CLI_VERSION) doesn't match your Cortex manager image version ($CORTEX_VERSION); please update your CLI (pip install cortex==$CORTEX_VERSION) to match the version of your Cortex manager image"
   exit 1
 fi
diff --git a/pkg/operator/operator/errors.go b/pkg/operator/operator/errors.go
index 41b6a94f7d..3520f5e79b 100644
--- a/pkg/operator/operator/errors.go
+++ b/pkg/operator/operator/errors.go
@@ -32,7 +32,7 @@ const (
 func ErrorCortexInstallationBroken() error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrCortexInstallationBroken,
-		Message: "cortex is out of date or not installed properly; run `cortex cluster configure` to repair, or spin down your cluster with `cortex cluster down` and create a new one with `cortex cluster up`",
+		Message: "cortex is out of date or not installed properly; spin down your cluster with `cortex cluster down` and create a new one with `cortex cluster up`",
 	})
 }

From b5ff1e135078d2340fd27921795cb57c3a9a45c4 Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Thu, 1 Apr 2021 00:04:28 +0300
Subject: [PATCH 03/12] Change flags desc + fix error message

---
 cli/cmd/cluster.go            |  4 ++--
 cli/cmd/errors.go             | 10 ++++++++--
 cli/cmd/lib_cluster_config.go |  3 ++-
 docs/clients/cli.md           |  6 +++---
 4 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go
index 5c7d05a679..cebd166d4d 100644
--- a/cli/cmd/cluster.go
+++ b/cli/cmd/cluster.go
@@ -118,8 +118,8 @@ func addClusterRegionFlag(cmd *cobra.Command) {
 
 func addClusterScaleFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&_flagClusterScaleNodeGroup, "node-group", "", "name of the node group to scale")
-	cmd.Flags().Int64Var(&_flagClusterScaleMinInstances, "min-instances", 0, "minimum number of instances for the given node group")
-	cmd.Flags().Int64Var(&_flagClusterScaleMaxInstances, "max-instances", 0, "maximum number of instances for the given node group")
+	cmd.Flags().Int64Var(&_flagClusterScaleMinInstances, "min-instances", 0, "minimum number of 
instances") + cmd.Flags().Int64Var(&_flagClusterScaleMaxInstances, "max-instances", 0, "maximum number of instances") cmd.MarkFlagRequired("node-group") cmd.MarkFlagRequired("min-instances") diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go index 507e1e3f41..9c821d5f95 100644 --- a/cli/cmd/errors.go +++ b/cli/cmd/errors.go @@ -228,10 +228,16 @@ func ErrorNodeGroupNotFound(scalingNodeGroupName, clusterName, clusterRegion str }) } -func ErrorClusterAccessConfigRequired() error { +func ErrorClusterAccessConfigRequired(cliFlagsOnly bool) error { + message := "" + if cliFlagsOnly { + message = "please provide the name and region of the cluster using the CLI flags (e.g. via `--name` and `--region`)" + } else { + message = fmt.Sprintf("please provide a cluster configuration file which specifies `%s` and `%s` (e.g. via `--config cluster.yaml`) or use the CLI flags to specify the cluster (e.g. via `--name` and `--region`)", clusterconfig.ClusterNameKey, clusterconfig.RegionKey) + } return errors.WithStack(&errors.Error{ Kind: ErrClusterAccessConfigRequired, - Message: fmt.Sprintf("please provide a cluster configuration file which specifies `%s` and `%s` (e.g. via `--config cluster.yaml`) or use the CLI flags to specify the cluster (e.g. via `--name` and `--region`)", clusterconfig.ClusterNameKey, clusterconfig.RegionKey), + Message: message, }) } diff --git a/cli/cmd/lib_cluster_config.go b/cli/cmd/lib_cluster_config.go index 40ca71001c..4b182066d3 100644 --- a/cli/cmd/lib_cluster_config.go +++ b/cli/cmd/lib_cluster_config.go @@ -114,7 +114,8 @@ func getClusterAccessConfigWithCache() (*clusterconfig.AccessConfig, error) { } if accessConfig.ClusterName == "" || accessConfig.Region == "" { - return nil, ErrorClusterAccessConfigRequired() + cliFlagsOnly := _flagClusterScaleNodeGroup != "" + return nil, ErrorClusterAccessConfigRequired(cliFlagsOnly) } return accessConfig, nil } diff --git a/docs/clients/cli.md b/docs/clients/cli.md index 0de5272b40..ddf4e043c5 100644 --- a/docs/clients/cli.md +++ b/docs/clients/cli.md @@ -127,7 +127,7 @@ Flags: ## cluster scale ```text -update a node group's min/max instances +update the min/max instances for a nodegroup Usage: cortex cluster scale [flags] @@ -136,8 +136,8 @@ Flags: -n, --name string name of the cluster -r, --region string aws region of the cluster --node-group string name of the node group to scale - --min-instances int minimum number of instances for the given node group - --max-instances int maximum number of instances for the given node group + --min-instances int minimum number of instances + --max-instances int maximum number of instances -y, --yes skip prompts -h, --help help for scale ``` From 5a89644ec34fb11b133d0cb97f7c3745e0eb8a65 Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Thu, 1 Apr 2021 01:51:32 +0300 Subject: [PATCH 04/12] Fix incorrect error message --- cli/cmd/cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index cebd166d4d..1c9429e459 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -975,7 +975,7 @@ func refreshCachedClusterConfig(awsClient aws.Client, accessConfig *clusterconfi func scaleNodeGroup(clusterConfig clusterconfig.Config) (clusterconfig.Config, error) { if _flagClusterScaleMinInstances < 0 { - return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) + return clusterconfig.Config{}, ErrorMinInstancesLowerThan(0) } if _flagClusterScaleMaxInstances < 0 { return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) From 
d9e8a9c3ab4432e53c579c50e4ad998c676559b0 Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Thu, 1 Apr 2021 18:00:26 +0300 Subject: [PATCH 05/12] Address some PR comments --- cli/cmd/cluster.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index 1c9429e459..dcc4d34222 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -334,7 +334,7 @@ var _clusterScaleCmd = &cobra.Command{ } clusterConfig := refreshCachedClusterConfig(*awsClient, accessConfig) - clusterConfig, err = scaleNodeGroup(clusterConfig) + clusterConfig, err = updateNodeGroupScale(clusterConfig, _flagClusterScaleNodeGroup, _flagClusterScaleMinInstances, _flagClusterScaleMaxInstances, _flagClusterDisallowPrompt) if err != nil { exit.Error(err) } @@ -973,14 +973,14 @@ func refreshCachedClusterConfig(awsClient aws.Client, accessConfig *clusterconfi return *refreshedClusterConfig } -func scaleNodeGroup(clusterConfig clusterconfig.Config) (clusterconfig.Config, error) { - if _flagClusterScaleMinInstances < 0 { +func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, desiredMinReplicas, desiredMaxReplicas int64, disallowPrompt bool) (clusterconfig.Config, error) { + if desiredMinReplicas < 0 { return clusterconfig.Config{}, ErrorMinInstancesLowerThan(0) } - if _flagClusterScaleMaxInstances < 0 { + if desiredMaxReplicas < 0 { return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) } - if _flagClusterScaleMinInstances > _flagClusterScaleMaxInstances { + if desiredMinReplicas > desiredMaxReplicas { return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances() } @@ -994,37 +994,37 @@ func scaleNodeGroup(clusterConfig clusterconfig.Config) (clusterconfig.Config, e continue } availableNodeGroups = append(availableNodeGroups, ng.Name) - if ng.Name == _flagClusterScaleNodeGroup { - if ng.MinInstances == _flagClusterScaleMinInstances && ng.MaxInstances == _flagClusterScaleMaxInstances { + if ng.Name == targetNg { + if ng.MinInstances == desiredMinReplicas && ng.MaxInstances == desiredMaxReplicas { fmt.Printf("no changes to the %s nodegroup required in cluster %s from region %s\n", ng.Name, clusterName, region) exit.Ok() } - if !_flagClusterDisallowPrompt { + if !disallowPrompt { promptMessage := "" - if ng.MinInstances != _flagClusterScaleMinInstances && ng.MaxInstances != _flagClusterScaleMaxInstances { - promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d and field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, _flagClusterScaleMinInstances, clusterconfig.MaxInstancesKey, ng.MaxInstances, _flagClusterScaleMaxInstances) + if ng.MinInstances != desiredMinReplicas && ng.MaxInstances != desiredMaxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d and %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) } - if ng.MinInstances == _flagClusterScaleMinInstances && ng.MaxInstances != _flagClusterScaleMaxInstances { - promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, ng.MaxInstances, _flagClusterScaleMaxInstances) + if ng.MinInstances == desiredMinReplicas && 
ng.MaxInstances != desiredMaxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) } - if ng.MinInstances != _flagClusterScaleMinInstances && ng.MaxInstances == _flagClusterScaleMaxInstances { - promptMessage = fmt.Sprintf("nodegroup %s of cluster %s from region %s will have field %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, _flagClusterScaleMinInstances) + if ng.MinInstances != desiredMinReplicas && ng.MaxInstances == desiredMaxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas) } if !prompt.YesOrNo(promptMessage, "", "") { exit.Ok() } } - clusterConfig.NodeGroups[idx].MinInstances = _flagClusterScaleMinInstances - clusterConfig.NodeGroups[idx].MaxInstances = _flagClusterScaleMaxInstances + clusterConfig.NodeGroups[idx].MinInstances = desiredMinReplicas + clusterConfig.NodeGroups[idx].MaxInstances = desiredMaxReplicas ngFound = true break } } if !ngFound { - return clusterconfig.Config{}, ErrorNodeGroupNotFound(_flagClusterScaleNodeGroup, clusterName, region, availableNodeGroups) + return clusterconfig.Config{}, ErrorNodeGroupNotFound(targetNg, clusterName, region, availableNodeGroups) } return clusterConfig, nil From ae7bf5d67914c1c6031325a1d7a6e20a982ba0bf Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Fri, 2 Apr 2021 18:03:20 +0300 Subject: [PATCH 06/12] Address some PR comments --- cli/cmd/cluster.go | 4 ++-- cli/cmd/errors.go | 10 +++++----- manager/install.sh | 7 +++++++ 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index dcc4d34222..0b841899d0 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -981,7 +981,7 @@ func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, d return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) } if desiredMinReplicas > desiredMaxReplicas { - return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances() + return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances(desiredMinReplicas, desiredMaxReplicas) } clusterName := clusterConfig.ClusterName @@ -1003,7 +1003,7 @@ func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, d if !disallowPrompt { promptMessage := "" if ng.MinInstances != desiredMinReplicas && ng.MaxInstances != desiredMaxReplicas { - promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d and %s set from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d and update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) } if ng.MinInstances == desiredMinReplicas && ng.MaxInstances != desiredMaxReplicas { promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, 
ng.MaxInstances, desiredMaxReplicas)
diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go
index 9c821d5f95..c4f4f61a98 100644
--- a/cli/cmd/errors.go
+++ b/cli/cmd/errors.go
@@ -203,28 +203,28 @@ func ErrorClusterDown(out string) error {
 func ErrorMinInstancesLowerThan(minValue int64) error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrMinInstancesLowerThan,
-		Message: fmt.Sprintf("flag --min-instances cannot be set to a value lower than %d", minValue),
+		Message: fmt.Sprintf("min instances cannot be set to a value lower than %d", minValue),
 	})
 }
 
 func ErrorMaxInstancesLowerThan(minValue int64) error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrMaxInstancesLowerThan,
-		Message: fmt.Sprintf("flag --max-instances cannot be set to a value lower than %d", minValue),
+		Message: fmt.Sprintf("max instances cannot be set to a value lower than %d", minValue),
 	})
 }
 
-func ErrorMinInstancesGreaterThanMaxInstances() error {
+func ErrorMinInstancesGreaterThanMaxInstances(minInstances, maxInstances int64) error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrMinInstancesGreaterThanMaxInstances,
-		Message: "flag --min-instances cannot be set to a value higher than that of --max-instances flag",
+		Message: fmt.Sprintf("min instances (%d) cannot be set to a value higher than max instances (%d)", minInstances, maxInstances),
 	})
 }
 
 func ErrorNodeGroupNotFound(scalingNodeGroupName, clusterName, clusterRegion string, availableNodeGroups []string) error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrNodeGroupNotFound,
-		Message: fmt.Sprintf("nodegroup %s couldn't be found in the cluster named %s from region %s; the available nodegroups for this cluster are %s", scalingNodeGroupName, clusterName, clusterRegion, s.StrsAnd(availableNodeGroups)),
+		Message: fmt.Sprintf("nodegroup %s couldn't be found in the cluster named %s in region %s; the available nodegroups for this cluster are %s", scalingNodeGroupName, clusterName, clusterRegion, s.StrsAnd(availableNodeGroups)),
 	})
 }
diff --git a/manager/install.sh b/manager/install.sh
index 671f22e511..eb722f0bb5 100755
--- a/manager/install.sh
+++ b/manager/install.sh
@@ -257,16 +257,23 @@ function resize_nodegroup() {
   ng_len=$(cat nodegroups.json | jq -r length)
   config_ng="$CORTEX_SCALING_NODEGROUP"
 
+  has_ng="false"
   for eks_idx in $(seq 0 $(($ng_len-1))); do
     stack_ng=$(cat nodegroups.json | jq -r .[$eks_idx].Name)
     if [ "$stack_ng" = "cx-operator" ]; then
       continue
     fi
     if [[ "$stack_ng" == *"$config_ng" ]]; then
+      has_ng="true"
       break
     fi
   done
 
+  if [ "$has_ng" == "false" ]; then
+    echo "error: \"cx-*-$config_ng\" node group couldn't be found"
+    exit 1
+  fi
+
   desired=$(cat nodegroups.json | jq -r .[$eks_idx].DesiredCapacity)
   existing_min=$(cat nodegroups.json | jq -r .[$eks_idx].MinSize)
   existing_max=$(cat nodegroups.json | jq -r .[$eks_idx].MaxSize)

From 43a0606c7bde53dbb5a5a7de9f62315ad684a24a Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Fri, 2 Apr 2021 19:25:27 +0300
Subject: [PATCH 07/12] Check if desired < min & desired > max

---
 manager/install.sh | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/manager/install.sh b/manager/install.sh
index eb722f0bb5..bb73e212d7 100755
--- a/manager/install.sh
+++ b/manager/install.sh
@@ -280,6 +280,13 @@ function resize_nodegroup() {
   updating_min="$CORTEX_SCALING_MIN_INSTANCES"
   updating_max="$CORTEX_SCALING_MAX_INSTANCES"
 
+  if [ "$desired" -lt $updating_min ]; then
+    desired=$updating_min
+  fi
+  if [ "$desired" -gt $updating_max ]; then
+    desired=$updating_max
+  fi
+
   if [ "$existing_min" != "$updating_min" ] && [ "$existing_max" != "$updating_max" ]; then
     echo "○ nodegroup $config_ng: updating min instances to $updating_min and max instances to $updating_max "
     eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min --nodes-max $updating_max
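PATCH 08 below drops the `MarkFlagRequired` calls for `--min-instances` and `--max-instances`, so either bound can be updated on its own (at least one must be given). As a usage sketch (the cluster, region, and node group names are hypothetical), raising only a node group's maximum would look like:

```bash
# hypothetical values; --min-instances is omitted, so the current minimum is kept
cortex cluster scale --name my-cluster --region us-east-1 \
  --node-group ng-cpu --max-instances 15
```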
"$updating_max" ]; then echo "○ nodegroup $config_ng: updating min instances to $updating_min and max instances to $updating_max " eksctl scale nodegroup --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION $stack_ng --nodes $desired --nodes-min $updating_min --nodes-max $updating_max From 2162ff594bda728eb94f1a869fc94befa0104d35 Mon Sep 17 00:00:00 2001 From: Robert Lucian Chiriac Date: Fri, 2 Apr 2021 22:30:14 +0300 Subject: [PATCH 08/12] Address PR comments --- cli/cmd/cluster.go | 70 ++++++++++++++++++++++------------- cli/cmd/errors.go | 8 ++++ cli/cmd/get.go | 6 +-- cli/cmd/lib_cluster_config.go | 10 ++++- cli/cmd/root.go | 12 +++--- 5 files changed, 71 insertions(+), 35 deletions(-) diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index 0b841899d0..185abfb1d7 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -118,12 +118,9 @@ func addClusterRegionFlag(cmd *cobra.Command) { func addClusterScaleFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&_flagClusterScaleNodeGroup, "node-group", "", "name of the node group to scale") + cmd.MarkFlagRequired("node-group") cmd.Flags().Int64Var(&_flagClusterScaleMinInstances, "min-instances", 0, "minimum number of instances") cmd.Flags().Int64Var(&_flagClusterScaleMaxInstances, "max-instances", 0, "maximum number of instances") - - cmd.MarkFlagRequired("node-group") - cmd.MarkFlagRequired("min-instances") - cmd.MarkFlagRequired("max-instances") } var _clusterCmd = &cobra.Command{ @@ -309,6 +306,17 @@ var _clusterScaleCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { telemetry.Event("cli.cluster.configure") + var scaleMinIntances, scaleMaxInstances *int64 + if wasFlagProvided(cmd, "min-instances") { + scaleMinIntances = pointer.Int64(_flagClusterScaleMinInstances) + } + if wasFlagProvided(cmd, "max-instances") { + scaleMaxInstances = pointer.Int64(_flagClusterScaleMaxInstances) + } + if scaleMinIntances == nil && scaleMaxInstances == nil { + exit.Error(ErrorSpecifyAtLeastOneFlag("min-instances", "max-instances")) + } + if _, err := docker.GetDockerClient(); err != nil { exit.Error(err) } @@ -334,7 +342,7 @@ var _clusterScaleCmd = &cobra.Command{ } clusterConfig := refreshCachedClusterConfig(*awsClient, accessConfig) - clusterConfig, err = updateNodeGroupScale(clusterConfig, _flagClusterScaleNodeGroup, _flagClusterScaleMinInstances, _flagClusterScaleMaxInstances, _flagClusterDisallowPrompt) + clusterConfig, err = updateNodeGroupScale(clusterConfig, _flagClusterScaleNodeGroup, scaleMinIntances, scaleMaxInstances, _flagClusterDisallowPrompt) if err != nil { exit.Error(err) } @@ -973,17 +981,7 @@ func refreshCachedClusterConfig(awsClient aws.Client, accessConfig *clusterconfi return *refreshedClusterConfig } -func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, desiredMinReplicas, desiredMaxReplicas int64, disallowPrompt bool) (clusterconfig.Config, error) { - if desiredMinReplicas < 0 { - return clusterconfig.Config{}, ErrorMinInstancesLowerThan(0) - } - if desiredMaxReplicas < 0 { - return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) - } - if desiredMinReplicas > desiredMaxReplicas { - return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances(desiredMinReplicas, desiredMaxReplicas) - } - +func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, desiredMinReplicas, desiredMaxReplicas *int64, disallowPrompt bool) (clusterconfig.Config, error) { clusterName := clusterConfig.ClusterName region := clusterConfig.Region @@ -995,29 +993,51 @@ func 
updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, d } availableNodeGroups = append(availableNodeGroups, ng.Name) if ng.Name == targetNg { - if ng.MinInstances == desiredMinReplicas && ng.MaxInstances == desiredMaxReplicas { + var minReplicas, maxReplicas int64 + if desiredMinReplicas == nil { + minReplicas = ng.MinInstances + } else { + minReplicas = *desiredMinReplicas + } + if desiredMaxReplicas == nil { + maxReplicas = ng.MaxInstances + } else { + maxReplicas = *desiredMaxReplicas + } + + if minReplicas < 0 { + return clusterconfig.Config{}, ErrorMinInstancesLowerThan(0) + } + if maxReplicas < 0 { + return clusterconfig.Config{}, ErrorMaxInstancesLowerThan(0) + } + if minReplicas > maxReplicas { + return clusterconfig.Config{}, ErrorMinInstancesGreaterThanMaxInstances(minReplicas, maxReplicas) + } + + if ng.MinInstances == minReplicas && ng.MaxInstances == maxReplicas { fmt.Printf("no changes to the %s nodegroup required in cluster %s from region %s\n", ng.Name, clusterName, region) exit.Ok() } if !disallowPrompt { promptMessage := "" - if ng.MinInstances != desiredMinReplicas && ng.MaxInstances != desiredMaxReplicas { - promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d and update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) + if ng.MinInstances != minReplicas && ng.MaxInstances != maxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d and update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, minReplicas, clusterconfig.MaxInstancesKey, ng.MaxInstances, maxReplicas) } - if ng.MinInstances == desiredMinReplicas && ng.MaxInstances != desiredMaxReplicas { - promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, ng.MaxInstances, desiredMaxReplicas) + if ng.MinInstances == minReplicas && ng.MaxInstances != maxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MaxInstancesKey, ng.MaxInstances, maxReplicas) } - if ng.MinInstances != desiredMinReplicas && ng.MaxInstances == desiredMaxReplicas { - promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, desiredMinReplicas) + if ng.MinInstances != minReplicas && ng.MaxInstances == maxReplicas { + promptMessage = fmt.Sprintf("your nodegroup named %s in your %s cluster in %s will update its %s from %d to %d", ng.Name, clusterName, region, clusterconfig.MinInstancesKey, ng.MinInstances, minReplicas) } if !prompt.YesOrNo(promptMessage, "", "") { exit.Ok() } } - clusterConfig.NodeGroups[idx].MinInstances = desiredMinReplicas - clusterConfig.NodeGroups[idx].MaxInstances = desiredMaxReplicas + clusterConfig.NodeGroups[idx].MinInstances = minReplicas + clusterConfig.NodeGroups[idx].MaxInstances = maxReplicas ngFound = true break } diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go index c4f4f61a98..fe38919556 100644 --- a/cli/cmd/errors.go +++ b/cli/cmd/errors.go @@ -58,6 +58,7 @@ const ( ErrClusterDebug = "cli.cluster_debug" 
 	ErrClusterRefresh                      = "cli.cluster_refresh"
 	ErrClusterDown                         = "cli.cluster_down"
+	ErrSpecifyAtLeastOneFlag               = "cli.specify_at_least_one_flag"
 	ErrMinInstancesLowerThan               = "cli.min_instances_lower_than"
 	ErrMaxInstancesLowerThan               = "cli.max_instances_lower_than"
 	ErrMinInstancesGreaterThanMaxInstances = "cli.min_instances_greater_than_max_instances"
@@ -200,6 +201,13 @@ func ErrorClusterDown(out string) error {
 	})
 }
 
+func ErrorSpecifyAtLeastOneFlag(flagsToSpecify ...string) error {
+	return errors.WithStack(&errors.Error{
+		Kind:    ErrSpecifyAtLeastOneFlag,
+		Message: fmt.Sprintf("must specify at least one of the following flags: %s", s.StrsOr(flagsToSpecify)),
+	})
+}
+
 func ErrorMinInstancesLowerThan(minValue int64) error {
 	return errors.WithStack(&errors.Error{
 		Kind:    ErrMinInstancesLowerThan,
diff --git a/cli/cmd/get.go b/cli/cmd/get.go
index 606dbd9f8b..99cfc0bb7e 100644
--- a/cli/cmd/get.go
+++ b/cli/cmd/get.go
@@ -72,7 +72,7 @@ var _getCmd = &cobra.Command{
 	Args: cobra.RangeArgs(0, 2),
 	Run: func(cmd *cobra.Command, args []string) {
 		var envName string
-		if wasEnvFlagProvided(cmd) {
+		if wasFlagProvided(cmd, "env") {
 			envName = _flagGetEnv
 		} else if len(args) > 0 {
 			var err error
@@ -83,7 +83,7 @@ var _getCmd = &cobra.Command{
 			}
 		}
 
-		if len(args) == 1 || wasEnvFlagProvided(cmd) {
+		if len(args) == 1 || wasFlagProvided(cmd, "env") {
 			env, err := ReadOrConfigureEnv(envName)
 			if err != nil {
 				telemetry.Event("cli.get")
@@ -154,7 +154,7 @@ var _getCmd = &cobra.Command{
 		return "", ErrorNoAvailableEnvironment()
 	}
 
-	if wasEnvFlagProvided(cmd) {
+	if wasFlagProvided(cmd, "env") {
 		env, err := ReadOrConfigureEnv(envName)
 		if err != nil {
 			exit.Error(err)
diff --git a/cli/cmd/lib_cluster_config.go b/cli/cmd/lib_cluster_config.go
index 4b182066d3..6b67745744 100644
--- a/cli/cmd/lib_cluster_config.go
+++ b/cli/cmd/lib_cluster_config.go
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"fmt"
+	"os"
 	"path"
 	"path/filepath"
 	"regexp"
@@ -87,8 +88,15 @@ func getNewClusterAccessConfig(clusterConfigFile string) (*clusterconfig.AccessC
 }
 
 func getClusterAccessConfigWithCache() (*clusterconfig.AccessConfig, error) {
+	defaultImageRegistry := "quay.io/cortexlabs"
+
+	devDefaultImageRegistry := os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY")
+	if devDefaultImageRegistry != "" {
+		defaultImageRegistry = devDefaultImageRegistry
+	}
+
 	accessConfig := &clusterconfig.AccessConfig{
-		ImageManager: "quay.io/cortexlabs/manager:" + consts.CortexVersion,
+		ImageManager: defaultImageRegistry + "/manager:" + consts.CortexVersion,
 	}
 
 	cachedPaths := existingCachedClusterConfigPaths()
diff --git a/cli/cmd/root.go b/cli/cmd/root.go
index fd08517002..d1a1c23813 100644
--- a/cli/cmd/root.go
+++ b/cli/cmd/root.go
@@ -198,15 +198,15 @@ func addVerboseFlag(cmd *cobra.Command) {
 	cmd.Flags().BoolVarP(&_flagVerbose, "verbose", "v", false, "show additional information (only applies to pretty output format)")
 }
 
-func wasEnvFlagProvided(cmd *cobra.Command) bool {
-	envFlagProvided := false
+func wasFlagProvided(cmd *cobra.Command, flagName string) bool {
+	flagWasProvided := false
 	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
-		if flag.Shorthand == "e" && flag.Changed && flag.Value.String() != "" {
-			envFlagProvided = true
+		if flag.Name == flagName && flag.Changed && flag.Value.String() != "" {
+			flagWasProvided = true
 		}
 	})
 
-	return envFlagProvided
+	return flagWasProvided
 }
 
 func printEnvIfNotSpecified(envName string, cmd *cobra.Command) error {
@@ -228,7 +228,7 @@ func envStringIfNotSpecified(envName string, cmd *cobra.Command) (string, error)
 		return "", err
 	}
 
-	if _flagOutput == flags.PrettyOutputType && !wasEnvFlagProvided(cmd) && len(envNames) > 1 {
+	if _flagOutput == flags.PrettyOutputType && !wasFlagProvided(cmd, "env") && len(envNames) > 1 {
 		return fmt.Sprintf("using %s environment\n\n", envName), nil
 	}

From 0ef3a9632763303d124df18539d21c49e8dde67f Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Fri, 2 Apr 2021 22:31:56 +0300
Subject: [PATCH 09/12] Rename CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY

---
 CONTRIBUTING.md               | 2 +-
 cli/cmd/lib_cluster_config.go | 2 +-
 cli/cmd/lib_manager.go        | 2 +-
 dev/operator_local.sh         | 2 +-
 manager/install.sh            | 6 +++---
 pkg/consts/consts.go          | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ecb7014f5b..38780ef279 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -188,7 +188,7 @@ Add this to your bash profile (e.g. `~/.bash_profile`, `~/.profile` or `~/.bashr
 
 ```bash
 # set the default image for APIs
-export CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY="<account_id>.dkr.ecr.<region>.amazonaws.com/cortexlabs"
+export CORTEX_DEV_DEFAULT_IMAGE_REGISTRY="<account_id>.dkr.ecr.<region>.amazonaws.com/cortexlabs"
 
 # redirect analytics and error reporting to our dev environment
 export CORTEX_TELEMETRY_SENTRY_DSN="https://c334df915c014ffa93f2076769e5b334@sentry.io/1848098"
diff --git a/cli/cmd/lib_cluster_config.go b/cli/cmd/lib_cluster_config.go
index 6b67745744..3560a86da9 100644
--- a/cli/cmd/lib_cluster_config.go
+++ b/cli/cmd/lib_cluster_config.go
@@ -90,7 +90,7 @@ func getNewClusterAccessConfig(clusterConfigFile string) (*clusterconfig.AccessC
 func getClusterAccessConfigWithCache() (*clusterconfig.AccessConfig, error) {
 	defaultImageRegistry := "quay.io/cortexlabs"
 
-	devDefaultImageRegistry := os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY")
+	devDefaultImageRegistry := os.Getenv("CORTEX_DEV_DEFAULT_IMAGE_REGISTRY")
 	if devDefaultImageRegistry != "" {
 		defaultImageRegistry = devDefaultImageRegistry
 	}
diff --git a/cli/cmd/lib_manager.go b/cli/cmd/lib_manager.go
index ec3de0ee14..656ab3df2f 100644
--- a/cli/cmd/lib_manager.go
+++ b/cli/cmd/lib_manager.go
@@ -190,7 +190,7 @@ func runManagerWithClusterConfig(entrypoint string, clusterConfig *clusterconfig
 		"CORTEX_TELEMETRY_DISABLE=" + os.Getenv("CORTEX_TELEMETRY_DISABLE"),
 		"CORTEX_TELEMETRY_SENTRY_DSN=" + os.Getenv("CORTEX_TELEMETRY_SENTRY_DSN"),
 		"CORTEX_TELEMETRY_SEGMENT_WRITE_KEY=" + os.Getenv("CORTEX_TELEMETRY_SEGMENT_WRITE_KEY"),
-		"CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY=" + os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"),
+		"CORTEX_DEV_DEFAULT_IMAGE_REGISTRY=" + os.Getenv("CORTEX_DEV_DEFAULT_IMAGE_REGISTRY"),
 		"CORTEX_CLUSTER_CONFIG_FILE=" + containerClusterConfigPath,
 	}
 	envs = append(envs, extraEnvs...)
diff --git a/dev/operator_local.sh b/dev/operator_local.sh
index acd1bce996..92fa3a2449 100755
--- a/dev/operator_local.sh
+++ b/dev/operator_local.sh
@@ -63,7 +63,7 @@ ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)"
 
 eval $(python3 $ROOT/manager/cluster_config_env.py "$ROOT/dev/config/cluster.yaml")
 
-export CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY="$CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"
+export CORTEX_DEV_DEFAULT_IMAGE_REGISTRY="$CORTEX_DEV_DEFAULT_IMAGE_REGISTRY"
 export CLUSTER_AWS_ACCESS_KEY_ID="${CLUSTER_AWS_ACCESS_KEY_ID:-$AWS_ACCESS_KEY_ID}"
 export CLUSTER_AWS_SECRET_ACCESS_KEY="${CLUSTER_AWS_SECRET_ACCESS_KEY:-$AWS_SECRET_ACCESS_KEY}"
diff --git a/manager/install.sh b/manager/install.sh
index bb73e212d7..81b788ac1a 100755
--- a/manager/install.sh
+++ b/manager/install.sh
@@ -214,7 +214,7 @@ function setup_configmap() {
     --from-literal='CORTEX_TELEMETRY_DISABLE'=$CORTEX_TELEMETRY_DISABLE \
     --from-literal='CORTEX_TELEMETRY_SENTRY_DSN'=$CORTEX_TELEMETRY_SENTRY_DSN \
     --from-literal='CORTEX_TELEMETRY_SEGMENT_WRITE_KEY'=$CORTEX_TELEMETRY_SEGMENT_WRITE_KEY \
-    --from-literal='CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY'=$CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY \
+    --from-literal='CORTEX_DEV_DEFAULT_IMAGE_REGISTRY'=$CORTEX_DEV_DEFAULT_IMAGE_REGISTRY \
     -o yaml --dry-run=client | kubectl apply -f - >/dev/null
 }
@@ -319,8 +319,8 @@ function setup_istio() {
 
 function start_pre_download_images() {
   registry="quay.io/cortexlabs"
-  if [ -n "$CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY" ]; then
-    registry="$CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"
+  if [ -n "$CORTEX_DEV_DEFAULT_IMAGE_REGISTRY" ]; then
+    registry="$CORTEX_DEV_DEFAULT_IMAGE_REGISTRY"
   fi
   export CORTEX_IMAGE_PYTHON_PREDICTOR_CPU="${registry}/python-predictor-cpu:${CORTEX_VERSION}"
   export CORTEX_IMAGE_PYTHON_PREDICTOR_GPU="${registry}/python-predictor-gpu:${CORTEX_VERSION}-cuda10.2-cudnn8"
diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go
index cfa73aa373..7779134d07 100644
--- a/pkg/consts/consts.go
+++ b/pkg/consts/consts.go
@@ -56,7 +56,7 @@ var (
 )
 
 func defaultRegistry() string {
-	if registryOverride := os.Getenv("CORTEX_DEV_DEFAULT_PREDICTOR_IMAGE_REGISTRY"); registryOverride != "" {
+	if registryOverride := os.Getenv("CORTEX_DEV_DEFAULT_IMAGE_REGISTRY"); registryOverride != "" {
 		return registryOverride
 	}
 	return "quay.io/cortexlabs"

From 1d6199d155032fb38815a2e4bde213c39964a565 Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Fri, 2 Apr 2021 22:37:40 +0300
Subject: [PATCH 10/12] Small error description nit

---
 cli/cmd/cluster.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go
index 185abfb1d7..77ecb6a9be 100644
--- a/cli/cmd/cluster.go
+++ b/cli/cmd/cluster.go
@@ -1016,7 +1016,7 @@ func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, d
 			}
 
 			if ng.MinInstances == minReplicas && ng.MaxInstances == maxReplicas {
-				fmt.Printf("no changes to the %s nodegroup required in cluster %s from region %s\n", ng.Name, clusterName, region)
+				fmt.Printf("no changes to the %s nodegroup required in cluster %s in region %s\n", ng.Name, clusterName, region)
 				exit.Ok()
 			}

From 1b92f6c40a96b1e6df1556e13c4f0a352512c4cc Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Sat, 3 Apr 2021 01:54:28 +0300
Subject: [PATCH 11/12] Use DefaultRegistry function instead

---
 cli/cmd/lib_cluster_config.go             | 10 +----
 pkg/consts/consts.go                      | 20 +++++-----
 pkg/types/clusterconfig/cluster_config.go | 48 +++++++++++------------
 3 files changed, 35 insertions(+), 43 deletions(-)

diff --git a/cli/cmd/lib_cluster_config.go b/cli/cmd/lib_cluster_config.go
index 3560a86da9..16399c8024 100644
--- a/cli/cmd/lib_cluster_config.go
+++ b/cli/cmd/lib_cluster_config.go
@@ -18,7 +18,6 @@ package cmd
 
 import (
 	"fmt"
-	"os"
 	"path"
 	"path/filepath"
 	"regexp"
@@ -87,15 +87,8 @@ func getNewClusterAccessConfig(clusterConfigFile string) (*clusterconfig.AccessC
 }
 
 func getClusterAccessConfigWithCache() (*clusterconfig.AccessConfig, error) {
-	defaultImageRegistry := "quay.io/cortexlabs"
-
-	devDefaultImageRegistry := os.Getenv("CORTEX_DEV_DEFAULT_IMAGE_REGISTRY")
-	if devDefaultImageRegistry != "" {
-		defaultImageRegistry = devDefaultImageRegistry
-	}
-
 	accessConfig := &clusterconfig.AccessConfig{
-		ImageManager: defaultImageRegistry + "/manager:" + consts.CortexVersion,
+		ImageManager: consts.DefaultRegistry() + "/manager:" + consts.CortexVersion,
 	}
 
 	cachedPaths := existingCachedClusterConfigPaths()
diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go
index 7779134d07..c3047c79ee 100644
--- a/pkg/consts/consts.go
+++ b/pkg/consts/consts.go
@@ -29,15 +29,15 @@ var (
 
 	SingleModelName = "_cortex_default"
 
-	DefaultImagePythonPredictorCPU   = fmt.Sprintf("%s/python-predictor-cpu:%s", defaultRegistry(), CortexVersion)
-	DefaultImagePythonPredictorGPU   = fmt.Sprintf("%s/python-predictor-gpu:%s-cuda10.2-cudnn8", defaultRegistry(), CortexVersion)
-	DefaultImagePythonPredictorInf   = fmt.Sprintf("%s/python-predictor-inf:%s", defaultRegistry(), CortexVersion)
-	DefaultImageTensorFlowServingCPU = fmt.Sprintf("%s/tensorflow-serving-cpu:%s", defaultRegistry(), CortexVersion)
-	DefaultImageTensorFlowServingGPU = fmt.Sprintf("%s/tensorflow-serving-gpu:%s", defaultRegistry(), CortexVersion)
-	DefaultImageTensorFlowServingInf = fmt.Sprintf("%s/tensorflow-serving-inf:%s", defaultRegistry(), CortexVersion)
-	DefaultImageTensorFlowPredictor  = fmt.Sprintf("%s/tensorflow-predictor:%s", defaultRegistry(), CortexVersion)
-	DefaultImageONNXPredictorCPU     = fmt.Sprintf("%s/onnx-predictor-cpu:%s", defaultRegistry(), CortexVersion)
-	DefaultImageONNXPredictorGPU     = fmt.Sprintf("%s/onnx-predictor-gpu:%s", defaultRegistry(), CortexVersion)
+	DefaultImagePythonPredictorCPU   = fmt.Sprintf("%s/python-predictor-cpu:%s", DefaultRegistry(), CortexVersion)
+	DefaultImagePythonPredictorGPU   = fmt.Sprintf("%s/python-predictor-gpu:%s-cuda10.2-cudnn8", DefaultRegistry(), CortexVersion)
+	DefaultImagePythonPredictorInf   = fmt.Sprintf("%s/python-predictor-inf:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageTensorFlowServingCPU = fmt.Sprintf("%s/tensorflow-serving-cpu:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageTensorFlowServingGPU = fmt.Sprintf("%s/tensorflow-serving-gpu:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageTensorFlowServingInf = fmt.Sprintf("%s/tensorflow-serving-inf:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageTensorFlowPredictor  = fmt.Sprintf("%s/tensorflow-predictor:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageONNXPredictorCPU     = fmt.Sprintf("%s/onnx-predictor-cpu:%s", DefaultRegistry(), CortexVersion)
+	DefaultImageONNXPredictorGPU     = fmt.Sprintf("%s/onnx-predictor-gpu:%s", DefaultRegistry(), CortexVersion)
 	DefaultImagePathsSet = strset.New(
 		DefaultImagePythonPredictorCPU,
 		DefaultImagePythonPredictorGPU,
@@ -55,7 +55,7 @@ var (
 	AuthHeader = "X-Cortex-Authorization"
 )
 
-func defaultRegistry() string {
+func DefaultRegistry() string {
 	if registryOverride := os.Getenv("CORTEX_DEV_DEFAULT_IMAGE_REGISTRY"); registryOverride != "" {
 		return registryOverride
 	}
 	return "quay.io/cortexlabs"
diff --git a/pkg/types/clusterconfig/cluster_config.go b/pkg/types/clusterconfig/cluster_config.go
index 088aef1000..5d54fa34d1 100644
--- a/pkg/types/clusterconfig/cluster_config.go
+++ b/pkg/types/clusterconfig/cluster_config.go
@@ -267,161 +267,161 @@ var CoreConfigStructFieldValidations = []*cr.StructFieldValidation{
 	{
 		StructField: "ImageOperator",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/operator:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/operator:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageManager",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/manager:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/manager:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageDownloader",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/downloader:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/downloader:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageRequestMonitor",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/request-monitor:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/request-monitor:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageAsyncGateway",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/async-gateway:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/async-gateway:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageClusterAutoscaler",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/cluster-autoscaler:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/cluster-autoscaler:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageMetricsServer",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/metrics-server:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/metrics-server:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageInferentia",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/inferentia:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/inferentia:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageNeuronRTD",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/neuron-rtd:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/neuron-rtd:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageNvidia",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/nvidia:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/nvidia:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageFluentBit",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/fluent-bit:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/fluent-bit:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageIstioProxy",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/istio-proxy:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/istio-proxy:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageIstioPilot",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/istio-pilot:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/istio-pilot:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheus",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusConfigReloader",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-config-reloader:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-config-reloader:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusOperator",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-operator:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-operator:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusStatsDExporter",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-statsd-exporter:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-statsd-exporter:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusDCGMExporter",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-dcgm-exporter:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-dcgm-exporter:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusKubeStateMetrics",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-kube-state-metrics:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-kube-state-metrics:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImagePrometheusNodeExporter",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/prometheus-node-exporter:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/prometheus-node-exporter:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageKubeRBACProxy",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/kube-rbac-proxy:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/kube-rbac-proxy:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageGrafana",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/grafana:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/grafana:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
 	{
 		StructField: "ImageEventExporter",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/event-exporter:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/event-exporter:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},
@@ -725,7 +725,7 @@ var AccessValidation = &cr.StructValidation{
 	{
 		StructField: "ImageManager",
 		StringValidation: &cr.StringValidation{
-			Default:   "quay.io/cortexlabs/manager:" + consts.CortexVersion,
+			Default:   consts.DefaultRegistry() + "/manager:" + consts.CortexVersion,
 			Validator: validateImageVersion,
 		},
 	},

From ff8b36973fbe358f7ae00bc559c966cb21c5294a Mon Sep 17 00:00:00 2001
From: Robert Lucian Chiriac
Date: Sat, 3 Apr 2021 01:59:56 +0300
Subject: [PATCH 12/12] Address PR nits

---
 cli/cmd/cluster.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go
index 77ecb6a9be..8aa4ea44c2 100644
--- a/cli/cmd/cluster.go
+++ b/cli/cmd/cluster.go
@@ -314,7 +314,7 @@ var _clusterScaleCmd = &cobra.Command{
 			scaleMaxInstances = pointer.Int64(_flagClusterScaleMaxInstances)
 		}
 		if scaleMinInstances == nil && scaleMaxInstances == nil {
-			exit.Error(ErrorSpecifyAtLeastOneFlag("min-instances", "max-instances"))
+			exit.Error(ErrorSpecifyAtLeastOneFlag("--min-instances", "--max-instances"))
 		}
 
 		if _, err := docker.GetDockerClient(); err != nil {
@@ -1016,7 +1016,7 @@ func updateNodeGroupScale(clusterConfig clusterconfig.Config, targetNg string, d
 			}
 
 			if ng.MinInstances == minReplicas && ng.MaxInstances == maxReplicas {
-				fmt.Printf("no changes to the %s nodegroup required in cluster %s in region %s\n", ng.Name, clusterName, region)
+				fmt.Printf("the %s nodegroup in the %s cluster in %s already has min instances set to %d and max instances set to %d\n", ng.Name, clusterName, region, minReplicas, maxReplicas)
 				exit.Ok()
 			}
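
Usage note: a sketch of invoking the command this series ends up with — the node group
name `ng-cpu` is a placeholder; the flags shown (`--node-group`, `--min-instances`,
`--max-instances`, `--yes`) are the ones registered in these patches, and after patch 08
only `--node-group` is required, with any omitted bound keeping its current value:

```bash
# update both bounds of a node group (prompts before applying)
cortex cluster scale --node-group ng-cpu --min-instances 2 --max-instances 10

# raise only the maximum; min instances keeps its current value
cortex cluster scale --node-group ng-cpu --max-instances 20 --yes
```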