From 9461ee62077bcf1c364dd2bd74dbb7db1c40e7a6 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 2 Apr 2025 12:37:04 -0400
Subject: [PATCH 01/27] Fix get_service_hosts()

Signed-off-by: Webster Mudge
---
 plugins/module_utils/service_utils.py | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py
index 1a920f68..e41356e7 100644
--- a/plugins/module_utils/service_utils.py
+++ b/plugins/module_utils/service_utils.py
@@ -30,9 +30,11 @@ from cm_client import (
     ApiClient,
     ApiConfig,
+    ApiHost,
     ApiService,
     ApiServiceConfig,
     ClustersResourceApi,
+    HostsResourceApi,
     MgmtServiceResourceApi,
     MgmtRoleConfigGroupsResourceApi,
     MgmtRolesResourceApi,
@@ -190,9 +192,19 @@ def changed(self) -> bool:
         return bool(self.config.items)
 
 
-def get_service_hosts(api_client: ApiClient, service: ApiService):
-    return (
-        ClustersResourceApi(api_client)
-        .list_hosts(cluster_name=service.cluster_ref.cluster_name)
+def get_service_hosts(api_client: ApiClient, service: ApiService) -> list[ApiHost]:
+    host_api = HostsResourceApi(api_client)
+    seen_hosts = dict()
+
+    for r in (
+        RolesResourceApi(api_client)
+        .read_roles(
+            cluster_name=service.cluster_ref.cluster_name,
+            service_name=service.name,
+        )
         .items
-    )
+    ):
+        if r.host_ref.hostname not in seen_hosts:
+            seen_hosts[r.host_ref.hostname] = host_api.read_host(r.host_ref.host_id)
+
+    return seen_hosts.values()

From 2835f0f89581609d4a2ccd86d213d1074f2210a4 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 2 Apr 2025 14:54:29 -0400
Subject: [PATCH 02/27] Add try/catch for deregistering non-existent services

Signed-off-by: Webster Mudge
---
 tests/unit/__init__.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 9608ef8a..64e21bef 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -217,10 +217,14 @@ def deregister_service(api_client: ApiClient, registry: list[ApiService]) -> Non
 
     # Delete the services
     for s in registry:
-        service_api.delete_service(
-            cluster_name=s.cluster_ref.cluster_name,
-            service_name=s.name,
-        )
+        try:
+            service_api.delete_service(
+                cluster_name=s.cluster_ref.cluster_name,
+                service_name=s.name,
+            )
+        except ApiException as e:
+            if e.status != 404:
+                raise e
 
 
 def register_role(

From e0c2bd924eb6ef79e06a92b1dbff9b0759248081 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 7 Apr 2025 15:02:23 -0400
Subject: [PATCH 03/27] Add utilities for creating models, provisioning, and
 updating role config groups

Update the get_base_role_config_group to retrieve base groups for all types

Signed-off-by: Webster Mudge
---
 .../module_utils/role_config_group_utils.py | 118 +++++++++++++++---
 1 file changed, 104 insertions(+), 14 deletions(-)

diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py
index 99471f7f..5210c5e1 100644
--- a/plugins/module_utils/role_config_group_utils.py
+++ b/plugins/module_utils/role_config_group_utils.py
@@ -14,12 +14,20 @@
 from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
     normalize_output,
+    ConfigListUpdates,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
+    InvalidRoleTypeException,
 )
 
 from cm_client import (
     ApiClient,
+    ApiConfig,
+    ApiConfigList,
     ApiRoleConfigGroup,
+    ApiRoleConfigGroupList,
     RoleConfigGroupsResourceApi,
+    ServicesResourceApi,
     MgmtRoleConfigGroupsResourceApi,
 )
 
@@ -55,23 +63,105 @@ def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dic
     return output
 
 
-def get_base_role_config_group(
-    api_client: ApiClient, cluster_name: str, service_name: str, role_type: str
+def create_role_config_group(
+    api_client: ApiClient,
+    cluster_name: str,
+    service_name: str,
+    name: str,
+    role_type: str,
+    display_name: str = None,
+    config: dict = None,
 ) -> ApiRoleConfigGroup:
-    rcg_api = RoleConfigGroupsResourceApi(api_client)
-    return next(
-        iter(
-            [
-                r
-                for r in rcg_api.read_role_config_groups(
-                    cluster_name, service_name
-                ).items
-                if r.role_type == role_type and r.base
-            ]
-        ),
-        None,
+    if (
+        role_type.upper()
+        not in ServicesResourceApi(api_client)
+        .list_role_types(
+            cluster_name=cluster_name,
+            service_name=service_name,
+        )
+        .items
+    ):
+        raise InvalidRoleTypeException(
+            f"Invalid role type '{role_type}' for service '{service_name}'"
+        )
+
+    role_config_group = ApiRoleConfigGroup(
+        name=name,
+        role_type=role_type.upper(),
     )
+
+    if display_name:
+        role_config_group.display_name = display_name
+
+    if config:
+        role_config_group.config = ApiConfigList(
+            items=[ApiConfig(name=k, value=v) for k, v in config.items()]
+        )
+
+    return role_config_group
+
+
+def provision_role_config_groups(
+    api_client: ApiClient,
+    cluster_name: str,
+    service_name: str,
+    role_config_groups: list[ApiRoleConfigGroup],
+) -> ApiRoleConfigGroup:
+    return RoleConfigGroupsResourceApi(api_client).create_role_config_groups(
+        cluster_name=cluster_name,
+        service_name=service_name,
+        body=ApiRoleConfigGroupList(items=role_config_groups),
+    )
+
+
+def update_role_config_group(
+    role_config_group: ApiRoleConfigGroup,
+    display_name: str = None,
+    config: dict = None,
+    purge: bool = False,
+) -> tuple[ApiRoleConfigGroup, dict, dict]:
+    before, after = dict(), dict()
+
+    # Check for display name changes
+    if display_name is not None and display_name != role_config_group.display_name:
+        before.update(display_name=role_config_group.display_name)
+        after.update(display_name=display_name)
+        role_config_group.display_name = display_name
+
+    # Reconcile configurations
+    if config or purge:
+        if config is None:
+            config = dict()
+
+        updates = ConfigListUpdates(role_config_group.config, config, purge)
+
+        if updates.changed:
+            before.update(config=updates.diff["before"])
+            after.update(config=updates.diff["after"])
+            role_config_group.config = updates.config
+
+    return (role_config_group, before, after)
+
+
+# TODO Normalize the return value to be a list
+def get_base_role_config_group(
+    api_client: ApiClient, cluster_name: str, service_name: str, role_type: str = None
+) -> ApiRoleConfigGroup:
+    base_rcg_list = [
+        r
+        for r in RoleConfigGroupsResourceApi(api_client)
+        .read_role_config_groups(
+            cluster_name=cluster_name,
+            service_name=service_name,
+        )
+        .items
+        if (r.base and role_type is None) or (r.base and r.role_type == role_type)
+    ]
+    if role_type is not None:
+        return next(iter(base_rcg_list), None)
+    else:
+        return base_rcg_list
+
 
 def get_mgmt_base_role_config_group(
     api_client: ApiClient, role_type: str

From de0f09b7c69206b1ca933bc744492face965b038 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 7 Apr 2025 15:03:09 -0400
Subject: [PATCH 04/27] Add check for invalid role type when creating a role
 model

Signed-off-by: Webster Mudge
---
 plugins/module_utils/role_utils.py | 41 +++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py
index 07de2872..acd99128 100644
--- a/plugins/module_utils/role_utils.py
+++ b/plugins/module_utils/role_utils.py
@@ -33,6 +33,7 @@
     ApiRoleConfigGroupRef,
     ApiRoleNameList,
     ApiRoleState,
+    ServicesResourceApi,
     RoleCommandsResourceApi,
     RoleConfigGroupsResourceApi,
     RolesResourceApi,
@@ -47,6 +48,22 @@ class RoleException(Exception):
     pass
 
 
+class HostNotFoundException(RoleException):
+    pass
+
+
+class RoleConfigGroupNotFoundException(RoleException):
+    pass
+
+
+class MaintenanceStateException(RoleException):
+    pass
+
+
+class InvalidRoleTypeException(RoleException):
+    pass
+
+
 ROLE_OUTPUT = [
     "commission_state",
     "config_staleness_status",
@@ -214,14 +231,6 @@ def read_cm_roles(api_client: ApiClient) -> ApiRoleList:
     return ApiRoleList(items=roles)
 
 
-class HostNotFoundException(RoleException):
-    pass
-
-
-class RoleConfigGroupNotFoundException(RoleException):
-    pass
-
-
 def create_role(
     api_client: ApiClient,
     role_type: str,
@@ -233,6 +242,18 @@ def create_role(
     role_config_group: str = None,
     tags: dict = None,
 ) -> ApiRole:
+    if (
+        role_type.upper()
+        not in ServicesResourceApi(api_client)
+        .list_role_types(
+            cluster_name=cluster_name,
+            service_name=service_name,
+        )
+        .items
+    ):
+        raise InvalidRoleTypeException(
+            f"Invalid role type '{role_type}' for service '{service_name}'"
+        )
 
     # Set up the role type
     role = ApiRole(type=str(role_type).upper())
@@ -315,10 +336,6 @@ def provision_service_role(
         raise RoleException(str(e))
 
 
-class MaintenanceStateException(RoleException):
-    pass
-
-
 def toggle_role_maintenance(
     api_client: ApiClient, role: ApiRole, maintenance: bool, check_mode: bool
 ) -> bool:

From edd55cc35faa6e66bf4cce775b11e3e71d89c1fb Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 7 Apr 2025 15:04:03 -0400
Subject: [PATCH 05/27] Add utilities for creating models, provisioning, and
 reconciling role config groups for a service

Signed-off-by: Webster Mudge
---
 plugins/module_utils/service_utils.py | 358 +++++++++++++++++++++++++-
 1 file changed, 346 insertions(+), 12 deletions(-)

diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py
index e41356e7..9cb739d6 100644
--- a/plugins/module_utils/service_utils.py
+++ b/plugins/module_utils/service_utils.py
@@ -19,20 +19,33 @@
 from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
     normalize_output,
     resolve_parameter_updates,
+    wait_command,
+    wait_commands,
 )
 from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    create_role_config_group,
     parse_role_config_group_result,
+    update_role_config_group,
 )
 from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
     parse_role_result,
+    InvalidRoleTypeException,
 )
 
 from cm_client import (
     ApiClient,
     ApiConfig,
+    ApiConfigList,
+    ApiEntityTag,
     ApiHost,
+    ApiRole,
+    ApiRoleConfigGroup,
+    ApiRoleConfigGroupList,
+    ApiRoleNameList,
     ApiService,
     ApiServiceConfig,
+    ApiServiceList,
+    ApiServiceState,
     ClustersResourceApi,
     HostsResourceApi,
     MgmtServiceResourceApi,
@@ -60,6 +73,18 @@
 ]
 
 
+class ServiceException(Exception):
+    pass
+
+
+class ServiceMaintenanceStateException(ServiceException):
+    pass
+
+
+class InvalidServiceTypeException(ServiceException):
+    pass
+
+
 def parse_service_result(service: ApiService) -> dict:
     # Retrieve only the cluster_name if it exists
     if service.cluster_ref is not None:
@@ -118,24 +143,191 @@ def read_service(
     )
 
     # Gather each role config group configuration
-    for rcg in service.role_config_groups:
-        rcg.config = rcg_api.read_config(
cluster_name=cluster_name, - service_name=service_name, - role_config_group_name=rcg.name, - ) + service.role_config_groups = rcg_api.read_role_config_groups( + cluster_name=cluster_name, + service_name=service_name, + ).items # Gather each role configuration - for role in service.roles: - role.config = role_api.read_role_config( - cluster_name=cluster_name, - service_name=service_name, - role_name=role.name, - ) + if service.roles is not None: + for role in service.roles: + role.config = role_api.read_role_config( + cluster_name=cluster_name, + service_name=service_name, + role_name=role.name, + ) + else: + service.roles = list() return service +def create_service( + api_client: ApiClient, + name: str, + type: str, + cluster_name: str, + display_name: str = None, + config: dict = None, + tags: dict = None, + # role_config_groups: list[ApiRoleConfigGroup] = None, + # roles: list[ApiRole] = None, +) -> ApiService: + if ( + type.upper() + not in ClustersResourceApi(api_client) + .list_service_types( + cluster_name=cluster_name, + ) + .items + ): + raise InvalidServiceTypeException( + f"Invalid service type '{type}' for cluster '{cluster_name}'" + ) + + # Set up the service basics + service = ApiService(name=name, type=str(type).upper()) + + if display_name: + service.display_name = display_name + + # Service-wide configurations + if config: + service.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + # Tags + if tags: + service.tags = [ApiEntityTag(k, v) for k, v in tags.items()] + + # # Role config groups + # # TODO Use a role_config_group utility to marshal the ApiRoleConfigGroup list + # # Keep the incoming type, but use it to create another via the utility call + # # This includes passing in the role type as an external reference + # if role_config_groups: + # available_types = ServicesResourceApi(api_client).list_role_types( + # cluster_name=cluster_name, + # service_name=name, + # ).items + + # for rcg in role_config_groups: + # if rcg.role_type not in available_types: + # raise InvalidRoleType("Unable to find role type: " + rcg.role_type) + + # service.role_config_groups = role_config_groups + + # # Roles + # # TODO Use the create_role() utility to marshal the ApiRole list + # # Keep the incoming ApiRole type, but use it to create another via the utility call + # # Need to pass in the role types and role config groups as external references (the latter because they + # # might be defined within the service) + # # For the former, the reference replaces an inline lookup. For the latter, the reference is a initial + # # lookup and then a fallback to the inline lookup + # # This might not work, as the references might fail because the service is not yet available... or + # # break up the provisioning flow to spin up an initial, "core" service, then have additional utility + # # calls to spin up RCG and roles, which then would be able to have the inline lookups (still would need + # # the to-be reference list for RCGs, however). 
+ # if roles: + # pass + + return service + + +def provision_service( + api_client: ApiClient, cluster_name: str, service: ApiService +) -> ApiService: + service_api = ServicesResourceApi(api_client) + + provisioned_service = next( + ( + iter( + service_api.create_services( + cluster_name=cluster_name, + body=ApiServiceList(items=[service]), + ).items + ) + ), + None, + ) + + if provisioned_service is None: + return + + # Wait for any running commands like First Run + available_cmds = service_api.list_service_commands( + cluster_name=cluster_name, + service_name=provisioned_service.name, + ) + + running_cmds = service_api.list_active_commands( + cluster_name=cluster_name, + service_name=provisioned_service.name, + ) + + try: + wait_commands(api_client=api_client, commands=running_cmds) + return provisioned_service + except Exception as e: + raise ServiceException(str(e)) + + +def toggle_service_maintenance( + api_client: ApiClient, service: ApiService, maintenance: bool, check_mode: bool +) -> bool: + service_api = ServicesResourceApi(api_client) + changed = False + + if maintenance and not service.maintenance_mode: + changed = True + cmd = service_api.enter_maintenance_mode + elif not maintenance and service.maintenance_mode: + changed = True + cmd = service_api.exit_maintenance_mode + + if not check_mode and changed: + maintenance_cmd = cmd( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + ) + + if maintenance_cmd.success is False: + raise ServiceMaintenanceStateException( + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" + ) + + return changed + + +def toggle_service_state( + api_client: ApiClient, service: ApiService, state: str, check_mode: bool +) -> ApiServiceState: + service_api = ServicesResourceApi(api_client) + changed = None + + if state == "started" and service.service_state not in [ApiServiceState.STARTED]: + changed = ApiServiceState.STARTED + cmd = service_api.start_command + elif state == "stopped" and service.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.NA, + ]: + changed = ApiServiceState.STOPPED + cmd = service_api.stop_command + elif state == "restarted": + changed = ApiServiceState.STARTED + cmd = service_api.restart_command + + if not check_mode and changed: + exec_cmd = cmd( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + ) + wait_command(api_client=api_client, command=exec_cmd) + + return changed + + def read_cm_service(api_client: ApiClient) -> ApiService: """Read the Cloudera Manager service and its role config group and role dependents. 
@@ -208,3 +400,145 @@ def get_service_hosts(api_client: ApiClient, service: ApiService) -> list[ApiHos seen_hosts[r.host_ref.hostname] = host_api.read_host(r.host_ref.host_id) return seen_hosts.values() + + +def reconcile_service_role_config_groups( + api_client: ApiClient, + service: ApiService, + role_config_groups: list[dict], + purge: bool, + check_mode: bool, +) -> tuple[dict, dict]: + # Map the current role config groups by name and by base role type + base_rcg_map, rcg_map = dict(), dict() + for rcg in service.role_config_groups: + if rcg.base: + base_rcg_map[rcg.role_type] = rcg + else: + rcg_map[rcg.name] = rcg + + addition_list = list[ApiRoleConfigGroup]() + diff_before, diff_after = list[dict](), list[dict]() + + rcg_api = RoleConfigGroupsResourceApi(api_client) + + for incoming_rcg in role_config_groups: + incoming_name = incoming_rcg["name"] + + # If it's a custom role config group + if incoming_name is not None: + # If the custom role config group exists, update it + current_rcg = rcg_map.pop(incoming_name, None) + if current_rcg is not None: + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + body=updated_rcg, + ) + + # Else create the new custom role config group + else: + created_rcg = create_role_config_group( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_rcg["role_type"], + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + ) + diff_before.append(dict()) + diff_after.append(created_rcg.to_dict()) + addition_list(created_rcg) + + # Else it's a base role config group + else: + current_rcg = base_rcg_map.pop(incoming_rcg["role_type"]) + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + body=updated_rcg, + ) + + # Process role config group additions + if addition_list: + if not check_mode: + rcg_api.create_role_config_groups( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleConfigGroupList(items=addition_list), + ) + + # Process role config group deletions if purge is set + if purge: + # Reset any remaining base role config groups + for current_rcg in base_rcg_map.values(): + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + body=updated_rcg, + ) + + # Reset to base and remove any remaining custom role config groups + for current_rcg in 
rcg_map.values(): + diff_before.append(current_rcg.to_dict()) + diff_after.append(dict()) + + existing_roles = rcg_api.read_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + ).items + + if existing_roles: + if not check_mode: + rcg_api.move_roles_to_base_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleNameList(items=[e.name for e in existing_roles]), + ) + + if not check_mode: + rcg_api.delete_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + ) + + return (diff_before, diff_after) From 33e0634ab7fae4c38e1e5cbdb059cb196f155bf8 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 7 Apr 2025 15:06:01 -0400 Subject: [PATCH 06/27] Update service module to handle service-wide configuration and core fields. Update service module to provision and reconcile role config groups. Update service module to handle maintenance mode. Signed-off-by: Webster Mudge --- plugins/modules/service.py | 473 +++++++++++++++++++++++--------- plugins/modules/service_role.py | 12 +- 2 files changed, 343 insertions(+), 142 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index ff373a7f..6965d312 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,8 +22,6 @@ - Manage a service in a cluster. author: - "Webster Mudge (@wmudge)" -requirements: - - cm-client options: cluster: description: @@ -90,6 +88,10 @@ support: full platform: platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.service_info """ EXAMPLES = r""" @@ -293,23 +295,48 @@ returned: when supported """ -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - resolve_tag_updates, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - parse_service_result, -) - from cm_client import ( ApiEntityTag, + ApiRoleConfigGroup, + ApiRoleConfigGroupList, + ApiRoleNameList, ApiService, ApiServiceList, ClustersResourceApi, + RoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + resolve_tag_updates, + ConfigListUpdates, + TagUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + create_role_config_group, + get_base_role_config_group, + provision_role_config_groups, + update_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + provision_service_role, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + create_service, + parse_service_result, + provision_service, + read_service, + reconcile_service_role_config_groups, + toggle_service_maintenance, + toggle_service_state, + ServiceMaintenanceStateException, +) + class ClusterService(ClouderaManagerMutableModule): def __init__(self, module): @@ 
-317,13 +344,16 @@ def __init__(self, module): # Set the parameters self.cluster = self.get_param("cluster") - self.service = self.get_param("service") - self.maintenance = self.get_param("maintenance") + self.name = self.get_param("name") self.display_name = self.get_param("display_name") - self.tags = self.get_param("tags") self.type = self.get_param("type") - self.state = self.get_param("state") + self.maintenance = self.get_param("maintenance") self.purge = self.get_param("purge") + self.config = self.get_param("config") + self.tags = self.get_param("tags") + self.roles = self.get_param("roles") + self.role_config_groups = self.get_param("role_config_groups") + self.state = self.get_param("state") # Initialize the return values self.changed = False @@ -343,192 +373,363 @@ def process(self): else: raise ex - api_instance = ServicesResourceApi(self.api_client) - existing = None + service_api = ServicesResourceApi(self.api_client) + current = None + # Try and retrieve the service by name try: - existing = api_instance.read_service( - self.cluster, self.service, view="full" + current = read_service( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.name, ) except ApiException as ex: if ex.status != 404: raise ex if self.state == "absent": - if existing: - api_instance.delete_service(self.cluster, self.service) + if current: + self.changed = True - elif self.state in ["present", "started", "stopped"]: - if existing: + if self.module._diff: + self.diff = dict(before=parse_service_result(current), after=dict()) - # Handle maintenance mode - if ( - self.maintenance is not None - and self.maintenance != existing.maintenance_mode - ): - self.changed = True + if not self.module.check_mode: + service_api.delete_service(self.cluster, self.name) - if self.module._diff: - self.diff["before"].update( - maintenance_mode=existing.maintenance_mode + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new service + if not current: + self.changed = True + + if self.type is None: + self.module.fail_json(msg=f"missing required arguments: type") + + # Create and provision the service + service = create_service( + api_client=self.api_client, + name=self.name, + type=self.type, + cluster_name=self.cluster, + display_name=self.display_name, + config=self.config, + tags=self.tags, + # role_config_groups=self.role_config_groups, + # roles=self.roles, + ) + + if self.module._diff: + self.diff = dict( + before={}, + after=service.to_dict(), + ) + + if not self.module.check_mode: + current = provision_service( + api_client=self.api_client, + cluster_name=self.cluster, + service=service, + ) + + if not current: + self.module.fail_json( + msg="Unable to create new service", + service=to_native(service.to_dict()), ) - self.diff["after"].update(maintenance_mode=self.maintenance) - if not self.module.check_mode: - if self.maintenance: - maintenance_cmd = api_instance.enter_maintenance_mode( - self.cluster, self.service + # Create and provision the role config groups + if self.role_config_groups: + rcg_list = list() + base_rcg = None + + if self.module._diff: + before_list, after_list = list(), list() + + for requested_rcg in self.role_config_groups: + # Create any custom role config groups + if requested_rcg["name"] is not None: + custom_rcg = create_role_config_group( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + name=requested_rcg["name"], + role_type=requested_rcg["role_type"], + 
display_name=requested_rcg.get("display_name", None), + config=requested_rcg.get("config", None), ) + + rcg_list.append(custom_rcg) + + if self.module._diff: + before_list.append(dict()) + after_list.append(custom_rcg.to_dict()) + + # Else record the base role config group for modification else: - maintenance_cmd = api_instance.exit_maintenance_mode( - self.cluster, self.service + current_base_rcg = get_base_role_config_group( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_type=requested_rcg["role_type"], + ) + + (base_rcg, before, after) = update_role_config_group( + role_config_group=current_base_rcg, + display_name=requested_rcg.get("display_name", None), + config=requested_rcg.get("config", None), + purge=self.purge, ) - if maintenance_cmd.success is False: - self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + if self.module._diff: + before_list.append(before) + after_list.append(after) + + if self.module._diff: + self.diff["before"]["role_config_groups"] = before_list + self.diff["after"]["role_config_groups"] = after_list + + if not self.module.check_mode: + provision_role_config_groups( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_config_groups=rcg_list, + ) + + if base_rcg is not None: + RoleConfigGroupsResourceApi( + self.api_client + ).update_role_config_group( + cluster_name=self.cluster, + service_name=current.name, + role_config_group_name=base_rcg.name, + message=self.message, + body=base_rcg, ) - # Tags - if self.tags: - (delta_add, delta_del) = resolve_tag_updates( - {t.name: t.value for t in existing.tags}, self.tags, self.purge + self.handle_maintenance(current) + + # Else the service exists, so address any changes + else: + if self.type and self.type.upper() != current.type: + self.module.fail_json( + msg="Service name already in use for type: " + current.type ) - if delta_add or delta_del: + self.handle_maintenance(current) + + # Handle service-wide configurations + if self.config or self.purge: + if self.config is None: + self.config = dict() + + config_updates = ConfigListUpdates( + current.config, self.config, self.purge + ) + + if config_updates.changed: self.changed = True if self.module._diff: - self.diff["before"].update(tags=delta_del) - self.diff["after"].update(tags=delta_add) + self.diff["before"].update( + config=config_updates.diff["before"] + ) + self.diff["after"].update( + config=config_updates.diff["after"] + ) if not self.module.check_mode: - if delta_del: - api_instance.delete_tags( - self.cluster, - self.service, - body=[ - ApiEntityTag(k, v) for k, v in delta_del.items() - ], - ) - if delta_add: - api_instance.add_tags( - self.cluster, - self.service, - body=[ - ApiEntityTag(k, v) for k, v in delta_add.items() - ], - ) + service_api.update_service_config( + cluster_name=self.cluster, + service_name=self.service, + message=self.message, + body=config_updates.config, + ) + + # Handle tags + if self.tags or self.purge: + if self.tags is None: + self.tags = dict() + + tag_updates = TagUpdates(current.tags, self.tags, self.purge) + + if tag_updates.changed: + self.changed = True - # TODO Config + if self.module._diff: + self.diff["before"].update(tags=tag_updates.diff["before"]) + self.diff["after"].update(tags=tag_updates.diff["after"]) - # Service details - # Currently, only display_name - delta = dict() + if not self.module.check_mode: + if tag_updates.deletions: + 
service_api.delete_tags( + cluster_name=self.cluster, + service_name=self.name, + body=tag_updates.deletions, + ) - if self.display_name and self.display_name != existing.display_name: - delta.update(display_name=self.display_name) + if tag_updates.additions: + service_api.add_tags( + cluster_name=self.cluster, + service_name=self.name, + body=tag_updates.additions, + ) - if delta: + # Handle service details (currently, only display_name) + if self.display_name and self.display_name != current.display_name: self.changed = True + current.display_name = self.display_name if self.module._diff: - self.diff["before"].update(display_name=existing.display_name) + self.diff["before"].update(display_name=current.display_name) self.diff["after"].update(display_name=self.display_name) if not self.module.check_mode: - api_instance.update_service( - self.cluster, self.service, body=ApiService(**delta) + service_api.update_service( + cluster_name=self.cluster, + service_name=self.name, + body=current, ) - if self.state == "started" and existing.service_state != "STARTED": - self.changed = True + # Handle roles - if self.module._diff: - self.diff["before"].update(service_state=existing.service_state) - self.diff["after"].update(service_state="STARTED") + # Handle role config groups + if self.role_config_groups or self.purge: + if self.role_config_groups is None: + self.role_config_groups = list() - if not self.module.check_mode: - if existing.service_state == "NA": - self.wait_command( - api_instance.first_run(self.cluster, self.service) - ) - else: - self.wait_command( - api_instance.start_command(self.cluster, self.service) - ) + # Then call the utility... + (before_rcg, after_rcg) = reconcile_service_role_config_groups( + api_client=self.api_client, + service=current, + role_config_groups=self.role_config_groups, + purge=self.purge, + check_mode=self.module.check_mode, + ) - elif self.state == "stopped" and existing.service_state not in [ - "STOPPED", - "NA", - ]: - self.changed = True + if before_rcg or after_rcg: + self.changed = True + if self.module._diff: + self.diff["before"].update(role_config_groups=before_rcg) + self.diff["after"].update(role_config_groups=after_rcg) + + # Handle state changes + # if not self.module.check_mode: + # service_api.create_services(self.cluster, body=service_list) + + # if self.state == "started": + # self.wait_command( + # service_api.first_run(self.cluster, self.name) + # ) + + # self.output = parse_service_result( + # service_api.read_service(self.cluster, self.name, view="full") + # ) + if self.state == "started" and current.service_state != "STARTED": + self.changed = True - if self.module._diff: - self.diff["before"].update(service_state=existing.service_state) - self.diff["after"].update(service_state="STOPPED") + if self.module._diff: + self.diff["before"].update(service_state=current.service_state) + self.diff["after"].update(service_state="STARTED") - if not self.module.check_mode: + if not self.module.check_mode: + if current.service_state == "NA": self.wait_command( - api_instance.stop_command(self.cluster, self.service) + service_api.first_run(self.cluster, self.name) ) - - if self.changed: - self.output = parse_service_result( - api_instance.read_service( - self.cluster, self.service, view="full" + else: + self.wait_command( + service_api.start_command(self.cluster, self.name) ) - ) - else: - self.output = parse_service_result(existing) - else: - - # Service doesn't exist - if self.type is None: - self.module.fail_json( - msg=f"Service does not exist, 
missing required arguments: type" - ) - - payload = dict(name=self.service, type=str(self.type).upper()) - - if self.display_name: - payload.update(display_name=self.display_name) - - service_list = ApiServiceList([ApiService(**payload)]) + elif self.state == "stopped" and current.service_state not in [ + "STOPPED", + "NA", + ]: self.changed = True if self.module._diff: - self.diff = dict( - before={}, - after=payload, - ) + self.diff["before"].update(service_state=current.service_state) + self.diff["after"].update(service_state="STOPPED") if not self.module.check_mode: - api_instance.create_services(self.cluster, body=service_list) - - if self.state == "started": - self.wait_command( - api_instance.first_run(self.cluster, self.service) - ) + self.wait_command(service_api.stop_command(self.cluster, self.name)) + if self.changed: self.output = parse_service_result( - api_instance.read_service(self.cluster, self.service, view="full") + read_service( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.name, + ) ) + else: + self.output = parse_service_result(current) else: self.module.fail_json(msg=f"Invalid state: {self.state}") + def handle_maintenance(self, service: ApiService) -> None: + if self.maintenance is not None: + try: + state_changed = toggle_service_maintenance( + api_client=self.api_client, + service=service, + maintenance=self.maintenance, + check_mode=self.module.check_mode, + ) + except ServiceMaintenanceStateException as ex: + self.module.fail_json(msg=to_native(ex)) + + if state_changed: + self.changed = True + if self.module._diff: + self.diff["before"].update( + maintenance_mode=service.maintenance_mode + ) + self.diff["after"].update(maintenance_mode=self.maintenance) + def main(): module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( cluster=dict(required=True, aliases=["cluster_name"]), - service=dict(required=True, aliases=["service_name", "name"]), - maintenance=dict(type="bool", aliases=["maintenance_mode"]), + name=dict(required=True, aliases=["service_name", "service"]), display_name=dict(), - tags=dict(type=dict), - purge=dict(type="bool", default=False), type=dict(aliases=["service_type"]), + # version=dict(), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + purge=dict(type="bool", default=False), + config=dict(type="dict", aliases=["service_wide_config"]), + tags=dict(type="dict"), + roles=dict( + type="list", + elements="dict", + options=dict( + type=dict(aliases=["role_type"]), + hostnames=dict( + type="list", + elements="str", + aliases=["cluster_hosts", "cluster_hostnames"], + ), + host_ids=dict( + type="list", elements="str", aliases=["cluster_host_ids"] + ), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + config=dict(type="dict", aliases=["parameters", "params"]), + role_config_group=dict(), + tags=dict(type="dict"), + ), + ), + role_config_groups=dict( + type="list", + elements="dict", + options=dict( + name=dict(aliases=["role_config_group_name", "role_config_group"]), + display_name=dict(), + role_type=dict(aliases=["type"]), + config=dict(type="dict", aliases=["params", "parameters"]), + ), + ), state=dict( default="present", choices=["present", "absent", "started", "stopped"] ), diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index 953cd72b..c2e03a08 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -620,17 +620,17 @@ def process(self): if not self.module.check_mode: if tag_updates.deletions: 
role_api.delete_tags( - self.cluster, - self.name, - self.service, + cluster_name=self.cluster, + service_name=self.service, + role_name=self.name, body=tag_updates.deletions, ) if tag_updates.additions: role_api.add_tags( - self.cluster, - self.name, - self.service, + cluster_name=self.cluster, + service_name=self.service, + role_name=self.name, body=tag_updates.additions, ) From 0cf3d2a6e9ba759b428550da65ef879402a93c97 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 7 Apr 2025 15:06:44 -0400 Subject: [PATCH 07/27] Add try/except to handle previously deleted role config groups Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 64e21bef..8d50d98c 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -359,21 +359,21 @@ def deregister_role_config_group( for rcg in registry: # Delete the custom role config groups if not rcg.base: - existing_roles = rcg_api.read_roles( - cluster_name=rcg.service_ref.cluster_name, - service_name=rcg.service_ref.service_name, - role_config_group_name=rcg.name, - ).items - - if existing_roles: - rcg_api.move_roles_to_base_group( + # The role might already be deleted, so ignore if not found + try: + existing_roles = rcg_api.read_roles( cluster_name=rcg.service_ref.cluster_name, service_name=rcg.service_ref.service_name, - body=ApiRoleNameList([r.name for r in existing_roles]), - ) + role_config_group_name=rcg.name, + ).items + + if existing_roles: + rcg_api.move_roles_to_base_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + body=ApiRoleNameList([r.name for r in existing_roles]), + ) - # The role might already be deleted, so ignore if not found - try: rcg_api.delete_role_config_group( cluster_name=rcg.service_ref.cluster_name, service_name=rcg.service_ref.service_name, From 34f91ce37aefd8291b266dfc7cbec9f26ab3ed94 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 7 Apr 2025 15:07:44 -0400 Subject: [PATCH 08/27] Update tests for service role to use pytest fixtures. Break out role config group management into separate test script. Signed-off-by: Webster Mudge --- .../plugins/modules/service/test_service.py | 585 +++++++++++++- .../modules/service/test_service_rcgs.py | 726 ++++++++++++++++++ .../modules/service_role/test_service_role.py | 2 - 3 files changed, 1269 insertions(+), 44 deletions(-) create mode 100644 tests/unit/plugins/modules/service/test_service_rcgs.py diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 2b78aab2..05a998b8 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,84 +22,585 @@ import os import pytest +from collections.abc import Generator + from ansible.module_utils.common.dict_transformations import recursive_diff +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiConfig, + ApiConfigList, + ApiEntityTag, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiRoleState, + ApiService, + ClustersResourceApi, + RoleConfigGroupsResourceApi, + RolesResourceApi, + RoleCommandsResourceApi, + ServicesResourceApi, +) + from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_bulk_commands, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_base_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_role, + read_roles, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, + deregister_role, + register_role, + deregister_role_config_group, + register_role_config_group, ) LOG = logging.getLogger(__name__) -@pytest.fixture -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) +@pytest.fixture() +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + +@pytest.fixture() +def server_role(cm_api_client, base_cluster, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() + + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) - return { - **conn, - "verify_tls": "no", - "debug": "no", - } + deregister_role(api_client=cm_api_client, registry=role_registry) -def test_missing_required(conn, module_args): - module_args(conn) +class TestServiceArgSpec: + def test_service_missing_required(self, conn, module_args): + module_args(conn) - with pytest.raises(AnsibleFailJson, match="cluster, service"): - service.main() + with pytest.raises(AnsibleFailJson, match="cluster, name"): + service.main() + def test_service_missing_name(self, conn, module_args): + module_args( + { + **conn, + "service": "example", + } + ) -def test_missing_service(conn, module_args): - conn.update(service="example") - module_args(conn) + with pytest.raises(AnsibleFailJson, match="cluster"): + service.main() - with pytest.raises(AnsibleFailJson, match="cluster"): - service.main() + def test_service_missing_cluster(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + } + ) + with pytest.raises(AnsibleFailJson, match="name"): + service.main() -def test_missing_cluster(conn, module_args): - conn.update(cluster="example") - module_args(conn) + # TODO Add argspec for RCG - with pytest.raises(AnsibleFailJson, match="service"): + # TODO Add argspec for roles + + +class TestServiceInvalidParameters: + def test_present_invalid_cluster(self, conn, module_args): + module_args({**conn, "cluster": "BOOM", "service": "example"}) + + with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): + service.main() + + def test_present_missing_type(self, conn, module_args, base_cluster): + module_args( + { + **conn, + "cluster": base_cluster.name, + "service": "test-zookeeper", + } + ) + + with pytest.raises(AnsibleFailJson, match="type"): + service.main() + + +class TestServiceProvision: + @pytest.fixture(autouse=True) + def zookeeper_reset(self, cm_api_client, base_cluster): + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield + + # Remove any added services + 
services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_core(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + def test_service_provision_display_name( + self, conn, module_args, base_cluster, request + ): + id = f"pytest-{Path(request.node.name)}" + name = "Pytest ZooKeeper" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "display_name": name, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + def test_service_provision_config(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "config": {"tickTime": 2001}, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"]["tickTime"] == "2001" + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + # Idempotency + 
with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"]["tickTime"] == "2001" + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + def test_service_provision_tags(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "tags": {"pytest": "example"}, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"]["pytest"] == "example" + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"]["pytest"] == "example" + assert e.value.service["maintenance_mode"] == False + assert e.value.service["role_config_groups"] == list() + assert e.value.service["roles"] == list() + + +class TestServiceModification: + @pytest.fixture(autouse=True) + def maintenance_enabled_zookeeper( + self, cm_api_client, zookeeper + ) -> Generator[ApiService]: + ServicesResourceApi(cm_api_client).enter_maintenance_mode( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + def test_service_existing_type(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "type": "GATEWAY", + "state": "present", + } + ) + + with pytest.raises(AnsibleFailJson, match="already in use"): + service.main() + + def test_service_existing_display_name(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "display_name": "Example", + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["display_name"] == "Example" + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["display_name"] == "Example" + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert 
len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() + + +def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() -def test_present_invalid_cluster(conn, module_args): - conn.update( - cluster="example", - service="example", + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() + + +def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": True, + "state": "present", + } ) - module_args(conn) - with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): + with pytest.raises(AnsibleExitJson) as e: service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() -def test_present_missing_type(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() + + +def test_service_existing_maintenance_disabled( + self, conn, module_args, maintenance_enabled_zookeeper +): + module_args( + { + **conn, + "cluster": maintenance_enabled_zookeeper.cluster_ref.cluster_name, + "name": maintenance_enabled_zookeeper.name, + "maintenance": False, + "state": "present", + } ) - module_args(conn) - with pytest.raises(AnsibleFailJson, match="type"): + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == maintenance_enabled_zookeeper.name + assert e.value.service["type"] == maintenance_enabled_zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 + assert 
e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: service.main() + assert e.value.changed == False + assert e.value.service["name"] == maintenance_enabled_zookeeper.name + assert e.value.service["type"] == maintenance_enabled_zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 + assert e.value.service["roles"] == list() + + +class TestServiceStates: + def test_service_existing_state_absent(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "absent", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert not e.value.service + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert not e.value.service + + +## PREVIOUS TESTS + def test_present_create_service(conn, module_args): conn.update( diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py new file mode 100644 index 00000000..2a2e9a4a --- /dev/null +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -0,0 +1,726 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
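+
+# Note: the comments below summarize what this new test module covers, based on the
+# test classes that follow; they describe intent only and add no new behavior.
+# Tests here exercise the service module's role config group handling against a
+# provisioned ZooKeeper service: creating custom and base groups at service
+# provisioning time, updating base and custom group configurations (with and
+# without purge), and restoring role associations to the base group when a
+# custom group is purged.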
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator + +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiConfig, + ApiConfigList, + ApiEntityTag, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiRoleState, + ApiService, + ClustersResourceApi, + RoleConfigGroupsResourceApi, + RolesResourceApi, + RoleCommandsResourceApi, + ServicesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_bulk_commands, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_base_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_role, + read_roles, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + deregister_service, + register_service, + deregister_role, + register_role, + deregister_role_config_group, + register_role_config_group, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture() +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + +@pytest.fixture() +def server_role(cm_api_client, base_cluster, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() + + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) + + deregister_role(api_client=cm_api_client, registry=role_registry) + + +class TestServiceProvisionRoleConfigGroups: + @pytest.fixture(autouse=True) + def zookeeper_reset(self, cm_api_client, base_cluster): + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield + + # Remove any added services + services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_custom_rcg( + self, conn, module_args, base_cluster, request + ): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "role_config_groups": [ + { + "name": id, + "type": "SERVER", + "config": { + "minSessionTimeout": 4601, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 3 # custom + 2 bases + rcg = next( + iter([r for r in e.value.service["role_config_groups"] if not r["base"]]) + ) + assert rcg["name"] == id + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 3 + rcg = next( + iter([r for r in 
e.value.service["role_config_groups"] if not r["base"]]) + ) + assert rcg["name"] == id + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + def test_service_provision_base_rcg(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "role_config_groups": [ + { + "type": "SERVER", + "config": { + "minSessionTimeout": 4601, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 2 # 2 bases + rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 2 + rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + +class TestServiceModificationRoleConfigGroups: + @pytest.fixture() + def base_rcg_server(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type="SERVER", + ) + + base_rcg.config = ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5500"), + ApiConfig(name="maxSessionTimeout", value="45000"), + ] + ) + + return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_group_name=base_rcg.name, + body=base_rcg, + ) + + @pytest.fixture() + def base_rcg_gateway(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type="GATEWAY", + ) + + base_rcg.config = ApiConfigList( + items=[ApiConfig(name="client_config_priority", value="91")] + ) + + return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_group_name=base_rcg.name, + body=base_rcg, + ) + + @pytest.fixture() + def custom_rcg_server( + self, cm_api_client, zookeeper, request + ) -> Generator[ApiRoleConfigGroup]: + id = Path(request.node.name).stem + + role_config_groups = list[ApiRoleConfigGroup]() + + yield 
register_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + service=zookeeper, + role_config_group=ApiRoleConfigGroup( + name=f"pytest-{id}", + role_type="SERVER", + config=ApiConfigList(items=[ApiConfig("minSessionTimeout", "4501")]), + display_name=f"Pytest ({id})", + ), + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + deregister_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + @pytest.fixture() + def server_role_custom_rcg( + self, cm_api_client, server_role, custom_rcg_server + ) -> ApiRole: + RoleConfigGroupsResourceApi(cm_api_client).move_roles( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_config_group_name=custom_rcg_server.name, + body=ApiRoleNameList(items=[server_role.name]), + ) + return server_role + + def test_service_existing_base_rcg( + self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "type": base_rcg_server.role_type, + "config": { + "minSessionTimeout": 5501, + "maxSessionTimeout": 45001, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert gateway_rcg["config"]["client_config_priority"] == "91" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert gateway_rcg["config"]["client_config_priority"] == "91" + + def test_service_existing_base_rcg_purge( + self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "type": base_rcg_server.role_type, + "config": { + "minSessionTimeout": 5501, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert "maxSessionTimeout" not in server_rcg["config"] + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert "client_config_priority" not in gateway_rcg["config"] + + # Idempotency + with 
pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert "maxSessionTimeout" not in server_rcg["config"] + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert "client_config_priority" not in gateway_rcg["config"] + + def test_service_existing_custom_rcg( + self, conn, module_args, zookeeper, custom_rcg_server + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "name": custom_rcg_server.name, + "config": { + "minSessionTimeout": 5501, + "maxSessionTimeout": 45001, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + def test_service_existing_custom_rcg_purge( + self, conn, module_args, zookeeper, custom_rcg_server + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "name": custom_rcg_server.name, + "config": { + "maxSessionTimeout": 45001, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + assert "minSessionTimeout" not in server_rcg["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + assert "minSessionTimeout" not in server_rcg["config"] + + def test_service_existing_custom_rcg_purge_role_assoc( + self, conn, module_args, cm_api_client, zookeeper, server_role_custom_rcg + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert server_role_custom_rcg.name not in [ + rcg["name"] for rcg in e.value.service["role_config_groups"] + ] + + refreshed_role = read_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role_custom_rcg.name, + ) + base_rcg = get_base_role_config_group( + 
api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_role_custom_rcg.type, + ) + assert ( + refreshed_role.role_config_group_ref.role_config_group_name == base_rcg.name + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert server_role_custom_rcg.name not in [ + rcg["name"] for rcg in e.value.service["role_config_groups"] + ] + + refreshed_role = read_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role_custom_rcg.name, + ) + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_role_custom_rcg.type, + ) + assert ( + refreshed_role.role_config_group_ref.role_config_group_name == base_rcg.name + ) diff --git a/tests/unit/plugins/modules/service_role/test_service_role.py b/tests/unit/plugins/modules/service_role/test_service_role.py index 5fa7dc15..1a697ecb 100644 --- a/tests/unit/plugins/modules/service_role/test_service_role.py +++ b/tests/unit/plugins/modules/service_role/test_service_role.py @@ -39,8 +39,6 @@ RoleCommandsResourceApi, ) -from ansible.module_utils.common.dict_transformations import recursive_diff - from ansible_collections.cloudera.cluster.plugins.modules import service_role from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( wait_bulk_commands, From c88697c48dc2927b04814e34de5c52f34b286aa8 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 8 Apr 2025 20:14:02 -0400 Subject: [PATCH 09/27] Rename role exceptions Signed-off-by: Webster Mudge --- plugins/module_utils/role_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index acd99128..308fa42b 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -48,7 +48,7 @@ class RoleException(Exception): pass -class HostNotFoundException(RoleException): +class RoleHostNotFoundException(RoleException): pass @@ -56,7 +56,7 @@ class RoleConfigGroupNotFoundException(RoleException): pass -class MaintenanceStateException(RoleException): +class RoleMaintenanceStateException(RoleException): pass @@ -261,7 +261,7 @@ def create_role( # Host assignment host_ref = get_host_ref(api_client, hostname, host_id) if host_ref is None: - raise HostNotFoundException( + raise RoleHostNotFoundException( f"Host not found: hostname='{hostname}', host_id='{host_id}'" ) else: @@ -357,7 +357,7 @@ def toggle_role_maintenance( ) if maintenance_cmd.success is False: - raise MaintenanceStateException( + raise RoleMaintenanceStateException( f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" ) From a7f6cb68b29f33b53dc310fe4e550f6459643c2c Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 8 Apr 2025 20:14:48 -0400 Subject: [PATCH 10/27] Add reconcile_service_roles() utility function Signed-off-by: Webster Mudge --- plugins/module_utils/service_utils.py | 272 ++++++++++++++++++++++++-- 1 file changed, 261 insertions(+), 11 deletions(-) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 9cb739d6..ab1b3e78 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -21,14 +21,26 @@ resolve_parameter_updates, wait_command, 
wait_commands, + ConfigListUpdates, + TagUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( create_role_config_group, + get_base_role_config_group, parse_role_config_group_result, update_role_config_group, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_roles, + read_roles_by_type, parse_role_result, + provision_service_role, + toggle_role_maintenance, + toggle_role_state, InvalidRoleTypeException, ) @@ -41,6 +53,7 @@ ApiRole, ApiRoleConfigGroup, ApiRoleConfigGroupList, + ApiRoleList, ApiRoleNameList, ApiService, ApiServiceConfig, @@ -148,16 +161,12 @@ def read_service( service_name=service_name, ).items - # Gather each role configuration - if service.roles is not None: - for role in service.roles: - role.config = role_api.read_role_config( - cluster_name=cluster_name, - service_name=service_name, - role_name=role.name, - ) - else: - service.roles = list() + # Gather each role and its config + service.roles = read_roles( + api_client=api_client, + cluster_name=cluster_name, + service_name=service_name, + ).items return service @@ -307,7 +316,11 @@ def toggle_service_state( if state == "started" and service.service_state not in [ApiServiceState.STARTED]: changed = ApiServiceState.STARTED - cmd = service_api.start_command + + if service.service_state == ApiServiceState.NA: + cmd = service_api.first_run + else: + cmd = service_api.start_command elif state == "stopped" and service.service_state not in [ ApiServiceState.STOPPED, ApiServiceState.NA, @@ -542,3 +555,240 @@ def reconcile_service_role_config_groups( ) return (diff_before, diff_after) + + +def reconcile_service_roles( + api_client: ApiClient, + service: ApiService, + roles: list[dict], + purge: bool, + check_mode: bool, + # maintenance: bool, + # state: str, +) -> tuple[dict, dict]: + + diff_before, diff_after = list[dict](), list[dict]() + + role_api = RolesResourceApi(api_client) + rcg_api = RoleConfigGroupsResourceApi(api_client) + + for incoming_role in roles: + # Prepare for any per-entry changes + role_entry_before, role_entry_after = list(), list() + + # Prepare list for any new role instances + addition_list = list[ApiRole]() + + # Get all existing instances of type per host + current_role_instances = read_roles_by_type( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + ).items + + # Get the base role config group for the type + base_rcg = get_base_role_config_group( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + ) + + # Get the role config group, if defined, for use with all of the instance associations + if incoming_role.get("role_config_group", None) is not None: + incoming_rcg = rcg_api.read_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=incoming_role.get("role_config_group"), + ) + else: + incoming_rcg = None + + # Index the current role instances by hostname + instance_map = {r.host_ref.hostname: r for r in current_role_instances} + + # Reconcile existence of type/host + for h in incoming_role["hostnames"]: + # Prepare any role instance changes + instance_role_before, instance_role_after = dict(), dict() + + # Create new role - config, rcg, tags, and host + 
if h not in instance_map: + created_role = create_role( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + hostname=h, + config=incoming_role.get("config", None), + role_config_group=incoming_role.get("role_config_group", None), + tags=incoming_role.get("tags", None), + ) + + # before is already an empty dict + instance_role_after = create_role.dict() + + addition_list(created_role) + + # Update existing role - config, tags, role config group + else: + current_role = instance_map.pop(h, None) + if current_role is not None: + # Reconcile role override configurations + incoming_config = incoming_role.get("config", None) + if incoming_config or purge: + if incoming_config is None: + incoming_config = dict() + + updates = ConfigListUpdates( + current_role.config, incoming_config, purge + ) + + if updates.changed: + instance_role_before.update(config=current_role.config) + instance_role_after.update(config=updates.config) + + current_role.config = updates.config + + if not check_mode: + role_api.update_role_config( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=current_role, + ) + + # Reconcile role tags + incoming_tags = incoming_role.get("tags", None) + if incoming_tags or purge: + if incoming_tags is None: + incoming_tags = dict() + + tag_updates = TagUpdates( + current_role.tags, incoming_tags, purge + ) + + if tag_updates.changed: + instance_role_before.update(tags=tag_updates.deletions) + instance_role_after.update(tags=tag_updates.additions) + + if tag_updates.additions: + if not check_mode: + role_api.add_tags( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=tag_updates.additions, + ) + if tag_updates.deletions: + if not check_mode: + role_api.delete_tags( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=tag_updates.deletions, + ) + + # Handle role config group associations + if incoming_rcg or purge: + # If role config group is not present and the existing reference is not the base, reset to base + if ( + incoming_rcg is None + and current_role.role_config_group_ref.role_config_group_name + != base_rcg.name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update(role_config_group=base_rcg.name) + + if not check_mode: + rcg_api.move_roles_to_base_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleNameList(items=[current_role.name]), + ) + + # Else if the role config group does not match the declared + elif ( + incoming_rcg.name + != current_role.role_config_group_ref.role_config_group_name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update( + role_config_group=incoming_rcg.name + ) + + if not check_mode: + rcg_api.move_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=incoming_rcg.name, + body=ApiRoleNameList(items=[current_role.name]), + ) + + # # Handle maintenance + # if not maintenance: + # incoming_maintenance = incoming_role.get("maintenance", None) + + # maintenance_changed = toggle_role_maintenance( + # api_client=api_client, + # role=current_role, + # maintenance=incoming_maintenance, + # 
check_mode=check_mode, + # ) + + # if maintenance_changed: + # instance_role_before.update(maintenance_mode=current_role.maintenance_mode) + # instance_role_after.update(maintenance_mode=incoming_maintenance) + + # # Handle state; if the service state is not stopped, then allow the role to maintain its state + # incoming_state = incoming_role.get("state", None) + # if incoming_state is not None and state != incoming_state and state in ["present", "started", "restarted"]: + # state_changed = toggle_role_state( + # api_client=api_client, + # role=current_role, + # state=incoming_state, + # check_mode=check_mode, + # ) + + # if state_changed is not None: + # instance_role_before.update(state=current_role.role_state) + # instance_role_after.update(state=incoming_state) + + # Record any deltas for the role entry + if instance_role_before or instance_role_after: + role_entry_before.append(instance_role_before) + role_entry_after.append(instance_role_after) + + # Process role instance additions + if addition_list: + if not check_mode: + role_api.create_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleList(items=addition_list), + ) + + # Process role deletions if purge is set + if purge: + for deleted_role in instance_map.values(): + role_entry_before.append(deleted_role.to_dict()) + role_entry_after.append(dict()) + + if not check_mode: + role_api.delete_role( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=deleted_role.name, + ) + + # Add any changes for the role entry + if role_entry_before or role_entry_after: + diff_before.append(role_entry_before) + diff_after.append(role_entry_after) + + return (diff_before, diff_after) From 2f801825748ef03210b62d60acdad6fdbaa2ff59 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 8 Apr 2025 20:16:04 -0400 Subject: [PATCH 11/27] Update for reconciling existing roles and state. Removed host_ids from roles parameter. Signed-off-by: Webster Mudge --- plugins/modules/service.py | 83 ++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 44 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 6965d312..48574324 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -332,6 +332,7 @@ provision_service, read_service, reconcile_service_role_config_groups, + reconcile_service_roles, toggle_service_maintenance, toggle_service_state, ServiceMaintenanceStateException, @@ -507,6 +508,8 @@ def process(self): body=base_rcg, ) + # TODO Create and provision roles + self.handle_maintenance(current) # Else the service exists, so address any changes @@ -541,7 +544,7 @@ def process(self): if not self.module.check_mode: service_api.update_service_config( cluster_name=self.cluster, - service_name=self.service, + service_name=self.name, message=self.message, body=config_updates.config, ) @@ -591,14 +594,11 @@ def process(self): body=current, ) - # Handle roles - # Handle role config groups if self.role_config_groups or self.purge: if self.role_config_groups is None: self.role_config_groups = list() - # Then call the utility... 
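+            # Reconcile the declared role config groups against the service's
+            # current groups, purging undeclared custom groups when requested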
(before_rcg, after_rcg) = reconcile_service_role_config_groups( api_client=self.api_client, service=current, @@ -613,48 +613,42 @@ def process(self): self.diff["before"].update(role_config_groups=before_rcg) self.diff["after"].update(role_config_groups=after_rcg) - # Handle state changes - # if not self.module.check_mode: - # service_api.create_services(self.cluster, body=service_list) - - # if self.state == "started": - # self.wait_command( - # service_api.first_run(self.cluster, self.name) - # ) - - # self.output = parse_service_result( - # service_api.read_service(self.cluster, self.name, view="full") - # ) - if self.state == "started" and current.service_state != "STARTED": - self.changed = True + # Handle roles + if self.roles or self.purge: + if self.roles is None: + self.roles = list() - if self.module._diff: - self.diff["before"].update(service_state=current.service_state) - self.diff["after"].update(service_state="STARTED") + (before_role, after_role) = reconcile_service_roles( + api_client=self.api_client, + service=current, + roles=self.roles, + purge=self.purge, + check_mode=self.module.check_mode, + # state=self.state, + # maintenance=self.maintenance, + ) - if not self.module.check_mode: - if current.service_state == "NA": - self.wait_command( - service_api.first_run(self.cluster, self.name) - ) - else: - self.wait_command( - service_api.start_command(self.cluster, self.name) - ) + if before_role or after_role: + self.changed = True + if self.module._diff: + self.diff["before"].update(roles=before_role) + self.diff["after"].update(roles=after_role) - elif self.state == "stopped" and current.service_state not in [ - "STOPPED", - "NA", - ]: - self.changed = True + # Handle state changes + state_changed = toggle_service_state( + api_client=self.api_client, + service=current, + state=self.state, + check_mode=self.module.check_mode, + ) + if state_changed is not None: + self.changed = True if self.module._diff: self.diff["before"].update(service_state=current.service_state) - self.diff["after"].update(service_state="STOPPED") - - if not self.module.check_mode: - self.wait_command(service_api.stop_command(self.cluster, self.name)) + self.diff["after"].update(service_state=state_changed) + # If there are changes, get a fresh read if self.changed: self.output = parse_service_result( read_service( @@ -705,16 +699,14 @@ def main(): type="list", elements="dict", options=dict( - type=dict(aliases=["role_type"]), + type=dict(required=True, aliases=["role_type"]), hostnames=dict( + required=True, type="list", elements="str", aliases=["cluster_hosts", "cluster_hostnames"], ), - host_ids=dict( - type="list", elements="str", aliases=["cluster_host_ids"] - ), - maintenance=dict(type="bool", aliases=["maintenance_mode"]), + # maintenance=dict(type="bool", aliases=["maintenance_mode"]), config=dict(type="dict", aliases=["parameters", "params"]), role_config_group=dict(), tags=dict(type="dict"), @@ -729,6 +721,9 @@ def main(): role_type=dict(aliases=["type"]), config=dict(type="dict", aliases=["params", "parameters"]), ), + required_one_of=[ + ["name", "role_type"], + ], ), state=dict( default="present", choices=["present", "absent", "started", "stopped"] From f4e730b4d9fe324e7daf1fce362338e308c1f6ac Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 8 Apr 2025 20:17:04 -0400 Subject: [PATCH 12/27] Add missing tests and skip legacy tests Signed-off-by: Webster Mudge --- .../plugins/modules/service/test_service.py | 477 +++++++++++++----- .../modules/service/test_service_rcgs.py | 41 -- 2 
files changed, 360 insertions(+), 158 deletions(-) diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 05a998b8..d0c27380 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -39,6 +39,7 @@ ApiRoleNameList, ApiRoleState, ApiService, + ApiServiceConfig, ClustersResourceApi, RoleConfigGroupsResourceApi, RolesResourceApi, @@ -176,9 +177,58 @@ def test_service_missing_cluster(self, conn, module_args): with pytest.raises(AnsibleFailJson, match="name"): service.main() - # TODO Add argspec for RCG + def test_service_roles_missing_type(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + "name": "example", + "roles": [ + { + "hostnames": "example", + } + ], + } + ) + + with pytest.raises(AnsibleFailJson, match="type found in roles"): + service.main() - # TODO Add argspec for roles + def test_service_roles_missing_hostnames(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + "name": "example", + "roles": [ + { + "type": "example", + } + ], + } + ) + + with pytest.raises(AnsibleFailJson, match="hostnames found in roles"): + service.main() + + def test_service_role_config_group_missing_one_of(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + "name": "example", + "role_config_groups": [ + { + "display_name": "example", + } + ], + } + ) + + with pytest.raises( + AnsibleFailJson, match="name, role_type found in role_config_groups" + ): + service.main() class TestServiceInvalidParameters: @@ -254,7 +304,7 @@ def test_service_provision_core(self, conn, module_args, base_cluster, request): assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() # Idempotency @@ -268,7 +318,7 @@ def test_service_provision_core(self, conn, module_args, base_cluster, request): assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() def test_service_provision_display_name( @@ -298,7 +348,7 @@ def test_service_provision_display_name( assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() # Idempotency @@ -312,7 +362,7 @@ def test_service_provision_display_name( assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() def test_service_provision_config(self, conn, module_args, base_cluster, request): @@ -339,7 +389,7 @@ def test_service_provision_config(self, conn, module_args, base_cluster, request assert e.value.service["config"]["tickTime"] == "2001" assert e.value.service["tags"] 
== dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() # Idempotency @@ -353,7 +403,7 @@ def test_service_provision_config(self, conn, module_args, base_cluster, request assert e.value.service["config"]["tickTime"] == "2001" assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() def test_service_provision_tags(self, conn, module_args, base_cluster, request): @@ -380,7 +430,7 @@ def test_service_provision_tags(self, conn, module_args, base_cluster, request): assert e.value.service["config"] == dict() assert e.value.service["tags"]["pytest"] == "example" assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() # Idempotency @@ -394,19 +444,18 @@ def test_service_provision_tags(self, conn, module_args, base_cluster, request): assert e.value.service["config"] == dict() assert e.value.service["tags"]["pytest"] == "example" assert e.value.service["maintenance_mode"] == False - assert e.value.service["role_config_groups"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert e.value.service["roles"] == list() class TestServiceModification: - @pytest.fixture(autouse=True) - def maintenance_enabled_zookeeper( - self, cm_api_client, zookeeper - ) -> Generator[ApiService]: + @pytest.fixture() + def maintenance_enabled_zookeeper(self, cm_api_client, zookeeper) -> ApiService: ServicesResourceApi(cm_api_client).enter_maintenance_mode( cluster_name=zookeeper.cluster_ref.cluster_name, service_name=zookeeper.name, ) + return zookeeper def test_service_existing_type(self, conn, module_args, zookeeper): module_args( @@ -443,8 +492,8 @@ def test_service_existing_display_name(self, conn, module_args, zookeeper): assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER # Idempotency with pytest.raises(AnsibleExitJson) as e: @@ -457,121 +506,306 @@ def test_service_existing_display_name(self, conn, module_args, zookeeper): assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": True, + "state": "present", + } + ) -def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): - module_args( - { - **conn, - "cluster": zookeeper.cluster_ref.cluster_name, - "name": 
zookeeper.name, - "maintenance": True, - "state": "present", - } - ) + with pytest.raises(AnsibleExitJson) as e: + service.main() - with pytest.raises(AnsibleExitJson) as e: - service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER - assert e.value.changed == True - assert e.value.service["name"] == zookeeper.name - assert e.value.service["type"] == zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == True - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() - # Idempotency - with pytest.raises(AnsibleExitJson) as e: - service.main() + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER - assert e.value.changed == False - assert e.value.service["name"] == zookeeper.name - assert e.value.service["type"] == zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == True - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() - - -def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): - module_args( - { - **conn, - "cluster": zookeeper.cluster_ref.cluster_name, - "name": zookeeper.name, - "maintenance": True, - "state": "present", - } - ) + def test_service_existing_maintenance_disabled( + self, conn, module_args, cm_api_client, zookeeper + ): + ServicesResourceApi(cm_api_client).enter_maintenance_mode( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": False, + "state": "present", + } + ) - assert e.value.changed == True - assert e.value.service["name"] == zookeeper.name - assert e.value.service["type"] == zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == True - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + with pytest.raises(AnsibleExitJson) as e: + service.main() - # Idempotency - with pytest.raises(AnsibleExitJson) as e: - service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 
1 # SERVER - assert e.value.changed == False - assert e.value.service["name"] == zookeeper.name - assert e.value.service["type"] == zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == True - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() - - -def test_service_existing_maintenance_disabled( - self, conn, module_args, maintenance_enabled_zookeeper -): - module_args( - { - **conn, - "cluster": maintenance_enabled_zookeeper.cluster_ref.cluster_name, - "name": maintenance_enabled_zookeeper.name, - "maintenance": False, - "state": "present", - } - ) + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() - with pytest.raises(AnsibleExitJson) as e: - service.main() + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER - assert e.value.changed == True - assert e.value.service["name"] == maintenance_enabled_zookeeper.name - assert e.value.service["type"] == maintenance_enabled_zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == False - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + def test_service_existing_config( + self, conn, module_args, cm_api_client, zookeeper, request + ): + ServicesResourceApi(cm_api_client).update_service_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + message=f"{request.node.name}::set", + body=ApiServiceConfig( + items=[ + ApiConfig(name="tickTime", value="3001"), + ApiConfig(name="autopurgeSnapRetainCount", value="9"), + ] + ), + ) - # Idempotency - with pytest.raises(AnsibleExitJson) as e: - service.main() + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "config": { + "tickTime": 2001, + "leaderServes": "no", + }, + "message": f"{request.node.name}::test", + "state": "present", + } + ) - assert e.value.changed == False - assert e.value.service["name"] == maintenance_enabled_zookeeper.name - assert e.value.service["type"] == maintenance_enabled_zookeeper.type - assert e.value.service["config"] == dict() - assert e.value.service["tags"] == dict() - assert e.value.service["maintenance_mode"] == False - assert len(e.value.service["role_config_groups"]) == 2 - assert e.value.service["roles"] == list() + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", leaderServes="no", autopurgeSnapRetainCount="9" + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + 
assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", leaderServes="no", autopurgeSnapRetainCount="9" + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_config_purge( + self, conn, module_args, cm_api_client, zookeeper, request + ): + ServicesResourceApi(cm_api_client).update_service_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + message=f"{request.node.name}::set", + body=ApiServiceConfig( + items=[ + ApiConfig(name="tickTime", value="3001"), + ApiConfig(name="autopurgeSnapRetainCount", value="9"), + ] + ), + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "config": { + "tickTime": 2001, + "leaderServes": "no", + }, + "message": f"{request.node.name}::test", + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", + leaderServes="no", + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict(tickTime="2001", leaderServes="no") + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_tags(self, conn, module_args, cm_api_client, zookeeper): + ServicesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict( + tag_one="Updated", 
tag_two="Existing", tag_three="Added" + ) + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_tags_purge( + self, conn, module_args, cm_api_client, zookeeper + ): + ServicesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict(tag_one="Updated", tag_three="Added") + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict(tag_one="Updated", tag_three="Added") + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER class TestServiceStates: @@ -602,6 +836,7 @@ def test_service_existing_state_absent(self, conn, module_args, zookeeper): ## PREVIOUS TESTS +@pytest.mark.skip(reason="legacy") def test_present_create_service(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -622,6 +857,7 @@ def test_present_create_service(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_present_update_service(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -641,6 +877,7 @@ def test_present_update_service(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_present_maintenance_mode(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -677,6 +914,7 @@ def test_present_maintenance_mode(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_present_set_tags(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -705,6 +943,7 @@ def test_present_set_tags(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_present_append_tags(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -760,6 +999,7 @@ def test_update_tags_check_mode(conn, module_args): assert e.value.diff["after"]["tags"] == dict(test="Ansible") +@pytest.mark.skip(reason="legacy") def test_present_purge_tags(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -782,6 +1022,7 @@ def test_present_purge_tags(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_started(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -800,6 +1041,7 @@ 
def test_started(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_stopped(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), @@ -817,6 +1059,7 @@ def test_stopped(conn, module_args): assert e.value.changed == False +@pytest.mark.skip(reason="legacy") def test_absent(conn, module_args): conn.update( cluster=os.getenv("CM_CLUSTER"), diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py index 2a2e9a4a..918e5802 100644 --- a/tests/unit/plugins/modules/service/test_service_rcgs.py +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -26,33 +26,21 @@ from pathlib import Path from cm_client import ( - ApiClient, ApiConfig, ApiConfigList, - ApiEntityTag, ApiHostRef, ApiRole, ApiRoleConfigGroup, ApiRoleNameList, - ApiRoleState, ApiService, - ClustersResourceApi, RoleConfigGroupsResourceApi, - RolesResourceApi, - RoleCommandsResourceApi, ServicesResourceApi, ) from ansible_collections.cloudera.cluster.plugins.modules import service -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - wait_bulk_commands, -) from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( get_cluster_hosts, ) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - get_service_hosts, -) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( get_base_role_config_group, ) @@ -145,35 +133,6 @@ def server_role(cm_api_client, base_cluster, zookeeper): class TestServiceProvisionRoleConfigGroups: - @pytest.fixture(autouse=True) - def zookeeper_reset(self, cm_api_client, base_cluster): - # Keep track of the existing ZOOKEEPER services - initial_services = set( - [ - s.name - for s in ServicesResourceApi(cm_api_client) - .read_services( - cluster_name=base_cluster.name, - ) - .items - ] - ) - - # Yield to the test - yield - - # Remove any added services - services_to_remove = [ - s - for s in ServicesResourceApi(cm_api_client) - .read_services( - cluster_name=base_cluster.name, - ) - .items - if s.name not in initial_services - ] - deregister_service(cm_api_client, services_to_remove) - def test_service_provision_custom_rcg( self, conn, module_args, base_cluster, request ): From 1d178d3c8723790d65721264f04ea7cf026a6230 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 9 Apr 2025 23:09:19 -0400 Subject: [PATCH 13/27] Fix errors with tag and role config group reconciliation and role creation Signed-off-by: Webster Mudge --- plugins/module_utils/service_utils.py | 117 ++++++++++---------------- 1 file changed, 43 insertions(+), 74 deletions(-) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index ab1b3e78..95d12d0b 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -627,9 +627,9 @@ def reconcile_service_roles( ) # before is already an empty dict - instance_role_after = create_role.dict() + instance_role_after = created_role.to_dict() - addition_list(created_role) + addition_list.append(created_role) # Update existing role - config, tags, role config group else: @@ -656,7 +656,7 @@ def reconcile_service_roles( cluster_name=service.cluster_ref.cluster_name, service_name=service.name, role_name=current_role.name, - body=current_role, + body=current_role.config, ) # Reconcile role tags @@ -673,91 +673,60 @@ def reconcile_service_roles( 
instance_role_before.update(tags=tag_updates.deletions) instance_role_after.update(tags=tag_updates.additions) - if tag_updates.additions: + if tag_updates.deletions: if not check_mode: - role_api.add_tags( + role_api.delete_tags( cluster_name=service.cluster_ref.cluster_name, service_name=service.name, role_name=current_role.name, - body=tag_updates.additions, + body=tag_updates.deletions, ) - if tag_updates.deletions: + + if tag_updates.additions: if not check_mode: - role_api.delete_tags( + role_api.add_tags( cluster_name=service.cluster_ref.cluster_name, service_name=service.name, role_name=current_role.name, - body=tag_updates.deletions, + body=tag_updates.additions, ) # Handle role config group associations - if incoming_rcg or purge: - # If role config group is not present and the existing reference is not the base, reset to base - if ( - incoming_rcg is None - and current_role.role_config_group_ref.role_config_group_name - != base_rcg.name - ): - instance_role_before.update( - role_config_group=current_role.role_config_group_ref.role_config_group_name - ) - instance_role_after.update(role_config_group=base_rcg.name) - - if not check_mode: - rcg_api.move_roles_to_base_group( - cluster_name=service.cluster_ref.cluster_name, - service_name=service.name, - body=ApiRoleNameList(items=[current_role.name]), - ) - - # Else if the role config group does not match the declared - elif ( - incoming_rcg.name - != current_role.role_config_group_ref.role_config_group_name - ): - instance_role_before.update( - role_config_group=current_role.role_config_group_ref.role_config_group_name - ) - instance_role_after.update( - role_config_group=incoming_rcg.name + # If role config group is not present and the existing reference is not the base, reset to base + if ( + incoming_rcg is None + and current_role.role_config_group_ref.role_config_group_name + != base_rcg.name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update(role_config_group=base_rcg.name) + + if not check_mode: + rcg_api.move_roles_to_base_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleNameList(items=[current_role.name]), ) + # Else if the role config group does not match the declared + elif ( + incoming_rcg is not None + and incoming_rcg.name + != current_role.role_config_group_ref.role_config_group_name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update(role_config_group=incoming_rcg.name) - if not check_mode: - rcg_api.move_roles( - cluster_name=service.cluster_ref.cluster_name, - service_name=service.name, - role_config_group_name=incoming_rcg.name, - body=ApiRoleNameList(items=[current_role.name]), - ) - - # # Handle maintenance - # if not maintenance: - # incoming_maintenance = incoming_role.get("maintenance", None) - - # maintenance_changed = toggle_role_maintenance( - # api_client=api_client, - # role=current_role, - # maintenance=incoming_maintenance, - # check_mode=check_mode, - # ) - - # if maintenance_changed: - # instance_role_before.update(maintenance_mode=current_role.maintenance_mode) - # instance_role_after.update(maintenance_mode=incoming_maintenance) - - # # Handle state; if the service state is not stopped, then allow the role to maintain its state - # incoming_state = incoming_role.get("state", None) - # if incoming_state is not None and state != incoming_state and state in 
["present", "started", "restarted"]: - # state_changed = toggle_role_state( - # api_client=api_client, - # role=current_role, - # state=incoming_state, - # check_mode=check_mode, - # ) - - # if state_changed is not None: - # instance_role_before.update(state=current_role.role_state) - # instance_role_after.update(state=incoming_state) + if not check_mode: + rcg_api.move_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=incoming_rcg.name, + body=ApiRoleNameList(items=[current_role.name]), + ) # Record any deltas for the role entry if instance_role_before or instance_role_after: From 329b6507b24f18913d6adb2403091dffde4c99a1 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 9 Apr 2025 23:09:56 -0400 Subject: [PATCH 14/27] Add create roles Signed-off-by: Webster Mudge --- plugins/modules/service.py | 65 +++++++++++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 48574324..2ba957eb 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -325,6 +325,7 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( create_role, provision_service_role, + RoleException, ) from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( create_service, @@ -444,7 +445,7 @@ def process(self): base_rcg = None if self.module._diff: - before_list, after_list = list(), list() + before_rcg, after_rcg = list(), list() for requested_rcg in self.role_config_groups: # Create any custom role config groups @@ -462,8 +463,8 @@ def process(self): rcg_list.append(custom_rcg) if self.module._diff: - before_list.append(dict()) - after_list.append(custom_rcg.to_dict()) + before_rcg.append(dict()) + after_rcg.append(custom_rcg.to_dict()) # Else record the base role config group for modification else: @@ -482,12 +483,12 @@ def process(self): ) if self.module._diff: - before_list.append(before) - after_list.append(after) + before_rcg.append(before) + after_rcg.append(after) if self.module._diff: - self.diff["before"]["role_config_groups"] = before_list - self.diff["after"]["role_config_groups"] = after_list + self.diff["before"]["role_config_groups"] = before_rcg + self.diff["after"]["role_config_groups"] = after_rcg if not self.module.check_mode: provision_role_config_groups( @@ -508,8 +509,55 @@ def process(self): body=base_rcg, ) - # TODO Create and provision roles + # Create and provision roles + if self.roles: + if self.module._diff: + role_entries_before, role_entries_after = list(), list() + + for requested_role in self.roles: + if self.module._diff: + role_instances_before, role_instances_after = list(), list() + + for role_host in requested_role["hostnames"]: + try: + created_role = create_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_type=requested_role["type"], + hostname=role_host, + config=requested_role.get("config", None), + role_config_group=requested_role.get( + "role_config_group", None + ), + tags=requested_role.get("tags", None), + ) + except RoleException as ex: + self.module.fail_json(msg=to_native(ex)) + + if self.module._diff: + role_instances_before.append(dict()) + role_instances_after.append(created_role.to_dict()) + + if not self.module.check_mode: + provisioned_role = provision_service_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role=created_role, + ) + + if 
not provisioned_role: + self.module.fail_json( + msg=f"Unable to create new role in service '{current.name}'", + role=to_native(provisioned_role.to_dict()), + ) + + if self.module._diff: + role_entries_before.append(role_instances_before) + role_entries_after.append(role_instances_after) + # Set the maintenance self.handle_maintenance(current) # Else the service exists, so address any changes @@ -519,6 +567,7 @@ def process(self): msg="Service name already in use for type: " + current.type ) + # Set the maintenance self.handle_maintenance(current) # Handle service-wide configurations From db3a94f0634d8c473d9b44547abec1e4853b8604 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 9 Apr 2025 23:10:43 -0400 Subject: [PATCH 15/27] Check for running commands on and stop running services before deletion Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 8d50d98c..de4b7a6c 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -46,6 +46,8 @@ from cm_client.rest import ApiException from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_command, + wait_commands, resolve_parameter_updates, ) from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( @@ -218,6 +220,35 @@ def deregister_service(api_client: ApiClient, registry: list[ApiService]) -> Non # Delete the services for s in registry: try: + # Check for running commands and wait for them to finish + active_cmds = service_api.list_active_commands( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + wait_commands( + api_client=api_client, + commands=active_cmds, + ) + + # If the service is running, stop it + current = service_api.read_service( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + if current.service_state == ApiServiceState.STARTED: + stop_cmd = service_api.stop_command( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + wait_command( + api_client=api_client, + command=stop_cmd, + ) + + # Delete the service service_api.delete_service( cluster_name=s.cluster_ref.cluster_name, service_name=s.name, From 4e0622e9946e8fc9c95bf25090978eb050109543 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 9 Apr 2025 23:11:10 -0400 Subject: [PATCH 16/27] Add resettable_cluster fixture Signed-off-by: Webster Mudge --- .../modules/service/test_service_rcgs.py | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py index 918e5802..4421c5e8 100644 --- a/tests/unit/plugins/modules/service/test_service_rcgs.py +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -51,7 +51,6 @@ ) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, - AnsibleFailJson, deregister_service, register_service, deregister_role, @@ -133,6 +132,35 @@ def server_role(cm_api_client, base_cluster, zookeeper): class TestServiceProvisionRoleConfigGroups: + @pytest.fixture(autouse=True) + def resettable_cluster(self, cm_api_client, base_cluster): + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield + + # Remove any added services + 
services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + def test_service_provision_custom_rcg( self, conn, module_args, base_cluster, request ): From 47e3e00c0a594f18e9397af5f84cafcb523d5e2b Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 9 Apr 2025 23:12:38 -0400 Subject: [PATCH 17/27] Remove legacy tests Signed-off-by: Webster Mudge --- plugins/module_utils/cluster_utils.py | 1 + .../plugins/modules/service/test_service.py | 246 +---- .../modules/service/test_service_roles.py | 916 ++++++++++++++++++ 3 files changed, 919 insertions(+), 244 deletions(-) create mode 100644 tests/unit/plugins/modules/service/test_service_roles.py diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index e11512c2..f3f90caa 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -51,6 +51,7 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: return output +# TODO Convert to use cluster_name vs the ApiCluster object for broader usage in pytest fixtures def get_cluster_hosts(api_client: ApiClient, cluster: ApiCluster) -> list[ApiHost]: return ( ClustersResourceApi(api_client) diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index d0c27380..7983c1b3 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -809,6 +809,8 @@ def test_service_existing_tags_purge( class TestServiceStates: + # TODO Finish states + def test_service_existing_state_absent(self, conn, module_args, zookeeper): module_args( { @@ -831,247 +833,3 @@ def test_service_existing_state_absent(self, conn, module_args, zookeeper): assert e.value.changed == False assert not e.value.service - - -## PREVIOUS TESTS - - -@pytest.mark.skip(reason="legacy") -def test_present_create_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - display_name="Example Service", - type="ZOOKEEPER", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_present_update_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - display_name="Example Service by Ansible", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_present_maintenance_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - maintenance="yes", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.service["maintenance_mode"] == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.service["maintenance_mode"] == True - assert e.value.changed == False - - conn.update( - maintenance="no", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.service["maintenance_mode"] == 
False - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.service["maintenance_mode"] == False - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_present_set_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict( - test="Ansible", key="Value", empty_string="", blank_string=" ", none=None - ), - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert ( - recursive_diff(e.value.service["tags"], dict(test="Ansible", key="Value")) - is None - ) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert ( - recursive_diff(e.value.service["tags"], dict(test="Ansible", key="Value")) - is None - ) - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_present_append_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict(more="Tags", key="Value"), - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert ( - recursive_diff( - e.value.service["tags"], dict(test="Ansible", key="Value", more="Tags") - ) - is None - ) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert ( - recursive_diff( - e.value.service["tags"], dict(test="Ansible", key="Value", more="Tags") - ) - is None - ) - assert e.value.changed == False - - -@pytest.mark.skip("Move to separate DIFF test suite.") -def test_update_tags_check_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - type="ZOOKEEPER", - tags=dict( - test="Ansible", - empty_string="", - none=None, - long_empty_string=" ", - ), - _ansible_check_mode=True, - _ansible_diff=True, - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == True - assert e.value.diff["before"]["tags"] == dict() - assert e.value.diff["after"]["tags"] == dict(test="Ansible") - - -@pytest.mark.skip(reason="legacy") -def test_present_purge_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict(purge="Ansible"), - purge=True, - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert recursive_diff(e.value.service["tags"], dict(purge="Ansible")) is None - assert e.value.changed == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert recursive_diff(e.value.service["tags"], dict(purge="Ansible")) is None - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_started(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="started", - _ansible_verbosity=3, - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson): - service.main() - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_stopped(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="stopped", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson): - service.main() - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -@pytest.mark.skip(reason="legacy") -def test_absent(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - 
service=os.getenv("CM_SERVICE"), - state="absent", - ) - module_args(conn) - - with pytest.raises(AnsibleExitJson): - service.main() - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False diff --git a/tests/unit/plugins/modules/service/test_service_roles.py b/tests/unit/plugins/modules/service/test_service_roles.py new file mode 100644 index 00000000..4a150286 --- /dev/null +++ b/tests/unit/plugins/modules/service/test_service_roles.py @@ -0,0 +1,916 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiCluster, + ApiEntityTag, + ApiHost, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiService, + RoleConfigGroupsResourceApi, + RolesResourceApi, + ServicesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + create_role_config_group, + get_base_role_config_group, + provision_role_config_groups, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + provision_service_role, + read_roles, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + deregister_service, + register_service, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture() +def cluster_hosts(cm_api_client, base_cluster) -> list[ApiHost]: + return get_cluster_hosts(cm_api_client, base_cluster) + + +class TestServiceProvisionRoles: + @pytest.fixture(autouse=True) + def resettable_cluster(self, cm_api_client, base_cluster) -> Generator[ApiCluster]: + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield base_cluster + + # Remove any added services + services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_roles( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": 
service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + def test_service_provision_roles_custom_rcg( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "role_config_group": "PYTEST_SERVER", + }, + ], + "role_config_groups": [ + { + "name": "PYTEST_SERVER", + "role_type": "SERVER", + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + PYTEST_SERVER + + assert e.value.service["roles"][0]["role_config_group_name"] == "PYTEST_SERVER" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + PYTEST_SERVER + + assert e.value.service["roles"][0]["role_config_group_name"] == "PYTEST_SERVER" + + def test_service_provision_roles_config( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": 
resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "config": { + "minSessionTimeout": 4801, + }, + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["config"]["minSessionTimeout"] == "4801" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["config"]["minSessionTimeout"] == "4801" + + def test_service_provision_roles_tags( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "tags": { + "pytest": "example", + }, + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["tags"]["pytest"] == "example" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["tags"]["pytest"] == "example" + + +class TestServiceModificationRoles: + @pytest.fixture() + def zookeeper(self, cm_api_client, base_cluster, request) -> Generator[ApiService]: + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current 
cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) + roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + @pytest.fixture() + def available_hosts(self, cm_api_client, cluster_hosts, zookeeper) -> list[ApiHost]: + service_host_ids = [ + h.host_id + for h in get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + return [h for h in cluster_hosts if h.host_id not in service_host_ids] + + @pytest.fixture() + def server_role(self, cm_api_client, base_cluster, zookeeper) -> ApiRole: + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + created_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + provisioned_role = provision_service_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role=created_role, + ) + + return provisioned_role + + @pytest.fixture() + def server_rcg(self, cm_api_client, zookeeper, request) -> ApiRoleConfigGroup: + custom_rcg = create_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + name=f"pytest-{Path(request.node.name).stem}", + role_type="SERVER", + config=dict(minSessionTimeout=6601), + ) + + provisioned_rcgs = provision_role_config_groups( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_groups=[custom_rcg], + ) + + return provisioned_rcgs.items[0] + + @pytest.fixture() + def server_rcg_role(self, cm_api_client, server_role, server_rcg) -> ApiRole: + moved_roles = RoleConfigGroupsResourceApi(cm_api_client).move_roles( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_config_group_name=server_rcg.name, + body=ApiRoleNameList(items=[server_role.name]), + ) + + return moved_roles.items[0] + + def test_service_existing_role_rcg( + self, conn, module_args, cm_api_client, zookeeper, server_rcg + ): + existing_hosts = get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in existing_hosts], + "role_config_group": server_rcg.name, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == len(existing_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + 
server_rcg + + assert e.value.service["roles"][0]["role_config_group_name"] == server_rcg.name + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == len(existing_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + assert e.value.service["roles"][0]["role_config_group_name"] == server_rcg.name + + def test_service_existing_role_rcg_base( + self, conn, module_args, cm_api_client, zookeeper, server_rcg_role + ): + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_rcg_role.type, + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_rcg_role.type, + "hostnames": [server_rcg_role.host_ref.hostname], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_rcg_role + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_rcg_role.type + and r["hostname"] == server_rcg_role.host_ref.hostname + ][0] + assert result_role["role_config_group_name"] == base_rcg.name + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_rcg_role.type + and r["hostname"] == server_rcg_role.host_ref.hostname + ][0] + assert result_role["role_config_group_name"] == base_rcg.name + + def test_service_existing_role_tags( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and 
r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + + def test_service_existing_role_tags_purge( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict(tag_one="Updated", tag_three="Added") + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict(tag_one="Updated", tag_three="Added") + + def test_service_existing_role_config( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).update_role_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5501"), + ApiConfig(name="maxSessionTimeout", value="45001"), + ] + ), + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "config": { + "minSessionTimeout": 5601, + "maxClientCnxns": 56, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + >= dict( + minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + ).items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + 
result_role["config"].items() + >= dict( + minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + ).items() + ) + + def test_service_existing_role_config_purge( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).update_role_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5501"), + ApiConfig(name="maxSessionTimeout", value="45001"), + ] + ), + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "config": { + "minSessionTimeout": 5601, + "maxClientCnxns": 56, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + == dict(minSessionTimeout="5601", maxClientCnxns="56").items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + == dict(minSessionTimeout="5601", maxClientCnxns="56").items() + ) + + def test_service_existing_role_add( + self, conn, module_args, zookeeper, available_hosts + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [available_hosts[0].hostname], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + def test_service_existing_role_purge( + self, conn, module_args, zookeeper, available_hosts + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [available_hosts[0].hostname], + "config": { + "serverId": 9, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # new SERVER + assert 
len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] From 50a5a24da77255d4b2ea1b302bf0789e6bfb57e4 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 10 Apr 2025 08:53:48 -0400 Subject: [PATCH 18/27] Enable restarted state and test all states Signed-off-by: Webster Mudge --- plugins/modules/service.py | 3 +- .../plugins/modules/service/test_service.py | 149 +++++++++++++++--- 2 files changed, 125 insertions(+), 27 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 2ba957eb..b81f6b48 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -775,7 +775,8 @@ def main(): ], ), state=dict( - default="present", choices=["present", "absent", "started", "stopped"] + default="present", + choices=["present", "absent", "started", "stopped", "restarted"], ), ), supports_check_mode=True, diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 7983c1b3..6ffe9ed1 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -19,50 +19,30 @@ __metaclass__ = type import logging -import os import pytest -from collections.abc import Generator - -from ansible.module_utils.common.dict_transformations import recursive_diff - from pathlib import Path from cm_client import ( - ApiClient, ApiConfig, - ApiConfigList, ApiEntityTag, ApiHostRef, ApiRole, - ApiRoleConfigGroup, - ApiRoleNameList, - ApiRoleState, ApiService, ApiServiceConfig, - ClustersResourceApi, - RoleConfigGroupsResourceApi, - RolesResourceApi, - RoleCommandsResourceApi, + ApiServiceState, ServicesResourceApi, ) from ansible_collections.cloudera.cluster.plugins.modules import service from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - wait_bulk_commands, + wait_command, ) from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( get_cluster_hosts, ) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - get_service_hosts, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( - get_base_role_config_group, -) from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( create_role, - read_role, read_roles, ) from ansible_collections.cloudera.cluster.tests.unit import ( @@ -72,8 +52,6 @@ register_service, deregister_role, register_role, - deregister_role_config_group, - register_role_config_group, ) LOG = logging.getLogger(__name__) @@ -809,9 +787,128 @@ def test_service_existing_tags_purge( class TestServiceStates: - # TODO Finish states + def test_service_existing_state_started( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.STOPPING, + ]: + stop_cmd = ServicesResourceApi(cm_api_client).stop_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + 
wait_command(cm_api_client, stop_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "started", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STARTED + + def test_service_existing_state_stopped( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "stopped", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + def test_service_existing_state_restarted( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "restarted", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # No idempotency due to the nature of the state + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + def test_service_existing_state_absent( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) - def test_service_existing_state_absent(self, conn, module_args, zookeeper): module_args( { **conn, From 96718e6d28dbd6496671fb2f4b28630ff33e24e1 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 10 Apr 2025 15:52:16 -0400 Subject: [PATCH 19/27] Add additional action_groups for module defaults Signed-off-by: Webster Mudge --- meta/runtime.yml | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/meta/runtime.yml b/meta/runtime.yml index a816aba1..6b7c4978 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -39,3 +39,49 @@ action_groups: - cm - cluster_info - cluster + service: + - metadata: + extend_group: + - cm + - service_info + - service + role: + - metadata: + extend_group: + - cm + - service_role_info + - service_role + role_config_group: + - metadata: + extend_group: 
+ - cm + - service_role_config_group_info + - service_role_config_group + host: + - metadata: + extend_group: + - cm + - host_info + - host + host_template: + - metadata: + extend_group: + - cm + - host_template_info + - host_template + parcel: + - metadata: + extend_group: + - cm + - parcel_info + - parcel + deployment: + - metadata: + extend_group: + - cluster + - service + - role + - role_config_group + - host + - host_template + - parcel From 87c66a993d0819f066a3dc3c11865c5462e0a63c Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 11:20:11 -0400 Subject: [PATCH 20/27] Update documentation, examples, and return values. Update argspec for role_config_group parameter Signed-off-by: Webster Mudge --- plugins/modules/service.py | 389 ++++++++++++++++++++++++++++++++++--- 1 file changed, 360 insertions(+), 29 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index b81f6b48..f8e9a490 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -25,50 +25,139 @@ options: cluster: description: - - The associated cluster. + - The associated cluster of the service. type: str required: yes aliases: - cluster_name - service: + name: description: - - The service. + - The service to manage. + - This is a unique identifier within the cluster. type: str required: yes aliases: - service_name - - name + - service display_name: description: - The Cloudera Manager UI display name for the service. type: str + type: + description: + - The service type. + - Required if O(state) creates a new service. + type: str + aliases: + - service_type maintenance: description: - - Flag for whether the service should be in maintenance mode. + - Flag indicating if the service should be in maintenance mode. type: bool aliases: - maintenance_mode + purge: + description: + - Flag indicating if the declared service-wide configurations, tags, role config groups, and role assignments and configurations should be append-only or fully reconciled. + - If set, the module will actively remove undeclared entries, e.g. remove roles. + - To clear all service-wide configurations and tags, set O(tags={}) or O(config={}), i.e. an empty dictionary, and O(purge=True). + type: bool + default: False + config: + description: + - A set of service-wide configurations for the service. + - To unset a configuration, use V(None) as its value. + - If O(purge=True), undeclared configurations will be removed. + type: dict tags: description: - A set of tags applied to the service. - - To unset a tag, use C(None) as its value. + - To unset a tag, use V(None) as its value. + - If O(purge=True), undeclared tags will be removed. type: dict - type: + roles: description: - - The service type. - - Required if I(state) creates a new service. - type: str - aliases: - - service_type - purge: + - List of service roles to provision directly to cluster hosts. + - If O(purge=True), undeclared roles for the service will be removed from the hosts. + type: list + elements: dict + options: + type: + description: + - The role instance type to provision on the designated cluster hosts. + type: str + required: yes + aliases: + - role_type + hostnames: + description: + - List of hostnames of the cluster hosts receiving the role type instance. + type: list + elements: str + required: yes + aliases: + - cluster_hosts + - cluster_hostnames + config: + description: + - A set of role override configurations for the role instance on the cluster hosts. + - To unset a configuration, use V(None) as its value. 
+ - If O(purge=True), undeclared configurations will be removed. + type: dict + aliases: + - parameters + - params + role_config_group: + description: + - A named (custom) role config group to assign to the role instance on the cluster hosts. + - To unset the assignment, use V(None) as the value. + type: str + tags: + description: + - A set of tags applied to the role type instance on the cluster hosts. + - To unset a tag, use V(None) as its value. + - If O(purge=True), undeclared tags will be removed. + type: dict + role_config_groups: description: - - Flag for whether the declared service tags should append or overwrite any existing tags. - - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True). - type: bool - default: False + - List of base and named (custom) role config groups to declare and configure for the service. + - If O(purge=True), undeclared named (custom) role config groups will be removed and their + associated role instances reassigned to each role type's base role config group. (Base role + config groups cannot be removed.) + type: list + elements: dict + options: + name: + description: + - The name of a custom role config group. + type: str + aliases: + - role_config_group_name + - role_config_group + display_name: + description: + - The Cloudera Manager UI display name for the role config group. + type: str + role_type: + description: + - The role type of the base or named (custom) role config group. + type: str + required: yes + aliases: + - type + config: + description: + - A set of role config group configurations. + - To unset a configuration, use V(None) as its value. + - If O(purge=True), undeclared configurations will be removed. + type: dict + aliases: + - parameters + - params state: description: - The state of the service. + - Setting O(state=restarted) will always result in a V(changed=True) result. 
type: str default: present choices: @@ -165,6 +254,98 @@ tags: {} purge: yes +- name: Update (append) several service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: + param_one: 1 + param_two: Two + +- name: Update (purge) the service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: + param_one: 1 + param_three: three + purge: yes + +- name: Remove all the service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: {} + purge: yes + +- name: Provision role instances on cluster hosts for a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + roles: + - type: SERVER + hostnames: + - host1.example + - host2.example + config: + param_one: 1 + +- name: Provision role config groups (base and named) for a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + role_config_group: + - name: custom_server_1 + display_name: Custom Server (1) + role_type: SERVER + config: + param_two: Two + - role_type: SERVER # This is the base role config group for SERVER + config: + param_three: three + +- name: Provision a cluster service with hosts, role config groups, and role assignments + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + roles: + - type: SERVER + hostnames: + - host1.example + config: + param_two: Twelve + role_config_group: custom_server_1 + - type: SERVER # Will use the base role config group for SERVER + hostnames: + - host2.example + role_config_group: + - name: custom_server_1 + display_name: Custom Server (1) + role_type: SERVER + config: + param_two: Two + - role_type: SERVER # This is the base role config group for SERVER + config: + param_three: three + - name: Remove a cluster service cloudera.cluster.service: host: example.cloudera.com @@ -293,15 +474,169 @@ description: Version of the service. type: str returned: when supported + config: + description: Service-wide configuration details about a cluster service. + type: dict + returned: when supported + role_config_groups: + description: List of base and custom role config groups for the cluster service. + type: list + elements: dict + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. + type: str + returned: when supported + config: + description: Set of configurations for the role config group. 
+ type: dict + returned: when supported + returned: when supported + roles: + description: List of provisioned role instances on cluster hosts for the cluster service. + type: list + elements: dict + contains: + name: + description: The cluster service role name. + type: str + returned: always + type: + description: The cluster service role type. + type: str + returned: always + sample: + - NAMENODE + - DATANODE + - TASKTRACKER + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + hostname: + description: The hostname of the cluster host. + type: str + returned: always + role_state: + description: State of the cluster service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. + type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. + type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + config: + description: Set of role configurations for the cluster service role. + type: dict + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). 
+ type: str + returned: when supported + returned: when supported """ from cm_client import ( - ApiEntityTag, - ApiRoleConfigGroup, - ApiRoleConfigGroupList, - ApiRoleNameList, ApiService, - ApiServiceList, ClustersResourceApi, RoleConfigGroupsResourceApi, ServicesResourceApi, @@ -312,7 +647,6 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, - resolve_tag_updates, ConfigListUpdates, TagUpdates, ) @@ -328,7 +662,7 @@ RoleException, ) from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - create_service, + create_service_model, parse_service_result, provision_service, read_service, @@ -408,7 +742,7 @@ def process(self): self.module.fail_json(msg=f"missing required arguments: type") # Create and provision the service - service = create_service( + service = create_service_model( api_client=self.api_client, name=self.name, type=self.type, @@ -767,12 +1101,9 @@ def main(): options=dict( name=dict(aliases=["role_config_group_name", "role_config_group"]), display_name=dict(), - role_type=dict(aliases=["type"]), + role_type=dict(required=True, aliases=["type"]), config=dict(type="dict", aliases=["params", "parameters"]), ), - required_one_of=[ - ["name", "role_type"], - ], ), state=dict( default="present", From 34f0984009e82199c476ee63cd68a8dc951e75ec Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 11:20:57 -0400 Subject: [PATCH 21/27] Remove service_name from role_config_group and roles return values Signed-off-by: Webster Mudge --- plugins/module_utils/service_utils.py | 49 ++++++--------------------- 1 file changed, 11 insertions(+), 38 deletions(-) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 95d12d0b..67ca08ed 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -114,16 +114,21 @@ def parse_service_result(service: ApiService) -> dict: # Parse the role config groups via util function if service.role_config_groups is not None: + parsed_rcgs = [ + parse_role_config_group_result(rcg) for rcg in service.role_config_groups + ] output.update( - role_config_groups=[ - parse_role_config_group_result(rcg) - for rcg in service.role_config_groups - ] + # Remove service_name from output + role_config_groups=[{k: v for k, v in parsed_rcgs if k != "service_name"}] ) # Parse the roles via util function if service.roles is not None: - output.update(roles=[parse_role_result(r) for r in service.roles]) + parsed_roles = [parse_role_result(r) for r in service.roles] + output.update( + # Remove service_name from output + roles=[{k: v for k, v in parsed_roles if k != "service_name"}] + ) return output @@ -171,7 +176,7 @@ def read_service( return service -def create_service( +def create_service_model( api_client: ApiClient, name: str, type: str, @@ -179,8 +184,6 @@ def create_service( display_name: str = None, config: dict = None, tags: dict = None, - # role_config_groups: list[ApiRoleConfigGroup] = None, - # roles: list[ApiRole] = None, ) -> ApiService: if ( type.upper() @@ -210,36 +213,6 @@ def create_service( if tags: service.tags = [ApiEntityTag(k, v) for k, v in tags.items()] - # # Role config groups - # # TODO Use a role_config_group utility to marshal the ApiRoleConfigGroup list - # # Keep the incoming type, but use it to create another via the utility call - # # This includes passing in the role type as an external reference - # if role_config_groups: - # available_types = 
ServicesResourceApi(api_client).list_role_types( - # cluster_name=cluster_name, - # service_name=name, - # ).items - - # for rcg in role_config_groups: - # if rcg.role_type not in available_types: - # raise InvalidRoleType("Unable to find role type: " + rcg.role_type) - - # service.role_config_groups = role_config_groups - - # # Roles - # # TODO Use the create_role() utility to marshal the ApiRole list - # # Keep the incoming ApiRole type, but use it to create another via the utility call - # # Need to pass in the role types and role config groups as external references (the latter because they - # # might be defined within the service) - # # For the former, the reference replaces an inline lookup. For the latter, the reference is a initial - # # lookup and then a fallback to the inline lookup - # # This might not work, as the references might fail because the service is not yet available... or - # # break up the provisioning flow to spin up an initial, "core" service, then have additional utility - # # calls to spin up RCG and roles, which then would be able to have the inline lookups (still would need - # # the to-be reference list for RCGs, however). - # if roles: - # pass - return service From fde3b88429972a46945bbf82c681fded65c30146 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 11:21:28 -0400 Subject: [PATCH 22/27] Update docstring for parse_role_config_group_result Signed-off-by: Webster Mudge --- plugins/module_utils/role_config_group_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 5210c5e1..d0213cd6 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -49,6 +49,7 @@ def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dic - base (bool) - display_name (str) - config (dict) + - service_name (str) Args: role_config_group (ApiRoleConfigGroup): Role Config Group From 2c94c67e4db0012e708e630a63d27b5dbc27fabf Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 11:21:48 -0400 Subject: [PATCH 23/27] Add required role_type parameter to tests Signed-off-by: Webster Mudge --- tests/unit/plugins/modules/service/test_service_rcgs.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py index 4421c5e8..296fcb44 100644 --- a/tests/unit/plugins/modules/service/test_service_rcgs.py +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -549,6 +549,7 @@ def test_service_existing_custom_rcg( "role_config_groups": [ { "name": custom_rcg_server.name, + "type": custom_rcg_server.role_type, "config": { "minSessionTimeout": 5501, "maxSessionTimeout": 45001, @@ -605,6 +606,7 @@ def test_service_existing_custom_rcg_purge( "role_config_groups": [ { "name": custom_rcg_server.name, + "type": custom_rcg_server.role_type, "config": { "maxSessionTimeout": 45001, }, From 523475bfc954b158bf208e80afb7a2a653dd627d Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 13:13:32 -0400 Subject: [PATCH 24/27] Remove unused function Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 80 ------------------------------------------ 1 file changed, 80 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 54c374d6..dd82d8ee 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1113,86 +1113,6 @@ def 
monitor_command( raise Exception(command.result_message) -# @pytest.fixture(scope="module") -# def test_service(cm_api_client) -> Generator[Callable[[ApiCluster, ApiService], ApiService]]: -# service_api = ServicesResourceApi(cm_api_client) -# cm_api = ClustersResourceApi(cm_api_client) - -# services = [] - -# # Consider returning a class with basic functions like initialize? -# def _provision_service(cluster: ApiCluster, service: ApiService) -> ApiService: -# # Check the cluster hosts -# hosts = [ -# h -# for i, h in enumerate(cm_api.list_hosts(cluster_name=cluster.name).items) -# if i < 3 -# ] - -# if len(hosts) != 3: -# raise Exception( -# "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts." -# ) - -# # Create the service -# created_service = service_api.create_services( -# cluster_name=cluster.name, body=ApiServiceList(items=[service]) -# ).items[0] - -# # Record the service -# services.append(created_service) - -# # Start the service -# first_run_cmd = service_api.first_run( -# cluster_name=cluster.name, -# service_name=created_service.name, -# ) -# wait_for_command(cm_api_client, first_run_cmd) - -# # Refresh the service -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) - -# # Establish the maintenance mode of the service -# if service.maintenance_mode: -# maintenance_cmd = service_api.enter_maintenance_mode( -# cluster_name=cluster.name, -# service_name=created_service.name -# ) -# wait_for_command(cm_api_client, maintenance_cmd) -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) - -# # Establish the state the of the service -# if created_service.service_state != service.service_state: -# if service.service_state == ApiServiceState.STOPPED: -# stop_cmd = service_api.stop_command( -# cluster_name=cluster.name, -# service_name=created_service.name, -# ) -# wait_for_command(cm_api_client, stop_cmd) -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) -# else: -# raise Exception("Unsupported service state for fixture: " + service.service_state) - -# # Return the provisioned service -# return created_service - -# # Yield the service to the tests -# yield _provision_service - -# # Delete the services -# for s in services: -# service_api.delete_service( -# cluster_name=s.cluster_ref.cluster_name, -# service_name=s.name, -# ) - - @pytest.fixture(scope="module") def service_factory( cm_api_client, From 2e8e613bd8d7ff8dc7e47a731472edff920b23e5 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 13:14:19 -0400 Subject: [PATCH 25/27] Add read_services() utility function Signed-off-by: Webster Mudge --- plugins/module_utils/service_utils.py | 62 ++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 10 deletions(-) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 67ca08ed..8ca2967a 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -24,9 +24,6 @@ ConfigListUpdates, TagUpdates, ) -from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( - get_host, -) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( create_role_config_group, get_base_role_config_group, @@ -38,10 +35,6 @@ read_roles, read_roles_by_type, parse_role_result, - provision_service_role, - toggle_role_maintenance, - 
toggle_role_state, - InvalidRoleTypeException, ) from cm_client import ( @@ -119,7 +112,10 @@ def parse_service_result(service: ApiService) -> dict: ] output.update( # Remove service_name from output - role_config_groups=[{k: v for k, v in parsed_rcgs if k != "service_name"}] + role_config_groups=[ + {k: v for k, v in rcg_dict.items() if k != "service_name"} + for rcg_dict in parsed_rcgs + ] ) # Parse the roles via util function @@ -127,7 +123,10 @@ def parse_service_result(service: ApiService) -> dict: parsed_roles = [parse_role_result(r) for r in service.roles] output.update( # Remove service_name from output - roles=[{k: v for k, v in parsed_roles if k != "service_name"}] + roles=[ + {k: v for k, v in role_dict.items() if k != "service_name"} + for role_dict in parsed_roles + ] ) return output @@ -148,7 +147,6 @@ def read_service( """ service_api = ServicesResourceApi(api_client) rcg_api = RoleConfigGroupsResourceApi(api_client) - role_api = RolesResourceApi(api_client) service = service_api.read_service( cluster_name=cluster_name, service_name=service_name @@ -176,6 +174,50 @@ def read_service( return service +def read_services(api_client: ApiClient, cluster_name: str) -> list[ApiService]: + """Read the cluster services and gather each services' role config group and role dependents. + + Args: + api_client (ApiClient): _description_ + cluster_name (str): _description_ + + Returns: + ApiService: _description_ + """ + service_api = ServicesResourceApi(api_client) + rcg_api = RoleConfigGroupsResourceApi(api_client) + + services = list[ApiService]() + + discovered_services = service_api.read_services( + cluster_name=cluster_name, + ).items + + for service in discovered_services: + # Gather the service-wide configuration + service.config = service_api.read_service_config( + cluster_name=cluster_name, service_name=service.name + ) + + # Gather each role config group configuration + service.role_config_groups = rcg_api.read_role_config_groups( + cluster_name=cluster_name, + service_name=service.name, + ).items + + # Gather each role and its config + service.roles = read_roles( + api_client=api_client, + cluster_name=cluster_name, + service_name=service.name, + ).items + + # Add it to the output + services.append(service) + + return services + + def create_service_model( api_client: ApiClient, name: str, From 22fe2c82c7af2c5c310c4a21fb1b22c683cd5f13 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 13:15:03 -0400 Subject: [PATCH 26/27] Remove obsolete test Signed-off-by: Webster Mudge --- plugins/modules/service.py | 1 - .../plugins/modules/service/test_service.py | 19 ------------------- 2 files changed, 20 deletions(-) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index f8e9a490..de18b7cb 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -1089,7 +1089,6 @@ def main(): elements="str", aliases=["cluster_hosts", "cluster_hostnames"], ), - # maintenance=dict(type="bool", aliases=["maintenance_mode"]), config=dict(type="dict", aliases=["parameters", "params"]), role_config_group=dict(), tags=dict(type="dict"), diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 6ffe9ed1..753da8d9 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -189,25 +189,6 @@ def test_service_roles_missing_hostnames(self, conn, module_args): with pytest.raises(AnsibleFailJson, match="hostnames found in roles"): 
service.main() - def test_service_role_config_group_missing_one_of(self, conn, module_args): - module_args( - { - **conn, - "cluster": "example", - "name": "example", - "role_config_groups": [ - { - "display_name": "example", - } - ], - } - ) - - with pytest.raises( - AnsibleFailJson, match="name, role_type found in role_config_groups" - ): - service.main() - class TestServiceInvalidParameters: def test_present_invalid_cluster(self, conn, module_args): From 67ca892735267463f7f7827400fe3b21de98db89 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 17 Apr 2025 13:16:12 -0400 Subject: [PATCH 27/27] Update service_info module to align with utilities and output Update service_info tests to use pytest fixtures Signed-off-by: Webster Mudge --- plugins/modules/service_info.py | 271 ++++++++++++++---- .../modules/service_info/test_service_info.py | 154 +++++++--- 2 files changed, 329 insertions(+), 96 deletions(-) diff --git a/plugins/modules/service_info.py b/plugins/modules/service_info.py index 5ca2858f..8821832a 100644 --- a/plugins/modules/service_info.py +++ b/plugins/modules/service_info.py @@ -1,6 +1,7 @@ +#!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,33 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - parse_service_result, -) - -from cm_client import ServicesResourceApi -from cm_client.rest import ApiException - - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: service_info short_description: Retrieve information about the services of cluster description: - Gather information about services of a CDP cluster. author: - "Webster Mudge (@wmudge)" -requirements: - - cm_client options: cluster: description: @@ -49,34 +30,31 @@ required: yes aliases: - cluster_name - service: + name: description: - A service to retrieve. - If absent, the module will return all services. type: str aliases: - service_name - - name - view: - description: - - The view to materialize. - - C(healthcheck) is the equivalent to I(full_with_health_check_explanation). - - C(redacted) is the equivalent to I(export_redacted). - type: str - default: summary - choices: - - summary - - full - - healthcheck - - export - - redacted + - service extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.service """ EXAMPLES = r""" ---- - name: Gather details of the services of a cluster cloudera.cluster.service_info: host: "example.cloudera.host" @@ -95,7 +73,6 @@ """ RETURN = r""" ---- services: description: Details about the services of a cluster. type: list @@ -214,8 +191,182 @@ description: Version of the service. type: str returned: when supported + config: + description: Service-wide configuration details about a cluster service. 
+ type: dict + returned: when supported + role_config_groups: + description: List of base and custom role config groups for the cluster service. + type: list + elements: dict + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. + type: str + returned: when supported + config: + description: Set of configurations for the role config group. + type: dict + returned: when supported + returned: when supported + roles: + description: List of provisioned role instances on cluster hosts for the cluster service. + type: list + elements: dict + contains: + name: + description: The cluster service role name. + type: str + returned: always + type: + description: The cluster service role type. + type: str + returned: always + sample: + - NAMENODE + - DATANODE + - TASKTRACKER + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + hostname: + description: The hostname of the cluster host. + type: str + returned: always + role_state: + description: State of the cluster service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. + type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. + type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + config: + description: Set of role configurations for the cluster service role. 
+ type: dict + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). + type: str + returned: when supported + returned: when supported """ +from cm_client import ( + ClustersResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + parse_service_result, + read_service, + read_services, +) + class ClusterServiceInfo(ClouderaManagerModule): def __init__(self, module): @@ -223,32 +374,35 @@ def __init__(self, module): # Set the parameters self.cluster = self.get_param("cluster") - self.service = self.get_param("service") + self.name = self.get_param("name") self.view = self.get_param("view") # Initialize the return values - self.services = [] + self.output = [] # Execute the logic self.process() @ClouderaManagerModule.handle_process def process(self): - api_instance = ServicesResourceApi(self.api_client) + try: + ClustersResourceApi(self.api_client).read_cluster(self.cluster) + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cluster does not exist: " + self.cluster) + else: + raise ex - if self.view == "healthcheck": - self.view = "full_with_health_check_explanation" - elif self.view == "redacted": - self.view = "export_redacted" + service_api = ServicesResourceApi(self.api_client) - if self.service: + if self.name: try: - self.services.append( + self.output.append( parse_service_result( - api_instance.read_service( + read_service( + api_client=self.api_client, cluster_name=self.cluster, - service_name=self.service, - view=self.view, + service_name=self.name, ) ) ) @@ -256,11 +410,12 @@ def process(self): if e.status != 404: raise e else: - self.services = [ + self.output = [ parse_service_result(s) - for s in api_instance.read_services( - cluster_name=self.cluster, view=self.view - ).items + for s in read_services( + api_client=self.api_client, + cluster_name=self.cluster, + ) ] @@ -268,11 +423,7 @@ def main(): module = ClouderaManagerModule.ansible_module( argument_spec=dict( cluster=dict(required=True, aliases=["cluster_name"]), - service=dict(aliases=["service_name", "name"]), - view=dict( - default="summary", - choices=["summary", "full", "healthcheck", "export", "redacted"], - ), + name=dict(aliases=["service_name", "service"]), ), supports_check_mode=True, ) @@ -281,7 +432,7 @@ def main(): output = dict( changed=False, - services=result.services, + services=result.output, ) if result.debug: diff --git a/tests/unit/plugins/modules/service_info/test_service_info.py b/tests/unit/plugins/modules/service_info/test_service_info.py index 3a70af52..45a03fa5 100644 --- a/tests/unit/plugins/modules/service_info/test_service_info.py +++ b/tests/unit/plugins/modules/service_info/test_service_info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,74 +22,127 @@ import os import pytest +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiEntityTag, + ApiHost, + ApiHostRef, + ApiRole, + ApiService, + ApiServiceConfig, + ApiServiceState, + ServicesResourceApi, +) + from ansible_collections.cloudera.cluster.plugins.modules import service_info +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, ) LOG = logging.getLogger(__name__) -@pytest.fixture() -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) +@pytest.fixture(scope="module") +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) + roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) +@pytest.fixture() +def cluster_hosts(cm_api_client, base_cluster) -> list[ApiHost]: + return get_cluster_hosts(cm_api_client, base_cluster) - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) +@pytest.fixture() +def available_hosts(cm_api_client, cluster_hosts, zookeeper) -> list[ApiHost]: + service_host_ids = [ + h.host_id + for h in get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + ] - return { - **conn, - "verify_tls": "no", - "debug": "no", - } + return [h for h in cluster_hosts if h.host_id not in service_host_ids] def test_missing_required(conn, module_args): - module_args(conn) + module_args( + { + **conn, + } + ) with pytest.raises(AnsibleFailJson, match="cluster"): service_info.main() def test_missing_cluster(conn, module_args): - conn.update(service="example") - module_args(conn) + module_args( + { + **conn, + "service": "example", + } + ) with pytest.raises(AnsibleFailJson, match="cluster"): service_info.main() -def test_invalid_service(conn, module_args): +def test_invalid_cluster(conn, module_args): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": "BOOM", + "cluster": "invalid", + "service": "example", } ) - with pytest.raises(AnsibleExitJson) as e: + with pytest.raises(AnsibleFailJson, match="Cluster does not exist") as e: service_info.main() - assert len(e.value.services) == 0 - -def test_invalid_cluster(conn, module_args): +def test_invalid_service(conn, module_args, base_cluster): module_args( { **conn, - "cluster": "BOOM", - "service": os.getenv("CM_SERVICE"), + "cluster": base_cluster.name, + "service": "not_found", } ) @@ 
-99,30 +152,59 @@ def test_invalid_cluster(conn, module_args): assert len(e.value.services) == 0 -def test_view_all_services(conn, module_args): +def test_all_services( + conn, + module_args, + request, + base_cluster, + zookeeper, + available_hosts, + service_factory, +): + id = Path(request.node.name) + + # Add an additional ZooKeeper service + zookeeper_two = service_factory( + cluster=base_cluster, + service=ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) + roles=[ + ApiRole(type="SERVER", host_ref=ApiHostRef(available_hosts[0].host_id)) + ], + ), + ) + module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), + "cluster": zookeeper.cluster_ref.cluster_name, } ) with pytest.raises(AnsibleExitJson) as e: service_info.main() - assert len(e.value.services) > 0 + assert len(e.value.services) == 3 # 2 ZK and 1 core settings + service_names = [s["name"] for s in e.value.services] + assert zookeeper.name in service_names + assert zookeeper_two.name in service_names -def test_view_single_service(conn, module_args): +def test_named_service(conn, module_args, zookeeper): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, } ) with pytest.raises(AnsibleExitJson) as e: service_info.main() - assert len(e.value.services) == 1 + assert len(e.value.services) == 1 # Single named ZK + service_names = [s["name"] for s in e.value.services] + assert zookeeper.name in service_names
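
The final two patches leave service_info delegating all of its lookups to the shared read_service()/read_services() and parse_service_result() utilities. As a quick illustration, the sketch below exercises those utilities directly from Python rather than through the module; it is only a sketch, and the Cloudera Manager URL, credentials, API version, and cluster name are placeholders, not values from these patches.

# Rough usage sketch for the read_services() / parse_service_result() utilities
# introduced above. Assumes the cloudera.cluster collection is importable on the
# local Ansible collections path and that a Cloudera Manager endpoint is reachable.
import cm_client

from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import (
    parse_service_result,
    read_services,
)

# Configure the swagger-generated client (module-level configuration object)
cm_client.configuration.username = "admin"      # placeholder credential
cm_client.configuration.password = "changeme"   # placeholder credential
cm_client.configuration.verify_ssl = False

# Placeholder endpoint and API version
api_client = cm_client.ApiClient("https://cm.example.com:7183/api/v54")

# Gather every service on the cluster, including service-wide config,
# role config groups, and role instances, then normalize each result the
# same way the service_info module does before returning facts.
for service in read_services(api_client=api_client, cluster_name="example_cluster"):
    facts = parse_service_result(service)
    print(facts["name"], facts.get("role_config_groups", []))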