From 4df1dcd22835786be53c043f786a2d5395452de0 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:23 -0500 Subject: [PATCH 01/58] Update pytest marker for 'service_config' Signed-off-by: Webster Mudge --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a36945c5..e46f1c79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ filterwarnings = [ "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", ] markers = [ - "prepare: Prepare Cloudera Manager and resources for tests", + "service_config: Prepare service-wide configurations for tests", ] [build-system] From 4d0dea74ec902872910e5a212ab19d7852fb65fe Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:23 -0500 Subject: [PATCH 02/58] Update message for cms_service_config changes Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 5fb502e1..268192db 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -295,7 +295,7 @@ def cms_service_config(cm_api_client, cms, request): for k, v in marker.args[0].items(): try: api.update_service_config( - message=f"{request.node.name}::set", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), ) except ApiException as ae: @@ -321,6 +321,6 @@ def cms_service_config(cm_api_client, cms, request): ) api.update_service_config( - message=f"{request.node.name}::reset", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) From c9798f10c8e3545ecc26cad7630623e909395673 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 03/58] Move role and role config group result parsing to separate modules. Update general output parsing function. 
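The consolidated helper, normalize_output(), filters an entity's dictionary
down to a whitelist of keys and collapses any 'tags' entries into a single
name/value dictionary. A rough sketch of the intended call pattern
(illustrative only):

    normalize_output(role.to_dict(), ["name", "type", "tags"])
    # -> {"name": ..., "type": ..., "tags": {<tag name>: <tag value>, ...}}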
Signed-off-by: Webster Mudge --- plugins/module_utils/cluster_utils.py | 4 +- plugins/module_utils/cm_utils.py | 50 +------------------ plugins/module_utils/data_context_utils.py | 6 +-- plugins/module_utils/parcel_utils.py | 4 +- .../module_utils/role_config_group_utils.py | 35 +++++++++++++ plugins/module_utils/role_utils.py | 48 ++++++++++++++++++ plugins/module_utils/service_utils.py | 9 +++- plugins/modules/service_role.py | 5 +- plugins/modules/service_role_config_group.py | 3 ++ .../service_role_config_group_config_info.py | 2 +- .../modules/service_role_config_group_info.py | 4 ++ plugins/modules/service_role_info.py | 4 ++ 12 files changed, 115 insertions(+), 59 deletions(-) create mode 100644 plugins/module_utils/role_config_group_utils.py create mode 100644 plugins/module_utils/role_utils.py diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index 203d4230..bf3cae1f 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiCluster @@ -42,5 +42,5 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: # Retrieve full_version as version output = dict(version=cluster.full_version) - output.update(_parse_output(cluster.to_dict(), CLUSTER_OUTPUT)) + output.update(normalize_output(cluster.to_dict(), CLUSTER_OUTPUT)) return output diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index a1667986..c892b229 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -35,8 +35,6 @@ ApiClient, ApiCommand, ApiConfigList, - ApiRole, - ApiRoleConfigGroup, Configuration, ) from cm_client.rest import ApiException, RESTClientObject @@ -47,34 +45,8 @@ __credits__ = ["frisch@cloudera.com"] __maintainer__ = ["wmudge@cloudera.com"] -ROLE_OUTPUT = [ - "commission_state", - "config_staleness_status", - "ha_status", - "health_checks", - "health_summary", - # "host_ref", - "maintenance_mode", - "maintenance_owners", - "name", - # "role_config_group_ref", - "role_state", - # "service_ref", - "tags", - "type", - "zoo_keeper_server_mode", -] - -ROLE_CONFIG_GROUP = [ - "name", - "role_type", - "base", - "display_name", - # "service_ref", -] - - -def _parse_output(entity: dict, filter: list) -> dict: + +def normalize_output(entity: dict, filter: list) -> dict: output = {} for k in filter: if k == "tags": @@ -85,24 +57,6 @@ def _parse_output(entity: dict, filter: list) -> dict: return output -def parse_role_result(role: ApiRole) -> dict: - # Retrieve only the host_id, role_config_group, and service identifiers - output = dict( - host_id=role.host_ref.host_id, - role_config_group_name=role.role_config_group_ref.role_config_group_name, - service_name=role.service_ref.service_name, - ) - output.update(_parse_output(role.to_dict(), ROLE_OUTPUT)) - return output - - -def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: - # Retrieve only the service identifier - output = dict(service_name=role_config_group.service_ref.service_name) - output.update(_parse_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) - return output - - def normalize_values(add: dict) -> dict: """Normalize parameter values. 
    Strings have whitespace trimmed, integers are converted to strings, and
    Boolean values are converted to their string representation

diff --git a/plugins/module_utils/data_context_utils.py b/plugins/module_utils/data_context_utils.py
index 4b3f54f7..be4f7c57 100644
--- a/plugins/module_utils/data_context_utils.py
+++ b/plugins/module_utils/data_context_utils.py
@@ -17,7 +17,7 @@
 """

 from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
-    _parse_output,
+    normalize_output,
 )

 from cm_client import ApiDataContextList
@@ -38,9 +38,9 @@
 ]


-def _parse_output(data: dict, keys: list) -> dict:
+def normalize_output(data: dict, keys: list) -> dict:
     return {key: data[key] for key in keys if key in data}


 def parse_data_context_result(data_contexts: ApiDataContextList) -> list:
-    return [_parse_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items]
+    return [normalize_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items]
diff --git a/plugins/module_utils/parcel_utils.py b/plugins/module_utils/parcel_utils.py
index 88d13793..38a50c5a 100644
--- a/plugins/module_utils/parcel_utils.py
+++ b/plugins/module_utils/parcel_utils.py
@@ -23,7 +23,7 @@
 from cm_client import ApiParcel, ParcelResourceApi

 from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
-    _parse_output,
+    normalize_output,
 )


@@ -138,5 +138,5 @@ def activate(self):
 def parse_parcel_result(parcel: ApiParcel) -> dict:
     # Retrieve only the cluster identifier
     output = dict(cluster_name=parcel.cluster_ref.cluster_name)
-    output.update(_parse_output(parcel.to_dict(), PARCEL))
+    output.update(normalize_output(parcel.to_dict(), PARCEL))
     return output
diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py
new file mode 100644
index 00000000..b17e8160
--- /dev/null
+++ b/plugins/module_utils/role_config_group_utils.py
@@ -0,0 +1,35 @@
+# Copyright 2024 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    normalize_output,
+)
+
+from cm_client import ApiRoleConfigGroup
+
+
+ROLE_CONFIG_GROUP = [
+    "name",
+    "role_type",
+    "base",
+    "display_name",
+    # "service_ref",
+]
+
+
+def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict:
+    # Retrieve only the service identifier
+    output = dict(service_name=role_config_group.service_ref.service_name)
+    output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP))
+    return output
diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py
new file mode 100644
index 00000000..55bb463b
--- /dev/null
+++ b/plugins/module_utils/role_utils.py
@@ -0,0 +1,48 @@
+# Copyright 2024 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ApiRole + +ROLE_OUTPUT = [ + "commission_state", + "config_staleness_status", + "ha_status", + "health_checks", + "health_summary", + # "host_ref", + "maintenance_mode", + "maintenance_owners", + "name", + # "role_config_group_ref", + "role_state", + # "service_ref", + "tags", + "type", + "zoo_keeper_server_mode", +] + + +def parse_role_result(role: ApiRole) -> dict: + # Retrieve only the host_id, role_config_group, and service identifiers + output = dict( + host_id=role.host_ref.host_id, + role_config_group_name=role.role_config_group_ref.role_config_group_name, + service_name=role.service_ref.service_name, + ) + output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + return output diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index c11a2d79..9e65bff3 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, resolve_parameter_updates, ) @@ -47,10 +47,15 @@ def parse_service_result(service: ApiService) -> dict: # Retrieve only the cluster_name output = dict(cluster_name=service.cluster_ref.cluster_name) - output.update(_parse_output(service.to_dict(), SERVICE_OUTPUT)) + output.update(normalize_output(service.to_dict(), SERVICE_OUTPUT)) return output +def parse_cm_service_result(service: ApiService) -> dict: + # Ignore cluster_name + return normalize_output(service.to_dict(), SERVICE_OUTPUT) + + class ServiceConfigUpdates(object): def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> None: current = {r.name: r.value for r in existing.items} diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index f9bed4a1..d92ca533 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -16,9 +16,11 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, - parse_role_result, resolve_tag_updates, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) from cm_client import ( ApiEntityTag, @@ -34,6 +36,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 5d1f4449..b54ffeef 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py index 
fc127dc8..ba25a6cb 100644 --- a/plugins/modules/service_role_config_group_config_info.py +++ b/plugins/modules/service_role_config_group_config_info.py @@ -16,7 +16,6 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, - parse_role_config_group_result, ) from cm_client import ( @@ -26,6 +25,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 46e95af4..cc71314b 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) @@ -26,6 +29,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index c0e1f63f..9581a8bb 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -18,12 +18,16 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( parse_role_result, ) from cm_client import ClustersResourceApi, RolesResourceApi, ServicesResourceApi from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], From e72cb3b2fbc29c4a05169ef3893eb2513ffb447d Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 04/58] Enable service-wide configuration management. Update return object. Signed-off-by: Webster Mudge --- plugins/modules/cm_service.py | 536 +++++++++++++++++++--------------- 1 file changed, 302 insertions(+), 234 deletions(-) diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 921981cf..6528169d 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -1,3 +1,6 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright 2024 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,24 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client.rest import ApiException -from cm_client import MgmtRolesResourceApi -from cm_client import MgmtServiceResourceApi -from cm_client import MgmtRoleCommandsResourceApi -from cm_client import HostsResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service short_description: Manage Cloudera Manager service roles description: @@ -68,7 +54,6 @@ """ EXAMPLES = r""" ---- - name: Start Cloudera Manager service roles cloudera.cluster.cm_version: host: "10.10.10.10" @@ -114,8 +99,7 @@ """ RETURN = r""" ---- -cloudera_manager: +service: description: List of Cloudera Manager roles type: dict contains: @@ -185,241 +169,325 @@ returned: optional """ +import json + +from cm_client import ( + HostsResourceApi, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, + parse_cm_service_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + parse_role_config_group_result, +) + -class ClouderaService(ClouderaManagerModule): +class ClouderaManagerService(ClouderaManagerMutableModule): def __init__(self, module): - super(ClouderaService, self).__init__(module) + super(ClouderaManagerService, self).__init__(module) - self.role = self.get_param("role") + # Set the parameters + self.params = self.get_param("parameters") + self.roles = self.get_param("roles") self.state = self.get_param("state") self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.cm_service = {} + + if self.module._diff: + self.diff = dict(before=dict(), after=dict()) + else: + self.diff = {} + + # Execute the logic self.process() - @ClouderaManagerModule.handle_process + @ClouderaManagerMutableModule.handle_process def process(self): - try: - api_instance = MgmtServiceResourceApi(self.api_client) - role_api_instance = MgmtRolesResourceApi(self.api_client) - role_cmd_api_instance = MgmtRoleCommandsResourceApi(self.api_client) - mgmt_service_api_instance = MgmtServiceResourceApi(self.api_client) - host_api_instance = HostsResourceApi(self.api_client) - - get_host_infomation = host_api_instance.read_hosts().to_dict() - for item in get_host_infomation["items"]: - if self.host == item["hostname"]: - host_id = item["host_id"] - - if not self.purge: - available_roles_info = role_api_instance.read_roles().to_dict() - existing_roles = [] - for item in available_roles_info["items"]: - existing_roles.append(item["type"]) - - if self.state in ["present"]: - not_existing_roles = [] - for role in self.role: - if role not in existing_roles: - not_existing_roles.append(role) - if not_existing_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in not_existing_roles - ] - } - role_api_instance.create_roles(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - elif 
self.state in ["absent"]: - roles_to_remove = [ - role for role in self.role if role in existing_roles - ] - roles_to_remove_extended_info = [] - for role in roles_to_remove: - for item in available_roles_info["items"]: - if role == item["type"]: - roles_to_remove_extended_info.append(item["name"]) - if not roles_to_remove_extended_info: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - else: - for role in roles_to_remove_extended_info: - role_api_instance.delete_role(role_name=role) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + host_api = HostsResourceApi(self.api_client) + + # Manage service-wide configurations + if self.params or self.purge: + try: + existing_params = service_api.read_service_config() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + service_wide = ServiceConfigUpdates( + existing_params, self.params, self.purge + ) + + if service_wide.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(params=service_wide.diff["before"]) + self.diff["after"].update(params=service_wide.diff["after"]) + + if not self.module.check_mode: + service_api.update_service_config( + message=self.message, body=service_wide.config + ) + + # Manage roles + if self.roles: + try: + # Get a list of all host and find itself + # This is hardcoded, so needs to be broken into host + # assignment per-role + hosts = host_api.read_hosts() + for h in hosts.items(): + if self.host == h.hostname: + host_id = h.host_id + + # CHECK MODE + if not self.purge: + available_roles_info = role_api.read_roles().to_dict() + existing_roles = [] + for item in available_roles_info["items"]: + existing_roles.append(item["type"]) + + if self.state in ["present"]: + not_existing_roles = [] + for role in self.roles: + if role not in existing_roles: + not_existing_roles.append(role) + if not_existing_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in not_existing_roles + ] + } + role_api.create_roles(body=body) + self.cm_service = parse_cm_service_result( + service_api.read_service() ) self.changed = True - elif self.state in ["started"]: - - matching_roles = [] - new_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) + elif self.state in ["absent"]: + roles_to_remove = [ + role for role in self.roles if role in existing_roles + ] + roles_to_remove_extended_info = [] + for role in roles_to_remove: + for item in available_roles_info["items"]: + if role == item["type"]: + roles_to_remove_extended_info.append(item["name"]) + if not roles_to_remove_extended_info: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + else: + for role in roles_to_remove_extended_info: + role_api.delete_role(role_name=role) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + elif self.state in ["started"]: + + matching_roles = [] + new_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + else: + new_roles.append(role) + + new_roles_to_start = [] + if new_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in new_roles 
+ ] + } + newly_added_roles = role_api.create_roles( + body=body + ).to_dict() + + for role in newly_added_roles["items"]: + new_roles_to_start.append(role["name"]) + body = {"items": new_roles_to_start} + + existing_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + existing_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + existing_roles_to_start = [] + for role in existing_roles_state: + if role["role_state"] == "stopped": + existing_roles_to_start.append(role["name"]) + + all_roles_to_start = ( + new_roles_to_start + existing_roles_to_start + ) + body = {"items": all_roles_to_start} + + if all_roles_to_start: + start_roles_request = role_cmd_api.start_command( + body=body + ).to_dict() + command_id = start_roles_request["items"][0]["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 + ) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + else: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["stopped"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_stop = [] + for role in matching_roles_state: + if role["role_state"] == "started": + roles_to_stop.append(role["name"]) + body = {"items": roles_to_stop} + + if roles_to_stop: + role_cmd_api.stop_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True else: - new_roles.append(role) - - new_roles_to_start = [] - if new_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in new_roles - ] - } - newly_added_roles = role_api_instance.create_roles( - body=body - ).to_dict() - - for role in newly_added_roles["items"]: - new_roles_to_start.append(role["name"]) - body = {"items": new_roles_to_start} - - existing_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - existing_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - existing_roles_to_start = [] - for role in existing_roles_state: - if role["role_state"] == "stopped": - existing_roles_to_start.append(role["name"]) - - all_roles_to_start = new_roles_to_start + existing_roles_to_start - body = {"items": all_roles_to_start} - - if all_roles_to_start: - start_roles_request = role_cmd_api_instance.start_command( - body=body - ).to_dict() - command_id = start_roles_request["items"][0]["id"] + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["restarted"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_restart = [] + for role in matching_roles_state: + roles_to_restart.append(role["name"]) + body = {"items": 
roles_to_restart} + + if roles_to_restart: + role_cmd_api.restart_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + if self.purge: + service_api.delete_cms() + body = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=body) + self.cm_service = role_api.read_roles().to_dict() + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = start_roles_request["id"] self.wait_for_command_state( command_id=command_id, polling_interval=5 ) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + except ApiException as e: + if e.status == 404 or 400: + roles_dict = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=roles_dict) + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = start_roles_request["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 ) - self.changed = True + self.cm_service = role_api.read_roles().to_dict() else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["stopped"]: - matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_stop = [] - for role in matching_roles_state: - if role["role_state"] == "started": - roles_to_stop.append(role["name"]) - body = {"items": roles_to_stop} - - if roles_to_stop: - role_cmd_api_instance.stop_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["restarted"]: - matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_restart = [] - for role in matching_roles_state: - roles_to_restart.append(role["name"]) - body = {"items": roles_to_restart} - - if roles_to_restart: - role_cmd_api_instance.restart_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - - if self.purge: - mgmt_service_api_instance.delete_cms() - body = {"roles": [{"type": role} for role in self.role]} - mgmt_service_api_instance.setup_cms(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - - except ApiException as e: - if e.status == 404 or 400: - roles_dict = {"roles": [{"type": role} for role in self.role]} - api_instance.setup_cms(body=roles_dict) + 
self.cm_service = role_api.read_roles().to_dict() + self.changed = True - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - else: - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + # Read and generate payload for Cloudera Manager Service + self.cm_service = parse_cm_service_result(service_api.read_service()) + self.cm_service.update( + config=[ + c.to_dict() + for c in service_api.read_service_config(view=self.view).items + ] + ) + self.cm_service.update( + roles=[parse_role_result(r) for r in role_api.read_roles().items] + ) + self.cm_service.update( + role_config_groups=[ + parse_role_config_group_result(rcg) + for rcg in rcg_api.read_role_config_groups().items + ] + ) def main(): - module = ClouderaManagerModule.ansible_module( + module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - role=dict(required=True, type="list"), - purge=dict(required=False, type="bool", default=False), + parameters=dict(type="dict", aliases=["params"]), + roles=dict(type="list"), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), state=dict( type="str", default="started", @@ -429,13 +497,13 @@ def main(): supports_check_mode=False, ) - result = ClouderaService(module) + result = ClouderaManagerService(module) changed = result.changed output = dict( changed=changed, - cloudera_manager=result.cm_service_output, + service=result.cm_service, ) if result.debug: From 7612a7e76325097c70e25be4bfcc734f22ad62f7 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 05/58] Update to use pytest fixtures for Cloudera Manager Service Signed-off-by: Webster Mudge --- .../modules/cm_service/test_cm_service.py | 147 ++++++++++++++++-- 1 file changed, 134 insertions(+), 13 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index 5614fe61..af679312 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import os + import logging import pytest @@ -30,23 +30,144 @@ LOG = logging.getLogger(__name__) -def test_pytest_cm_service(module_args): +def test_minimal(conn, module_args, cms): + module_args(conn) + + with pytest.raises(AnsibleExitJson): + cm_service.main() + + +@pytest.mark.service_config(dict(log_event_retry_frequency=10)) +def test_set_parameters(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "message": "test_cm_service::test_set_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict( + mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10" + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] 
for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_unset_parameters(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None), + "message": "test_cm_service::test_unset_parameters", + } + ) + + expected = dict(log_event_retry_frequency="10") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_set_parameters_with_purge(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "purge": True, + "message": "test_cm_service::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_purge_all_parameters(conn, module_args, cms_service_config): module_args( { - "username": os.getenv("CM_USERNAME"), - "password": os.getenv("CM_PASSWORD"), - "host": os.getenv("CM_HOST"), - "port": "7180", - "verify_tls": "no", - "debug": "yes", - "state": "started", - "role": ["SERVICEMONITOR", "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER"], + **conn, + "parameters": dict(), + "purge": True, + "message": "test_cm_service::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - # with pytest.raises(AnsibleFailJson, match=r"boom") as e: with pytest.raises(AnsibleExitJson) as e: cm_service.main() - # LOG.info(str(e.value)) - LOG.info(str(e.value.cloudera_manager)) + assert e.value.changed == True + assert len(e.value.service["config"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["config"]) == 0 From 0b5c98f0b926f6f89684fcc5afd4e749499afc19 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 06/58] Update conn object from fixture Signed-off-by: Webster Mudge --- .../modules/cm_service_config/test_cm_service_config.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index 9e208227..ad54716b 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -38,10 +38,12 @@ 
def test_missing_required(conn, module_args):


 def test_present_invalid_parameter(conn, module_args):
-    conn.update(
-        parameters=dict(example="Example"),
+    module_args(
+        {
+            **conn,
+            "parameters": dict(example="Example"),
+        }
     )
-    module_args(conn)

     with pytest.raises(
         AnsibleFailJson, match="Unknown configuration attribute 'example'"

From a91c59f5e412ff9a298d6b46bebcf90e34cbf0f7 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 18 Dec 2024 12:14:28 -0500
Subject: [PATCH 07/58] Create generic configuration list reconciliation
 utility

Signed-off-by: Webster Mudge
---
 plugins/module_utils/cm_utils.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py
index c892b229..c55d523c 100644
--- a/plugins/module_utils/cm_utils.py
+++ b/plugins/module_utils/cm_utils.py
@@ -34,6 +34,7 @@
 from cm_client import (
     ApiClient,
     ApiCommand,
+    ApiConfig,
     ApiConfigList,
     Configuration,
 )
@@ -145,6 +146,25 @@ def resolve_tag_updates(
     return (delta_add, delta_del)


+class ConfigListUpdates(object):
+    def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None:
+        current = {r.name: r.value for r in existing.items}
+        changeset = resolve_parameter_updates(current, updates, purge)
+
+        self.diff = dict(
+            before={k: current[k] if k in current else None for k in changeset.keys()},
+            after=changeset,
+        )
+
+        self.config = ApiConfigList(
+            items=[ApiConfig(name=k, value=v) for k, v in changeset.items()]
+        )
+
+    @property
+    def changed(self) -> bool:
+        return bool(self.config.items)
+
+
 class ClusterTemplate(object):
     IDEMPOTENT_IDS = frozenset(
         ["refName", "name", "clusterName", "hostName", "product"]

From fdfbfb1ecbc469836e90eb77802f4903f0633ebb Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 18 Dec 2024 12:17:43 -0500
Subject: [PATCH 08/58] Add cm_service_role_config module

Signed-off-by: Webster Mudge
---
 plugins/modules/cm_service_role_config.py    | 322 +++++++++++++++
 pyproject.toml                               |   3 +
 tests/unit/__init__.py                       |  84 ++++
 tests/unit/conftest.py                       |   6 +-
 .../test_cm_service_role_config.py           | 365 ++++++++++++++++++
 5 files changed, 778 insertions(+), 2 deletions(-)
 create mode 100644 plugins/modules/cm_service_role_config.py
 create mode 100644 tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py

diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py
new file mode 100644
index 00000000..fc6efbf3
--- /dev/null
+++ b/plugins/modules/cm_service_role_config.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role_config
+short_description: Manage a service role configuration in a cluster
+description:
+  - Manage a service role configuration (role-specific) in a cluster.
+author:
+  - "Webster Mudge (@wmudge)"
+requirements:
+  - cm-client
+options:
+  name:
+    description:
+      - A Cloudera Manager Service role name to manage.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_name
+      - role
+  type:
+    description:
+      - A Cloudera Manager Service role type to manage.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_type
+  parameters:
+    description:
+      - The role-specific configuration to set, i.e. role overrides.
+      - To unset a parameter, use C(None) as the value.
+    type: dict
+    required: yes
+    aliases:
+      - params
+  purge:
+    description:
+      - Flag for whether the declared parameters should append or overwrite any existing parameters.
+      - To clear all parameters, set I(parameters={}), i.e. an empty dictionary, and I(purge=True).
+    type: bool
+    default: False
+  view:
+    description:
+      - The view to return.
+    type: str
+    default: summary
+    choices:
+      - summary
+      - full
+extends_documentation_fragment:
+  - ansible.builtin.action_common_attributes
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.purge
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+"""
+
+EXAMPLES = r"""
+- name: Update (append) Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      a_configuration: "schema://host:port"
+      another_configuration: 234
+
+- name: Reset a Cloudera Manager Service Host Monitor role parameter
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    name: "a-non-default-role-name"
+    parameters:
+      more_configuration: None
+
+- name: Update (with purge) Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      config_one: None
+      config_two: ValueTwo
+      config_three: 2345
+
+- name: Reset all Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters: {}
+    purge: yes
+"""
+
+RETURN = r"""
+config:
+  description:
+    - List of Cloudera Manager Service role configurations.
+    - Returns the C(summary) view of the resulting configuration.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    name:
+      description:
+        - The canonical name that identifies this configuration parameter.
+      type: str
+      returned: when supported
+    value:
+      description:
+        - The user-defined value.
+        - When absent, the default value (if any) will be used.
+        - Can also be absent, when enumerating allowed configs.
+      type: str
+      returned: when supported
+    required:
+      description:
+        - Whether this configuration is required for the object.
+        - If any required configuration is not set, operations on the object may not work.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+    default:
+      description:
+        - The default value.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    display_name:
+      description:
+        - A user-friendly name of the parameter, as would have been shown in the web UI.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    description:
+      description:
+        - A textual description of the parameter.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    related_name:
+      description:
+        - If applicable, contains the related configuration variable used by the source project.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    sensitive:
+      description:
+        - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller.
+      type: bool
+      returned: when supported
+    validate_state:
+      description:
+        - State of the configuration parameter after validation.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_message:
+      description:
+        - A message explaining the parameter's validation state.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_warnings_suppressed:
+      description:
+        - Whether validation warnings associated with this parameter are suppressed.
+        - In general, suppressed validation warnings are hidden in the Cloudera Manager UI.
+        - Configurations that do not produce warnings will not contain this field.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+"""
+
+import json
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerMutableModule,
+    ConfigListUpdates,
+)
+
+from cm_client import MgmtRolesResourceApi
+from cm_client.rest import ApiException
+
+
+class ClouderaManagerServiceRoleConfig(ClouderaManagerMutableModule):
+    def __init__(self, module):
+        super(ClouderaManagerServiceRoleConfig, self).__init__(module)
+
+        # Set the parameters
+        self.name = self.get_param("name")
+        self.type = self.get_param("type")
+        self.params = self.get_param("parameters")
+        self.purge = self.get_param("purge")
+        self.view = self.get_param("view")
+
+        # Initialize the return value
+        self.changed = False
+        self.diff = {}
+        self.config = []
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerMutableModule.handle_process
+    def process(self):
+        refresh = True
+        role_api = MgmtRolesResourceApi(self.api_client)
+
+        try:
+            if self.name is None:
+                role = next(
+                    iter(
+                        [r for r in role_api.read_roles().items if r.type == self.type]
+                    ),
+                    None,
+                )
+                if role is None:
+                    self.module.fail_json(
+                        msg=f"Unable to find Cloudera Manager Service role type '{self.type}'"
+                    )
+                else:
+                    self.name = role.name
+
+            # For some reason, the call to read_roles() doesn't retrieve the configuration
+            existing = role_api.read_role_config(self.name)
+        except ApiException as ex:
+            if ex.status == 404:
+                self.module.fail_json(msg=json.loads(ex.body)["message"])
+            else:
+                raise ex
+
+        updates = ConfigListUpdates(existing, self.params, self.purge)
+
+        if updates.changed:
+            self.changed = True
+
+            if self.module._diff:
+                self.diff = updates.diff
+
+            if not self.module.check_mode:
+                self.config = [
+                    p.to_dict()
+                    for p in role_api.update_role_config(
+                        self.name,
+                        message=self.message,
+                        body=updates.config,
+                    ).items
+                ]
+
+                if self.view == "full":
+                    refresh = False
+
+        if refresh:
+            self.config = [
+                p.to_dict()
+                for p in role_api.read_role_config(self.name, view=self.view).items
+            ]
+
+
+def main():
+    module = ClouderaManagerMutableModule.ansible_module(
+        argument_spec=dict(
+            name=dict(aliases=["role_name", "role"]),
+            type=dict(aliases=["role_type"]),
+            parameters=dict(type="dict", required=True, aliases=["params"]),
+            purge=dict(type="bool", default=False),
+            view=dict(
+                default="summary",
+                choices=["summary", "full"],
+            ),
+        ),
+        required_one_of=[
+            ["name", "type"],
+        ],
+        supports_check_mode=True,
+    )
+
+    result = ClouderaManagerServiceRoleConfig(module)
+
+    output = dict(
+        changed=result.changed,
+        config=result.config,
+    )
+
+    if module._diff:
+        output.update(diff=result.diff)
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/pyproject.toml b/pyproject.toml
index e46f1c79..716e06c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,9 +51,12 @@ testpaths = [
 filterwarnings = [
     "ignore:AnsibleCollectionFinder has already been configured",
     "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning",
+    "ignore:URLs without a scheme:DeprecationWarning",
+    "ignore:HTTPResponse.getheaders():DeprecationWarning",
 ]
 markers = [
     "service_config: Prepare service-wide configurations for tests",
+    "role_config: Prepare role override configurations for tests",
 ]

 [build-system]
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index f593ed44..62a6e4a2 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -22,11 +22,15 @@
     ApiCluster,
     ApiCommand,
     ApiConfig,
+    ApiConfigList,
+    ApiRole,
+    ApiRoleList,
     ApiService,
     ApiServiceConfig,
     ApiServiceList,
     ClustersResourceApi,
     CommandsResourceApi,
+    MgmtRolesResourceApi,
     ServicesResourceApi,
 )
 from cm_client.rest import ApiException
@@ -179,3 +183,83 @@ def service_wide_config(
         message=f"{message}::reset",
         body=ApiServiceConfig(items=reconciled),
     )
+
+
+def provision_cm_role(
+    api_client: ApiClient, role_name: str, role_type: str, host_id: str
+) -> Generator[ApiRole]:
+    api = MgmtRolesResourceApi(api_client)
+
+    role = ApiRole(
+        name=role_name,
+        type=role_type,
+        host_ref=dict(hostId=host_id),
+    )
+
+    yield next(iter(api.create_roles(body=ApiRoleList(items=[role])).items), None)
+
+    api.delete_role(role_name=role_name)
+
+
+def cm_role_config(
+    api_client: ApiClient, role: ApiRole, params: dict, message: str
+) -> Generator[ApiRole]:
+    """Update a role configuration for a given role. Yields the
+    role, resetting the configuration to its prior state. Use with
+    'yield from' within a pytest fixture.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client
+        role (ApiRole): Target role for the configuration updates
+        params (dict): Role configuration parameters to set
+        message (str): Message prefix for the configuration updates
+
+    Raises:
+        Exception: If a configuration update fails
+
+    Yields:
+        ApiRole: The role with the test configuration applied
+    """
+    role_api = MgmtRolesResourceApi(api_client)
+
+    # Retrieve all of the pre-setup configurations
+    pre = role_api.read_role_config(role.name)
+
+    # Set the test configurations
+    # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
+    # configuration entries to not run. Long-term solution is to check-and-set, which is
+    # what the Ansible modules do...
+ for k, v in params.items(): + try: + role_api.update_role_config( + role_name=role.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield role_api.read_role(role_name=role.name) + + # Retrieve all of the post-setup configurations + post = role_api.read_role_config(role_name=role.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + role_api.update_role_config( + role_name=role.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 268192db..41669cfe 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -26,6 +26,7 @@ import sys import yaml +from collections.abc import Generator from pathlib import Path from cm_client import ( @@ -40,6 +41,7 @@ ClustersResourceApi, Configuration, HostsResourceApi, + MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, ParcelsResourceApi, @@ -250,7 +252,7 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request): +def cms(cm_api_client, request) -> Generator[ApiService]: """Provisions Cloudera Manager Service.""" api = MgmtServiceResourceApi(cm_api_client) @@ -275,7 +277,7 @@ def cms(cm_api_client, request): @pytest.fixture(scope="function") -def cms_service_config(cm_api_client, cms, request): +def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: """Configures service-wide configurations for the Cloudera Manager Service""" marker = request.node.get_closest_marker("service_config") diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py new file mode 100644 index 00000000..d230005a --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import logging
+import pytest
+
+from collections.abc import Generator
+from pathlib import Path
+
+from cm_client import (
+    ApiRole,
+    ClustersResourceApi,
+    MgmtRolesResourceApi,
+)
+
+from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config
+from ansible_collections.cloudera.cluster.tests.unit import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+    provision_cm_role,
+    cm_role_config,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="module")
+def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]:
+    api = MgmtRolesResourceApi(cm_api_client)
+
+    hm = next(
+        iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None
+    )
+
+    if hm is not None:
+        yield hm
+    else:
+        cluster_api = ClustersResourceApi(cm_api_client)
+
+        # Get first host of the cluster
+        hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name)
+
+        if not hosts.items:
+            raise Exception(
+                "No available hosts to assign the Cloudera Manager Service role."
+            )
+        else:
+            name = Path(request.fixturename).stem
+            yield from provision_cm_role(
+                cm_api_client, name, "HOSTMONITOR", hosts.items[0].host_id
+            )
+
+
+@pytest.fixture(scope="function")
+def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]:
+    marker = request.node.get_closest_marker("role_config")
+
+    if marker is None:
+        raise Exception("No role_config marker found.")
+
+    yield from cm_role_config(
+        api_client=cm_api_client,
+        role=host_monitor,
+        params=marker.args[0],
+        message=f"{Path(request.node.parent.name).stem}::{request.node.name}",
+    )
+
+
+def test_missing_required(conn, module_args):
+    module_args(conn)
+
+    with pytest.raises(AnsibleFailJson, match="parameters"):
+        cm_service_role_config.main()
+
+
+def test_missing_required_if(conn, module_args):
+    module_args(
+        {
+            **conn,
+            "parameters": dict(),
+        }
+    )
+
+    with pytest.raises(AnsibleFailJson, match="name, type"):
+        cm_service_role_config.main()
+
+
+def test_present_invalid_parameter(conn, module_args, host_monitor):
+    module_args(
+        {
+            **conn,
+            "role": host_monitor.name,
+            "parameters": dict(example="Example"),
+        }
+    )
+
+    with pytest.raises(
+        AnsibleFailJson, match="Unknown configuration attribute 'example'"
+    ):
+        cm_service_role_config.main()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+)
+def test_set_parameters(conn, module_args, host_monitor_config, request):
+    module_args(
+        {
+            **conn,
+            "role": host_monitor_config.name,
+            "parameters": dict(mgmt_num_descriptor_fetch_tries=32),
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config.main()
+
+    assert e.value.changed == True
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config.main()
+
+    assert e.value.changed == False
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+)
+def test_set_parameters_role_type(conn, module_args, host_monitor_config, request):
+    module_args(
+        {
+            **conn,
+            "role_type": 
host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + 
module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From c0a82f9c24c610f29eedcb498a3c93f3b049a1a0 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:21:30 -0500 Subject: [PATCH 09/58] Add utilities for discovering base role config group for a given role type Signed-off-by: Webster Mudge --- .../module_utils/role_config_group_utils.py | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index b17e8160..50e82afd 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -16,8 +16,12 @@ normalize_output, ) -from cm_client import ApiRoleConfigGroup - +from cm_client import ( + ApiClient, + ApiRoleConfigGroup, + RoleConfigGroupsResourceApi, + MgmtRoleConfigGroupsResourceApi, +) ROLE_CONFIG_GROUP = [ "name", @@ -28,8 +32,42 @@ ] +class BaseRoleConfigGroupDiscoveryException(Exception): + pass + + def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: # Retrieve only the service identifier output = dict(service_name=role_config_group.service_ref.service_name) output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) return output + + +def get_base_role_config_group( + api_client: ApiClient, 
+    cluster_name: str, service_name: str, role_type: str
+) -> ApiRoleConfigGroup:
+    rcg_api = RoleConfigGroupsResourceApi(api_client)
+    rcgs = [
+        r
+        for r in rcg_api.read_role_config_groups(cluster_name, service_name).items
+        if r.role_type == role_type and r.base
+    ]
+    if len(rcgs) != 1:
+        raise BaseRoleConfigGroupDiscoveryException(f"Expected one base role config group for role type '{role_type}', found {len(rcgs)}")
+    else:
+        return rcgs[0]
+
+
+def get_mgmt_base_role_config_group(
+    api_client: ApiClient, role_type: str
+) -> ApiRoleConfigGroup:
+    rcg_api = MgmtRoleConfigGroupsResourceApi(api_client)
+    rcgs = [
+        r
+        for r in rcg_api.read_role_config_groups().items
+        if r.role_type == role_type and r.base
+    ]
+    if len(rcgs) != 1:
+        raise BaseRoleConfigGroupDiscoveryException(f"Expected one base role config group for role type '{role_type}', found {len(rcgs)}")
+    else:
+        return rcgs[0]

From eab3d18e96eed9509dc0f6badecfd1c728b4b857 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 18 Dec 2024 15:22:18 -0500
Subject: [PATCH 10/58] Add utilities for getting roles by role type

Signed-off-by: Webster Mudge

---
 plugins/module_utils/role_utils.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py
index 55bb463b..c3a962e5 100644
--- a/plugins/module_utils/role_utils.py
+++ b/plugins/module_utils/role_utils.py
@@ -16,6 +16,12 @@
     normalize_output,
 )
 
+from cm_client import (
+    ApiClient,
+    ApiRoleList,
+    RolesResourceApi,
+    MgmtRolesResourceApi,
+)
 from cm_client import ApiRole
 
 ROLE_OUTPUT = [
@@ -46,3 +52,23 @@ def parse_role_result(role: ApiRole) -> dict:
     )
     output.update(normalize_output(role.to_dict(), ROLE_OUTPUT))
     return output
+
+
+def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList:
+    role_api = MgmtRolesResourceApi(api_client)
+    return ApiRoleList(
+        items=[r for r in role_api.read_roles().items if r.type == role_type]
+    )
+
+
+def get_roles(
+    api_client: ApiClient, cluster_name: str, service_name: str, role_type: str
+) -> ApiRoleList:
+    role_api = RolesResourceApi(api_client)
+    return ApiRoleList(
+        items=[
+            r
+            for r in role_api.read_roles(cluster_name, service_name).items
+            if r.type == role_type
+        ]
+    )

From 6191de9724c005edf770632c4909f4e4fb45ee2f Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 18 Dec 2024 15:24:38 -0500
Subject: [PATCH 11/58] Add utility to set Cloudera Manager Service role config group configurations for tests

Signed-off-by: Webster Mudge

---
 tests/unit/__init__.py | 69 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)

diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 62a6e4a2..74d34146 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -24,6 +24,7 @@
     ApiConfig,
     ApiConfigList,
     ApiRole,
+    ApiRoleConfigGroup,
     ApiRoleList,
     ApiService,
     ApiServiceConfig,
@@ -31,6 +32,7 @@
     ClustersResourceApi,
     CommandsResourceApi,
     MgmtRolesResourceApi,
+    MgmtRoleConfigGroupsResourceApi,
     ServicesResourceApi,
 )
 from cm_client.rest import ApiException
@@ -263,3 +265,70 @@ def cm_role_config(
         message=f"{message}::reset",
         body=ApiConfigList(items=reconciled),
     )
+
+
+def set_cm_role_config_group_config(
+    api_client: ApiClient,
+    role_config_group: ApiRoleConfigGroup,
+    params: dict,
+    message: str,
+) -> Generator[ApiRoleConfigGroup]:
+    """Update a configuration for a given Cloudera Manager Service role config group.
+    Yields the role config group, resetting the configuration to its prior state.
+    Use with 'yield from' within a pytest fixture.
+ + Args: + api_client (ApiClient): _description_ + role_config_group (ApiRoleConfigGroup): _description_ + params (dict): _description_ + message (str): _description_ + + Raises: + Exception: _description_ + + Yields: + ApiRoleConfigGroup: _description_ + """ + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + + # Retrieve all of the pre-setup configurations + pre = rcg_api.read_config(role_config_group.name) + + # Set the test configurations + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... + for k, v in params.items(): + try: + rcg_api.update_config( + role_config_group.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield rcg_api.read_role_config_group(role_config_group.name) + + # Retrieve all of the post-setup configurations + post = rcg_api.read_config(role_config_group.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + rcg_api.update_config( + role_config_group.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) From 8f6334e54d45798394e799258709c6e6d0da17df Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:25:10 -0500 Subject: [PATCH 12/58] Add cm_service_role_config_group module and tests Signed-off-by: Webster Mudge --- .../cm_service_role_config_group_config.py | 308 ++++++++++++++ pyproject.toml | 1 + ...est_cm_service_role_config_group_config.py | 379 ++++++++++++++++++ 3 files changed, 688 insertions(+) create mode 100644 plugins/modules/cm_service_role_config_group_config.py create mode 100644 tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py new file mode 100644 index 00000000..8e61090b --- /dev/null +++ b/plugins/modules/cm_service_role_config_group_config.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config_group_config +short_description: Manage the configuration of a Cloudera Manager Service role config group. +description: + - Manage the configuration details of a role config group of the Cloudera Manager Service. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + name: + description: + - A role config group name to manage. + - One of C(name) or C(type) is required. 
+    type: str
+    aliases:
+      - role_config_group
+  type:
+    description:
+      - The role type of the role config group to manage.
+      - Retrieves the default role config group for the given role type.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_type
+  parameters:
+    description:
+      - The role configuration to set.
+      - To unset a parameter, use C(None) as the value.
+    type: dict
+    required: yes
+    aliases:
+      - params
+  view:
+    description:
+      - The view to materialize.
+    type: str
+    default: summary
+    choices:
+      - summary
+      - full
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.purge
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+"""
+
+EXAMPLES = r"""
+- name: Update (append) several role config group parameters for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      a_configuration: "schema://host:port"
+      another_configuration: 234
+
+- name: Reset a role config group parameter for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      some_conf: None
+
+- name: Update (purge) role config group parameters (by name) for a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    name: "a-non-default-rcg"
+    parameters:
+      config_one: ValueOne
+      config_two: 4567
+    purge: yes
+
+- name: Reset all role config group parameters for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters: {}
+    purge: yes
+"""
+
+RETURN = r"""
+config:
+  description:
+    - List of configurations for a Cloudera Manager Service role config group.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    name:
+      description:
+        - The canonical name that identifies this configuration parameter.
+      type: str
+      returned: when supported
+    value:
+      description:
+        - The user-defined value.
+        - When absent, the default value (if any) will be used.
+        - Can also be absent, when enumerating allowed configs.
+      type: str
+      returned: when supported
+    required:
+      description:
+        - Whether this configuration is required for the object.
+        - If any required configuration is not set, operations on the object may not work.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+    default:
+      description:
+        - The default value.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    display_name:
+      description:
+        - A user-friendly name of the parameters, as would have been shown in the web UI.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    description:
+      description:
+        - A textual description of the parameter.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    related_name:
+      description:
+        - If applicable, contains the related configuration variable used by the source project.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    sensitive:
+      description:
+        - Whether this configuration is sensitive, i.e.
contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. + type: bool + returned: when supported +""" + +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + BaseRoleConfigGroupDiscoveryException, + get_mgmt_base_role_config_group, +) + +from cm_client import MgmtRoleConfigGroupsResourceApi +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfigGroupConfig(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfigGroupConfig, self).__init__(module) + + # Set the parameters + self.name = self.get_param("name") + self.type = self.get_param("type") + self.params = self.get_param("parameters") + self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return values + self.changed = False + self.diff = {} + self.config = [] + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + refresh = True + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + try: + if self.name is None: + rcg = get_mgmt_base_role_config_group(self.api_client, self.type) + self.name = rcg.name + + existing = rcg_api.read_config(self.name) + except ApiException as ae: + if ae.status == 404: + self.module.fail_json(msg=json.loads(ae.body)["message"]) + else: + raise ae + except BaseRoleConfigGroupDiscoveryException as be: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + ) + + updates = ConfigListUpdates(existing, self.params, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff = updates.diff + + if not self.module.check_mode: + self.config = [ + p.to_dict() + for p in rcg_api.update_config( + self.name, + message=self.message, + body=updates.config, + ).items + ] + + if self.view == "full": + refresh = False + + if refresh: + self.config = [ + p.to_dict() + for p in rcg_api.read_config(self.name, view=self.view).items + ] + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + name=dict(aliases=["role_config_group"]), + type=dict(aliases=["role_type"]), + parameters=dict(type="dict", required=True, aliases=["params"]), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), + ), + required_one_of=[ + ["name", "type"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfigGroupConfig(module) + + output = dict( + changed=result.changed, + config=result.config, + ) + + if result.debug: 
+ log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 716e06c4..86417da5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,7 @@ filterwarnings = [ markers = [ "service_config: Prepare service-wide configurations for tests", "role_config: Prepare role override configurations for tests", + "role_config_group_config: Prepare role config group configurations for tests", ] [build-system] diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py new file mode 100644 index 00000000..3b740f3d --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiRole, + ApiRoleConfigGroup, + ClustersResourceApi, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group_config, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_mgmt_base_role_config_group, +) + +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + set_cm_role_config_group_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config( + cm_api_client, host_monitor, request +) -> Generator[ApiRoleConfigGroup]: + marker = request.node.get_closest_marker("role_config_group_config") + + if marker is None: + raise Exception("No role_config_group_config marker found.") + + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + + yield from set_cm_role_config_group_config( + api_client=cm_api_client, + role_config_group=rcg_api.read_role_config_group( + host_monitor.role_config_group_ref.role_config_group_name + ), + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role_config_group_config.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role_config_group_config.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "name": host_monitor.role_config_group_ref.role_config_group_name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role_config_group_config.main() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + 
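+# Note on the assertion idiom used throughout these tests: Python dict_items
+# views support subset comparison, so `expected.items() <= actual.items()`
+# checks that every expected name/value pair is present in the module output
+# without requiring an exact match of the full configuration list. A minimal,
+# standalone illustration (plain Python, independent of any Cloudera Manager
+# API):
+#
+#   expected = dict(mgmt_num_descriptor_fetch_tries="32")
+#   actual = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
+#   assert expected.items() <= actual.items()  # True: expected is a subset
+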
+@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with 
pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From a4e6394f2c54b1408db99206f55cf83381529067 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 14:55:27 -0500 Subject: [PATCH 13/58] Add 'config' dictionary to parsed Role Config Group results Signed-off-by: Webster Mudge --- plugins/module_utils/role_config_group_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 50e82afd..8b1fa561 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -37,9 +37,25 @@ class BaseRoleConfigGroupDiscoveryException(Exception): def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: + """Parse a Role Config Group into a normalized dictionary. 
+
+    Returns the following:
+    - name (str)
+    - role_type (str)
+    - base (bool)
+    - display_name (str)
+    - config (dict)
+
+    Args:
+        role_config_group (ApiRoleConfigGroup): Role Config Group
+
+    Returns:
+        dict: Normalized dictionary of returned values
+    """
     # Retrieve only the service identifier
     output = dict(service_name=role_config_group.service_ref.service_name)
     output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP))
+    output.update(config={c.name: c.value for c in role_config_group.config.items})
     return output

From 1a275f227ece5bf83267c2fa44226867d84ddae4 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Thu, 19 Dec 2024 14:57:05 -0500
Subject: [PATCH 14/58] Update utility function for Role Config Group tests to handle all parameters, not just configuration values

Signed-off-by: Webster Mudge

---
 tests/unit/__init__.py | 67 +++++++++++-------------------------------
 1 file changed, 17 insertions(+), 50 deletions(-)

diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 74d34146..1103f609 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -267,68 +267,35 @@ def cm_role_config(
     )
 
 
-def set_cm_role_config_group_config(
+def set_cm_role_config_group(
     api_client: ApiClient,
     role_config_group: ApiRoleConfigGroup,
-    params: dict,
+    update: ApiRoleConfigGroup,
     message: str,
 ) -> Generator[ApiRoleConfigGroup]:
-    """Update a configuration for a given Cloudera Manager Service role config group.
-    Yields the role config group, resetting the configuration to its prior state.
-    Use with 'yield from' within a pytest fixture.
+    """
+    Update a configuration for a given Cloudera Manager Service role config group.
+    Yields the role config group and upon returning control, will reset the
+    configuration to its prior state.
+    Use with 'yield from' within a pytest fixture.
 
     Args:
-        api_client (ApiClient): _description_
-        role_config_group (ApiRoleConfigGroup): _description_
-        params (dict): _description_
-        message (str): _description_
-
-    Raises:
-        Exception: _description_
+        api_client (ApiClient): CM API client
+        role_config_group (ApiRoleConfigGroup): The Role Config Group to manage
+        update (ApiRoleConfigGroup): The state to set
+        message (str): Transaction descriptor; will be appended with '::[re]set'
 
     Yields:
-        ApiRoleConfigGroup: _description_
+        ApiRoleConfigGroup: The updated Role Config Group
     """
     rcg_api = MgmtRoleConfigGroupsResourceApi(api_client)
 
-    # Retrieve all of the pre-setup configurations
-    pre = rcg_api.read_config(role_config_group.name)
+    pre = rcg_api.read_role_config_group(role_config_group.name)
 
-    # Set the test configurations
-    # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
-    # configuration entries to not run. Long-term solution is to check-and-set, which is
-    # what the Ansible modules do...
- for k, v in params.items(): - try: - rcg_api.update_config( - role_config_group.name, - message=f"{message}::set", - body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) - - # Yield the targeted role - yield rcg_api.read_role_config_group(role_config_group.name) - - # Retrieve all of the post-setup configurations - post = rcg_api.read_config(role_config_group.name) - - # Reconcile the configurations - pre_set = set([c.name for c in pre.items]) - - reconciled = pre.items.copy() - reconciled.extend( - [ - ApiConfig(name=k.name, value=None) - for k in post.items - if k.name not in pre_set - ] + yield rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::set", body=update ) - rcg_api.update_config( - role_config_group.name, - message=f"{message}::reset", - body=ApiConfigList(items=reconciled), + rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::reset", body=pre ) From 44af849c42e9a92cc0b8c0bcca4c2063465553c8 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 14:58:03 -0500 Subject: [PATCH 15/58] Move host_monitor and host_monitor_config fixtures and update pytest Marker Signed-off-by: Webster Mudge --- pyproject.toml | 1 + tests/unit/conftest.py | 53 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 86417da5..38f16eaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ markers = [ "service_config: Prepare service-wide configurations for tests", "role_config: Prepare role override configurations for tests", "role_config_group_config: Prepare role config group configurations for tests", + "role_config_group: Prepare a role config group for tests.", ] [build-system] diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 41669cfe..3528132f 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -36,11 +36,14 @@ ApiConfig, ApiHostRef, ApiHostRefList, + ApiRole, + ApiRoleConfigGroup, ApiService, ApiServiceConfig, ClustersResourceApi, Configuration, HostsResourceApi, + MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, @@ -58,6 +61,8 @@ from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, + provision_cm_role, + set_cm_role_config_group, ) @@ -326,3 +331,51 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+            )
+        else:
+            name = Path(request.fixturename).stem
+            yield from provision_cm_role(
+                cm_api_client, name, "HOSTMONITOR", hosts.items[0].host_id
+            )
+
+
+@pytest.fixture(scope="function")
+def host_monitor_config(
+    cm_api_client, host_monitor, request
+) -> Generator[ApiRoleConfigGroup]:
+    marker = request.node.get_closest_marker("role_config_group")
+
+    if marker is None:
+        raise Exception("No role_config_group marker found.")
+
+    rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client)
+
+    yield from set_cm_role_config_group(
+        api_client=cm_api_client,
+        role_config_group=rcg_api.read_role_config_group(
+            host_monitor.role_config_group_ref.role_config_group_name
+        ),
+        update=marker.args[0],
+        message=f"{Path(request.node.parent.name).stem}::{request.node.name}",
+    )

From 72418dcdc74ac8c237bb8c4eb7e6e051d9a8eeb4 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Thu, 19 Dec 2024 14:58:59 -0500
Subject: [PATCH 16/58] Update to use reworked host_monitor_config fixture

Signed-off-by: Webster Mudge

---
 ...est_cm_service_role_config_group_config.py | 164 ++++++++++--------
 1 file changed, 90 insertions(+), 74 deletions(-)

diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
index 3b740f3d..99df06c4 100644
--- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
@@ -21,82 +21,26 @@
 import logging
 import pytest
 
-from collections.abc import Generator
 from pathlib import Path
 
 from cm_client import (
-    ApiRole,
+    ApiConfig,
+    ApiConfigList,
     ApiRoleConfigGroup,
-    ClustersResourceApi,
-    MgmtRolesResourceApi,
-    MgmtRoleConfigGroupsResourceApi,
 )
 
 from ansible_collections.cloudera.cluster.plugins.modules import (
     cm_service_role_config_group_config,
 )
-from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
-    get_mgmt_base_role_config_group,
-)
 
 from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleExitJson,
     AnsibleFailJson,
-    provision_cm_role,
-    set_cm_role_config_group_config,
 )
 
 LOG = logging.getLogger(__name__)
 
 
-@pytest.fixture(scope="module")
-def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]:
-    api = MgmtRolesResourceApi(cm_api_client)
-
-    hm = next(
-        iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None
-    )
-
-    if hm is not None:
-        yield hm
-    else:
-        cluster_api = ClustersResourceApi(cm_api_client)
-
-        # Get first host of the cluster
-        hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name)
-
-        if not hosts.items:
-            raise Exception(
-                "No available hosts to assign the Cloudera Manager Service role."
- ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config( - cm_api_client, host_monitor, request -) -> Generator[ApiRoleConfigGroup]: - marker = request.node.get_closest_marker("role_config_group_config") - - if marker is None: - raise Exception("No role_config_group_config marker found.") - - rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) - - yield from set_cm_role_config_group_config( - api_client=cm_api_client, - role_config_group=rcg_api.read_role_config_group( - host_monitor.role_config_group_ref.role_config_group_name - ), - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) @@ -131,8 +75,17 @@ def test_present_invalid_parameter(conn, module_args, host_monitor): cm_service_role_config_group_config.main() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -162,8 +115,17 @@ def test_set_parameters(conn, module_args, host_monitor_config, request): assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): module_args( @@ -193,8 +155,17 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_unset_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -222,8 +193,17 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request): assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): module_args( @@ -251,8 +231,17 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + 
config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): module_args( @@ -283,8 +272,17 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_with_purge_role_type( conn, module_args, host_monitor_config, request @@ -317,8 +315,17 @@ def test_set_parameters_with_purge_role_type( assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_purge_all_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -347,8 +354,17 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request): assert len(e.value.config) == 0 -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_purge_all_parameters_role_type( conn, module_args, host_monitor_config, request From c7602e5b07df19101b1d6a60d525725035d120ef Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 15:01:10 -0500 Subject: [PATCH 17/58] Add cm_service_role_config_group module and tests Signed-off-by: Webster Mudge --- .../modules/cm_service_role_config_group.py | 351 ++++++++++++++++++ .../test_cm_service_role_config_group.py | 242 ++++++++++++ 2 files changed, 593 insertions(+) create mode 100644 plugins/modules/cm_service_role_config_group.py create mode 100644 tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py new file mode 100644 index 00000000..0a264870 --- /dev/null +++ b/plugins/modules/cm_service_role_config_group.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +DOCUMENTATION = r""" +module: cm_service_role_config_group +short_description: Manage a Cloudera Manager Service role config group. +description: + - Manage a Cloudera Manager Service role config group. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + type: + description: + - The role type defining the role config group. + type: str + required: True + aliases: + - role_type + display_name: + description: + - The display name for this role config group in the Cloudera Manager UI. + config: + description: + - The role configuration to set. + - To unset a parameter, use C(None) as the value. + type: dict + aliases: + - params + - parameters + purge: + description: + - Flag indicating whether to reset configuration parameters to only the declared entries. + type: bool + default: False +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update the configuration of a Cloudera Manager Service role config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + some_parameter: True + +- name: Update the configuration of a Cloudera Manager Service role config group, purging undeclared parameters + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + another_parameter: 3456 + purge: yes + +- name: Reset the configuration of a Cloudera Manager Service role config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: {} + purge: yes + +- name: Set the display name of a Cloudera Manager Service role config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + display_name: A new name +""" + +RETURN = r""" +role_config_group: + description: + - A Cloudera Manager Service role config group. + type: dict + returned: always + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. + type: str + returned: when supported + service_name: + description: + - The service name associated with this role config group. + type: str + returned: always + role_names: + description: + - List of role names associated with this role config group. + type: list + elements: str + returned: when supported + config: + description: + - List of configurations. + type: list + elements: dict + returned: always + contains: + name: + description: + - The canonical name that identifies this configuration parameter. + type: str + returned: when supported + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. + - Can also be absent, when enumerating allowed configs. 
+ type: str + returned: when supported + required: + description: + - Whether this configuration is required for the object. + - If any required configuration is not set, operations on the object may not work. + - Requires I(full) view. + type: bool + returned: when supported + default: + description: + - The default value. + - Requires I(full) view. + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. + - Requires I(full) view. + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Requires I(full) view. + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Requires I(full) view. + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. 
+ type: bool + returned: when supported +""" + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + BaseRoleConfigGroupDiscoveryException, + parse_role_config_group_result, + get_mgmt_base_role_config_group, +) + +from cm_client import ( + ApiRoleConfigGroup, + MgmtRoleConfigGroupsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfigGroup(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfigGroup, self).__init__(module) + + # Set the parameters + self.type = self.get_param("type") + self.display_name = self.get_param("display_name") + self.config = self.get_param("config", default=dict()) + self.purge = self.get_param("purge") + + # Initialize the return value + self.changed = False + self.diff = dict(before=dict(), after=dict()) + self.output = {} + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management Service does not exist") + else: + raise ex + + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + # Retrieve the base RCG (the _only_ RCG for CMS roles) + try: + existing = get_mgmt_base_role_config_group(self.api_client, self.type) + except ApiException as ex: + if ex.status != 404: + raise ex + except BaseRoleConfigGroupDiscoveryException as be: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + ) + + payload = ApiRoleConfigGroup() + + # Update display name + if self.display_name and self.display_name != existing.display_name: + self.changed = True + + if self.module._diff: + self.diff["before"].update(display_name=existing.display_name) + self.diff["after"].update(display_name=self.display_name) + + payload.display_name = self.display_name + + # Reconcile configurations + if self.config or self.purge: + updates = ConfigListUpdates(existing.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(config=updates.diff["before"]) + self.diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + + # Execute changes if needed + if self.changed and not self.module.check_mode: + self.output = parse_role_config_group_result( + rcg_api.update_role_config_group( + existing.name, + message=self.message, + body=payload, + ) + ) + else: + self.output = parse_role_config_group_result(existing) + + # Report on any role associations + self.output.update( + role_names=[r.name for r in rcg_api.read_roles(existing.name).items] + ) + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + display_name=dict(), + type=dict(required=True, aliases=["role_type"]), + config=dict(type="dict", aliases=["params", "parameters"]), + purge=dict(type="bool", default=False), + ), + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfigGroup(module) + + output = dict( + changed=result.changed, + role_config_group=result.output, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = 
result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py new file mode 100644 index 00000000..2006d225 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_unset( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= 
e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set_purge( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_purge_all( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict() + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group(ApiRoleConfigGroup(display_name="Test")) +def test_cm_role_config_group_display_name_set( + conn, module_args, host_monitor_config, request +): + expected = "Updated Test" + + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "display_name": expected, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected == e.value.role_config_group["display_name"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected == e.value.role_config_group["display_name"] From f7835a853335e856b4a6f425e31090413e4c1666 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:36:11 -0500 Subject: [PATCH 18/58] Add host utilities Signed-off-by: Webster Mudge --- plugins/module_utils/host_utils.py | 57 ++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 plugins/module_utils/host_utils.py diff --git a/plugins/module_utils/host_utils.py 
b/plugins/module_utils/host_utils.py
new file mode 100644
index 00000000..645cf466
--- /dev/null
+++ b/plugins/module_utils/host_utils.py
@@ -0,0 +1,57 @@
+# Copyright 2024 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common functions for Cloudera Manager hosts
+"""
+
+from cm_client import (
+    ApiClient,
+    ApiHost,
+    ApiHostRef,
+    HostsResourceApi,
+)
+from cm_client.rest import ApiException
+
+
+def get_host(
+    api_client: ApiClient, hostname: str = None, host_id: str = None
+) -> ApiHost:
+    # Resolve by hostname, if given, by scanning the full host list;
+    # otherwise, look up the host directly by its host ID.
+    if hostname:
+        return next(
+            (
+                h
+                for h in HostsResourceApi(api_client).read_hosts().items
+                if h.hostname == hostname
+            ),
+            None,
+        )
+    else:
+        try:
+            return HostsResourceApi(api_client).read_host(host_id)
+        except ApiException as ex:
+            if ex.status != 404:
+                raise ex
+            else:
+                return None
+
+
+def get_host_ref(
+    api_client: ApiClient, hostname: str = None, host_id: str = None
+) -> ApiHostRef:
+    # Convert a resolved host into a lightweight host reference, or None.
+    host = get_host(api_client, hostname, host_id)
+    if host is not None:
+        return ApiHostRef(host.host_id, host.hostname)
+    else:
+        return None

From e4f5e9f8c915b2d3c683e062e675d944d8eafebe Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Fri, 20 Dec 2024 14:36:57 -0500
Subject: [PATCH 19/58] Add utility function for role config group retrieval

Signed-off-by: Webster Mudge
---
 .../module_utils/role_config_group_utils.py | 18 ++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py
index 8b1fa561..a7b8ec70 100644
--- a/plugins/module_utils/role_config_group_utils.py
+++ b/plugins/module_utils/role_config_group_utils.py
@@ -19,6 +19,7 @@
 from cm_client import (
     ApiClient,
     ApiRoleConfigGroup,
+    ApiRoleConfigGroupRef,
     RoleConfigGroupsResourceApi,
     MgmtRoleConfigGroupsResourceApi,
 )
@@ -36,6 +37,10 @@ class BaseRoleConfigGroupDiscoveryException(Exception):
     pass
 
 
+class RoleConfigGroupDiscoveryException(Exception):
+    pass
+
+
 def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict:
     """Parse a Role Config Group into a normalized dictionary.
 
@@ -87,3 +92,16 @@ def get_mgmt_base_role_config_group(
         raise BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs))
     else:
         return rcgs[0]
+
+
+def get_role_config_group(
+    api_client: ApiClient, cluster_name: str, service_name: str, name: str
+) -> ApiRoleConfigGroup:
+    rcg_api = RoleConfigGroupsResourceApi(api_client)
+
+    rcg = rcg_api.read_role_config_group(cluster_name, name, service_name)
+
+    if rcg is None:
+        raise RoleConfigGroupDiscoveryException(name)
+    else:
+        return rcg

From e287cbff745c1320790ec984c9232e914426411f Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Fri, 20 Dec 2024 14:37:38 -0500
Subject: [PATCH 20/58] Add config parameters to parse_role_result. Add
 utility for role data model creation.
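
A minimal usage sketch of the new create_role() helper; the hostname and
configuration values below are illustrative only:

    from cm_client import ApiClient
    from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
        create_role,
    )

    def build_host_monitor(api_client: ApiClient):
        # Constructs the ApiRole data model only; nothing is provisioned here,
        # though the host reference is resolved via the API, and a
        # RoleHostNotFoundException is raised if the host cannot be found.
        return create_role(
            api_client=api_client,
            role_type="hostmonitor",  # normalized to upper case by the helper
            hostname="worker-01.cloudera.internal",  # illustrative hostname
            host_id=None,  # resolve by hostname rather than by host ID
            config=dict(process_start_secs=30),  # role override configuration
        )
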
Signed-off-by: Webster Mudge --- plugins/module_utils/role_utils.py | 58 ++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index c3a962e5..479691bb 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -15,10 +15,19 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( normalize_output, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_role_config_group, +) from cm_client import ( ApiClient, + ApiConfig, + ApiConfigList, ApiRoleList, + ApiRoleConfigGroupRef, RolesResourceApi, MgmtRolesResourceApi, ) @@ -51,6 +60,7 @@ def parse_role_result(role: ApiRole) -> dict: service_name=role.service_ref.service_name, ) output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + output.update(config={c.name: c.value for c in role.config.items}) return output @@ -72,3 +82,51 @@ def get_roles( if r.type == role_type ] ) + + +class RoleHostNotFoundException(Exception): + pass + + +def create_role( + api_client: ApiClient, + role_type: str, + hostname: str, + host_id: str, + name: str = None, + config: dict = None, + cluster_name: str = None, + service_name: str = None, + role_config_group: str = None, +) -> ApiRole: + # Set up the role + role = ApiRole(type=str(role_type).upper()) + + # Name + if name: + role.name = name # No name allows auto-generation + + # Host assignment + host_ref = get_host_ref(api_client, hostname, host_id) + if host_ref is None: + raise RoleHostNotFoundException( + f"Host not found: hostname='{hostname}', host_id='{host_id}'" + ) + else: + role.host_ref = host_ref + + # Role config group + if role_config_group: + role.role_config_group_ref = ApiRoleConfigGroupRef( + get_role_config_group( + api_client, cluster_name, service_name, role_config_group + ).name + ) + + # Role override configurations + if config: + role.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + return role From 6904d8930637325662b04858bfbdbafca03a3511 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:39:38 -0500 Subject: [PATCH 21/58] Rename Host Monitor fixtures. 
Add host_monitor_state for general role testing Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 138 +++++++++++++++++- .../test_cm_service_role_config_group.py | 20 +-- ...est_cm_service_role_config_group_config.py | 4 +- 3 files changed, 145 insertions(+), 17 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 3528132f..d814a574 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -28,21 +28,29 @@ from collections.abc import Generator from pathlib import Path +from time import sleep from cm_client import ( + ApiBulkCommandList, ApiClient, ApiClusterList, ApiCluster, + ApiCommand, ApiConfig, + ApiConfigList, ApiHostRef, ApiHostRefList, ApiRole, ApiRoleConfigGroup, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, ClustersResourceApi, + CommandsResourceApi, Configuration, HostsResourceApi, + MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, MgmtServiceResourceApi, @@ -334,7 +342,7 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: @pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: +def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) hm = next( @@ -361,21 +369,141 @@ def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: @pytest.fixture(scope="function") -def host_monitor_config( - cm_api_client, host_monitor, request +def host_monitor_role_group_config( + cm_api_client, host_monitor_role, request ) -> Generator[ApiRoleConfigGroup]: marker = request.node.get_closest_marker("role_config_group") if marker is None: - raise Exception("No role_config_group marker found.") + raise Exception("No 'role_config_group' marker found.") rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) yield from set_cm_role_config_group( api_client=cm_api_client, role_config_group=rcg_api.read_role_config_group( - host_monitor.role_config_group_ref.role_config_group_name + host_monitor_role.role_config_group_ref.role_config_group_name ), update=marker.args[0], message=f"{Path(request.node.parent.name).stem}::{request.node.name}", ) + + +@pytest.fixture(scope="function") +def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + raise Exception("No 'role' marker found.") + + role = marker.args[0] + + role_api = MgmtRolesResourceApi(cm_api_client) + cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Get the current state + pre_role = role_api.read_role(host_monitor_role.name) + pre_role.config = role_api.read_role_config(host_monitor_role.name) + + # Set config + for c in role.config.items: + try: + role_api.update_role_config( + role_name=host_monitor_role.name, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", + body=ApiConfigList(items=[c]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Update maintenance + if role.maintenance_mode: + role_api.enter_maintenance_mode(host_monitor_role.name) + else: + role_api.exit_maintenance_mode(host_monitor_role.name) + + # Update state + if role.role_state is not None: + if role.role_state in [ApiRoleState.STARTED]: + handle_commands( + cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + elif role.role_state in [ApiRoleState.STOPPED]: + handle_commands( + 
cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + + # Yield the role + current_role = role_api.read_role(host_monitor_role.name) + current_role.config = role_api.read_role_config(host_monitor_role.name) + yield current_role + + # Retrieve the test changes + post_role = role_api.read_role(role_name=host_monitor_role.name) + post_role.config = role_api.read_role_config(role_name=host_monitor_role.name) + + # Reset state + if pre_role.role_state != post_role.role_state: + if pre_role.role_state in [ApiRoleState.STARTED]: + handle_commands( + cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + elif pre_role.role_state in [ApiRoleState.STOPPED]: + handle_commands( + cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + + # Reset maintenance + if pre_role.maintenance_mode != post_role.maintenance_mode: + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(host_monitor_role.name) + else: + role_api.exit_maintenance_mode(host_monitor_role.name) + + # Reset config + pre_role_config_set = set([c.name for c in pre_role.config.items]) + + reconciled = pre_role.config.items.copy() + config_reset = [ + c for c in post_role.config.items if c.name not in pre_role_config_set + ] + reconciled.extend([ApiConfig(c.name, None) for c in config_reset]) + + role_api.update_role_config( + role_name=host_monitor_role.name, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", + body=ApiConfigList(items=reconciled), + ) + + +def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + raise Exception(error_msg) + + for cmd in commands.items: + # Serial monitoring + monitor_command(api_client, cmd) + + +def monitor_command( + api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 15 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("Command timeout: " + str(command.id)) + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(command.result_message) diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py index 2006d225..3d198107 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -54,12 +54,12 @@ ) ) def test_cm_role_config_group_config_set( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=32), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, @@ -96,12 +96,12 @@ def test_cm_role_config_group_config_set( ) ) def test_cm_role_config_group_config_unset( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=None), "message": 
f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, @@ -138,12 +138,12 @@ def test_cm_role_config_group_config_unset( ) ) def test_cm_role_config_group_config_set_purge( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=32), "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", @@ -181,12 +181,12 @@ def test_cm_role_config_group_config_set_purge( ) ) def test_cm_role_config_group_config_purge_all( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(), "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", @@ -213,14 +213,14 @@ def test_cm_role_config_group_config_purge_all( @pytest.mark.role_config_group(ApiRoleConfigGroup(display_name="Test")) def test_cm_role_config_group_display_name_set( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): expected = "Updated Test" module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "display_name": expected, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py index 99df06c4..14218d3c 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -60,11 +60,11 @@ def test_missing_required_if(conn, module_args): cm_service_role_config_group_config.main() -def test_present_invalid_parameter(conn, module_args, host_monitor): +def test_present_invalid_parameter(conn, module_args, host_monitor_role): module_args( { **conn, - "name": host_monitor.role_config_group_ref.role_config_group_name, + "name": host_monitor_role.role_config_group_ref.role_config_group_name, "parameters": dict(example="Example"), } ) From 26402b2be0457643b8f4f1dd20b8e9f379982314 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:40:18 -0500 Subject: [PATCH 22/58] Add cm_service_role and tests Signed-off-by: Webster Mudge --- plugins/modules/cm_service_role.py | 740 ++++++++++++++++++ .../cm_service_role/test_cm_service_role.py | 374 +++++++++ 2 files changed, 1114 insertions(+) create mode 100644 plugins/modules/cm_service_role.py create mode 100644 tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py new file mode 100644 index 00000000..db5db3cd --- /dev/null +++ b/plugins/modules/cm_service_role.py @@ -0,0 +1,740 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role
+short_description: Manage a Cloudera Manager Service role
+description:
+  - Create, configure, start, stop, restart, or remove a role of the Cloudera Manager Service.
+author:
+  - "Webster Mudge (@wmudge)"
+requirements:
+  - cm-client
+options:
+  cluster_hostname:
+    description:
+      - The hostname of the instance for the role.
+      - Mutually exclusive with I(cluster_host_id).
+    type: str
+    aliases:
+      - cluster_host
+  cluster_host_id:
+    description:
+      - The host ID of the instance for the role.
+      - Mutually exclusive with I(cluster_hostname).
+    type: str
+  type:
+    description:
+      - The role type of the role.
+      - The module identifies the Cloudera Manager Service role instance by its role type.
+    type: str
+    required: yes
+    aliases:
+      - role_type
+  config:
+    description:
+      - The role configuration to set, i.e. overrides.
+      - To unset a parameter, use C(None) as the value.
+    type: dict
+    aliases:
+      - params
+      - parameters
+  maintenance:
+    description:
+      - Flag for whether the role should be in maintenance mode.
+    type: bool
+    aliases:
+      - maintenance_mode
+  purge:
+    description:
+      - Flag for whether the declared role override configurations should append to or overwrite any existing configurations.
+      - To clear all override configurations, set I(config={}), i.e. an empty dictionary, and I(purge=True).
+    type: bool
+    default: False
+  state:
+    description:
+      - The state of the role.
+      - Note, if the declared state is invalid for the role, the module will return an error.
+    type: str
+    default: present
+    choices:
+      - present
+      - absent
+      - restarted
+      - started
+      - stopped
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+"""
+
+EXAMPLES = r"""
+- name: Establish a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    cluster_hostname: worker-01.cloudera.internal
+
+- name: Set a Cloudera Manager Service role to maintenance mode
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    maintenance: yes
+
+- name: Update (append) role override configurations
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      mgmt_num_descriptor_fetch_tries: 11
+
+- name: Set (purge) role override configurations
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      process_start_secs: 21
+    purge: yes
+
+- name: Remove all role override configurations
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config: {}
+    purge: yes
+
+- name: Start a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: started
+
+- name: Force a restart of a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: restarted
+
+- name: Remove a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: absent
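+
+# A sketch of combining override configuration with a lifecycle state in a
+# single task; the parameter value is illustrative.
+- name: Update a role override configuration and ensure the role is started
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      mgmt_num_descriptor_fetch_tries: 11
+    state: started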
+"""
+
+RETURN = r"""
+role:
+  description: Details about the Cloudera Manager Service role.
+  type: dict
+  contains:
+    name:
+      description: The cluster service role name.
+      type: str
+      returned: always
+    type:
+      description: The cluster service role type.
+      type: str
+      returned: always
+      sample:
+        - HOSTMONITOR
+        - SERVICEMONITOR
+        - EVENTSERVER
+    host_id:
+      description: The unique ID of the cluster host.
+      type: str
+      returned: always
+    service_name:
+      description: The name of the cluster service, which uniquely identifies it in a cluster.
+      type: str
+      returned: always
+    role_state:
+      description: State of the cluster service role.
+ type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. + type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. + type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). 
+ type: str + returned: when supported +""" + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + parse_role_result, +) + +from cm_client import ( + ApiBulkCommandList, + ApiRole, + ApiRoleList, + ApiRoleNameList, + ApiRoleState, + MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRole(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRole, self).__init__(module) + + # Set the parameters + self.cluster_hostname = self.get_param("cluster_hostname") + self.cluster_host_id = self.get_param("cluster_host_id") + self.config = self.get_param("config") + self.maintenance = self.get_param("maintenance") + self.type = self.get_param("type") + self.state = self.get_param("state") + self.purge = self.get_param("purge") + + # Initialize the return values + self.changed = False + self.diff = dict(before={}, after={}) + self.output = {} + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management Service does not exist") + else: + raise ex + + self.role_api = MgmtRolesResourceApi(self.api_client) + + current = None + + # Discover the role by its type + try: + current = next( + iter( + [r for r in self.role_api.read_roles().items if r.type == self.type] + ), + None, + ) + current.config = self.role_api.read_role_config(current.name) + except ApiException as ex: + if ex.status != 404: + raise ex + + # If deleting, do so and exit + if self.state == "absent": + if current: + self.deprovision_role(current) + + # Otherwise, manage the configuration and state + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new role + if not current: + new_role = create_role( + api_client=self.api_client, + role_type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=self.config, + ) + current = self.provision_role(new_role) + # # If it exists, but the type has changed, destroy and rebuild completely + # elif self.type and self.type != current.type: + # new_role = create_role( + # api_client=self.api_client, + # role_type=self.type, + # hostname=current.host_ref.hostname, + # host_id=current.host_ref.host_id, + # config=self.config + # ) + # current = self.reprovision_role(current, new_role) + # Else it exists, so address any changes + else: + # Handle role override configurations + if self.config or self.purge: + updates = ConfigListUpdates(current.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(config=updates.diff["before"]) + self.diff["after"].update(config=updates.diff["after"]) + + if not self.module.check_mode: + self.role_api.update_role_config( + current.name, + message=self.message, + body=updates.config, + ) + + # Handle maintenance mode + if ( + self.maintenance is not None + and self.maintenance != current.maintenance_mode + ): + self.changed = True + + if self.module._diff: + self.diff["before"].update( + 
maintenance_mode=current.maintenance_mode + ) + self.diff["after"].update(maintenance_mode=self.maintenance) + + if not self.module.check_mode: + if self.maintenance: + maintenance_cmd = self.role_api.enter_maintenance_mode( + current.name + ) + else: + maintenance_cmd = self.role_api.exit_maintenance_mode( + current.name + ) + + if maintenance_cmd.success is False: + self.module.fail_json( + msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + ) + + # Handle the various states + if self.state == "started" and current.role_state not in [ + ApiRoleState.STARTED + ]: + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STARTED") + + if not self.module.check_mode: + self.handle_commands( + MgmtRoleCommandsResourceApi(self.api_client).start_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + elif self.state == "stopped" and current.role_state not in [ + ApiRoleState.STOPPED, + ApiRoleState.NA, + ]: + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STOPPED") + + if not self.module.check_mode: + self.handle_commands( + MgmtRoleCommandsResourceApi(self.api_client).stop_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + elif self.state == "restarted": + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STARTED") + + if not self.module.check_mode: + self.handle_commands( + MgmtRoleCommandsResourceApi(self.api_client).restart_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + # If there are changes, get a refresh read + if self.changed: + refresh = self.role_api.read_role(current.name) + refresh.config = self.role_api.read_role_config(current.name) + self.output = parse_role_result(refresh) + # Otherwise return the existing + else: + self.output = parse_role_result(current) + else: + self.module.fail_json(msg=f"Invalid state: {self.state}") + + def provision_role(self, role: ApiRole) -> ApiRole: + self.changed = True + + if self.module._diff: + self.diff = dict( + before={}, + after=role.to_dict(), + ) + + if not self.module.check_mode: + created_role = next( + ( + iter( + self.role_api.create_roles( + body=ApiRoleList(items=[role]), + ).items + ) + ), + {}, + ) + if not created_role: + self.module.fail_json( + msg="Unable to create new role", role=to_native(role.to_dict()) + ) + + def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole: + self.changed = True + + if self.module._diff: + self.diff = dict( + before=existing_role.to_dict(), + after=new_role.to_dict(), + ) + + if not self.module.check_mode: + self.role_api.delete_role(existing_role.name) + + rebuilt_role = next( + ( + iter( + self.role_api.create_roles( + body=ApiRoleList(items=[new_role]), + ).items + ) + ), + {}, + ) + if not rebuilt_role: + self.module.fail_json( + msg="Unable to recreate role, " + existing_role.name, + role=to_native(rebuilt_role.to_dict()), + ) + + def deprovision_role(self, role: ApiRole): + self.changed = True + + if self.module._diff: + self.diff = dict(before=role.to_dict(), after=dict()) + + if not self.module.check_mode: + self.role_api.delete_role(role.name) + + # def xxxcreate_role(self) -> ApiRole: + # # Check for required creation parameters + # missing_params = [] + + # if self.type is None: + # 
missing_params.append("type") + + # if self.cluster_hostname is None and self.cluster_host_id is None: + # missing_params += ["cluster_hostname", "cluster_host_id"] + + # if missing_params: + # self.module.fail_json( + # msg=f"Unable to create new role, missing required arguments: {', '.join(sorted(missing_params)) }" + # ) + + # # Set up the role + # payload = ApiRole(type=str(self.type).upper()) + + # # Name + # if self.name: + # payload.name = self.name # No name allows auto-generation + + # # Host assignment + # host_ref = get_host_ref(self.api_client, self.cluster_hostname, self.cluster_host_id) + + # if host_ref is None: + # self.module.fail_json(msg="Invalid host reference") + # else: + # payload.host_ref = host_ref + + # # Role override configurations + # if self.config: + # payload.config = ApiConfigList(items=[ApiConfig(name=k, value=v) for k, v in self.config.items()]) + + # # Execute the creation + # self.changed = True + + # if self.module._diff: + # self.diff = dict( + # before={}, + # after=payload.to_dict(), + # ) + + # if not self.module.check_mode: + # created_role = next( + # ( + # iter( + # self.role_api.create_roles( + # body=ApiRoleList(items=[payload]), + # ).items + # ) + # ), + # {}, + # ) + + # # Maintenance + # if self.maintenance: + # if self.module._diff: + # self.diff["after"].update(maintenance_mode=True) + + # maintenance_cmd = self.role_api.enter_maintenance_mode( + # created_role.name + # ) + + # if maintenance_cmd.success is False: + # self.module.fail_json( + # msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + # ) + + # if self.state in ["started", "restarted"]: + # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).start_command( + # body=ApiRoleNameList(items=[created_role.name]), + # )) + + # elif self.state == "stopped": + # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).stop_command( + # body=ApiRoleNameList(items=[created_role.name]), + # )) + + # if refresh: + # self.output = parse_role_result( + # self.role_api.read_role( + # self.cluster, + # created_role.name, + # self.service, + # view="full", + # ) + # ) + # else: + # self.output = parse_role_result(created_role) + + def handle_commands(self, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + self.module.fail_json(msg=error_msg) + + for c in commands.items: + # Not in parallel, but should only be a single command + self.wait_command(c) + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + cluster_hostname=dict(aliases=["cluster_host"]), + cluster_host_id=dict(), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + config=dict(type="dict", aliases=["params", "parameters"]), + purge=dict(type="bool", default=False), + type=dict(required=True), + state=dict( + default="present", + choices=["present", "absent", "restarted", "started", "stopped"], + ), + ), + mutually_exclusive=[ + ["cluster_hostname", "cluster_host_id"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRole(module) + + output = dict( + changed=result.changed, + role=result.output, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py 
b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py new file mode 100644 index 00000000..ed5ce806 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRole, + ClustersResourceApi, + MgmtRolesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + cm_role_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "role": host_monitor.name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role.main() + + +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 11), + ApiConfig("process_start_secs", 21), + ] + ) + ) +) +def test_set_parameters(conn, module_args, host_monitor_state, request): + module_args( + { + **conn, + "type": host_monitor_state.type, + "config": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = 
dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # 
_ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From 092d3dcf6ee65ec5c66fac35bebbd3c83c0ebe46 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:40:30 -0500 Subject: [PATCH 23/58] Add 'role' marker Signed-off-by: Webster Mudge --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 38f16eaa..5f8c1bd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ markers = [ "role_config: Prepare role override configurations for tests", "role_config_group_config: Prepare role config group configurations for tests", "role_config_group: Prepare a role config group for tests.", + "role: Prepare a role for tests.", ] [build-system] From 904b9c2da2baf20b63c03819faf91e9e6fd86974 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Sun, 29 Dec 2024 10:43:43 -0500 Subject: [PATCH 24/58] Consolidate host_monitor role Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 46 +++++++++++++++++++ .../cm_service_role/test_cm_service_role.py | 42 ----------------- .../test_cm_service_role_config.py | 42 ----------------- 3 files changed, 46 insertions(+), 84 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index d814a574..95bcd110 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -168,6 +168,10 @@ def cm_api_client(conn) -> ApiClient: # Handle redirects redirect = rest.GET(url).urllib3_response.geturl() + + if redirect == None: + raise Exception("Unable to establish connection to Cloudera Manager") + if redirect != "/": url = redirect @@ -341,6 +345,48 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: ) +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + @pytest.fixture(scope="module") def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py index ed5ce806..2ec761d9 100644 --- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -43,48 +43,6 @@ LOG = logging.getLogger(__name__) -@pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." - ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: - marker = request.node.get_closest_marker("role_config") - - if marker is None: - raise Exception("No role_config marker found.") - - yield from cm_role_config( - api_client=cm_api_client, - role=host_monitor, - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py index d230005a..8f6c2337 100644 --- a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -41,48 +41,6 @@ LOG = logging.getLogger(__name__) -@pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." 
- ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: - marker = request.node.get_closest_marker("role_config") - - if marker is None: - raise Exception("No role_config marker found.") - - yield from cm_role_config( - api_client=cm_api_client, - role=host_monitor, - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) From 44e0fc07f2f23dff079a93cb087a9de9296a2531 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 25/58] Fix invalid fixture references Signed-off-by: Webster Mudge --- .../unit/plugins/modules/cm_service/test_cm_service.py | 8 ++++---- .../cm_service_config/test_cm_service_config.py | 8 ++++---- .../modules/service_config/test_service_config.py | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index af679312..21004145 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -38,7 +38,7 @@ def test_minimal(conn, module_args, cms): @pytest.mark.service_config(dict(log_event_retry_frequency=10)) -def test_set_parameters(conn, module_args, cms_service_config): +def test_set_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -76,7 +76,7 @@ def test_set_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_service_config): +def test_unset_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -110,7 +110,7 @@ def test_unset_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, cms_service_config): +def test_set_parameters_with_purge(conn, module_args, cms_config): module_args( { **conn, @@ -147,7 +147,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_service_config): +def test_purge_all_parameters(conn, module_args, cms_config): module_args( { **conn, diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index ad54716b..39856c63 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -54,7 +54,7 @@ def test_present_invalid_parameter(conn, module_args): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=False, log_event_retry_frequency=10) ) -def test_set_parameters(conn, module_args, cms_service_config): +def test_set_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -86,7 +86,7 @@ def test_set_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, 
log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_service_config): +def test_unset_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -114,7 +114,7 @@ def test_unset_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, cms_service_config): +def test_set_parameters_with_purge(conn, module_args, cms_config): module_args( { **conn, @@ -145,7 +145,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_service_config): +def test_purge_all_parameters(conn, module_args, cms_config): module_args( { **conn, diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 707208a8..cf767d40 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -128,11 +128,11 @@ def test_present_invalid_cluster(conn, module_args): service_config.main() -def test_present_invalid_service(conn, module_args, target_service): +def test_present_invalid_service(conn, module_args, zk_service): module_args( { **conn, - "cluster": target_service.cluster_ref.cluster_name, + "cluster": zk_service.cluster_ref.cluster_name, "service": "example", "parameters": dict(example="Example"), } @@ -142,12 +142,12 @@ def test_present_invalid_service(conn, module_args, target_service): service_config.main() -def test_present_invalid_parameter(conn, module_args, target_service): +def test_present_invalid_parameter(conn, module_args, zk_service): module_args( { **conn, - "cluster": target_service.cluster_ref.cluster_name, - "service": target_service.name, + "cluster": zk_service.cluster_ref.cluster_name, + "service": zk_service.name, "parameters": dict(example="Example"), } ) From 159d0d0c881209d40ff03bc98b8f4e4a80517cc3 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 26/58] Update testing requirements.txt Signed-off-by: Webster Mudge --- tests/unit/requirements.txt | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index ece294ca..db7e089e 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -12,4 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-cm-client +pre-commit +pytest +pytest-mock +ansible-core<2.17 # For RHEL 8 support +molecule +molecule-plugins +molecule-plugins[ec2] +tox-ansible From 516af16494d259ea4323bcfb7c4804e1e62b50ac Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 27/58] Fix name of Cloudera Manager client library Signed-off-by: Webster Mudge --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 78e23e5b..49d92201 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,4 +16,4 @@ jmespath # For cm_service lookup -cm_client +cm-client From 4a446acfda87309a2d9c360dc7c879c639a17475 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 28/58] Add pytest comment and update formatting Signed-off-by: Webster Mudge --- pyproject.toml | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5f8c1bd0..bae03de2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,12 +6,13 @@ readme = "README.md" requires-python = ">=3.8" license = "Apache-2.0" keywords = [] -authors = [ - { name = "Webster Mudge", email = "wmudge@cloudera.com" }, -] +authors = [{ name = "Webster Mudge", email = "wmudge@cloudera.com" }] classifiers = [] dependencies = [] +[tool.hatch.build.targets.wheel] +bypass-selection = true + [tool.hatch.version] path = "galaxy.yml" pattern = "version:\\s+(?P[\\d\\.]+)" @@ -29,7 +30,7 @@ dependencies = [ "molecule-plugins", "molecule-plugins[ec2]", "tox-ansible", - "ansible-core<2.17", # For RHEL 8 support + "ansible-core<2.17", # For RHEL 8 support "jmespath", "cm-client", ] @@ -37,17 +38,17 @@ dependencies = [ [tool.hatch.envs.lint] python = "3.12" skip-install = true -extra-dependencies = [ - "ansible-lint", -] +extra-dependencies = ["ansible-lint"] [tool.hatch.envs.lint.scripts] run = "pre-commit run -a" [tool.pytest.ini_options] -testpaths = [ - "tests", -] +# addopts = [ +# "--lf", +# "--nf", +# ] +testpaths = ["tests"] filterwarnings = [ "ignore:AnsibleCollectionFinder has already been configured", "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", From 3558133e4058db8e78728a5e8776920854a8fad5 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 29/58] Update docstrings and add missing imports for fixtures and fixture utilities Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 33 ++++++++++++++++++++++----------- tests/unit/conftest.py | 3 ++- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 1103f609..c68bf4ba 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -61,7 +61,7 @@ def __init__(self, kwargs): def wait_for_command( api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 ): - """Polls Cloudera Manager to wait for a Command to complete.""" + """Polls Cloudera Manager to wait for given Command to succeed or fail.""" poll_count = 0 while command.active: @@ -90,7 +90,7 @@ def provision_service( Exception: _description_ Yields: - ApiService: _description_ + Generator[ApiService]: _description_ """ api = ServicesResourceApi(api_client) @@ -132,7 +132,7 @@ def service_wide_config( Exception: _description_ Yields: - ApiService: _description_ + Generator[ApiService]: _description_ """ service_api = ServicesResourceApi(api_client) @@ -190,6 +190,18 @@ def service_wide_config( 
def provision_cm_role( api_client: ApiClient, role_name: str, role_type: str, host_id: str ) -> Generator[ApiRole]: + """Yield a newly-created Cloudera Manager Service role, deleting the + role after use. Use with 'yield from' within a pytest fixture. + + Args: + api_client (ApiClient): _description_ + role_name (str): _description_ + role_type (str): _description_ + host_id (str): _description_ + + Yields: + Generator[ApiRole]: _description_ + """ api = MgmtRolesResourceApi(api_client) role = ApiRole( @@ -203,7 +215,7 @@ def provision_cm_role( api.delete_role(role_name=role_name) -def cm_role_config( +def set_cm_role_config( api_client: ApiClient, role: ApiRole, params: dict, message: str ) -> Generator[ApiRole]: """Update a role configuration for a given role. Yields the @@ -220,7 +232,7 @@ def cm_role_config( Exception: _description_ Yields: - ApiRole: _description_ + Generator[ApiRole]: _description_ """ role_api = MgmtRolesResourceApi(api_client) @@ -273,11 +285,10 @@ def set_cm_role_config_group( update: ApiRoleConfigGroup, message: str, ) -> Generator[ApiRoleConfigGroup]: - """ - Update a configuration for a given Cloudera Manager Service role config group. - Yields the role config group and upon returning control, will reset the - configuration to its prior state. - Use with 'yield from' within a pytest fixture. + """Update a configuration for a given Cloudera Manager Service role config group. + Yields the role config group and upon returning control, will reset the + configuration to its prior state. + Use with 'yield from' within a pytest fixture. Args: api_client (ApiClient): CM API client @@ -286,7 +297,7 @@ def set_cm_role_config_group( message (str): Transaction descriptor; will be appended with '::[re]set' Yields: - ApiRoleConfigGroup: The updated Role Config Group + Generator[ApiRoleConfigGroup]: The updated Role Config Group """ rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 95bcd110..a19c2b8d 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -70,6 +70,7 @@ AnsibleFailJson, AnsibleExitJson, provision_cm_role, + set_cm_role_config, set_cm_role_config_group, ) @@ -379,7 +380,7 @@ def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRo if marker is None: raise Exception("No role_config marker found.") - yield from cm_role_config( + yield from set_cm_role_config( api_client=cm_api_client, role=host_monitor, params=marker.args[0], From 7c5aa310cbf6e00de5f07475cae35de639d06b4e Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:38 -0500 Subject: [PATCH 30/58] Remove unused imports Signed-off-by: Webster Mudge --- .../test_cm_service_role_config.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py index 8f6c2337..d9581e4d 100644 --- a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -21,21 +21,12 @@ import logging import pytest -from collections.abc import Generator from pathlib import Path -from cm_client import ( - ApiRole, - ClustersResourceApi, - MgmtRolesResourceApi, -) - from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, 
 AnsibleFailJson,
-    provision_cm_role,
-    cm_role_config,
 )

 LOG = logging.getLogger(__name__)

From b3c5556e5a8d684625efff8ce23ce39eaa73898a Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Fri, 3 Jan 2025 16:53:38 -0500
Subject: [PATCH 31/58] Update cm_service_role and tests to support role
 override configurations, host relocation, and states.

Signed-off-by: Webster Mudge

---
 plugins/modules/cm_service_role.py            | 292 ++++----------
 tests/unit/__init__.py                        |  92 ++++-
 tests/unit/conftest.py                        |  89 ++++-
 .../cm_service_role/test_cm_service_role.py   | 366 +++++++++++++-----
 4 files changed, 518 insertions(+), 321 deletions(-)

diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py
index db5db3cd..fe8688fe 100644
--- a/plugins/modules/cm_service_role.py
+++ b/plugins/modules/cm_service_role.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-

-# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,30 +25,25 @@
 requirements:
   - cm-client
 options:
-  cms_hostname:
+  cluster_hostname:
     description:
       - The hostname of a cluster instance for the role.
+      - If the hostname is different from the existing host for the I(type), the role will be destroyed and rebuilt on the declared host.
       - Mutually exclusive with I(cluster_host_id).
     type: str
     aliases:
       - cluster_host
-  cms_host_id:
+  cluster_host_id:
     description:
       - The host ID of a cluster instance for the role.
+      - If the host ID is different from the existing host for the I(type), the role will be destroyed and rebuilt on the declared host.
       - Mutually exclusive with I(cluster_hostname).
     type: str
   type:
     description:
       - A role type for the role.
-      - Required if the I(state) creates a new role.
-    type: str
-    aliases:
-      - role_type
-  role_config_group:
-    description:
-      - A role type for the role.
-      - Required if the I(state) creates a new role.
     type: str
+    required: True
     aliases:
       - role_type
   config:
     description:
@@ -65,21 +60,17 @@
     type: bool
     aliases:
       - maintenance_mode
-  tags:
-    description:
-      - A set of tags applied to the role.
-      - To unset a tag, use C(None) as its value.
-    type: dict
   purge:
     description:
-      - Flag for whether the declared role tags should append or overwrite any existing tags.
-      - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True).
+      - Flag for whether the declared role configurations should append or overwrite any existing configurations.
+      - To clear all role configurations, set I(config={}), i.e. an empty dictionary, or omit entirely, and set I(purge=True).
     type: bool
    default: False
   state:
     description:
       - The state of the role.
-      - Note, if the declared state is invalid for the role, for example, the role is a C(HDFS GATEWAY), the module will return an error.
+      - Note, if the declared state is invalid for the role, the module will return an error.
+      - Note, I(restarted) will always force a change of state of the role. 
type: str default: present choices: @@ -101,139 +92,101 @@ """ EXAMPLES = r""" -- name: Establish a service role (auto-generated name) - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - cluster_hostname: worker-01.cloudera.internal - -- name: Establish a service role (defined name) - cloudera.cluster.service_role: +- name: Establish a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - name: example-gateway + type: HOSTMONITOR cluster_hostname: worker-01.cloudera.internal -- name: Set a service role to maintenance mode - cloudera.cluster.service_role: +- name: Set a Cloudera Manager Service role to maintenance mode + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR maintenance: yes -- name: Update (append) tags to a service role - cloudera.cluster.service_role: +- name: Update (append) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_one: value_one - tag_two: value_two + type: HOSTMONITOR + config: + some_config: value_one + another_config: value_two -- name: Set (purge) tags to a service role - cloudera.cluster.service_role: +- name: Set (purge) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_three: value_three + type: HOSTMONITOR + config: + yet_another_config: value_three purge: yes -- name: Remove all tags on a service role - cloudera.cluster.service_role: +- name: Remove all role configurations on a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: {} + type: HOSTMONITOR purge: yes -- name: Start a service role - cloudera.cluster.service_role: +- name: Start a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: started -- name: Force a restart to a service role - cloudera.cluster.service_role: +- name: Force a restart to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: restarted -- name: Start a service role - cloudera.cluster.service_role: +- name: Remove a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - state: started - -- name: Remove a service role - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: 
"S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: absent """ RETURN = r""" role: - description: Details about the service role. + description: Details about the Cloudera Manager Service role. type: dict contains: name: - description: The cluster service role name. + description: + - The Cloudera Manager Service role name. + - Note, this is an auto-generated name and cannot be changed. type: str returned: always type: - description: The cluster service role type. + description: The Cloudera Manager Service role type. type: str returned: always sample: - - NAMENODE - - DATANODE - - TASKTRACKER + - HOSTMONITOR host_id: description: The unique ID of the cluster host. type: str returned: always service_name: - description: The name of the cluster service, which uniquely identifies it in a cluster. + description: The name of the Cloudera Manager Service, which uniquely identifies it in a deployment. type: str - returned: always + returned: when supported role_state: - description: State of the cluster service role. + description: State of the Cloudera Manager Service role. type: str returned: always sample: @@ -245,11 +198,11 @@ - STOPPED - NA commission_state: - description: Commission state of the cluster service role. + description: Commission state of the Cloudera Manager Service role. type: str returned: always health_summary: - description: The high-level health status of the cluster service role. + description: The high-level health status of the Cloudera Manager Service role. type: str returned: always sample: @@ -260,7 +213,7 @@ - CONCERNING - BAD config_staleness_status: - description: Status of configuration staleness for the cluster service role. + description: Status of configuration staleness for the Cloudera Manager Service role. type: str returned: always sample: @@ -268,7 +221,7 @@ - STALE_REFRESHABLE - STALE health_checks: - description: Lists all available health checks for cluster service role. + description: Lists all available health checks for Cloudera Manager Service role. type: list elements: dict returned: when supported @@ -299,7 +252,7 @@ type: bool returned: when supported maintenance_mode: - description: Whether the cluster service role is in maintenance mode. + description: Whether the Cloudera Manager Service role is in maintenance mode. type: bool returned: when supported maintenance_owners: @@ -314,16 +267,16 @@ - HOST - CONTROL_PLANE role_config_group_name: - description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. type: str returned: when supported tags: - description: The dictionary of tags for the cluster service role. + description: The dictionary of tags for the Cloudera Manager Service role. type: dict returned: when supported zoo_keeper_server_mode: description: - - The Zookeeper server mode for this cluster service role. + - The Zookeeper server mode for this Cloudera Manager Service role. - Note that for non-Zookeeper Server roles, this will be C(null). 
type: str returned: when supported @@ -397,7 +350,8 @@ def process(self): ), None, ) - current.config = self.role_api.read_role_config(current.name) + if current is not None: + current.config = self.role_api.read_role_config(current.name) except ApiException as ex: if ex.status != 404: raise ex @@ -419,16 +373,27 @@ def process(self): config=self.config, ) current = self.provision_role(new_role) - # # If it exists, but the type has changed, destroy and rebuild completely - # elif self.type and self.type != current.type: - # new_role = create_role( - # api_client=self.api_client, - # role_type=self.type, - # hostname=current.host_ref.hostname, - # host_id=current.host_ref.host_id, - # config=self.config - # ) - # current = self.reprovision_role(current, new_role) + # If it exists, but the host has changed, destroy and rebuild completely + elif ( + self.cluster_hostname is not None + and self.cluster_hostname != current.host_ref.hostname + ) or ( + self.cluster_host_id is not None + and self.cluster_host_id != current.host_ref.host_id + ): + if self.config: + new_config = self.config + else: + new_config = {c.name: c.value for c in current.config.items} + + new_role = create_role( + api_client=self.api_client, + role_type=current.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=new_config, + ) + current = self.reprovision_role(current, new_role) # Else it exists, so address any changes else: # Handle role override configurations @@ -560,6 +525,7 @@ def provision_role(self, role: ApiRole) -> ApiRole: self.module.fail_json( msg="Unable to create new role", role=to_native(role.to_dict()) ) + return created_role def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole: self.changed = True @@ -588,8 +554,11 @@ def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole msg="Unable to recreate role, " + existing_role.name, role=to_native(rebuilt_role.to_dict()), ) + return rebuilt_role + else: + return existing_role - def deprovision_role(self, role: ApiRole): + def deprovision_role(self, role: ApiRole) -> None: self.changed = True if self.module._diff: @@ -598,97 +567,6 @@ def deprovision_role(self, role: ApiRole): if not self.module.check_mode: self.role_api.delete_role(role.name) - # def xxxcreate_role(self) -> ApiRole: - # # Check for required creation parameters - # missing_params = [] - - # if self.type is None: - # missing_params.append("type") - - # if self.cluster_hostname is None and self.cluster_host_id is None: - # missing_params += ["cluster_hostname", "cluster_host_id"] - - # if missing_params: - # self.module.fail_json( - # msg=f"Unable to create new role, missing required arguments: {', '.join(sorted(missing_params)) }" - # ) - - # # Set up the role - # payload = ApiRole(type=str(self.type).upper()) - - # # Name - # if self.name: - # payload.name = self.name # No name allows auto-generation - - # # Host assignment - # host_ref = get_host_ref(self.api_client, self.cluster_hostname, self.cluster_host_id) - - # if host_ref is None: - # self.module.fail_json(msg="Invalid host reference") - # else: - # payload.host_ref = host_ref - - # # Role override configurations - # if self.config: - # payload.config = ApiConfigList(items=[ApiConfig(name=k, value=v) for k, v in self.config.items()]) - - # # Execute the creation - # self.changed = True - - # if self.module._diff: - # self.diff = dict( - # before={}, - # after=payload.to_dict(), - # ) - - # if not self.module.check_mode: - # created_role = next( - # ( - # 
iter( - # self.role_api.create_roles( - # body=ApiRoleList(items=[payload]), - # ).items - # ) - # ), - # {}, - # ) - - # # Maintenance - # if self.maintenance: - # if self.module._diff: - # self.diff["after"].update(maintenance_mode=True) - - # maintenance_cmd = self.role_api.enter_maintenance_mode( - # created_role.name - # ) - - # if maintenance_cmd.success is False: - # self.module.fail_json( - # msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" - # ) - - # if self.state in ["started", "restarted"]: - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).start_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # elif self.state == "stopped": - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).stop_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # if refresh: - # self.output = parse_role_result( - # self.role_api.read_role( - # self.cluster, - # created_role.name, - # self.service, - # view="full", - # ) - # ) - # else: - # self.output = parse_role_result(created_role) - def handle_commands(self, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) @@ -707,7 +585,7 @@ def main(): maintenance=dict(type="bool", aliases=["maintenance_mode"]), config=dict(type="dict", aliases=["params", "parameters"]), purge=dict(type="bool", default=False), - type=dict(required=True), + type=dict(required=True, aliases=["role_type"]), state=dict( default="present", choices=["present", "absent", "restarted", "started", "stopped"], diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index c68bf4ba..1a2f8423 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2024 Cloudera, Inc. +# Copyright 2025 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,17 +26,27 @@ ApiRole, ApiRoleConfigGroup, ApiRoleList, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, CommandsResourceApi, MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" @@ -215,6 +225,86 @@ def provision_cm_role( api.delete_role(role_name=role_name) +def set_cm_role( + api_client: ApiClient, cluster: ApiCluster, role: ApiRole +) -> Generator[ApiRole]: + """Set a net-new Cloudera Manager Service role. Yields the new role, + resetting to any existing role upon completion. Use with 'yield from' + within a pytest fixture. 
+ """ + role_api = MgmtRolesResourceApi(api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(api_client, role.type).items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + if not role.host_ref: + cluster_api = ClustersResourceApi(api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + + role.host_ref = get_host_ref(api_client, host_id=hosts.items[0].host_id) + + # Create the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + if role.maintenance_mode: + role_api.enter_maintenance_mode(role_name=current_role.name) + + if role.role_state in [ApiRoleState.STARTING, ApiRoleState.STARTED]: + start_cmds = role_cmd_api.start_command( + body=ApiRoleNameList(items=[current_role.name]) + ) + if start_cmds.errors: + error_msg = "\n".join(start_cmds.errors) + raise Exception(error_msg) + + for cmd in start_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + # Yield the role under test + yield current_role + + # Remove the role under test + current_role = role_api.delete_role(role_name=current_role.name) + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + if restart_cmds.errors: + error_msg = "\n".join(restart_cmds.errors) + raise Exception(error_msg) + + for cmd in restart_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + def set_cm_role_config( api_client: ApiClient, role: ApiRole, params: dict, message: str ) -> Generator[ApiRole]: diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index a19c2b8d..cda5e8d9 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -42,6 +42,7 @@ ApiHostRefList, ApiRole, ApiRoleConfigGroup, + ApiRoleList, ApiRoleNameList, ApiRoleState, ApiService, @@ -66,6 +67,10 @@ Parcel, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, @@ -194,7 +199,23 @@ def cm_api_client(conn) -> ApiClient: @pytest.fixture(scope="session") def base_cluster(cm_api_client, request): - """Provision a CDH Base cluster.""" + """Provision a CDH Base cluster. If the variable 'CM_CLUSTER' is present, + will attempt to read and yield a reference to this cluster. Otherwise, + will yield a new base cluster with a single host, deleting the cluster + once completed. 
+ + Args: + cm_api_client (_type_): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + Exception: _description_ + Exception: _description_ + + Yields: + _type_: _description_ + """ cluster_api = ClustersResourceApi(cm_api_client) @@ -270,14 +291,32 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request) -> Generator[ApiService]: - """Provisions Cloudera Manager Service.""" +def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: + """Provisions Cloudera Manager Service. If the Cloudera Manager Service + is present, will read and yield this reference. Otherwise, will + yield a new Cloudera Manager Service, deleting it after use. - api = MgmtServiceResourceApi(cm_api_client) + NOTE! A new Cloudera Manager Service will _not_ be provisioned if + there are any existing clusters within the deployment! Therefore, + you must only run this fixture to provision a net-new Cloudera Manager + Service on a bare deployment, i.e. Cloudera Manager and hosts only. + + Args: + cm_api_client (ApiClient): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + + Yields: + Generator[ApiService]: _description_ + """ + + cms_api = MgmtServiceResourceApi(cm_api_client) # Return if the Cloudera Manager Service is already present try: - yield api.read_service() + yield cms_api.read_service() return except ApiException as ae: if ae.status != 404 or "Cannot find management service." not in str(ae.body): @@ -289,9 +328,12 @@ def cms(cm_api_client, request) -> Generator[ApiService]: type="MGMT", ) - yield api.setup_cms(body=service) + cm_service = cms_api.setup_cms(body=service) + cms_api.auto_assign_roles() - api.delete_cms() + yield cm_service + + cms_api.delete_cms() @pytest.fixture(scope="function") @@ -419,6 +461,7 @@ def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: def host_monitor_role_group_config( cm_api_client, host_monitor_role, request ) -> Generator[ApiRoleConfigGroup]: + """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service.""" marker = request.node.get_closest_marker("role_config_group") if marker is None: @@ -532,6 +575,38 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A ) +@pytest.fixture(scope="function") +def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: + role_api = MgmtRolesResourceApi(cm_api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + # Yield now that the role has been removed + yield + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + handle_commands(api_client=cm_api_client, commands=restart_cmds) + + def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) diff --git 
a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py index 2ec761d9..b1a9c98a 100644 --- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,6 +28,8 @@ ApiConfig, ApiConfigList, ApiRole, + ApiRoleList, + ApiRoleState, ClustersResourceApi, MgmtRolesResourceApi, ) @@ -36,46 +38,142 @@ from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, - provision_cm_role, - cm_role_config, + set_cm_role, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, ) LOG = logging.getLogger(__name__) +@pytest.fixture(scope="function") +def target_cm_role(cm_api_client, cms, base_cluster, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + yield from set_cm_role(cm_api_client, base_cluster, role) + + +@pytest.fixture(scope="function") +def target_cm_role_cleared( + cm_api_client, base_cluster, host_monitor_cleared, request +) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + role_api = MgmtRolesResourceApi(cm_api_client) + + if not role.host_ref: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + + role.host_ref = get_host_ref(cm_api_client, host_id=hosts.items[0].host_id) + + # Create and yield the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + yield current_role + + # Clear out any remaining roles + remaining_roles = get_mgmt_roles(cm_api_client, "HOSTMONITOR") + + for r in remaining_roles.items: + role_api.delete_role(role_name=r.name) + + def test_missing_required(conn, module_args): module_args(conn) - with pytest.raises(AnsibleFailJson, match="parameters"): + with pytest.raises(AnsibleFailJson, match="type"): cm_service_role.main() -def test_missing_required_if(conn, module_args): - module_args( - { - **conn, - "parameters": dict(), - } - ) +def test_mutually_exclusive(conn, module_args): + module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"}) - with pytest.raises(AnsibleFailJson, match="name, type"): + with pytest.raises( + AnsibleFailJson, + match="parameters are mutually exclusive: cluster_hostname|cluster_host_id", + ): cm_service_role.main() -def test_present_invalid_parameter(conn, module_args, host_monitor): +@pytest.mark.role(ApiRole()) +def test_relocate_host( + conn, module_args, cm_api_client, base_cluster, target_cm_role_cleared, request +): + cluster_api = ClustersResourceApi(cm_api_client) + + # Get second host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + filtered_hosts = [ + h for h in hosts.items if h.host_id != target_cm_role_cleared.host_ref.host_id + ] + + if len(filtered_hosts) < 1: + raise Exception( + "Not enough hosts to reassign the Cloudera Manager Service role." 
+ ) + module_args( { **conn, - "role": host_monitor.name, - "parameters": dict(example="Example"), + "type": target_cm_role_cleared.type, + "cluster_hostname": filtered_hosts[0].hostname, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" - ): + expected = filtered_hosts[0].host_id + + with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() + assert e.value.changed == True + assert expected == e.value.role["host_id"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected == e.value.role["host_id"] + @pytest.mark.role( ApiRole( @@ -87,19 +185,19 @@ def test_present_invalid_parameter(conn, module_args, host_monitor): ) ) ) -def test_set_parameters(conn, module_args, host_monitor_state, request): +def test_set_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_state.type, - "config": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=55), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="21") with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() @@ -115,171 +213,206 @@ def test_set_parameters(conn, module_args, host_monitor_state, request): assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 12), + ApiConfig("process_start_secs", 22), + ] + ) + ) ) -def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_unset_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "role_type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=None), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", - # _ansible_check_mode=True, - # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(process_start_secs="22") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 13), + ApiConfig("process_start_secs", 23), + ] + ) + ) ) -def test_unset_parameters(conn, module_args, host_monitor_config, request): +def 
test_set_config_purge(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=33), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") + expected = dict(mgmt_num_descriptor_fetch_tries="33") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 14), + ApiConfig("process_start_secs", 24), + ] + ) + ) ) -def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_set_config_purge_all(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): +@pytest.mark.role(ApiRole(maintenance_mode=False)) +def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), - "purge": True, + "type": target_cm_role.type, + "maintenance": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["maintenance_mode"] == True # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] 
for c in e.value.config}.items()
+    assert e.value.role["maintenance_mode"] == True


-@pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
-)
-def test_set_parameters_with_purge_role_type(
-    conn, module_args, host_monitor_config, request
-):
+@pytest.mark.role(ApiRole(maintenance_mode=True))
+def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request):
     module_args(
         {
             **conn,
-            "role_type": host_monitor_config.type,
-            "parameters": dict(mgmt_num_descriptor_fetch_tries=32),
-            "purge": True,
+            "type": target_cm_role.type,
+            "maintenance": False,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
         }
     )

-    expected = dict(mgmt_num_descriptor_fetch_tries="32")

     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()

     assert e.value.changed == True
-    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+    assert e.value.role["maintenance_mode"] == False

     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()

     assert e.value.changed == False
-    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+    assert e.value.role["maintenance_mode"] == False


-@pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
-)
-def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
+@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED))
+def test_state_started(conn, module_args, target_cm_role, request):
     module_args(
         {
             **conn,
-            "role": host_monitor_config.name,
-            "parameters": dict(),
-            "purge": True,
+            "type": target_cm_role.type,
+            "state": "started",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
         }
     )

     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()

     assert e.value.changed == True
-    assert len(e.value.config) == 0
+    assert e.value.role["role_state"] == "STARTED"

     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()

     assert e.value.changed == False
-    assert len(e.value.config) == 0
+    assert e.value.role["role_state"] == "STARTED"


-@pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
-)
-def test_purge_all_parameters_role_type(
-    conn, module_args, host_monitor_config, request
-):
+@pytest.mark.role(ApiRole(role_state=ApiRoleState.STARTED))
+def test_state_stopped(conn, module_args, target_cm_role, request):
     module_args(
         {
             **conn,
-            "type": host_monitor_config.type,
-            "parameters": dict(),
-            
"purge": True, + "type": target_cm_role.type, + "state": "restarted", "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, @@ -319,14 +447,40 @@ def test_purge_all_parameters_role_type( ) with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STARTED" + + # Idempotency is not possible due to this state + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == "STARTED" + + +def test_state_absent(conn, module_args, target_cm_role_cleared, request): + module_args( + { + **conn, + "type": target_cm_role_cleared.type, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert not e.value.role # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert len(e.value.config) == 0 + assert not e.value.role From 500f16f9df87a34975cce361a9bb86483c12198c Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 10:06:56 -0500 Subject: [PATCH 32/58] Update provision_cm_role to handle absent role on deletion and set_cm_role_config_group to modify, not replace Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 45 +++++++++++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 1a2f8423..44b4ba0a 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -23,6 +23,7 @@ ApiCommand, ApiConfig, ApiConfigList, + ApiHostRef, ApiRole, ApiRoleConfigGroup, ApiRoleList, @@ -40,6 +41,9 @@ ) from cm_client.rest import ApiException +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + resolve_parameter_updates, +) from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( get_host_ref, ) @@ -217,12 +221,20 @@ def provision_cm_role( role = ApiRole( name=role_name, type=role_type, - host_ref=dict(hostId=host_id), + host_ref=ApiHostRef(host_id=host_id), + ) + + provisioned_role = next( + iter(api.create_roles(body=ApiRoleList(items=[role])).items), None ) - yield next(iter(api.create_roles(body=ApiRoleList(items=[role])).items), None) + yield provisioned_role - api.delete_role(role_name=role_name) + try: + api.delete_role(role_name=provisioned_role.name) + except ApiException as ae: + if ae.status != 404: + raise ae def set_cm_role( @@ -391,12 +403,31 @@ def set_cm_role_config_group( """ rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) - pre = rcg_api.read_role_config_group(role_config_group.name) + # Ensure the modification (not a replacement) of the existing role config group + update.name = role_config_group.name - yield rcg_api.update_role_config_group( + # Update the role config group + pre_rcg = rcg_api.update_role_config_group( role_config_group.name, message=f"{message}::set", body=update ) - rcg_api.update_role_config_group( - role_config_group.name, message=f"{message}::reset", body=pre + yield pre_rcg + + # Reread the role config group + post_rcg = rcg_api.read_role_config_group(role_config_group_name=pre_rcg.name) + + # Revert the 
changes + config_revert = resolve_parameter_updates( + {c.name: c.value for c in post_rcg.config.items}, + {c.name: c.value for c in role_config_group.config.items}, + True, ) + + if config_revert: + role_config_group.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + ) + + rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::reset", body=role_config_group + ) From 6e4b9422a13e33265c9bfd5fabffe4c0427bebda Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 10:11:43 -0500 Subject: [PATCH 33/58] Update CMS and Host Monitor fixtures. Update cms to not auto-assign roles. Add cms_cleared to remove and then restore existing CMS. Add cms_auto to install and start CMS on first non-cluster host. Add cms_auto_no_start to install CMS on first non-cluster host, but do not start. Change fixture scope for host_monitor and install role on first non-cluster host. Remove host_monitor_role as it is redundant and update dependent fixtures. Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 255 ++++++++++++++++++++++++++++++----------- 1 file changed, 190 insertions(+), 65 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index cda5e8d9..7eb27f1a 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -36,6 +36,7 @@ ApiClusterList, ApiCluster, ApiCommand, + ApiCommandList, ApiConfig, ApiConfigList, ApiHostRef, @@ -47,6 +48,7 @@ ApiRoleState, ApiService, ApiServiceConfig, + ApiServiceState, ClustersResourceApi, CommandsResourceApi, Configuration, @@ -329,13 +331,171 @@ def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: ) cm_service = cms_api.setup_cms(body=service) - cms_api.auto_assign_roles() + + # Do not set up any roles -- just the CMS service itself + # cms_api.auto_assign_roles() yield cm_service cms_api.delete_cms() +@pytest.fixture(scope="function") +def cms_cleared(cm_api_client) -> Generator[None]: + """Clears any existing Cloudera Manager Service, yields, and upon + return, removes any new service and reinstates the existing service, + if present. 
+ + Args: + cm_api_client (_type_): _description_ + + Raises: + ae: _description_ + + Yields: + Generator[None]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + role_api = MgmtRolesResourceApi(cm_api_client) + + pre_service = None + + try: + pre_service = service_api.read_service() + except ApiException as ae: + if ae.status != 404: + raise ae + + if pre_service is not None: + # Get the current state + pre_service.config = service_api.read_service_config() + + # Get the role config groups' state + if pre_service.role_config_groups is not None: + for rcg in pre_service.role_config_groups: + rcg.config = rcg_api.read_config(rcg.name) + + # Get each of its roles' state + if pre_service.roles is not None: + for r in pre_service.roles: + r.config = role_api.read_role_config(role_name=r.name) + + # Remove the prior CMS + service_api.delete_cms() + + # Yield now that the prior CMS has been removed + yield + + # Remove any created CMS + try: + service_api.delete_cms() + except ApiException as ae: + if ae.status != 404: + raise ae + + # Reinstate the prior CMS + if pre_service is not None: + service_api.setup_cms(body=pre_service) + if pre_service.maintenance_mode: + maintenance_cmd = service_api.enter_maintenance_mode() + monitor_command(api_client=cm_api_client, command=maintenance_cmd) + if pre_service.service_state in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + restart_cmd = service_api.restart_command() + monitor_command(api_client=cm_api_client, command=restart_cmd) + + +@pytest.fixture(scope="function") +def cms_auto(cm_api_client, cms_cleared) -> Generator[ApiService]: + """Create a new Cloudera Manager Service on the first available host and auto-configures + the following roles: + - HOSTMONITOR + - SERVICEMONITOR + - EVENTSERVER + - ALERTPUBLISHER + + It starts this Cloudera Manager Service, yields, and will remove this service if the tests + do not. (This fixture delegates to the 'cms_cleared' fixture.) + + Args: + cm_api_client (_type_): _description_ + cms_cleared (_type_): _description_ + + Yields: + Generator[ApiService]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service roles") + + service_api.setup_cms( + body=ApiService( + type="MGMT", + roles=[ + ApiRole(type="HOSTMONITOR"), + ApiRole(type="SERVICEMONITOR"), + ApiRole(type="EVENTSERVER"), + ApiRole(type="ALERTPUBLISHER"), + ], + ) + ) + service_api.auto_configure() + + monitor_command(cm_api_client, service_api.start_command()) + + yield service_api.read_service() + + +@pytest.fixture(scope="function") +def cms_auto_no_start(cm_api_client, cms_cleared) -> Generator[ApiService]: + """Create a new Cloudera Manager Service on the first available host and auto-configures + the following roles: + - HOSTMONITOR + - SERVICEMONITOR + - EVENTSERVER + - ALERTPUBLISHER + + It does not start this Cloudera Manager Service, yields, and will remove this service if + the tests do not. (This fixture delegates to the 'cms_cleared' fixture.) 
+ + Args: + cm_api_client (_type_): _description_ + cms_cleared (_type_): _description_ + + Yields: + Generator[ApiService]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service roles") + + service_api.setup_cms( + body=ApiService( + type="MGMT", + roles=[ + ApiRole(type="HOSTMONITOR"), + ApiRole(type="SERVICEMONITOR"), + ApiRole(type="EVENTSERVER"), + ApiRole(type="ALERTPUBLISHER"), + ], + ) + ) + service_api.auto_configure() + + yield service_api.read_service() + + @pytest.fixture(scope="function") def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: """Configures service-wide configurations for the Cloudera Manager Service""" @@ -388,7 +548,7 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: ) -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) @@ -399,19 +559,17 @@ def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: if hm is not None: yield hm else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) - if not hosts.items: + if host is None: raise Exception( - "No available hosts to assign the Cloudera Manager Service role." + "No available hosts to assign Cloudera Manager Service role" ) else: name = Path(request.fixturename).stem yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + cm_api_client, name, "HOSTMONITOR", host.host_id ) @@ -430,36 +588,9 @@ def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRo ) -@pytest.fixture(scope="module") -def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." 
-            )
-        else:
-            name = Path(request.fixturename).stem
-            yield from provision_cm_role(
-                cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId
-            )
-
-
 @pytest.fixture(scope="function")
 def host_monitor_role_group_config(
-    cm_api_client, host_monitor_role, request
+    cm_api_client, host_monitor, request
 ) -> Generator[ApiRoleConfigGroup]:
     """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service."""
     marker = request.node.get_closest_marker("role_config_group")
 
@@ -468,19 +599,21 @@ def host_monitor_role_group_config(
         raise Exception("No 'role_config_group' marker found.")
 
     rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client)
+    rcg = rcg_api.read_role_config_group(
+        host_monitor.role_config_group_ref.role_config_group_name
+    )
+    rcg.config = rcg_api.read_config(role_config_group_name=rcg.name)
 
     yield from set_cm_role_config_group(
         api_client=cm_api_client,
-        role_config_group=rcg_api.read_role_config_group(
-            host_monitor_role.role_config_group_ref.role_config_group_name
-        ),
+        role_config_group=rcg,
         update=marker.args[0],
         message=f"{Path(request.node.parent.name).stem}::{request.node.name}",
     )
 
 
 @pytest.fixture(scope="function")
-def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[ApiRole]:
+def host_monitor_state(cm_api_client, host_monitor, request) -> Generator[ApiRole]:
     marker = request.node.get_closest_marker("role")
 
     if marker is None:
@@ -492,14 +625,14 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A
     cmd_api = MgmtRoleCommandsResourceApi(cm_api_client)
 
     # Get the current state
-    pre_role = role_api.read_role(host_monitor_role.name)
-    pre_role.config = role_api.read_role_config(host_monitor_role.name)
+    pre_role = role_api.read_role(host_monitor.name)
+    pre_role.config = role_api.read_role_config(host_monitor.name)
 
     # Set config
     for c in role.config.items:
         try:
             role_api.update_role_config(
-                role_name=host_monitor_role.name,
+                role_name=host_monitor.name,
                 message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set",
                 body=ApiConfigList(items=[c]),
             )
@@ -509,55 +642,47 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A
 
     # Update maintenance
     if role.maintenance_mode:
-        role_api.enter_maintenance_mode(host_monitor_role.name)
+        role_api.enter_maintenance_mode(host_monitor.name)
     else:
-        role_api.exit_maintenance_mode(host_monitor_role.name)
+        role_api.exit_maintenance_mode(host_monitor.name)
 
     # Update state (start the role for a desired STARTED state, stop it for STOPPED)
     if role.role_state is not None:
         if role.role_state in [ApiRoleState.STARTED]:
             handle_commands(
-                cmd_api.stop_command(
-                    body=ApiRoleNameList(items=[host_monitor_role.name])
-                )
+                cmd_api.start_command(body=ApiRoleNameList(items=[host_monitor.name]))
             )
         elif role.role_state in [ApiRoleState.STOPPED]:
             handle_commands(
-                cmd_api.start_command(
-                    body=ApiRoleNameList(items=[host_monitor_role.name])
-                )
+                cmd_api.stop_command(body=ApiRoleNameList(items=[host_monitor.name]))
             )
 
     # Yield the role
-    current_role = role_api.read_role(host_monitor_role.name)
-    current_role.config = role_api.read_role_config(host_monitor_role.name)
+    current_role = role_api.read_role(host_monitor.name)
+    current_role.config = role_api.read_role_config(host_monitor.name)
 
     yield current_role
 
     # Retrieve the test changes
-    post_role = role_api.read_role(role_name=host_monitor_role.name)
-    post_role.config = role_api.read_role_config(role_name=host_monitor_role.name)
+    post_role = role_api.read_role(role_name=host_monitor.name)
+    post_role.config = 
role_api.read_role_config(role_name=host_monitor.name) # Reset state if pre_role.role_state != post_role.role_state: if pre_role.role_state in [ApiRoleState.STARTED]: handle_commands( - cmd_api.start_command( - body=ApiRoleNameList(items=[host_monitor_role.name]) - ) + cmd_api.start_command(body=ApiRoleNameList(items=[host_monitor.name])) ) elif pre_role.role_state in [ApiRoleState.STOPPED]: handle_commands( - cmd_api.stop_command( - body=ApiRoleNameList(items=[host_monitor_role.name]) - ) + cmd_api.stop_command(body=ApiRoleNameList(items=[host_monitor.name])) ) # Reset maintenance if pre_role.maintenance_mode != post_role.maintenance_mode: if pre_role.maintenance_mode: - role_api.enter_maintenance_mode(host_monitor_role.name) + role_api.enter_maintenance_mode(host_monitor.name) else: - role_api.exit_maintenance_mode(host_monitor_role.name) + role_api.exit_maintenance_mode(host_monitor.name) # Reset config pre_role_config_set = set([c.name for c in pre_role.config.items]) @@ -569,7 +694,7 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A reconciled.extend([ApiConfig(c.name, None) for c in config_reset]) role_api.update_role_config( - role_name=host_monitor_role.name, + role_name=host_monitor.name, message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiConfigList(items=reconciled), ) From 9781736470894ea2361aef0b815c5187b3c7da20 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 10:12:20 -0500 Subject: [PATCH 34/58] Add TODO note for config defaults Signed-off-by: Webster Mudge --- plugins/module_utils/cm_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index c55d523c..fad3f7e7 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -104,6 +104,11 @@ def resolve_parameter_updates( diff = recursive_diff(current, normalize_values(incoming)) if diff is not None: + # TODO Lookup default for v=None to avoid issues with CM + # CM sometimes fails to find the default value for a parameter + # However, a view=full will return the default, so if we can + # change this method's signature to include that reference, we + # can short-circuit CM's problematic lookup of the default value. 
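+        # A possible sketch of that lookup (hypothetical; assumes the caller can
+        # pass in the view=full ApiConfigList, where each ApiConfig carries its
+        # 'default' attribute):
+        #
+        #     defaults = {c.name: c.default for c in full_config.items}
+        #     incoming = {
+        #         k: (defaults.get(k) if v is None else v)
+        #         for k, v in incoming.items()
+        #     }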
updates = {
                 k: v
                 for k, v in diff[1].items()

From a1847537503a0ed8a20977e23f4d141a9c95eb57 Mon Sep 17 00:00:00 2001
From: Webster Mudge 
Date: Fri, 10 Jan 2025 10:12:49 -0500
Subject: [PATCH 35/58] Add stub for role config group reconciliation

Signed-off-by: Webster Mudge 
---
 plugins/module_utils/role_config_group_utils.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py
index a7b8ec70..d89e5dc1 100644
--- a/plugins/module_utils/role_config_group_utils.py
+++ b/plugins/module_utils/role_config_group_utils.py
@@ -19,7 +19,6 @@
 from cm_client import (
     ApiClient,
     ApiRoleConfigGroup,
-    ApiRoleConfigGroupRef,
     RoleConfigGroupsResourceApi,
     MgmtRoleConfigGroupsResourceApi,
 )
@@ -105,3 +104,9 @@ def get_role_config_group(
             raise RoleConfigGroupDiscoveryException(name)
     else:
         return rcg
+
+
+def reconcile_role_config_group(
+    api_client: ApiClient, existing: ApiRoleConfigGroup, updates: dict
+):
+    pass

From 13dfede8498e60a9cb044fd5832a93b925f161e1 Mon Sep 17 00:00:00 2001
From: Webster Mudge 
Date: Fri, 10 Jan 2025 10:15:39 -0500
Subject: [PATCH 36/58] Expand parse_service_result to handle service
 subelements (service-wide configs, roles, role config groups)

Add read_service function to gather a cluster service and its subelements
Add read_cm_service to gather the Cloudera Manager service and its subelements
Update the before/after keys for ServiceConfigUpdates

Signed-off-by: Webster Mudge 
---
 plugins/module_utils/service_utils.py | 128 ++++++++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py
index 9e65bff3..53e405d7 100644
--- a/plugins/module_utils/service_utils.py
+++ b/plugins/module_utils/service_utils.py
@@ -20,11 +20,24 @@
     normalize_output,
     resolve_parameter_updates,
 )
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    parse_role_config_group_result,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
+    parse_role_result,
+)
 
 from cm_client import (
+    ApiClient,
     ApiConfig,
     ApiService,
     ApiServiceConfig,
+    MgmtServiceResourceApi,
+    MgmtRoleConfigGroupsResourceApi,
+    MgmtRolesResourceApi,
+    RoleConfigGroupsResourceApi,
+    RolesResourceApi,
+    ServicesResourceApi,
 )
 
 SERVICE_OUTPUT = [
@@ -45,15 +58,111 @@ def parse_service_result(service: ApiService) -> dict:
 
 
 def parse_service_result(service: ApiService) -> dict:
-    # Retrieve only the cluster_name
-    output = dict(cluster_name=service.cluster_ref.cluster_name)
+    # Retrieve only the cluster_name if it exists
+    if service.cluster_ref is not None:
+        output = dict(cluster_name=service.cluster_ref.cluster_name)
+    else:
+        output = dict(cluster_name=None)
+
+    # Parse the service itself
     output.update(normalize_output(service.to_dict(), SERVICE_OUTPUT))
+
+    # Parse the service-wide configurations
+    if service.config is not None:
+        output.update(config={c.name: c.value for c in service.config.items})
+
+    # Parse the role config groups via util function
+    if service.role_config_groups is not None:
+        output.update(
+            role_config_groups=[
+                parse_role_config_group_result(rcg)
+                for rcg in service.role_config_groups
+            ]
+        )
+
+    # Parse the roles via util function
+    if service.roles is not None:
+        output.update(roles=[parse_role_result(r) for r in service.roles])
+
+    return output
 
 
-def parse_cm_service_result(service: ApiService) -> dict:
-    # Ignore cluster_name
-    return normalize_output(service.to_dict(), 
SERVICE_OUTPUT)
+def read_service(
+    api_client: ApiClient, cluster_name: str, service_name: str
+) -> ApiService:
+    """Read a cluster service along with its role config groups and roles.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client.
+        cluster_name (str): Name of the cluster hosting the service.
+        service_name (str): Name of the service to read.
+
+    Returns:
+        ApiService: The service, including its service-wide configuration and
+            the configurations of its role config groups and roles, or None if
+            the service cannot be found.
+    """
+    service_api = ServicesResourceApi(api_client)
+    rcg_api = RoleConfigGroupsResourceApi(api_client)
+    role_api = RolesResourceApi(api_client)
+
+    service = service_api.read_service(
+        cluster_name=cluster_name, service_name=service_name
+    )
+
+    if service is not None:
+        # Gather the service-wide configuration
+        service.config = service_api.read_service_config(
+            cluster_name=cluster_name, service_name=service_name
+        )
+
+        # Gather each role config group configuration
+        for rcg in service.role_config_groups:
+            rcg.config = rcg_api.read_config(
+                cluster_name=cluster_name,
+                service_name=service_name,
+                role_config_group_name=rcg.name,
+            )
+
+        # Gather each role configuration
+        for role in service.roles:
+            role.config = role_api.read_role_config(
+                cluster_name=cluster_name,
+                service_name=service_name,
+                role_name=role.name,
+            )
+
+    return service
+
+
+def read_cm_service(api_client: ApiClient) -> ApiService:
+    """Read the Cloudera Manager service along with its role config groups and roles.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client.
+
+    Returns:
+        ApiService: The Cloudera Manager service, including its service-wide
+            configuration and the configurations of its role config groups and
+            roles, or None if the service cannot be found.
+    """
+    service_api = MgmtServiceResourceApi(api_client)
+    rcg_api = MgmtRoleConfigGroupsResourceApi(api_client)
+    role_api = MgmtRolesResourceApi(api_client)
+
+    service = service_api.read_service()
+
+    if service is not None:
+        # Gather the service-wide configuration
+        service.config = service_api.read_service_config()
+
+        # Gather each role config group configuration
+        service.role_config_groups = [
+            rcg for rcg in rcg_api.read_role_config_groups().items if rcg.config.items
+        ]
+
+        # Gather each role configuration
+        service.roles = role_api.read_roles().items
+        for role in service.roles:
+            role.config = role_api.read_role_config(role_name=role.name)
+
+    return service
 
 
 class ServiceConfigUpdates(object):
@@ -61,9 +170,14 @@ def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> No
         current = {r.name: r.value for r in existing.items}
         changeset = resolve_parameter_updates(current, updates, purge)
 
+        self.before = {
+            k: current[k] if k in current else None for k in changeset.keys()
+        }
+        self.after = changeset
+
         self.diff = dict(
-            before={k: current[k] if k in current else None for k in changeset.keys()},
-            after=changeset,
+            before=self.before,
+            after=self.after,
         )
 
         self.config = ApiServiceConfig(

From 1585a8dc5c6363b2bf8f7d69fa285d3cadbdcd1f Mon Sep 17 00:00:00 2001
From: Webster Mudge 
Date: Fri, 10 Jan 2025 10:16:07 -0500
Subject: [PATCH 37/58] Update to use parse_role_result utility

Signed-off-by: Webster Mudge 
---
 plugins/modules/cm_service_role.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py
index fe8688fe..c1de0c41 100644
--- a/plugins/modules/cm_service_role.py
+++ b/plugins/modules/cm_service_role.py
@@ -415,6 +415,7 @@ def process(self):
             )
 
         # Handle maintenance mode
+        # TODO Move first
         if (
             self.maintenance is not None
             and self.maintenance != current.maintenance_mode
@@ -490,7 +491,7 @@ def process(self):
                 )
             )
 
-        # If there are changes, get a refresh read
+        # If there are changes, get a fresh read
        if self.changed:
            refresh = 
self.role_api.read_role(current.name) refresh.config = self.role_api.read_role_config(current.name) @@ -562,7 +563,7 @@ def deprovision_role(self, role: ApiRole) -> None: self.changed = True if self.module._diff: - self.diff = dict(before=role.to_dict(), after=dict()) + self.diff = dict(before=parse_role_result(role), after=dict()) if not self.module.check_mode: self.role_api.delete_role(role.name) From cdb53f237cca7dce3283580a087ff1df71b876cf Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 10:17:15 -0500 Subject: [PATCH 38/58] Update cm_service for full lifecycle management of the service and its subelements (service-wide config, role config groups, and roles) Signed-off-by: Webster Mudge --- plugins/modules/cm_service.py | 673 +++++++++++------- .../modules/cm_service/test_cm_service.py | 295 +++++++- .../test_cm_service_role_config_groups.py | 262 +++++++ .../cm_service/test_cm_service_roles.py | 454 ++++++++++++ 4 files changed, 1400 insertions(+), 284 deletions(-) create mode 100644 tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py create mode 100644 tests/unit/plugins/modules/cm_service/test_cm_service_roles.py diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 6528169d..41452821 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -169,29 +169,38 @@ returned: optional """ -import json +from collections.abc import Callable from cm_client import ( - HostsResourceApi, + ApiBulkCommandList, + ApiCommand, + ApiConfigList, + ApiRoleList, + ApiRoleConfigGroup, + ApiService, + ApiServiceState, MgmtRolesResourceApi, MgmtRoleConfigGroupsResourceApi, - MgmtRoleCommandsResourceApi, MgmtServiceResourceApi, ) from cm_client.rest import ApiException +from ansible.module_utils.common.text.converters import to_native + from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, + ConfigListUpdates, ) from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( ServiceConfigUpdates, - parse_cm_service_result, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( - parse_role_result, + parse_service_result, + read_cm_service, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( - parse_role_config_group_result, + get_mgmt_base_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, ) @@ -200,20 +209,24 @@ def __init__(self, module): super(ClouderaManagerService, self).__init__(module) # Set the parameters - self.params = self.get_param("parameters") + self.maintenance = self.get_param("maintenance") + self.config = self.get_param("config") + self.role_config_groups = self.get_param("role_config_groups") self.roles = self.get_param("roles") self.state = self.get_param("state") self.purge = self.get_param("purge") - self.view = self.get_param("view") + # self.view = self.get_param("view") # Initialize the return value self.changed = False - self.cm_service = {} + self.output = dict() if self.module._diff: self.diff = dict(before=dict(), after=dict()) + self.before = dict() + self.after = 
dict()
         else:
-            self.diff = {}
+            self.diff = dict()
 
         # Execute the logic
         self.process()
@@ -223,287 +236,451 @@ def process(self):
         service_api = MgmtServiceResourceApi(self.api_client)
         role_api = MgmtRolesResourceApi(self.api_client)
-        role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client)
         rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client)
-        host_api = HostsResourceApi(self.api_client)
-
-        # Manage service-wide configurations
-        if self.params or self.purge:
-            try:
-                existing_params = service_api.read_service_config()
-            except ApiException as ex:
-                if ex.status == 404:
-                    self.module.fail_json(msg=json.loads(ex.body)["message"])
-                else:
-                    raise ex
 
-            service_wide = ServiceConfigUpdates(
-                existing_params, self.params, self.purge
-            )
+        current = None
+
+        # Discover the CM service and retrieve its configured dependents
+        try:
+            # TODO This is only used once... so revert
+            current = read_cm_service(self.api_client)
+            # current = service_api.read_service()
+            # if current is not None:
+            #     # Gather the service-wide configuration
+            #     current.config = service_api.read_service_config()

+            #     # Gather each role config group configuration
+            #     for rcg in current.role_config_groups:
+            #         rcg.config = rcg_api.read_config(role_config_group_name=rcg.name)

+            #     # Gather each role configuration
+            #     for role in current.roles:
+            #         role.config = role_api.read_role_config(role_name=role.name)
+
+        except ApiException as ex:
+            if ex.status != 404:
+                raise ex
+
+        # If deleting, do so and exit
+        if self.state == "absent":
+            if current:
+                self.changed = True
+
+                if self.module._diff:
+                    self.before = parse_service_result(current)
+
+                if not self.module.check_mode:
+                    service_api.delete_cms()
 
-            if service_wide.changed:
+        # Otherwise, manage the configurations of the service, its role config
+        # groups, its roles, and its state
+        elif self.state in ["present", "restarted", "started", "stopped"]:
+            # If it is a new service, create the initial service
+            if not current:
+                self.changed = True
+                new_service = ApiService(type="MGMT")
+                current = service_api.setup_cms(body=new_service)
+                current.config = service_api.read_service_config()
+                current.role_config_groups = []
+                current.roles = []
+
+            # Handle maintenance mode
+            if (
+                self.maintenance is not None
+                and self.maintenance != current.maintenance_mode
+            ):
                 self.changed = True
 
                 if self.module._diff:
-                    self.diff["before"].update(params=service_wide.diff["before"])
-                    self.diff["after"].update(params=service_wide.diff["after"])
+                    self.before.update(maintenance_mode=current.maintenance_mode)
+                    self.after.update(maintenance_mode=self.maintenance)
 
                 if not self.module.check_mode:
-                    service_api.update_service_config(
-                        message=self.message, body=service_wide.config
+                    if self.maintenance:
+                        maintenance_cmd = service_api.enter_maintenance_mode()
+                    else:
+                        maintenance_cmd = service_api.exit_maintenance_mode()
+
+                    if maintenance_cmd.success is False:
+                        self.module.fail_json(
+                            msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}"
                         )
 
-        # Manage roles
-        if self.roles:
-            try:
-                # Get a list of all host and find itself
-                # This is hardcoded, so needs to be broken into host
-                # assignment per-role
-                hosts = host_api.read_hosts()
-                for h in hosts.items():
-                    if self.host == h.hostname:
-                        host_id = h.host_id
-
-                # CHECK MODE
-                if not self.purge:
-                    available_roles_info = role_api.read_roles().to_dict()
-                    existing_roles = []
-                    for item in available_roles_info["items"]:
-                        existing_roles.append(item["type"])
-
-                    if self.state in ["present"]:
-                        
not_existing_roles = [] - for role in self.roles: - if role not in existing_roles: - not_existing_roles.append(role) - if not_existing_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in not_existing_roles - ] - } - role_api.create_roles(body=body) - self.cm_service = parse_cm_service_result( - service_api.read_service() + # Handle service-wide changes + if self.config or self.purge: + if self.config is None: + self.config = dict() + + updates = ServiceConfigUpdates(current.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.before.update(config=updates.before) + self.after.update(config=updates.after) + + if not self.module.check_mode: + service_api.update_service_config( + message=self.message, body=updates.config ) + + # Manage role config groups (base only) + if self.role_config_groups or self.purge: + # Get existing role config groups (ApiRoleConfigGroup) + current_rcgs_map = { + rcg.role_type: rcg for rcg in current.role_config_groups + } + + # Get the incoming role config groups (dict) + if self.role_config_groups is None: + incoming_rcgs_map = dict() + else: + incoming_rcgs_map = { + rcg["type"]: rcg for rcg in self.role_config_groups + } + + # Create sets of each role config group by type + current_set = set(current_rcgs_map.keys()) + incoming_set = set(incoming_rcgs_map.keys()) + + # Update any existing role config groups + for rcg_type in current_set & incoming_set: + existing_rcg = current_rcgs_map[rcg_type] + incoming_rcg = incoming_rcgs_map[rcg_type] + + if incoming_rcg["config"] is None: + incoming_rcg["config"] = dict() + + # TODO Consolidate into util function; see cm_service_role_config_group:279-302 + payload = ApiRoleConfigGroup() + + # Update display name + incoming_display_name = incoming_rcg.get("display_name") + if ( + incoming_display_name is not None + and incoming_display_name != existing_rcg.display_name + ): self.changed = True + payload.display_name = incoming_display_name + + # Reconcile configurations + if existing_rcg.config or self.purge: + updates = ConfigListUpdates( + existing_rcg.config, incoming_rcg["config"], self.purge + ) - elif self.state in ["absent"]: - roles_to_remove = [ - role for role in self.roles if role in existing_roles - ] - roles_to_remove_extended_info = [] - for role in roles_to_remove: - for item in available_roles_info["items"]: - if role == item["type"]: - roles_to_remove_extended_info.append(item["name"]) - if not roles_to_remove_extended_info: - self.cm_service = role_api.read_roles().to_dict() - self.changed = False - else: - for role in roles_to_remove_extended_info: - role_api.delete_role(role_name=role) - self.cm_service = role_api.read_roles().to_dict() + if updates.changed: self.changed = True - elif self.state in ["started"]: - - matching_roles = [] - new_roles = [] - for role in self.roles: - if role in existing_roles: - matching_roles.append(role) - else: - new_roles.append(role) - - new_roles_to_start = [] - if new_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in new_roles - ] - } - newly_added_roles = role_api.create_roles( - body=body - ).to_dict() - - for role in newly_added_roles["items"]: - new_roles_to_start.append(role["name"]) - body = {"items": new_roles_to_start} - - existing_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - existing_roles_state.append( - { - "type": item["type"], - 
"role_state": item["role_state"].lower(), - "name": item["name"], - } - ) + if self.module._diff: + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) - existing_roles_to_start = [] - for role in existing_roles_state: - if role["role_state"] == "stopped": - existing_roles_to_start.append(role["name"]) + payload.config = updates.config - all_roles_to_start = ( - new_roles_to_start + existing_roles_to_start + # Execute changes if needed + if ( + payload.display_name is not None or payload.config is not None + ) and not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload ) - body = {"items": all_roles_to_start} - - if all_roles_to_start: - start_roles_request = role_cmd_api.start_command( - body=body - ).to_dict() - command_id = start_roles_request["items"][0]["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 + + # Add any new role config groups + for rcg_type in incoming_set - current_set: + self.changed = True + + if self.module._diff: + rcg_diff = dict(before=dict(), after=dict()) + + existing_rcg = get_mgmt_base_role_config_group( + self.api_client, rcg_type + ) + incoming_rcg = incoming_rcgs_map[rcg_type] + + payload = ApiRoleConfigGroup() + + incoming_display_name = incoming_rcg.get("display_name") + if incoming_display_name is not None: + if self.module._diff: + rcg_diff["before"].update( + display_name=existing_rcg.display_name ) - self.cm_service = role_api.read_roles().to_dict() - self.changed = True - else: - self.cm_service = role_api.read_roles().to_dict() - self.changed = False - - elif self.state in ["stopped"]: - matching_roles = [] - for role in self.roles: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) + rcg_diff["after"].update(display_name=incoming_display_name) + payload.display_name = incoming_display_name - roles_to_stop = [] - for role in matching_roles_state: - if role["role_state"] == "started": - roles_to_stop.append(role["name"]) - body = {"items": roles_to_stop} + incoming_rcg_config = incoming_rcg.get("config") + if incoming_rcg_config: + updates = ConfigListUpdates( + existing_rcg.config, incoming_rcg_config, self.purge + ) - if roles_to_stop: - role_cmd_api.stop_command(body=body) - self.cm_service = role_api.read_roles().to_dict() - self.changed = True - else: - self.cm_service = role_api.read_roles().to_dict() - self.changed = False - - elif self.state in ["restarted"]: - matching_roles = [] - for role in self.roles: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) + if self.module._diff: + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) - roles_to_restart = [] - for role in matching_roles_state: - roles_to_restart.append(role["name"]) - body = {"items": roles_to_restart} + payload.config = updates.config + else: + payload.config = ApiConfigList() - if roles_to_restart: - 
role_cmd_api.restart_command(body=body) - self.cm_service = role_api.read_roles().to_dict() - self.changed = True + if not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload + ) + # Remove any undeclared role config groups if self.purge: - service_api.delete_cms() - body = {"roles": [{"type": role} for role in self.roles]} - service_api.setup_cms(body=body) - self.cm_service = role_api.read_roles().to_dict() - - if self.state in ["started"]: - start_roles_request = service_api.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 + for rcg_type in current_set - incoming_set: + self.changed = True + + if self.module._diff: + rcg_diff = dict(before=dict(), after=dict()) + + existing_rcg = get_mgmt_base_role_config_group( + self.api_client, rcg_type ) - self.cm_service = role_api.read_roles().to_dict() - self.changed = True - except ApiException as e: - if e.status == 404 or 400: - roles_dict = {"roles": [{"type": role} for role in self.roles]} - service_api.setup_cms(body=roles_dict) - - if self.state in ["started"]: - start_roles_request = service_api.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 + + payload = ApiRoleConfigGroup( + display_name=f"mgmt-{rcg_type}-BASE" + ) + + updates = ConfigListUpdates( + existing_rcg.config, dict(), self.purge + ) + + if self.module._diff: + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + + if not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload + ) + + # Manage roles + if self.roles or self.purge: + # Get existing roles (ApiRole) + current_roles_map = {r.type: r for r in current.roles} + + # Get incoming roles (dict) + if self.roles is None: + incoming_roles_map = dict() + else: + incoming_roles_map = {r["type"]: r for r in self.roles} + + # Create sets of the roles + current_set = set(current_roles_map.keys()) + incoming_set = set(incoming_roles_map.keys()) + + # Update any existing roles + for role_type in current_set & incoming_set: + existing_role = current_roles_map[role_type] + incoming_role = incoming_roles_map[role_type] + + if incoming_role["config"] is None: + incoming_role["config"] = dict() + + # If the host has changed, destroy and rebuild completely + incoming_hostname = incoming_role.get("cluster_hostname") + incoming_host_id = incoming_role.get("cluster_host_id") + if ( + incoming_hostname is not None + and incoming_hostname != existing_role.host_ref.hostname + ) or ( + incoming_host_id is not None + and incoming_host_id != existing_role.host_ref.host_id + ): + self.changed = True + + # Use the new configuration or copy from the existing + new_config = ( + incoming_role["config"] + if incoming_role["config"] + else {c.name: c.value for c in existing_role.config.items} + ) + + new_role = create_role( + api_client=self.api_client, + role_type=existing_role.type, + hostname=incoming_hostname, + host_id=incoming_host_id, + config=new_config, ) - self.cm_service = role_api.read_roles().to_dict() + + if not self.module.check_mode: + role_api.delete_role(existing_role.name) + + rebuilt_role = next( + ( + iter( + role_api.create_roles( + body=ApiRoleList(items=[new_role]) + ).items + ) + ), + {}, + ) + if not rebuilt_role: + 
self.module.fail_json( + msg="Unable to recreate role, " + + existing_role.name, + role=to_native(rebuilt_role.to_dict()), + ) + + # Else address any updates else: - self.cm_service = role_api.read_roles().to_dict() + updates = ConfigListUpdates( + existing_role.config, + incoming_role["config"], + self.purge, + ) + + if updates.changed: + self.changed = True + + if not self.module.check_mode: + role_api.update_role_config( + role_name=existing_role.name, + message=self.message, + body=updates.config, + ) + + # Add any new roles + for role_type in incoming_set - current_set: self.changed = True - # Read and generate payload for Cloudera Manager Service - self.cm_service = parse_cm_service_result(service_api.read_service()) - self.cm_service.update( - config=[ - c.to_dict() - for c in service_api.read_service_config(view=self.view).items - ] - ) - self.cm_service.update( - roles=[parse_role_result(r) for r in role_api.read_roles().items] - ) - self.cm_service.update( - role_config_groups=[ - parse_role_config_group_result(rcg) - for rcg in rcg_api.read_role_config_groups().items - ] - ) + incoming_role = incoming_roles_map[role_type] + + new_role = create_role( + api_client=self.api_client, + role_type=incoming_role.get("type"), + hostname=incoming_role.get("cluster_hostname"), + host_id=incoming_role.get("cluster_host_id"), + config=incoming_role.get("config"), + ) + + if not self.module.check_mode: + created_role = next( + ( + iter( + role_api.create_roles( + body=ApiRoleList(items=[new_role]) + ).items + ) + ), + {}, + ) + if not created_role: + self.module.fail_json( + msg="Unable to create new role", + role=to_native(new_role.to_dict()), + ) + + # Remove any undeclared roles if directed + if self.purge: + for role_type in current_set - incoming_set: + self.changed = True + + existing_role = current_roles_map[role_type] + + if not self.module.check_mode: + role_api.delete_role(existing_role.name) + + # Handle various states + if self.state == "started" and current.service_state not in [ + ApiServiceState.STARTED + ]: + self.exec_service_command( + current, ApiServiceState.STARTED, service_api.start_command + ) + elif self.state == "stopped" and current.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.NA, + ]: + self.exec_service_command( + current, ApiServiceState.STOPPED, service_api.stop_command + ) + elif self.state == "restarted": + self.exec_service_command( + current, ApiServiceState.STARTED, service_api.restart_command + ) + + # If there are changes, get a fresh read + if self.changed: + refresh = read_cm_service(self.api_client) + self.output = parse_service_result(refresh) + # Otherwise, return the existing + else: + self.output = parse_service_result(current) + else: + self.module.fail_json(msg=f"Invalid state: {self.state}") + + def exec_service_command( + self, service: ApiService, value: str, cmd: Callable[[None], ApiCommand] + ): + self.changed = True + if self.module._diff: + self.diff["before"].update(service_state=service.service_state) + self.diff["after"].update(service_state=value) + + if not self.module.check_mode: + self.wait_command(cmd()) + + def handle_commands(self, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + self.module.fail_json(msg=error_msg) + + for c in commands.items: + # Not in parallel, but should only be a single command + self.wait_command(c) def main(): module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - parameters=dict(type="dict", aliases=["params"]), - 
roles=dict(type="list"), - purge=dict(type="bool", default=False), - view=dict( - default="summary", - choices=["summary", "full"], + config=dict(type="dict", aliases=["params", "parameters"]), + role_config_groups=dict( + type="list", + elements="dict", + options=dict( + display_name=dict(), # TODO Remove display_name as an option + type=dict(required=True, aliases=["role_type"]), + config=dict( + required=True, type="dict", aliases=["params", "parameters"] + ), + ), + ), + roles=dict( + type="list", + elements="dict", + options=dict( + cluster_hostname=dict(aliases=["cluster_host"]), + cluster_host_id=dict(), + config=dict(type="dict", aliases=["params", "parameters"]), + type=dict(required=True, aliases=["role_type"]), + ), + mutually_exclusive=[["cluster_hostname", "cluster_host_id"]], ), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + purge=dict(type="bool", default=False), state=dict( type="str", - default="started", + default="present", choices=["started", "stopped", "absent", "present", "restarted"], ), ), - supports_check_mode=False, + supports_check_mode=True, ) result = ClouderaManagerService(module) - changed = result.changed - output = dict( - changed=changed, - service=result.cm_service, + changed=result.changed, + service=result.output, ) if result.debug: diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index 21004145..5abace09 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,6 +21,14 @@ import logging import pytest +from pathlib import Path + +from cm_client import ( + ApiService, + ApiServiceState, + MgmtServiceResourceApi, +) + from ansible_collections.cloudera.cluster.plugins.modules import cm_service from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, @@ -30,20 +38,253 @@ LOG = logging.getLogger(__name__) -def test_minimal(conn, module_args, cms): - module_args(conn) +def test_state_present(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + + +def test_state_absent(conn, module_args, cm_api_client, cms_cleared, request): + module_args( + { + **conn, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.setup_cms(body=ApiService(type="MGMT")) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert not e.value.service + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert not e.value.service + + +def test_state_absent_running_roles(conn, module_args, cms_auto, request): + module_args( + { + **conn, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert not e.value.service + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert not e.value.service + + +def test_state_started(conn, module_args, cm_api_client, cms_auto_no_start, request): + module_args( + { + **conn, + "state": "started", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STARTED + + +def test_state_stopped(conn, module_args, cm_api_client, cms_auto, request): + module_args( + { + **conn, + "state": "stopped", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + +def test_state_restarted(conn, module_args, cm_api_client, cms_auto, request): + module_args( + { + **conn, + "state": "restarted", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency (rather, demonstrate that restart always invokes a 
changed state) + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + +def test_new_maintenance_enabled(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == True + + +def test_new_config(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert expected.items() <= e.value.service["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert expected.items() <= e.value.service["config"].items() + + +def test_existing_maintenance_enabled(conn, module_args, cm_api_client, cms, request): + module_args( + { + **conn, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.exit_maintenance_mode() + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == True + + +def test_existing_maintenance_disabled(conn, module_args, cm_api_client, cms, request): + module_args( + { + **conn, + "maintenance": False, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.enter_maintenance_mode() + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() - with pytest.raises(AnsibleExitJson): + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == False + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: cm_service.main() + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == False + @pytest.mark.service_config(dict(log_event_retry_frequency=10)) -def test_set_parameters(conn, module_args, cms_config): +def test_existing_set_parameters(conn, module_args, cms_config, request): module_args( { **conn, "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), - "message": "test_cm_service::test_set_parameters", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } @@ -57,31 +298,25 @@ def test_set_parameters(conn, module_args, cms_config): cm_service.main() assert e.value.changed == True - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() # Idempotency with 
pytest.raises(AnsibleExitJson) as e: cm_service.main() assert e.value.changed == False - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_config): +def test_existing_unset_parameters(conn, module_args, cms_config, request): module_args( { **conn, "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None), - "message": "test_cm_service::test_unset_parameters", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", } ) @@ -91,32 +326,26 @@ def test_unset_parameters(conn, module_args, cms_config): cm_service.main() assert e.value.changed == True - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: cm_service.main() assert e.value.changed == False - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, cms_config): +def test_existing_set_parameters_with_purge(conn, module_args, cms_config, request): module_args( { **conn, "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), "purge": True, - "message": "test_cm_service::test_set_parameters_with_purge", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } @@ -128,32 +357,26 @@ def test_set_parameters_with_purge(conn, module_args, cms_config): cm_service.main() assert e.value.changed == True - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: cm_service.main() assert e.value.changed == False - assert ( - expected.items() - <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() - ) + assert expected.items() <= e.value.service["config"].items() @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_config): +def test_existing_purge_all_parameters(conn, module_args, cms_config, request): module_args( { **conn, "parameters": dict(), "purge": True, - "message": "test_cm_service::test_purge_all_parameters", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py new file mode 100644 index 00000000..8a62895c --- /dev/null +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_new_role_config_group(conn, module_args, cms_cleared, request): + expected = dict(alert_mailserver_username="FooBar") + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "ALERTPUBLISHER", + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=16), + ApiConfig(name="process_start_secs", value=36), + ] + ), + ) +) +def test_existing_role_config_group_set( + conn, module_args, host_monitor_role_group_config, request +): + expected = dict(mgmt_num_descriptor_fetch_tries="16", process_start_secs="96") + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(process_start_secs="96"), + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=17), + ApiConfig(name="process_start_secs", value=37), + ] + ), + ) +) +def test_existing_role_config_group_unset( + conn, module_args, host_monitor_role_group_config, request +): + expected = dict( + mgmt_num_descriptor_fetch_tries="17", + ) + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(process_start_secs=None), + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } 
+ ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18), + ApiConfig(name="process_start_secs", value=38), + ] + ), + ) +) +def test_existing_role_config_group_purge( + conn, module_args, host_monitor_role_group_config, request +): + expected = dict( + mgmt_num_descriptor_fetch_tries="28", + ) + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(mgmt_num_descriptor_fetch_tries=28), + } + ], + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18), + ApiConfig(name="process_start_secs", value=38), + ] + ), + ) +) +def test_existing_role_config_group_purge_all( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 0 diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py new file mode 100644 index 00000000..2de5ee43 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py @@ -0,0 +1,454 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + HostsResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_new_role(conn, module_args, cm_api_client, cms_cleared, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + +def test_new_role_config(conn, module_args, cm_api_client, cms_cleared, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + expected = dict(mgmt_num_descriptor_fetch_tries="15") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + +def test_existing_role_new(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + +def test_existing_role_new_config_set(conn, module_args, cm_api_client, cms, 
request):
+    host_api = HostsResourceApi(cm_api_client)
+    host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None)
+    if host is None:
+        raise Exception("No available hosts to assign Cloudera Manager Service role")
+
+    expected = dict(mgmt_num_descriptor_fetch_tries="15")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    "cluster_host_id": host.host_id,
+                    "config": expected,
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35)
+)
+def test_existing_role_existing_config_set(
+    conn, module_args, cm_api_client, host_monitor_config, request
+):
+    # Update an existing parameter to a new value; the unset case is covered below
+    expected = dict(mgmt_num_descriptor_fetch_tries="26", process_start_secs="35")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    # "cluster_host_id": host.host_id,
+                    "config": {
+                        "mgmt_num_descriptor_fetch_tries": 26,
+                    },
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35)
+)
+def test_existing_role_existing_config_unset(
+    conn, module_args, cm_api_client, host_monitor_config, request
+):
+    expected = dict(process_start_secs="35")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    # "cluster_host_id": host.host_id,
+                    "config": {
+                        "mgmt_num_descriptor_fetch_tries": None,
+                    },
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=16, process_start_secs=36)
+)
+def test_existing_role_existing_config_purge(
+    conn, module_args, cm_api_client, host_monitor_config, request
+):
+    expected = dict(process_start_secs="36")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    # "cluster_host_id": host.host_id,
+                    "config": {
+                        "process_start_secs": 36,
+                    },
+                }
+            ],
+            "purge": True,
+            "message": 
f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=17, process_start_secs=37) +) +def test_existing_role_existing_config_purge_all( + conn, module_args, cm_api_client, host_monitor_config, request +): + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + # "cluster_host_id": host.host_id, + } + ], + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + +def test_existing_role_config_invalid(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + expected = dict(mgmt_emit_sensitive_data_in_stderr=True) + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleFailJson, match="Unknown configuration attribute"): + cm_service.main() + + +def test_existing_role_relocate( + conn, module_args, cm_api_client, host_monitor, request +): + host_api = HostsResourceApi(cm_api_client) + host = next( + ( + h + for h in host_api.read_hosts().items + if not h.cluster_ref and h.host_id != host_monitor.host_ref.host_id + ), + None, + ) + if host is None: + raise Exception("No available hosts to relocate Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert e.value.service["roles"][0]["host_id"] == host.host_id + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert e.value.service["roles"][0]["host_id"] == host.host_id + + +def test_existing_role_purge(conn, module_args, host_monitor, request): + module_args( + { + **conn, + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # 
_ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert not e.value.service["roles"]
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert not e.value.service["roles"]

From c882974ceb45013c4daec1c15d35a9f79f1758a2 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Fri, 10 Jan 2025 14:24:38 -0500
Subject: [PATCH 39/58] Update documentation for cm_service (and add a
 reference document)

Signed-off-by: Webster Mudge
---
 cm_service_return_values.yml                  | 303 +++++++++
 plugins/modules/cm_service.py                 | 604 ++++++++++++++----
 .../test_cm_service_role_config_groups.py     |   1 -
 3 files changed, 793 insertions(+), 115 deletions(-)
 create mode 100644 cm_service_return_values.yml

diff --git a/cm_service_return_values.yml b/cm_service_return_values.yml
new file mode 100644
index 00000000..3c6469fe
--- /dev/null
+++ b/cm_service_return_values.yml
@@ -0,0 +1,303 @@
+# Copyright 2025 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service:
+  description: The Cloudera Manager service.
+  type: dict
+  contains:
+    client_config_staleness_status:
+      description: Status of client configuration for the Cloudera Manager service.
+      type: str
+      returned: optional
+    cluster_name:
+      description: The associated cluster name.
+      type: str
+      returned: optional
+    config:
+      description: Service-wide configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    display_name:
+      description: Display name of the Cloudera Manager service.
+      type: str
+      returned: always
+    health_checks:
+      description: Lists all available health checks for the Cloudera Manager service.
+      type: list
+      elements: dict
+      returned: optional
+      contains:
+        explanation:
+          description: A descriptor for the health check.
+          type: str
+          returned: optional
+        name:
+          description: Unique name for the health check.
+          type: str
+          returned: always
+        summary:
+          description: The summary status of the health check.
+          type: str
+          returned: always
+          sample:
+            - DISABLED
+            - HISTORY_NOT_AVAILABLE
+            - NOT_AVAILABLE
+            - GOOD
+            - CONCERNING
+            - BAD
+        suppressed:
+          description:
+            - Whether the health check is suppressed.
+            - A suppressed health check is not considered when computing the overall health.
+          type: bool
+          returned: always
+    health_summary:
+      description: Health of the Cloudera Manager service.
+      type: str
+      returned: always
+      sample:
+        - DISABLED
+        - HISTORY_NOT_AVAILABLE
+        - NOT_AVAILABLE
+        - GOOD
+        - CONCERNING
+        - BAD
+    maintenance_mode:
+      description: Whether maintenance mode is enabled for the Cloudera Manager service.
+ type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN + health_checks: + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. 
+ type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always + tags: + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. + type: str + returned: always + sample: + - MGMT diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 41452821..e6d9b126 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -17,156 +17,532 @@ DOCUMENTATION = r""" module: cm_service -short_description: Manage Cloudera Manager service roles +short_description: Manage Cloudera Manager service description: - - Create or remove one or more Cloudera Manager service roles. - - Start, stop or restart one or more Cloudera Manager service roles. + - Manage the Cloudera Manager service (CMS), its role config groups and roles, and its operations. author: - - "Ronald Suplina (@rsuplina)" + - Ronald Suplina (@rsuplina) + - Webster Mudge (@wmudge) options: - role: + config: description: - - A list of one or more service roles to be configured. + - The service-wide configuration to set. + - To unset a parameter, use V(None) as the value. + type: dict + aliases: + - params + - parameters + role_config_groups: + description: + - A list of one or more role config groups to manage. + - Each role config group is the I(base) for the O(type). + type: list + elements: dict + suboptions: + type: + description: + - The role type defining the role config group. + required: yes + aliases: + - role_type + config: + description: + - The configuration for the role config group. + - To unset a configuration, use V(None) as the value. + - This configuration is applied to role instances. + - To override these configuration values, use role overrides. + type: dict + required: yes + aliases: + - params + - parameters + roles: + description: + - A list of one or more role instances to manage. 
+      - Each role instance is the application and configuration of a role type to a host.
     type: list
-    elements: str
-    required: True
+    elements: dict
+    suboptions:
+      cluster_hostname:
+        description:
+          - The hostname of an instance for the role.
+          - If the hostname is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host.
+          - Mutually exclusive with O(cluster_host_id).
+        type: str
+        aliases:
+          - cluster_host
+      cluster_host_id:
+        description:
+          - The host ID of the instance for the role.
+          - If the host ID is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host.
+          - Mutually exclusive with O(cluster_hostname).
+        type: str
+      config:
+        description:
+          - The configuration for the role overrides.
+          - To unset a configuration, use V(None) as the value.
+          - This configuration is applied to the role, overriding any role config group or default values.
+        type: dict
+        aliases:
+          - params
+          - parameters
+      type:
+        description:
+          - The role type of the role to manage on the instance.
+        type: str
+        required: yes
+        aliases:
+          - role_type
+  maintenance:
+    description:
+      - Flag for whether the service should be in maintenance mode.
+    type: bool
+    aliases:
+      - maintenance_mode
   purge:
     description:
-      - Delete all current roles and setup only the roles provided
+      - Flag for whether the declared service-wide configurations, role config groups, and roles should update existing configuration or reset to match the declared state only.
+      - To clear configurations - service-wide, role config groups, and roles - set O(config={}), i.e. an empty dictionary, or omit entirely, and set O(purge=True).
+      - To clear role config groups and roles, set O(role_config_groups=[]) or O(roles=[]), i.e. an empty list, or omit entirely, and set O(purge=True).
    type: bool
-    required: False
-    default: False
+    required: no
+    default: no
   state:
     description:
-      - The desired state of roles
+      - The operating state of the service.
+      - The V(restarted) value will always restart the service and set RV(changed=True).
     type: str
-    default: 'started'
+    default: started
     choices:
-      - 'started'
-      - 'stopped'
-      - 'absent'
-      - 'present'
-      - 'restarted'
-    required: False
-
+      - started
+      - stopped
+      - absent
+      - present
+      - restarted
+    required: no
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
 requirements:
-  - cm_client
+  - cm-client
+seealso:
+  - module: cloudera.cluster.cm_service_role
+  - module: cloudera.cluster.cm_service_role_config_group
 """

 EXAMPLES = r"""
-- name: Start Cloudera Manager service roles
-  cloudera.cluster.cm_version:
-    host: "10.10.10.10"
+- name: Define and start the Cloudera Manager service and its roles
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    state: started
+    roles:
+      - type: SERVICEMONITOR
+        cluster_hostname: "services01.example.com"
+      - type: HOSTMONITOR
+        cluster_hostname: "services02.example.com"
+      - type: EVENTSERVER
+        cluster_hostname: "services02.example.com"
+      - type: ALERTPUBLISHER
+        cluster_hostname: "services01.example.com"
+
+- name: Set the service-wide configuration for Cloudera Manager service
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    config:
+      mgmt_pause_duration_window: 10
+      ldap_monitoring_enabled: no
+
+- name: Unset a service-wide configuration for Cloudera Manager service
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    config:
+      ldap_monitoring_enabled: None
+
+- name: Set the role config group for the Host Monitor role
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
     username: "jane_smith"
     password: "S&peR4Ec*re"
-    port: "7180"
-    purge: False
-    state: "started"
-    role: [ "SERVICEMONITOR" , "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER" ]
-    register: cm_output
-
-- name: Purge all roles then create and start new roles
-  cloudera.cluster.cm_version:
-    host: "10.10.10.10"
+    role_config_groups:
+      - type: HOSTMONITOR
+        config:
+          mgmt_num_descriptor_fetch_tries: 25
+          process_start_secs: 30
+
+- name: Unset a configuration in the role config group for the Host Monitor role
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
     username: "jane_smith"
     password: "S&peR4Ec*re"
-    port: "7180"
-    purge: True
-    state: "started"
-    role: [ "SERVICEMONITOR" , "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER" ]
-    register: cm_output
-
-- name: Stop two Cloudera Manager service roles
-  cloudera.cluster.cm_version:
-    host: "10.10.10.10"
+    role_config_groups:
+      - type: HOSTMONITOR
+        config:
+          process_start_secs: None
+
+- name: Set the role overrides for the Host Monitor role instance
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    roles:
+      - type: HOSTMONITOR
+        cluster_hostname: "services02.example.com"
+        config:
+          mgmt_num_descriptor_fetch_tries: 30
+          process_start_secs: 45
+
+- name: Unset a role override for the Host Monitor role instance
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    roles:
+      - type: HOSTMONITOR
+        cluster_hostname: "services02.example.com"
+        config:
+          process_start_secs: None
+
+- name: Update the service state to only the declared configuration
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    state: started
+    purge: yes
+    config:
+      mgmt_pause_duration_window: 10
+    role_config_groups:
+      - type: HOSTMONITOR
+        config:
+          mgmt_num_descriptor_fetch_tries: 25
+          process_start_secs: 30
+    roles:
+      - type: SERVICEMONITOR
+        cluster_hostname: "services01.example.com"
+      - type: HOSTMONITOR
+        cluster_hostname: "services02.example.com"
+        config:
+          mgmt_num_descriptor_fetch_tries: 30
+      - type: EVENTSERVER
+        cluster_hostname: "services02.example.com"
+      - type: ALERTPUBLISHER
+        cluster_hostname: "services01.example.com"
+
+- name: Stop the Cloudera Manager service
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
     username: "jane_smith"
     password: "S&peR4Ec*re"
-    port: "7180"
     state: "stopped"
-    role: [ "EVENTSERVER", "ALERTPUBLISHER" ]
-    register: cm_output

-- name: Remove Cloudera Manager service role
-  cloudera.cluster.cm_version:
-    host: "10.10.10.10"
+- name: Remove the Cloudera Manager service and its roles and role config groups
+  cloudera.cluster.cm_service:
+    host: "cm.example.com"
     username: "jane_smith"
     password: "S&peR4Ec*re"
-    port: "7180"
-    purge: False
     state: "absent"
-    role: [ "ALERTPUBLISHER" ]
-    register: cm_output
 """

 RETURN = r"""
 service:
-    description: List of Cloudera Manager roles
-    type: dict
-    contains:
+  description: The Cloudera Manager service.
+  type: dict
+  contains:
+    client_config_staleness_status:
+      description: Status of client configuration for the Cloudera Manager service.
+      type: str
+      returned: optional
+    cluster_name:
+      description: The associated cluster name.
+      type: str
+      returned: optional
+    config:
+      description: Service-wide configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    display_name:
+      description: Display name of the Cloudera Manager service.
+      type: str
+      returned: always
+    health_checks:
+      description: Lists all available health checks for the Cloudera Manager service.
+      type: list
+      elements: dict
+      returned: optional
+      contains:
+        explanation:
+          description: A descriptor for the health check.
+          type: str
+          returned: optional
         name:
-            description: The Cloudera Manager role name.
-            type: str
-            returned: optional
-        type:
-            description: The Cloudera Manager role type.
-            type: str
-            returned: optional
-        serviceRef:
-            description: Reference to a service.
-            type: str
-            returned: optional
-        service_url:
-            description: Role url for Cloudera Manager Role.
-            type: str
-            returned: optional
-        hostRef:
-            description: Reference to a host.
-            type: str
-            returned: optional
-        role_state:
-            description: State of the Cloudera Manager Role.
-            type: str
-            returned: optional
-        commissionState:
-            description: Commission state of the role.
-            type: str
-            returned: optional
-        health_summary:
-            description: Health of the Cloudera Manager Role.
-            type: str
-            returned: optional
-        roleConfigGroupRef:
-            description: Reference to role config groups.
-            type: str
-            returned: optional
-        configStalenessStatus:
-            description: Status of configuration staleness for Cloudera Manager Role.
-            type: str
-            returned: optional
+          description: Unique name for the health check.
+          type: str
+          returned: always
+        summary:
+          description: The summary status of the health check.
+          type: str
+          returned: always
+          sample:
+            - DISABLED
+            - HISTORY_NOT_AVAILABLE
+            - NOT_AVAILABLE
+            - GOOD
+            - CONCERNING
+            - BAD
+        suppressed:
+          description:
+            - Whether the health check is suppressed.
+ - A suppressed health check is not considered when computing the overall health. + type: bool + returned: always + health_summary: + description: Health of the Cloudera Manager service. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + maintenance_mode: + description: Whether maintance mode is enabled for the Cloudera Manager service. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN health_checks: - description: Lists all available health checks for Cloudera Manager Service. - type: dict - returned: optional - role_instances_url: - description: Role instance url for Cloudera Manager Service. - type: str - returned: optional + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. 
+ type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always maintenance_mode: - description: Maintance mode of Cloudera Manager Role. - type: bool - returned: optional + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always maintenance_owners: - description: List of Maintance owners for Cloudera Manager Service. - type: list - returned: optional - entity_status: - description: Health status of entities for Cloudera Manager Role. - type: str - returned: optional + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always tags: - description: List of tags for Cloudera Manager Role. - type: list - returned: optional + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. 
+ type: str + returned: always + sample: + - MGMT """ from collections.abc import Callable diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py index 8a62895c..3f99ef5f 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py @@ -32,7 +32,6 @@ from ansible_collections.cloudera.cluster.plugins.modules import cm_service from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, - AnsibleFailJson, ) LOG = logging.getLogger(__name__) From b57bb9fd197ead761a34d1f563bc3c22eadb5e41 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 14:35:38 -0500 Subject: [PATCH 40/58] Update cm_service_info to use common utilities for collection and parsing of Cloudera Manager service Signed-off-by: Webster Mudge --- plugins/modules/cm_service.py | 14 - plugins/modules/cm_service_info.py | 434 +++++++++++++++++++++-------- 2 files changed, 323 insertions(+), 125 deletions(-) diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index e6d9b126..0b3fba8a 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -618,21 +618,7 @@ def process(self): # Discover the CM service and retrieve its configured dependents try: - # TODO This is only used once... so revert current = read_cm_service(self.api_client) - # current = service_api.read_service() - # if current is not None: - # # Gather the service-wide configuration - # current.config = service_api.read_service_config() - - # # Gather each role config group configuration - # for rcg in current.role_config_groups: - # rcg.config = rcg_api.read_config(role_config_group_name=rcg.name) - - # # Gather each role configuration - # for role in current.roles: - # role.config = role_api.read_role_config(role_name=role.name) - except ApiException as ex: if ex.status != 404: raise ex diff --git a/plugins/modules/cm_service_info.py b/plugins/modules/cm_service_info.py index 4c72d84f..8142360f 100644 --- a/plugins/modules/cm_service_info.py +++ b/plugins/modules/cm_service_info.py @@ -1,4 +1,7 @@ -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,34 +15,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client import MgmtServiceResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service_info -short_description: Retrieve information about the Cloudera Management Services +short_description: Retrieve information about the Cloudera Management service description: - Gather information about the Cloudera Manager service. 
author: - - "Ronald Suplina (@rsuplina)" + - Ronald Suplina (@rsuplina) + - Webster Mudge (@wmudge) +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full requirements: - - cm_client + - cm-client +seealso: + - module: cloudera.cluster.cm_service + - module: cloudera.cluster.cm_service_role + - module: cloudera.cluster.cm_service_role_config_group """ EXAMPLES = r""" ---- -- name: Gather details using an host - cloudera.cluster.cm_version: +- name: Gather details of the Cloudera Manager service + cloudera.cluster.cm_service_info: host: "example.cloudera.host" username: "will_jordan" password: "S&peR4Ec*re" @@ -47,116 +47,328 @@ """ RETURN = r""" ---- -cloudera_manager: - description: Details about Cloudera Manager Service - type: dict - contains: +service: + description: The Cloudera Manager service. + type: dict + contains: + client_config_staleness_status: + description: Status of client configuration for the Cloudera Manager service. + type: str + returned: optional + cluster_name: + description: The associated cluster name. + type: str + returned: optional + config: + description: Service-wide configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service. + type: str + returned: optional + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + display_name: + description: Display name of the Cloudera Manager service. + type: str + returned: always + health_checks: + description: Lists all available health checks for the Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + explanation: + description: A descriptor for the health check. + type: str + returned: optional name: - description: The Cloudera Manager service name. - type: str - returned: optional - type: - description: The Cloudera Manager service type. - type: str - returned: optional - cluster_ref: - description: Reference to a cluster. - type: str - returned: optional - service_state: - description: State of the Cloudera Manager Service. - type: str - returned: optional - health_summary: - description: Health of the Cloudera Manager Service. - type: str - returned: optional - config_stale: - description: Configuration state of Cloudera Manager Service. - type: str - returned: optional + description: Unique name fore the health check. + type: str + returned: always + summary: + description: The summary status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether the health check is suppressed. + - A suppressed health check is not considered when computing the overall health. + type: bool + returned: always + health_summary: + description: Health of the Cloudera Manager service. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + maintenance_mode: + description: Whether maintance mode is enabled for the Cloudera Manager service. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service to be in maintenance mode. 
+ type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional config_staleness_status: - description: Status of configuration staleness for Cloudera Manager Service. - type: str - returned: optional - client_config_staleness_status: - description: Status of Client configuration for Cloudera Manager Service. - type: str - returned: optional + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN health_checks: - description: Lists all available health checks for Cloudera Manager Service. - type: dict - returned: optional - service_url: - description: Service url for Cloudera Manager Service. - type: str - returned: optional - role_instances_url: - description: Role instance url for Cloudera Manager Service. - type: str - returned: optional + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. 
+ type: str + returned: always maintenance_mode: - description: Maintance mode of Cloudera Manager Service. - type: bool - returned: optional + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always maintenance_owners: - description: List of Maintance owners for Cloudera Manager Service. - type: list - returned: optional - config: - description: Configuration details for Cloudera Manager Service. - type: dict - returned: optional - roles: - description: Role list of Cloudera Manager Service. - type: dict - returned: optional - display_name: - description: Display name of Cloudera Manager Service. - type: dict - returned: optional - role_config_groups: - description: List of role configuration groups for Cloudera Manager Service. - type: list - returned: optional - replication_schedules: - description: List of replication schedules for Cloudera Manager Service. - type: list - returned: optional - snapshot_policies: - description: Snapshot policy for Cloudera Manager Service. - type: str - returned: optional - entity_status: - description: Health status of entities for Cloudera Manager Service. - type: str - returned: optional + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always tags: - description: List of tags for Cloudera Manager Service. - type: list - returned: optional - service_version: - description: Version of Cloudera Manager Service. - type: str - returned: optional + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. 
+      type: str
+      returned: always
+      sample:
+        - MGMT
 """

+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerModule,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import (
+    parse_service_result,
+    read_cm_service,
+)
+
+
 class ClouderaServiceInfo(ClouderaManagerModule):
     def __init__(self, module):
         super(ClouderaServiceInfo, self).__init__(module)

         # Initialize the return values
-        self.cm_service_info = dict()
+        self.output = dict()

         # Execute the logic
         self.process()

     @ClouderaManagerModule.handle_process
     def process(self):
-        api_instance = MgmtServiceResourceApi(self.api_client)
-        self.cm_service_info = api_instance.read_service().to_dict()
+        try:
+            current = read_cm_service(self.api_client)
+        except ApiException as ex:
+            if ex.status != 404:
+                raise ex
+            # The Cloudera Manager service is not set up; return the empty defaults
+            return
+
+        self.output = parse_service_result(current)


 def main():
@@ -166,7 +378,7 @@ def main():

     output = dict(
         changed=False,
-        cloudera_manager=result.cm_service_info,
+        service=result.output,
     )

     if result.debug:

From a62cb3adc98264b823f11fc1b4390316a584f183 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Fri, 10 Jan 2025 16:44:21 -0500
Subject: [PATCH 41/58] Rename and move return value reference YAML

Signed-off-by: Webster Mudge
---
 .../return-values-cm_service.yml              |  0
 ...rn-values-cm_service_role_config_group.yml | 48 +++++++++++++++++++
 2 files changed, 48 insertions(+)
 rename cm_service_return_values.yml => tests/return-values-cm_service.yml (100%)
 create mode 100644 tests/return-values-cm_service_role_config_group.yml

diff --git a/cm_service_return_values.yml b/tests/return-values-cm_service.yml
similarity index 100%
rename from cm_service_return_values.yml
rename to tests/return-values-cm_service.yml
diff --git a/tests/return-values-cm_service_role_config_group.yml b/tests/return-values-cm_service_role_config_group.yml
new file mode 100644
index 00000000..9b09b813
--- /dev/null
+++ b/tests/return-values-cm_service_role_config_group.yml
@@ -0,0 +1,48 @@
+# Copyright 2025 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+role_config_group:
+  description: A Cloudera Manager service role config group.
+  type: dict
+  returned: always
+  contains:
+    base:
+      description: Whether the role config group is a base group.
+      type: bool
+      returned: always
+    config:
+      description: Set of configurations for the role config group.
+      type: dict
+      returned: optional
+    display_name:
+      description: Display name of the role config group.
+      type: str
+      returned: always
+    name:
+      description: Name (identifier) of the role config group.
+      type: str
+      returned: always
+    role_names:
+      description: List of role names (identifiers) associated with this role config group.
+      type: list
+      elements: str
+      returned: optional
+    role_type:
+      description: The type of the roles in this role config group.
+      type: str
+      returned: always
+    service_name:
+      description: Service name associated with this role config group.
+ type: str + returned: always From 256abbe8babb06c5b9919f57e8ab0a1609dcf6ca Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 16:44:44 -0500 Subject: [PATCH 42/58] Add new modules to module action group in collection Signed-off-by: Webster Mudge --- meta/runtime.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/meta/runtime.yml b/meta/runtime.yml index 51339f2d..a816aba1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,6 +1,6 @@ ---- +# -*- coding: utf-8 -*- -# Copyright 2023 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,10 @@ action_groups: - cm_resource - cm_service_info - cm_service + - cm_service_role_config_group_info + - cm_service_role_config_group + - cm_service_role_info + - cm_service_role - cm_trial_license - cm_version_info - cm_endpoint_info From 853d3f30c43c136c0694828761786d85dc25ff4f Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 16:45:03 -0500 Subject: [PATCH 43/58] Remove unused function Signed-off-by: Webster Mudge --- plugins/module_utils/role_config_group_utils.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index d89e5dc1..ffea32a4 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -1,4 +1,4 @@ -# Copyright 2024 Cloudera, Inc. +# Copyright 2025 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -104,9 +104,3 @@ def get_role_config_group( raise RoleConfigGroupDiscoveryException(name) else: return rcg - - -def reconcile_role_config_group( - api_client: ApiClient, existing: ApiRoleConfigGroup, updates: dict -): - pass From 68ce8613ba1e5b15ff9c93e33a7d8671e816b529 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 16:45:29 -0500 Subject: [PATCH 44/58] Add diff_mode and platform documentation attributes Signed-off-by: Webster Mudge --- plugins/modules/cm_service.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 0b3fba8a..05251640 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -127,6 +127,10 @@ attributes: check_mode: support: full + diff_mode: + support: full + platform: + platforms: all requirements: - cm-client seealso: From d47f5fc68f1e7bbe1210172ab53e9f4d66bc58b7 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 16:46:02 -0500 Subject: [PATCH 45/58] Update cm_service_role_config_group and tests Signed-off-by: Webster Mudge --- .../modules/cm_service_role_config_group.py | 230 +++++------------- .../test_cm_service_role_config_group.py | 31 --- 2 files changed, 67 insertions(+), 194 deletions(-) diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py index 0a264870..d6622cd1 100644 --- a/plugins/modules/cm_service_role_config_group.py +++ b/plugins/modules/cm_service_role_config_group.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. 
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #   http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
 DOCUMENTATION = r"""
 module: cm_service_role_config_group
 short_description: Manage a Cloudera Manager Service role config group.
 description:
   - Manage a Cloudera Manager Service role config group.
 author:
-  - "Webster Mudge (@wmudge)"
-requirements:
-  - cm-client
+  - Webster Mudge (@wmudge)
 options:
   type:
     description:
       - The role type defining the role config group.
     type: str
-    required: True
+    required: yes
     aliases:
       - role_type
-  display_name:
-    description:
-      - The display name for this role config group in the Cloudera Manager UI.
   config:
     description:
       - The role configuration to set.
-      - To unset a parameter, use C(None) as the value.
+      - To unset a parameter, use V(None) as the value.
     type: dict
     aliases:
      - params
       - parameters
   purge:
     description:
-      - Flag indicating whether to reset configuration parameters to only the declared entries.
+      - Whether to reset configuration parameters to only the declared entries.
     type: bool
-    default: False
+    default: no
 extends_documentation_fragment:
   - cloudera.cluster.cm_options
   - cloudera.cluster.cm_endpoint
   - cloudera.cluster.message
 attributes:
   check_mode:
     support: full
   diff_mode:
     support: full
   platform:
     platforms: all
+requirements:
+  - cm-client
+seealso:
+  - module: cloudera.cluster.cm_service
 """

 EXAMPLES = r"""
-- name: Update the configuration of a Cloudera Manager Service role config group
+- name: Update the configuration of a Cloudera Manager service role config group
   cloudera.cluster.cm_service_role_config_group:
     host: example.cloudera.com
     username: "jane_smith"
     password: "S&peR4Ec*re"
     type: HOSTMONITOR
-    parameters:
+    config:
       some_parameter: True

-- name: Update the configuration of a Cloudera Manager Service role config group, purging undeclared parameters
+- name: Update the configuration of a Cloudera Manager service role config group, purging undeclared parameters
   cloudera.cluster.cm_service_role_config_group:
     host: example.cloudera.com
     username: "jane_smith"
     password: "S&peR4Ec*re"
     type: HOSTMONITOR
-    parameters:
+    config:
       another_parameter: 3456
     purge: yes

-- name: Reset the configuration of a Cloudera Manager Service role config group
+- name: Reset the configuration of a Cloudera Manager service role config group
   cloudera.cluster.cm_service_role_config_group:
     host: example.cloudera.com
     username: "jane_smith"
     password: "S&peR4Ec*re"
     type: HOSTMONITOR
-    parameters: {}
     purge: yes
-
-- name: Set the display name of a Cloudera Manager Service role config group
-  cloudera.cluster.cm_service_role_config_group:
-    host: example.cloudera.com
-    username: "jane_smith"
-    password: "S&peR4Ec*re"
-    type: HOSTMONITOR
-    display_name: A new name
 """

 RETURN = r"""
 role_config_group:
-  description:
-    - A Cloudera Manager Service role config group.
+  description: A Cloudera Manager service role config group.
   type: dict
   returned: always
   contains:
-    name:
-      description:
-        - The unique name of this role config group.
-      type: str
-      returned: always
-    role_type:
-      description:
-        - The type of the roles in this group.
-      type: str
-      returned: always
     base:
-      description:
-        - Flag indicating whether this is a base group.
+      description: Whether the role config group is a base group.
       type: bool
       returned: always
+    config:
+      description: Set of configurations for the role config group.
+ type: dict + returned: optional display_name: - description: - - A user-friendly name of the role config group, as would have been shown in the web UI. + description: Display name of the role config group. type: str - returned: when supported - service_name: - description: - - The service name associated with this role config group. + returned: always + name: + description: Name (identifier) of the role config group. type: str returned: always role_names: - description: - - List of role names associated with this role config group. + description: List of role names (identifiers) associated with this role config group. type: list elements: str - returned: when supported - config: - description: - - List of configurations. - type: list - elements: dict + returned: optional + role_type: + description: The type of the roles in this role config group. + type: str + returned: always + service_name: + description: Service name associated with this role config group. + type: str returned: always - contains: - name: - description: - - The canonical name that identifies this configuration parameter. - type: str - returned: when supported - value: - description: - - The user-defined value. - - When absent, the default value (if any) will be used. - - Can also be absent, when enumerating allowed configs. - type: str - returned: when supported - required: - description: - - Whether this configuration is required for the object. - - If any required configuration is not set, operations on the object may not work. - - Requires I(full) view. - type: bool - returned: when supported - default: - description: - - The default value. - - Requires I(full) view. - type: str - returned: when supported - display_name: - description: - - A user-friendly name of the parameters, as would have been shown in the web UI. - - Requires I(full) view. - type: str - returned: when supported - description: - description: - - A textual description of the parameter. - - Requires I(full) view. - type: str - returned: when supported - related_name: - description: - - If applicable, contains the related configuration variable used by the source project. - - Requires I(full) view. - type: str - returned: when supported - sensitive: - description: - - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. - type: bool - returned: when supported - validate_state: - description: - - State of the configuration parameter after validation. - - Requires I(full) view. - type: str - returned: when supported - validation_message: - description: - - A message explaining the parameter's validation state. - - Requires I(full) view. - type: str - returned: when supported - validation_warnings_suppressed: - description: - - Whether validation warnings associated with this parameter are suppressed. - - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. - - Configurations that do not produce warnings will not contain this field. - - Requires I(full) view. 
- type: bool - returned: when supported """ +from cm_client import ( + ApiRoleConfigGroup, + MgmtRoleConfigGroupsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, ConfigListUpdates, ) - from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( BaseRoleConfigGroupDiscoveryException, parse_role_config_group_result, get_mgmt_base_role_config_group, ) -from cm_client import ( - ApiRoleConfigGroup, - MgmtRoleConfigGroupsResourceApi, - MgmtServiceResourceApi, -) -from cm_client.rest import ApiException - class ClouderaManagerServiceRoleConfigGroup(ClouderaManagerMutableModule): def __init__(self, module): @@ -240,8 +153,7 @@ def __init__(self, module): # Set the parameters self.type = self.get_param("type") - self.display_name = self.get_param("display_name") - self.config = self.get_param("config", default=dict()) + self.config = self.get_param("config") self.purge = self.get_param("purge") # Initialize the return value @@ -259,7 +171,7 @@ def process(self): MgmtServiceResourceApi(self.api_client).read_service() except ApiException as ex: if ex.status == 404: - self.module.fail_json(msg="Cloudera Management Service does not exist") + self.module.fail_json(msg="Cloudera Management service does not exist") else: raise ex @@ -267,30 +179,21 @@ def process(self): # Retrieve the base RCG (the _only_ RCG for CMS roles) try: - existing = get_mgmt_base_role_config_group(self.api_client, self.type) + current = get_mgmt_base_role_config_group(self.api_client, self.type) except ApiException as ex: if ex.status != 404: raise ex except BaseRoleConfigGroupDiscoveryException as be: self.module.fail_json( - msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'" ) - payload = ApiRoleConfigGroup() - - # Update display name - if self.display_name and self.display_name != existing.display_name: - self.changed = True - - if self.module._diff: - self.diff["before"].update(display_name=existing.display_name) - self.diff["after"].update(display_name=self.display_name) - - payload.display_name = self.display_name - # Reconcile configurations if self.config or self.purge: - updates = ConfigListUpdates(existing.config, self.config, self.purge) + if self.config is None: + self.config = dict() + + updates = ConfigListUpdates(current.config, self.config, self.purge) if updates.changed: self.changed = True @@ -299,32 +202,33 @@ def process(self): self.diff["before"].update(config=updates.diff["before"]) self.diff["after"].update(config=updates.diff["after"]) - payload.config = updates.config - - # Execute changes if needed - if self.changed and not self.module.check_mode: - self.output = parse_role_config_group_result( - rcg_api.update_role_config_group( - existing.name, - message=self.message, - body=payload, - ) - ) - else: - self.output = parse_role_config_group_result(existing) + # Execute changes if needed + if not self.module.check_mode: + current = rcg_api.update_role_config_group( + current.name, + message=self.message, + body=ApiRoleConfigGroup( + name=current.name, + role_type=current.role_type, + config=updates.config, + display_name=current.display_name, + ), + ) + + # Parse the results + self.output = parse_role_config_group_result(current) # Report on any role associations 
self.output.update( - role_names=[r.name for r in rcg_api.read_roles(existing.name).items] + role_names=[r.name for r in rcg_api.read_roles(current.name).items] ) def main(): module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - display_name=dict(), type=dict(required=True, aliases=["role_type"]), - config=dict(type="dict", aliases=["params", "parameters"]), + config=dict(required=True, type="dict", aliases=["params", "parameters"]), purge=dict(type="bool", default=False), ), supports_check_mode=True, diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py index 3d198107..059524c8 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -209,34 +209,3 @@ def test_cm_role_config_group_config_purge_all( assert e.value.changed == False assert expected.items() <= e.value.role_config_group["config"].items() - - -@pytest.mark.role_config_group(ApiRoleConfigGroup(display_name="Test")) -def test_cm_role_config_group_display_name_set( - conn, module_args, host_monitor_role_group_config, request -): - expected = "Updated Test" - - module_args( - { - **conn, - "type": host_monitor_role_group_config.role_type, - "display_name": expected, - "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", - # _ansible_check_mode=True, - # _ansible_diff=True, - } - ) - - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config_group.main() - - assert e.value.changed == True - assert expected == e.value.role_config_group["display_name"] - - # Idempotency - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config_group.main() - - assert e.value.changed == False - assert expected == e.value.role_config_group["display_name"] From 95f8694deba50b7847736ec0f6774d9fb2eb7a3d Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 10 Jan 2025 16:46:33 -0500 Subject: [PATCH 46/58] Add cm_service_role_config_group_info module Signed-off-by: Webster Mudge --- .../cm_service_role_config_group_info.py | 199 ++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 plugins/modules/cm_service_role_config_group_info.py diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py new file mode 100644 index 00000000..cbc4c540 --- /dev/null +++ b/plugins/modules/cm_service_role_config_group_info.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config_group_info +short_description: Retrieve information about Cloudera Management service role config groups. +description: + - Gather information about Cloudera Manager service role config groups. 
+author: + - Webster Mudge (@wmudge) +options: + type: + description: + - The role type defining the role config group. + type: str + aliases: + - role_type +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full +requirements: + - cm-client +seealso: + - module: cloudera.cluster.cm_service_role_config_group +""" + +EXAMPLES = r""" +- name: Gather details of an individual Cloudera Manager service role config group. + cloudera.cluster.cm_service_role_config_group_info: + host: "example.cloudera.host" + username: "will_jordan" + password: "S&peR4Ec*re" + type: HOSTMONITOR + register: cm_output + +- name: Gather details of all Cloudera Manager service role config groups. + cloudera.cluster.cm_service_role_config_group_info: + host: "example.cloudera.host" + username: "will_jordan" + password: "S&peR4Ec*re" + register: cm_output +""" + +RETURN = r""" +role_config_groups: + description: List of Cloudera Manager service role config groups. + type: list + elements: dict + returned: always + contains: + base: + description: Whether the role config group is a base group. + type: bool + returned: always + config: + description: Set of configurations for the role config group. + type: dict + returned: optional + display_name: + description: Display name of the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_names: + description: List of role names (identifiers) associated with this role config group. + type: list + elements: str + returned: optional + role_type: + description: The type of the roles in this role config group. + type: str + returned: always + service_name: + description: Service name associated with this role config group. 
+ type: str + returned: always +""" + + +from cm_client import ( + ApiRoleConfigGroup, + MgmtRoleConfigGroupsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + BaseRoleConfigGroupDiscoveryException, + parse_role_config_group_result, + get_mgmt_base_role_config_group, +) + + +class ClouderaServiceRoleConfigGroupInfo(ClouderaManagerModule): + def __init__(self, module): + super(ClouderaServiceRoleConfigGroupInfo, self).__init__(module) + + # Set the parameters + self.type = self.get_param("type") + + # Initialize the return values + self.output = list() + + # Execute the logic + self.process() + + @ClouderaManagerModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management service does not exist") + else: + raise ex + + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + # Retrieve the base RCG (the _only_ RCG for CMS roles) + if self.type: + try: + current = get_mgmt_base_role_config_group(self.api_client, self.type) + except ApiException as ex: + if ex.status != 404: + raise ex + except BaseRoleConfigGroupDiscoveryException as be: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'" + ) + + result = parse_role_config_group_result(current) + result.update( + role_names=[r.name for r in rcg_api.read_roles(current.name).items] + ) + self.output.append(result) + else: + + def process_result(rcg: ApiRoleConfigGroup) -> dict: + result = parse_role_config_group_result(rcg) + result.update( + role_names=[r.name for r in rcg_api.read_roles(rcg.name).items] + ) + return result + + self.output = [ + process_result(r) + for r in rcg_api.read_role_config_groups().items + if r.base + ] + + +def main(): + module = ClouderaManagerModule.ansible_module( + argument_spec=dict( + type=dict(aliases=["role_type"]), + ), + supports_check_mode=False, + ) + + result = ClouderaServiceRoleConfigGroupInfo(module) + + output = dict( + changed=False, + role_config_groups=result.output, + ) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() From becefc7616e3bbf6dc28d2925a48f9708bc4304d Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 13 Jan 2025 14:34:37 -0500 Subject: [PATCH 47/58] Remove host_monitor_state fixture Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 145 +++++++++++++++++------------------------ 1 file changed, 58 insertions(+), 87 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 7eb27f1a..3394ff15 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -36,9 +36,7 @@ ApiClusterList, ApiCluster, ApiCommand, - ApiCommandList, ApiConfig, - ApiConfigList, ApiHostRef, ApiHostRefList, ApiRole, @@ -613,48 +611,69 @@ def host_monitor_role_group_config( @pytest.fixture(scope="function") -def host_monitor_state(cm_api_client, host_monitor, request) -> Generator[ApiRole]: - marker = request.node.get_closest_marker("role") +def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: + role_api = 
MgmtRolesResourceApi(cm_api_client)
+    role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client)
+
+    # Check for existing management role
+    pre_role = next(
+        iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None
+    )
+
+    if pre_role is not None:
+        # Get the current state
+        pre_role.config = role_api.read_role_config(role_name=pre_role.name)
+
+        # Remove the prior role
+        role_api.delete_role(role_name=pre_role.name)
+
+    # Yield now that the role has been removed
+    yield
+
+    # Reinstate the previous role
+    if pre_role is not None:
+        role_api.create_roles(body=ApiRoleList(items=[pre_role]))
+        if pre_role.maintenance_mode:
+            role_api.enter_maintenance_mode(pre_role.name)
+        if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]:
+            restart_cmds = role_cmd_api.restart_command(
+                body=ApiRoleNameList(items=[pre_role.name])
+            )
+            handle_commands(api_client=cm_api_client, commands=restart_cmds)
+
+
+@pytest.fixture(scope="function")
+def host_monitor_state(
+    cm_api_client, host_monitor, request
+) -> Generator[ApiRole]:
+    marker = request.node.get_closest_marker("role_state")
 
     if marker is None:
-        raise Exception("No 'role' marker found.")
+        raise Exception("No 'role_state' marker found.")
 
-    role = marker.args[0]
+    role_state = marker.args[0]
 
     role_api = MgmtRolesResourceApi(cm_api_client)
     cmd_api = MgmtRoleCommandsResourceApi(cm_api_client)
 
     # Get the current state
     pre_role = role_api.read_role(host_monitor.name)
-    pre_role.config = role_api.read_role_config(host_monitor.name)
-
-    # Set config
-    for c in role.config.items:
-        try:
-            role_api.update_role_config(
-                role_name=host_monitor.name,
-                message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set",
-                body=ApiConfigList(items=[c]),
-            )
-        except ApiException as ae:
-            if ae.status != 400 or "delete with template" not in str(ae.body):
-                raise Exception(str(ae))
-
-    # Update maintenance
-    if role.maintenance_mode:
-        role_api.enter_maintenance_mode(host_monitor.name)
-    else:
-        role_api.exit_maintenance_mode(host_monitor.name)
-
-    # Update state
-    if role.role_state is not None:
-        if role.role_state in [ApiRoleState.STARTED]:
+    # Set the role state
+    if pre_role.role_state != role_state:
+        if role_state in [ApiRoleState.STARTED]:
             handle_commands(
-                cmd_api.stop_command(body=ApiRoleNameList(items=[host_monitor.name]))
+                api_client=cm_api_client,
+                commands=cmd_api.start_command(
+                    body=ApiRoleNameList(items=[host_monitor.name])
+                ),
             )
-        elif role.role_state in [ApiRoleState.STOPPED]:
+        elif role_state in [ApiRoleState.STOPPED]:
             handle_commands(
-                cmd_api.start_command(body=ApiRoleNameList(items=[host_monitor.name]))
+                api_client=cm_api_client,
+                commands=cmd_api.stop_command(
+                    body=ApiRoleNameList(items=[host_monitor.name])
+                ),
             )
 
     # Yield the role
@@ -670,67 +689,19 @@ def host_monitor_state(cm_api_client, host_monitor, request) -> Generator[ApiRol
     if pre_role.role_state != post_role.role_state:
         if pre_role.role_state in [ApiRoleState.STARTED]:
             handle_commands(
-                cmd_api.start_command(body=ApiRoleNameList(items=[host_monitor.name]))
+                api_client=cm_api_client,
+                commands=cmd_api.start_command(
+                    body=ApiRoleNameList(items=[host_monitor.name])
+                ),
             )
         elif pre_role.role_state in [ApiRoleState.STOPPED]:
             handle_commands(
-                cmd_api.stop_command(body=ApiRoleNameList(items=[host_monitor.name]))
+                api_client=cm_api_client,
+                commands=cmd_api.stop_command(
+                    body=ApiRoleNameList(items=[host_monitor.name])
+                ),
             )
 
-    # Reset maintenance
-    if pre_role.maintenance_mode != post_role.maintenance_mode:
-        if 
pre_role.maintenance_mode: - role_api.enter_maintenance_mode(host_monitor.name) - else: - role_api.exit_maintenance_mode(host_monitor.name) - - # Reset config - pre_role_config_set = set([c.name for c in pre_role.config.items]) - - reconciled = pre_role.config.items.copy() - config_reset = [ - c for c in post_role.config.items if c.name not in pre_role_config_set - ] - reconciled.extend([ApiConfig(c.name, None) for c in config_reset]) - - role_api.update_role_config( - role_name=host_monitor.name, - message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", - body=ApiConfigList(items=reconciled), - ) - - -@pytest.fixture(scope="function") -def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: - role_api = MgmtRolesResourceApi(cm_api_client) - role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) - - # Check for existing management role - pre_role = next( - iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None - ) - - if pre_role is not None: - # Get the current state - pre_role.config = role_api.read_role_config(role_name=pre_role.name) - - # Remove the prior role - role_api.delete_role(role_name=pre_role.name) - - # Yield now that the role has been removed - yield - - # Reinstate the previous role - if pre_role is not None: - role_api.create_roles(body=ApiRoleList(items=[pre_role])) - if pre_role.maintenance_mode: - role_api.enter_maintenance_mode(pre_role.name) - if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: - restart_cmds = role_cmd_api.restart_command( - body=ApiRoleNameList(items=[pre_role.name]) - ) - handle_commands(api_client=cm_api_client, commands=restart_cmds) - def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): if commands.errors: From eefc7f296e755e0331ecb9598f7cc25e2925f577 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 13 Jan 2025 14:35:28 -0500 Subject: [PATCH 48/58] Update to remove custom Exceptions Remove get_role_config_group Signed-off-by: Webster Mudge --- .../module_utils/role_config_group_utils.py | 61 +++++++------------ 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index ffea32a4..99471f7f 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -32,14 +32,6 @@ ] -class BaseRoleConfigGroupDiscoveryException(Exception): - pass - - -class RoleConfigGroupDiscoveryException(Exception): - pass - - def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: """Parse a Role Config Group into a normalized dictionary. 
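
# A minimal caller-side sketch, assuming the None-returning lookups introduced
# by this patch in place of the removed custom exceptions (the role type and
# failure message below are hypothetical):
#
#   rcg = get_mgmt_base_role_config_group(api_client, "HOSTMONITOR")
#   if rcg is None:
#       module.fail_json(msg="Base role config group not found for 'HOSTMONITOR'")
#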
@@ -67,40 +59,31 @@ def get_base_role_config_group(
     api_client: ApiClient, cluster_name: str, service_name: str, role_type: str
 ) -> ApiRoleConfigGroup:
     rcg_api = RoleConfigGroupsResourceApi(api_client)
-    rcgs = [
-        r
-        for r in rcg_api.read_role_config_groups(cluster_name, service_name).items
-        if r.role_type == role_type and r.base
-    ]
-    if len(rcgs) != 1:
-        raise BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs))
-    else:
-        return rcgs[0]
+    return next(
+        iter(
+            [
+                r
+                for r in rcg_api.read_role_config_groups(
+                    cluster_name, service_name
+                ).items
+                if r.role_type == role_type and r.base
+            ]
+        ),
+        None,
+    )
 
 
 def get_mgmt_base_role_config_group(
     api_client: ApiClient, role_type: str
 ) -> ApiRoleConfigGroup:
     rcg_api = MgmtRoleConfigGroupsResourceApi(api_client)
-    rcgs = [
-        r
-        for r in rcg_api.read_role_config_groups().items
-        if r.role_type == role_type and r.base
-    ]
-    if len(rcgs) != 1:
-        raise BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs))
-    else:
-        return rcgs[0]
-
-
-def get_role_config_group(
-    api_client: ApiClient, cluster_name: str, service_name: str, name: str
-) -> ApiRoleConfigGroup:
-    rcg_api = RoleConfigGroupsResourceApi(api_client)
-
-    rcg = rcg_api.read_role_config_group(cluster_name, name, service_name)
-
-    if rcg is None:
-        raise RoleConfigGroupDiscoveryException(name)
-    else:
-        return rcg
+    return next(
+        iter(
+            [
+                r
+                for r in rcg_api.read_role_config_groups().items
+                if r.role_type == role_type and r.base
+            ]
+        ),
+        None,
+    )

From abc099517e8b7eaf69a339de778a7f84dc8ec902 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:37:40 -0500
Subject: [PATCH 49/58] Rename get_roles to read_roles and update to retrieve
 role override configurations

Add read_role, read_roles_by_type functions

Update create_role to handle errors for invalid role config group assignment

Signed-off-by: Webster Mudge
---
 plugins/module_utils/role_utils.py | 95 +++++++++++++++++++++++++-----
 1 file changed, 79 insertions(+), 16 deletions(-)

diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py
index 479691bb..7746bda4 100644
--- a/plugins/module_utils/role_utils.py
+++ b/plugins/module_utils/role_utils.py
@@ -18,9 +18,6 @@
 from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import (
     get_host_ref,
 )
-from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
-    get_role_config_group,
-)
 
 from cm_client import (
     ApiClient,
@@ -28,6 +25,7 @@
     ApiConfigList,
     ApiRoleList,
     ApiRoleConfigGroupRef,
+    RoleConfigGroupsResourceApi,
     RolesResourceApi,
     MgmtRolesResourceApi,
 )
@@ -71,20 +69,78 @@ def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList:
     )
 
 
-def get_roles(
+def read_role(
+    api_client: ApiClient, cluster_name: str, service_name: str, name: str
+) -> ApiRole:
+    role_api = RolesResourceApi(api_client)
+    role = role_api.read_role(
+        cluster_name=cluster_name, service_name=service_name, role_name=name
+    )
+    if role is not None:
+        role.config = role_api.read_role_config(
+            cluster_name=cluster_name, service_name=service_name, role_name=role.name
+        )
+    return role
+
+
+def read_roles(
+    api_client: ApiClient, cluster_name: str, service_name: str
+) -> ApiRoleList:
+    role_api = RolesResourceApi(api_client)
+    roles = role_api.read_roles(cluster_name, service_name).items
+    for r in roles:
+        r.config = role_api.read_role_config(
+            cluster_name=cluster_name,
+            service_name=service_name,
+            role_name=r.name,
+        )
+    return ApiRoleList(items=roles)
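+
+# A short usage sketch, assuming an authenticated ApiClient and an existing
+# cluster and service (the names "example" and "hdfs" are hypothetical):
+#
+#   roles = read_roles(api_client, cluster_name="example", service_name="hdfs")
+#   for role in roles.items:
+#       overrides = {c.name: c.value for c in role.config.items}
+#       print(role.name, role.role_state, overrides)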
+
+
+def read_roles_by_type(
     api_client: ApiClient, cluster_name: str, service_name: str, role_type: str
 ) -> ApiRoleList:
     role_api = RolesResourceApi(api_client)
-    return ApiRoleList(
-        items=[
-            r
-            for r in role_api.read_roles(cluster_name, service_name).items
-            if r.type == role_type
-        ]
+    roles = [
+        r
+        for r in role_api.read_roles(cluster_name, service_name).items
+        if r.type == role_type
+    ]
+    for r in roles:
+        r.config = role_api.read_role_config(
+            cluster_name=cluster_name,
+            service_name=service_name,
+            role_name=r.name,
+        )
+    return ApiRoleList(items=roles)
+
+
+def read_cm_role(api_client: ApiClient, role_type: str) -> ApiRole:
+    role_api = MgmtRolesResourceApi(api_client)
+    role = next(
+        iter([r for r in role_api.read_roles().items if r.type == role_type]),
+        None,
+    )
+    if role is not None:
+        role.config = role_api.read_role_config(role.name)
+    return role
+
+
+def read_cm_roles(api_client: ApiClient) -> ApiRoleList:
+    role_api = MgmtRolesResourceApi(api_client)
+    roles = role_api.read_roles().items
+    for r in roles:
+        r.config = role_api.read_role_config(role_name=r.name)
+    return ApiRoleList(items=roles)
+
+
+class HostNotFoundException(Exception):
+    pass
 
 
-class RoleHostNotFoundException(Exception):
+class RoleConfigGroupNotFoundException(Exception):
     pass
 
 
@@ -109,7 +165,7 @@
     # Host assignment
     host_ref = get_host_ref(api_client, hostname, host_id)
     if host_ref is None:
-        raise RoleHostNotFoundException(
+        raise HostNotFoundException(
             f"Host not found: hostname='{hostname}', host_id='{host_id}'"
         )
     else:
@@ -117,11 +173,18 @@
     # Role config group
     if role_config_group:
-        role.role_config_group_ref = ApiRoleConfigGroupRef(
-            get_role_config_group(
-                api_client, cluster_name, service_name, role_config_group
-            ).name
+        rcg_api = RoleConfigGroupsResourceApi(api_client)
+        rcg = rcg_api.read_role_config_group(
+            cluster_name=cluster_name,
+            service_name=service_name,
+            role_config_group_name=role_config_group,
         )
+        if rcg is None:
+            raise RoleConfigGroupNotFoundException(
+                f"Role config group not found: {role_config_group}"
+            )
+        else:
+            role.role_config_group_ref = ApiRoleConfigGroupRef(rcg.name)
 
     # Role override configurations
     if config:

From 5f1ef3224aab8d1d81b052c918d9084af0540840 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:40:01 -0500
Subject: [PATCH 50/58] Update to remove custom Exception handling

Signed-off-by: Webster Mudge
---
 plugins/modules/cm_service_role_config_group.py  |  9 ++++-----
 .../cm_service_role_config_group_config.py       |  9 ++++-----
 .../modules/cm_service_role_config_group_info.py | 16 ++++++----------
 3 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py
index d6622cd1..e2d0bb59 100644
--- a/plugins/modules/cm_service_role_config_group.py
+++ b/plugins/modules/cm_service_role_config_group.py
@@ -141,7 +141,6 @@
     ConfigListUpdates,
 )
 from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
-    BaseRoleConfigGroupDiscoveryException,
     parse_role_config_group_result,
     get_mgmt_base_role_config_group,
 )
@@ -180,13 +179,13 @@ def process(self):
         # Retrieve the base RCG (the _only_ RCG for CMS roles)
         try:
             current = get_mgmt_base_role_config_group(self.api_client, self.type)
+            if current is None:
+                self.module.fail_json(
+                    msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'"
+                )
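+            # Note: get_mgmt_base_role_config_group now returns None instead of
+            # raising a custom exception, so the module fails fast here before
+            # reconciling any configuration.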
except ApiException as ex: if ex.status != 404: raise ex - except BaseRoleConfigGroupDiscoveryException as be: - self.module.fail_json( - msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'" - ) # Reconcile configurations if self.config or self.purge: diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py index 8e61090b..c6beca9c 100644 --- a/plugins/modules/cm_service_role_config_group_config.py +++ b/plugins/modules/cm_service_role_config_group_config.py @@ -196,7 +196,6 @@ ConfigListUpdates, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( - BaseRoleConfigGroupDiscoveryException, get_mgmt_base_role_config_group, ) @@ -231,6 +230,10 @@ def process(self): try: if self.name is None: rcg = get_mgmt_base_role_config_group(self.api_client, self.type) + if rcg is None: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + ) self.name = rcg.name existing = rcg_api.read_config(self.name) @@ -239,10 +242,6 @@ def process(self): self.module.fail_json(msg=json.loads(ae.body)["message"]) else: raise ae - except BaseRoleConfigGroupDiscoveryException as be: - self.module.fail_json( - msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" - ) updates = ConfigListUpdates(existing, self.params, self.purge) diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py index cbc4c540..d4cffe5a 100644 --- a/plugins/modules/cm_service_role_config_group_info.py +++ b/plugins/modules/cm_service_role_config_group_info.py @@ -108,7 +108,6 @@ ClouderaManagerModule, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( - BaseRoleConfigGroupDiscoveryException, parse_role_config_group_result, get_mgmt_base_role_config_group, ) @@ -147,16 +146,13 @@ def process(self): except ApiException as ex: if ex.status != 404: raise ex - except BaseRoleConfigGroupDiscoveryException as be: - self.module.fail_json( - msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'" - ) - result = parse_role_config_group_result(current) - result.update( - role_names=[r.name for r in rcg_api.read_roles(current.name).items] - ) - self.output.append(result) + if current is not None: + result = parse_role_config_group_result(current) + result.update( + role_names=[r.name for r in rcg_api.read_roles(current.name).items] + ) + self.output.append(result) else: def process_result(rcg: ApiRoleConfigGroup) -> dict: From d316425165588f8bd80566ba9b66fec1634630dd Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 13 Jan 2025 14:42:05 -0500 Subject: [PATCH 51/58] Add cm_service_role_info module and tests Signed-off-by: Webster Mudge --- plugins/modules/cm_service_role_info.py | 290 ++++++++++++++++++ .../test_cm_service_role_info.py | 79 +++++ 2 files changed, 369 insertions(+) create mode 100644 plugins/modules/cm_service_role_info.py create mode 100644 tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py diff --git a/plugins/modules/cm_service_role_info.py b/plugins/modules/cm_service_role_info.py new file mode 100644 index 00000000..a8aad2f5 --- /dev/null +++ b/plugins/modules/cm_service_role_info.py @@ -0,0 +1,290 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_info +short_description: Retrieve information about Cloudera Management service roles. +description: + - Gather information about one or all Cloudera Manager service roles. +author: + - Webster Mudge (@wmudge) +options: + type: + description: + - The role type of the role. + type: str + aliases: + - role_type +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full +requirements: + - cm-client +seealso: + - module: cloudera.cluster.cm_service_role +""" + +EXAMPLES = r""" +- name: Gather details of an individual Cloudera Manager service role. + cloudera.cluster.cm_service_role_info: + host: "example.cloudera.host" + username: "john_doe" + password: "S&peR4Ec*re" + type: HOSTMONITOR + register: cm_output + +- name: Gather details of all Cloudera Manager service roles. + cloudera.cluster.cm_service_role_info: + host: "example.cloudera.host" + username: "john_doe" + password: "S&peR4Ec*re" + register: cm_output +""" + +RETURN = r""" +roles: + description: List of Cloudera Manager service roles. + type: list + elements: dict + returned: always + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN + health_checks: + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. 
+ type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always + tags: + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional +""" + + +from cm_client import ( + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, + read_cm_role, + read_cm_roles, +) + + +class ClouderaServiceRoleInfo(ClouderaManagerModule): + def __init__(self, module): + super(ClouderaServiceRoleInfo, self).__init__(module) + + # Set the parameters + self.type = self.get_param("type") + + # Initialize the return values + self.output = list() + + # Execute the logic + self.process() + + @ClouderaManagerModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management service does not exist") + else: + raise ex + + # Retrieve the specified role by type + if self.type: + result = None + + try: + result = read_cm_role(api_client=self.api_client, role_type=self.type) + except ApiException as ex: + if ex.status != 404: + raise ex + + if result is not None: + self.output.append(parse_role_result(result)) + else: + self.output = [ + parse_role_result(r) + for r in read_cm_roles(api_client=self.api_client).items + ] + + +def main(): + module = ClouderaManagerModule.ansible_module( + argument_spec=dict( + type=dict(aliases=["role_type"]), + ), + supports_check_mode=False, + ) + + result = ClouderaServiceRoleInfo(module) + + output = dict( + changed=False, + roles=result.output, + ) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git 
a/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py new file mode 100644 index 00000000..dee3402d --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_info +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_read_roles(conn, module_args, cms_auto): + module_args({**conn}) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_info.main() + + assert e.value.changed == False + assert len(e.value.roles) == 4 + + +def test_read_role(conn, module_args, cms_auto): + module_args( + { + **conn, + "type": "HOSTMONITOR", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_info.main() + + assert e.value.changed == False + assert len(e.value.roles) == 1 + + +def test_read_role_nonexistent(conn, module_args, cms_auto): + module_args( + { + **conn, + "type": "DOESNOTEXIST", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_info.main() + + assert len(e.value.roles) == 0 + + +def test_read_service_nonexistent(conn, module_args): + module_args({**conn}) + + with pytest.raises( + AnsibleFailJson, match="Cloudera Management service does not exist" + ) as e: + cm_service_role_info.main() From 200b9153e4f068573f30df5dec10ed63ebd9fd5e Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 13 Jan 2025 14:43:47 -0500 Subject: [PATCH 52/58] Update documentation and reconcilation logic for maintenance and operational state Signed-off-by: Webster Mudge --- plugins/modules/cm_service_role.py | 383 +++++++++--------- .../cm_service_role/test_cm_service_role.py | 318 ++++++++------- 2 files changed, 356 insertions(+), 345 deletions(-) diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index c1de0c41..69feda02 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -19,25 +19,23 @@ module: cm_service_role short_description: Manage a Cloudera Manager Service role description: - - Manage a Cloudera Manager Service role + - Manage a Cloudera Manager Service role. author: - - "Webster Mudge (@wmudge)" -requirements: - - cm-client + - Webster Mudge (@wmudge) options: cluster_hostname: description: - - The hostname of a cluster instance for the role. - - If the hostname is different that the existing host for the I(type), the role will be destroyed and rebuilt on the declared host. - - Mutually exclusive with I(cluster_host_id). + - The hostname of an instance for the role. 
+ - If the hostname is different that the existing host for the O(type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(cluster_host_id). type: str aliases: - cluster_host cluster_host_id: description: - - The host ID of a cluster instance for the role. - - If the host ID is different that the existing host for the I(type), the role will be destroyed and rebuilt on the declared host. - - Mutually exclusive with I(cluster_hostname). + - The host ID of an instance for the role. + - If the host ID is different that the existing host for the O(type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(cluster_hostname). type: str type: description: @@ -48,8 +46,8 @@ - role_type config: description: - - The role configuration to set, i.e. overrides. - - To unset a parameter, use C(None) as the value. + - The role configuration to set, i.e. role overrides, for the instance. + - To unset a parameter, use V(None) as the value. type: dict aliases: - params @@ -63,14 +61,14 @@ purge: description: - Flag for whether the declared role configurations should append or overwrite any existing configurations. - - To clear all role configurations, set I(config={}), i.e. an empty dictionary, or omit entirely, and set I(purge=True). + - To clear all role configurations, set O(config={}), i.e. an empty dictionary, or omit entirely, and set O(purge=True). type: bool default: False state: description: - The state of the role. - Note, if the declared state is invalid for the role, the module will return an error. - - Note, I(restarted) is always force a change of state of the role. + - Note, V(restarted) is always force a change of state of the role. type: str default: present choices: @@ -82,6 +80,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - cloudera.cluster.message attributes: check_mode: support: full @@ -89,6 +88,11 @@ support: full platform: platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.cm_service + - module: cloudera.cluster.cm_service_role_config_group """ EXAMPLES = r""" @@ -162,70 +166,51 @@ RETURN = r""" role: - description: Details about the Cloudera Manager Service role. + description: Details about the Cloudera Manager service role. type: dict + returned: always contains: - name: - description: - - The Cloudera Manager Service role name. - - Note, this is an auto-generated name and cannot be changed. - type: str - returned: always - type: - description: The Cloudera Manager Service role type. - type: str - returned: always - sample: - - HOSTMONITOR - host_id: - description: The unique ID of the cluster host. - type: str - returned: always - service_name: - description: The name of the Cloudera Manager Service, which uniquely identifies it in a deployment. - type: str - returned: when supported - role_state: - description: State of the Cloudera Manager Service role. - type: str - returned: always - sample: - - HISTORY_NOT_AVAILABLE - - UNKNOWN - - STARTING - - STARTED - - STOPPING - - STOPPED - - NA commission_state: - description: Commission state of the Cloudera Manager Service role. - type: str - returned: always - health_summary: - description: The high-level health status of the Cloudera Manager Service role. + description: Commission state of the Cloudera Manager service role. 
type: str returned: always sample: - - DISABLED - - HISTORY_NOT_AVAILABLE - - NOT_AVAILABLE - - GOOD - - CONCERNING - - BAD + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional config_staleness_status: - description: Status of configuration staleness for the Cloudera Manager Service role. + description: Status of configuration staleness for the Cloudera Manager service role. type: str returned: always sample: - FRESH - STALE_REFRESHABLE - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN health_checks: - description: Lists all available health checks for Cloudera Manager Service role. + description: List of all available health checks for Cloudera Manager service role. type: list elements: dict - returned: when supported + returned: optional contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional name: description: Unique name of this health check. type: str @@ -241,60 +226,95 @@ - GOOD - CONCERNING - BAD - explanation: - description: The explanation of this health check. - type: str - returned: when supported suppressed: description: - Whether this health check is suppressed. - A suppressed health check is not considered when computing the role's overall health. type: bool - returned: when supported + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always maintenance_mode: - description: Whether the Cloudera Manager Service role is in maintenance mode. + description: Whether the Cloudera Manager service role is in maintenance mode. type: bool - returned: when supported + returned: always maintenance_owners: - description: The list of objects that trigger this service to be in maintenance mode. + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. type: list elements: str - returned: when supported + returned: optional sample: - CLUSTER - SERVICE - ROLE - HOST - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always role_config_group_name: description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. type: str - returned: when supported + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always tags: - description: The dictionary of tags for the Cloudera Manager Service role. + description: Set of tags for the Cloudera Manager service role. type: dict - returned: when supported + returned: optional + type: + description: The Cloudera Manager service role type. 
+ type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER zoo_keeper_server_mode: description: - - The Zookeeper server mode for this Cloudera Manager Service role. - - Note that for non-Zookeeper Server roles, this will be C(null). + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). type: str - returned: when supported + returned: optional """ -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - ConfigListUpdates, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( - create_role, - parse_role_result, -) +from collections.abc import Callable from cm_client import ( ApiBulkCommandList, + ApiCommand, ApiRole, ApiRoleList, ApiRoleNameList, @@ -305,6 +325,18 @@ ) from cm_client.rest import ApiException +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + parse_role_result, + read_cm_role, +) + class ClouderaManagerServiceRole(ClouderaManagerMutableModule): def __init__(self, module): @@ -322,36 +354,32 @@ def __init__(self, module): # Initialize the return values self.changed = False self.diff = dict(before={}, after={}) - self.output = {} + self.output = dict() # Execute the logic self.process() @ClouderaManagerMutableModule.handle_process def process(self): + + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client) + # Confirm that CMS is present try: - MgmtServiceResourceApi(self.api_client).read_service() + service_api.read_service() except ApiException as ex: if ex.status == 404: - self.module.fail_json(msg="Cloudera Management Service does not exist") + self.module.fail_json(msg="Cloudera Management service does not exist") else: raise ex - self.role_api = MgmtRolesResourceApi(self.api_client) - current = None # Discover the role by its type try: - current = next( - iter( - [r for r in self.role_api.read_roles().items if r.type == self.type] - ), - None, - ) - if current is not None: - current.config = self.role_api.read_role_config(current.name) + current = read_cm_role(api_client=self.api_client, role_type=self.type) except ApiException as ex: if ex.status != 404: raise ex @@ -359,7 +387,7 @@ def process(self): # If deleting, do so and exit if self.state == "absent": if current: - self.deprovision_role(current) + self.deprovision_role(role_api, current) # Otherwise, manage the configuration and state elif self.state in ["present", "restarted", "started", "stopped"]: @@ -372,14 +400,19 @@ def process(self): host_id=self.cluster_host_id, config=self.config, ) - current = self.provision_role(new_role) - # If it exists, but the host has changed, destroy and rebuild completely + current = self.provision_role(role_api, new_role) + self.handle_maintenance(role_api, current) + # Else if it exists, but the host has changed, destroy and rebuild completely elif ( - self.cluster_hostname is not None - and self.cluster_hostname != current.host_ref.hostname - ) or ( - self.cluster_host_id is not None - and 
self.cluster_host_id != current.host_ref.host_id
+                current
+                and (
+                    self.cluster_hostname is not None
+                    and self.cluster_hostname != current.host_ref.hostname
+                )
+                or (
+                    self.cluster_host_id is not None
+                    and self.cluster_host_id != current.host_ref.host_id
+                )
             ):
                 if self.config:
                     new_config = self.config
@@ -393,11 +426,17 @@ def process(self):
                         host_id=self.cluster_host_id,
                         config=new_config,
                     )
-                    current = self.reprovision_role(current, new_role)
+                    current = self.reprovision_role(role_api, current, new_role)
+                    self.handle_maintenance(role_api, current)

             # Else it exists, so address any changes
             else:
+                self.handle_maintenance(role_api, current)
+
                 # Handle role override configurations
                 if self.config or self.purge:
+                    if self.config is None:
+                        self.config = dict()
+
                     updates = ConfigListUpdates(current.config, self.config, self.purge)

                     if updates.changed:
@@ -408,93 +447,35 @@ def process(self):
                             self.diff["after"].update(config=updates.diff["after"])

                         if not self.module.check_mode:
-                            self.role_api.update_role_config(
+                            role_api.update_role_config(
                                 current.name,
                                 message=self.message,
                                 body=updates.config,
                             )

-                # Handle maintenance mode
-                # TODO Move first
-                if (
-                    self.maintenance is not None
-                    and self.maintenance != current.maintenance_mode
-                ):
-                    self.changed = True
-
-                    if self.module._diff:
-                        self.diff["before"].update(
-                            maintenance_mode=current.maintenance_mode
-                        )
-                        self.diff["after"].update(maintenance_mode=self.maintenance)
-
-                    if not self.module.check_mode:
-                        if self.maintenance:
-                            maintenance_cmd = self.role_api.enter_maintenance_mode(
-                                current.name
-                            )
-                        else:
-                            maintenance_cmd = self.role_api.exit_maintenance_mode(
-                                current.name
-                            )
-
-                        if maintenance_cmd.success is False:
-                            self.module.fail_json(
-                                msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}"
-                            )
-
                 # Handle the various states
                 if self.state == "started" and current.role_state not in [
                     ApiRoleState.STARTED
                 ]:
-                    self.changed = True
-
-                    if self.module._diff:
-                        self.diff["before"].update(role_state=current.role_state)
-                        self.diff["after"].update(role_state="STARTED")
-
-                    if not self.module.check_mode:
-                        self.handle_commands(
-                            MgmtRoleCommandsResourceApi(self.api_client).start_command(
-                                body=ApiRoleNameList(items=[current.name]),
-                            )
-                        )
-
+                    self.exec_role_command(
+                        current, ApiRoleState.STARTED, role_cmd_api.start_command
+                    )
                 elif self.state == "stopped" and current.role_state not in [
                     ApiRoleState.STOPPED,
                     ApiRoleState.NA,
                 ]:
-                    self.changed = True
-
-                    if self.module._diff:
-                        self.diff["before"].update(role_state=current.role_state)
-                        self.diff["after"].update(role_state="STOPPED")
-
-                    if not self.module.check_mode:
-                        self.handle_commands(
-                            MgmtRoleCommandsResourceApi(self.api_client).stop_command(
-                                body=ApiRoleNameList(items=[current.name]),
-                            )
-                        )
-
+                    self.exec_role_command(
+                        current, ApiRoleState.STOPPED, role_cmd_api.stop_command
+                    )
                 elif self.state == "restarted":
-                    self.changed = True
-
-                    if self.module._diff:
-                        self.diff["before"].update(role_state=current.role_state)
-                        self.diff["after"].update(role_state="STARTED")
-
-                    if not self.module.check_mode:
-                        self.handle_commands(
-                            MgmtRoleCommandsResourceApi(self.api_client).restart_command(
-                                body=ApiRoleNameList(items=[current.name]),
-                            )
-                        )
+                    self.exec_role_command(
+                        current, ApiRoleState.STARTED, role_cmd_api.restart_command
+                    )

                 # If there are changes, get a fresh read
                 if self.changed:
-                    refresh = self.role_api.read_role(current.name)
-                    refresh.config = self.role_api.read_role_config(current.name)
+                    refresh = role_api.read_role(current.name)
+                    refresh.config = role_api.read_role_config(current.name)
                     self.output = parse_role_result(refresh)
                 # Otherwise return the existing
                 else:
@@ -502,7 +483,37 @@ def process(self):
         else:
             self.module.fail_json(msg=f"Invalid state: {self.state}")

-    def provision_role(self, role: ApiRole) -> ApiRole:
+    def exec_role_command(
+        self, role: ApiRole, value: str, cmd: Callable[[ApiRoleNameList], ApiCommand]
+    ):
+        self.changed = True
+        if self.module._diff:
+            self.diff["before"].update(role_state=role.role_state)
+            self.diff["after"].update(role_state=value)
+
+        if not self.module.check_mode:
+            self.handle_commands(cmd(body=ApiRoleNameList(items=[role.name])))
+
+    def handle_maintenance(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> None:
+        if self.maintenance is not None and self.maintenance != role.maintenance_mode:
+            self.changed = True
+
+            if self.module._diff:
+                self.diff["before"].update(maintenance_mode=role.maintenance_mode)
+                self.diff["after"].update(maintenance_mode=self.maintenance)
+
+            if not self.module.check_mode:
+                if self.maintenance:
+                    maintenance_cmd = role_api.enter_maintenance_mode(role.name)
+                else:
+                    maintenance_cmd = role_api.exit_maintenance_mode(role.name)
+
+                if maintenance_cmd.success is False:
+                    self.module.fail_json(
+                        msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}"
+                    )
+
+    def provision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> ApiRole:
         self.changed = True

         if self.module._diff:
@@ -515,7 +526,7 @@ def provision_role(self, role: ApiRole) -> ApiRole:
         created_role = next(
             (
                 iter(
-                    self.role_api.create_roles(
+                    role_api.create_roles(
                         body=ApiRoleList(items=[role]),
                     ).items
                 )
@@ -528,7 +539,9 @@ def provision_role(self, role: ApiRole) -> ApiRole:
         )
         return created_role

-    def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole:
+    def reprovision_role(
+        self, role_api: MgmtRolesResourceApi, existing_role: ApiRole, new_role: ApiRole
+    ) -> ApiRole:
         self.changed = True

         if self.module._diff:
@@ -538,12 +551,12 @@ def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole
             )

         if not self.module.check_mode:
-            self.role_api.delete_role(existing_role.name)
+            role_api.delete_role(existing_role.name)

             rebuilt_role = next(
                 (
                     iter(
-                        self.role_api.create_roles(
+                        role_api.create_roles(
                             body=ApiRoleList(items=[new_role]),
                         ).items
                     )
@@ -559,14 +572,14 @@ def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole
         else:
             return existing_role

-    def deprovision_role(self, role: ApiRole) -> None:
+    def deprovision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> None:
         self.changed = True

         if self.module._diff:
             self.diff = dict(before=parse_role_result(role), after=dict())

         if not self.module.check_mode:
-            self.role_api.delete_role(role.name)
+            role_api.delete_role(role.name)

     def handle_commands(self, commands: ApiBulkCommandList):
         if commands.errors:
diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
index b1a9c98a..1501ce85 100644
--- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
+++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
@@ -21,16 +21,11 @@
 import logging
 import pytest

-from collections.abc import Generator
 from pathlib import Path

 from cm_client import (
-    ApiConfig,
-    ApiConfigList,
-    ApiRole,
-    ApiRoleList,
     ApiRoleState,
-    ClustersResourceApi,
+    HostsResourceApi,
     MgmtRolesResourceApi,
 )

@@ -38,158 +33,142 @@
 from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleExitJson,
     AnsibleFailJson,
-    set_cm_role,
-)
-from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import (
-    get_host_ref,
-)
-from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
-    get_mgmt_roles,
 )

 LOG = logging.getLogger(__name__)


-@pytest.fixture(scope="function")
-def target_cm_role(cm_api_client, cms, base_cluster, request) -> Generator[ApiRole]:
-    marker = request.node.get_closest_marker("role")
+def test_missing_required(conn, module_args):
+    module_args(conn)

-    if marker is None:
-        role = ApiRole(
-            type="HOSTMONITOR",
-        )
-    else:
-        role = marker.args[0]
-        role.type = "HOSTMONITOR"
+    with pytest.raises(AnsibleFailJson, match="type"):
+        cm_service_role.main()

-    yield from set_cm_role(cm_api_client, base_cluster, role)

+def test_mutually_exclusive(conn, module_args):
+    module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"})

-@pytest.fixture(scope="function")
-def target_cm_role_cleared(
-    cm_api_client, base_cluster, host_monitor_cleared, request
-) -> Generator[ApiRole]:
-    marker = request.node.get_closest_marker("role")
+    with pytest.raises(
+        AnsibleFailJson,
+        match="parameters are mutually exclusive: cluster_hostname|cluster_host_id",
+    ):
+        cm_service_role.main()

-    if marker is None:
-        role = ApiRole(
-            type="HOSTMONITOR",
-        )
-    else:
-        role = marker.args[0]
-        role.type = "HOSTMONITOR"

-    role_api = MgmtRolesResourceApi(cm_api_client)
+def test_existing_relocate(conn, module_args, cm_api_client, host_monitor, request):
+    host_api = HostsResourceApi(cm_api_client)
+    host = next(
+        (
+            h
+            for h in host_api.read_hosts().items
+            if not h.cluster_ref and h.host_id != host_monitor.host_ref.host_id
+        ),
+        None,
+    )

+    if host is None:
+        raise Exception("No available hosts to relocate Cloudera Manager Service role")

-    if not role.host_ref:
-        cluster_api = ClustersResourceApi(cm_api_client)
+    module_args(
+        {
+            **conn,
+            "type": host_monitor.type,
+            "cluster_host_id": host.host_id,
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )

-        # Get first host of the cluster
-        hosts = cluster_api.list_hosts(cluster_name=base_cluster.name)
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role.main()

-        if not hosts.items:
-            raise Exception(
-                "No available hosts to assign the Cloudera Manager Service role."
-            )
+    assert e.value.changed == True
+    assert e.value.role["host_id"] == host.host_id

-        role.host_ref = get_host_ref(cm_api_client, host_id=hosts.items[0].host_id)
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role.main()

-    # Create and yield the role under test
-    current_role = next(
-        iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None
-    )
-    current_role.config = role_api.read_role_config(role_name=current_role.name)
+    assert e.value.changed == False
+    assert e.value.role["host_id"] == host.host_id

-    yield current_role

-    # Clear out any remaining roles
-    remaining_roles = get_mgmt_roles(cm_api_client, "HOSTMONITOR")
+def test_new(conn, module_args, cm_api_client, cms, request):
+    host_api = HostsResourceApi(cm_api_client)
+    host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None)

-    for r in remaining_roles.items:
-        role_api.delete_role(role_name=r.name)
+    if host is None:
+        raise Exception("No available hosts to assign Cloudera Manager Service role")

+    module_args(
+        {
+            **conn,
+            "type": "HOSTMONITOR",
+            "cluster_host_id": host.host_id,
+            "config": dict(mgmt_num_descriptor_fetch_tries=55),
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )

-def test_missing_required(conn, module_args):
-    module_args(conn)
+    expected = dict(mgmt_num_descriptor_fetch_tries="55")

-    with pytest.raises(AnsibleFailJson, match="type"):
+    with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

+    assert e.value.changed == True
+    assert expected.items() <= e.value.role["config"].items()

-def test_mutually_exclusive(conn, module_args):
-    module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"})
-
-    with pytest.raises(
-        AnsibleFailJson,
-        match="parameters are mutually exclusive: cluster_hostname|cluster_host_id",
-    ):
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

+    assert e.value.changed == False
+    assert expected.items() <= e.value.role["config"].items()

-@pytest.mark.role(ApiRole())
-def test_relocate_host(
-    conn, module_args, cm_api_client, base_cluster, target_cm_role_cleared, request
-):
-    cluster_api = ClustersResourceApi(cm_api_client)
-
-    # Get second host of the cluster
-    hosts = cluster_api.list_hosts(cluster_name=base_cluster.name)

-    if not hosts.items:
-        raise Exception(
-            "No available hosts to assign the Cloudera Manager Service role."
-        )
-
-    filtered_hosts = [
-        h for h in hosts.items if h.host_id != target_cm_role_cleared.host_ref.host_id
-    ]
+def test_new_maintenance_mode_enabled(conn, module_args, cm_api_client, cms, request):
+    host_api = HostsResourceApi(cm_api_client)
+    host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None)

-    if len(filtered_hosts) < 1:
-        raise Exception(
-            "Not enough hosts to reassign the Cloudera Manager Service role."
-        )
+    if host is None:
+        raise Exception("No available hosts to assign Cloudera Manager Service role")

     module_args(
         {
             **conn,
-            "type": target_cm_role_cleared.type,
-            "cluster_hostname": filtered_hosts[0].hostname,
+            "type": "HOSTMONITOR",
+            "cluster_host_id": host.host_id,
+            "maintenance": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
         }
     )

-    expected = filtered_hosts[0].host_id
-
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

     assert e.value.changed == True
-    assert expected == e.value.role["host_id"]
+    assert e.value.role["maintenance_mode"] == True

     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

     assert e.value.changed == False
-    assert expected == e.value.role["host_id"]
+    assert e.value.role["maintenance_mode"] == True


-@pytest.mark.role(
-    ApiRole(
-        config=ApiConfigList(
-            items=[
-                ApiConfig("mgmt_num_descriptor_fetch_tries", 11),
-                ApiConfig("process_start_secs", 21),
-            ]
-        )
-    )
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
 )
-def test_set_config(conn, module_args, target_cm_role, request):
+def test_existing_set(conn, module_args, host_monitor_config, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor_config.type,
             "config": dict(mgmt_num_descriptor_fetch_tries=55),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -213,21 +192,14 @@ def test_set_config(conn, module_args, target_cm_role, request):
         assert expected.items() <= e.value.role["config"].items()


-@pytest.mark.role(
-    ApiRole(
-        config=ApiConfigList(
-            items=[
-                ApiConfig("mgmt_num_descriptor_fetch_tries", 12),
-                ApiConfig("process_start_secs", 22),
-            ]
-        )
-    )
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=12, process_start_secs=22)
 )
-def test_unset_config(conn, module_args, target_cm_role, request):
+def test_existing_unset(conn, module_args, host_monitor_config, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor_config.type,
             "config": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
         }
@@ -249,21 +221,14 @@ def test_unset_config(conn, module_args, target_cm_role, request):
         assert expected.items() <= e.value.role["config"].items()


-@pytest.mark.role(
-    ApiRole(
-        config=ApiConfigList(
-            items=[
-                ApiConfig("mgmt_num_descriptor_fetch_tries", 13),
-                ApiConfig("process_start_secs", 23),
-            ]
-        )
-    )
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=13, process_start_secs=23)
 )
-def test_set_config_purge(conn, module_args, target_cm_role, request):
+def test_existing_purge(conn, module_args, host_monitor_config, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor_config.type,
             "config": dict(mgmt_num_descriptor_fetch_tries=33),
             "purge": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
@@ -288,22 +253,14 @@ def test_set_config_purge(conn, module_args, target_cm_role, request):
         assert expected.items() <= e.value.role["config"].items()


-@pytest.mark.role(
-    ApiRole(
-        config=ApiConfigList(
-            items=[
-                ApiConfig("mgmt_num_descriptor_fetch_tries", 14),
-                ApiConfig("process_start_secs", 24),
-            ]
-        )
-    )
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=14, process_start_secs=24)
 )
-def test_set_config_purge_all(conn, module_args, target_cm_role, request):
+def test_existing_purge_all(conn, module_args, host_monitor_config, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
-            "config": dict(),
+            "type": host_monitor_config.type,
             "purge": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -325,12 +282,13 @@ def test_set_config_purge_all(conn, module_args, target_cm_role, request):
         assert len(e.value.role["config"]) == 0


-@pytest.mark.role(ApiRole(maintenance_mode=False))
-def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request):
+def test_existing_maintenance_mode_enabled(
+    conn, module_args, cm_api_client, host_monitor, request
+):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor.type,
             "maintenance": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -338,6 +296,9 @@ def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request):
         }
     )

+    role_api = MgmtRolesResourceApi(cm_api_client)
+    role_api.exit_maintenance_mode(host_monitor.name)
+
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

@@ -352,12 +313,13 @@ def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request):
         assert e.value.role["maintenance_mode"] == True


-@pytest.mark.role(ApiRole(maintenance_mode=True))
-def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request):
+def test_existing_maintenance_mode_disabled(
+    conn, module_args, cm_api_client, host_monitor, request
+):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor.type,
             "maintenance": False,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -365,6 +327,10 @@ def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request):
         }
     )

+    # TODO Turn this into a fixture - host_monitor_maintenance
+    role_api = MgmtRolesResourceApi(cm_api_client)
+    role_api.enter_maintenance_mode(host_monitor.name)
+
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

@@ -379,12 +345,40 @@ def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request):
         assert e.value.role["maintenance_mode"] == False


-@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED))
-def test_state_started(conn, module_args, target_cm_role, request):
+def test_existing_state_present(conn, module_args, host_monitor, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor.type,
+            "state": "present",
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role.main()
+
+    assert e.value.changed == False
+    assert e.value.role
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role.main()
+
+    assert e.value.changed == False
+    assert e.value.role
+
+
+@pytest.mark.role_state(ApiRoleState.STOPPED)
+def test_existing_state_started(
+    conn, module_args, cms_auto, host_monitor_state, request
+):
+    module_args(
+        {
+            **conn,
+            "type": host_monitor_state.type,
             "state": "started",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -396,22 +390,24 @@ def test_state_started(conn, module_args, target_cm_role, request):
         cm_service_role.main()

     assert e.value.changed == True
-    assert e.value.role["role_state"] == "STARTED"
+    assert e.value.role["role_state"] == ApiRoleState.STARTED

     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

     assert e.value.changed == False
-    assert e.value.role["role_state"] == "STARTED"
+    assert e.value.role["role_state"] == ApiRoleState.STARTED


-@pytest.mark.role(ApiRole(role_state=ApiRoleState.STARTED))
-def test_state_started(conn, module_args, target_cm_role, request):
+@pytest.mark.role_state(ApiRoleState.STARTED)
+def test_existing_state_stopped(
+    conn, module_args, cms_auto, host_monitor_state, request
+):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor_state.type,
             "state": "stopped",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -423,22 +419,24 @@ def test_state_started(conn, module_args, target_cm_role, request):
         cm_service_role.main()

     assert e.value.changed == True
-    assert e.value.role["role_state"] == "STOPPED"
+    assert e.value.role["role_state"] == ApiRoleState.STOPPED

     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

     assert e.value.changed == False
-    assert e.value.role["role_state"] == "STOPPED"
+    assert e.value.role["role_state"] == ApiRoleState.STOPPED


-@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED))
-def test_state_restarted(conn, module_args, target_cm_role, request):
+@pytest.mark.role_state(ApiRoleState.STARTED)
+def test_existing_state_restarted(
+    conn, module_args, cms_auto, host_monitor_state, request
+):
     module_args(
         {
             **conn,
-            "type": target_cm_role.type,
+            "type": host_monitor_state.type,
             "state": "restarted",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
@@ -450,21 +448,21 @@ def test_state_restarted(conn, module_args, target_cm_role, request):
         cm_service_role.main()

     assert e.value.changed == True
-    assert e.value.role["role_state"] == "STARTED"
+    assert e.value.role["role_state"] == ApiRoleState.STARTED

-    # Idempotency is not possible due to this state
+    # Idempotency (restart always forces a changed state)
     with pytest.raises(AnsibleExitJson) as e:
         cm_service_role.main()

     assert e.value.changed == True
-    assert e.value.role["role_state"] == "STARTED"
+    assert e.value.role["role_state"] == ApiRoleState.STARTED


-def test_state_absent(conn, module_args, target_cm_role_cleared, request):
+def test_existing_state_absent(conn, module_args, cms_auto, host_monitor, request):
     module_args(
         {
             **conn,
-            "type": target_cm_role_cleared.type,
+            "type": host_monitor.type,
             "state": "absent",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,

From 13d05e924e9db98b461f4433644633212261b7ce Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:44:53 -0500
Subject: [PATCH 53/58] Update for non-existence of CM service

Update test to use pytest and fixtures

Signed-off-by: Webster Mudge

---
 plugins/modules/cm_service_info.py            |  6 ++-
 .../cm_service_info/test_cm_service_info.py   | 51 ++++++++-----------
 2 files changed, 26 insertions(+), 31 deletions(-)

diff --git a/plugins/modules/cm_service_info.py b/plugins/modules/cm_service_info.py
index 8142360f..6d67e19e 100644
--- a/plugins/modules/cm_service_info.py
+++ b/plugins/modules/cm_service_info.py
@@ -362,13 +362,15 @@ def __init__(self, module):

     @ClouderaManagerModule.handle_process
     def process(self):
+        result = None
         try:
-            current = read_cm_service(self.api_client)
+            result = read_cm_service(self.api_client)
         except ApiException as ex:
             if ex.status != 404:
                 raise ex

-        self.output = parse_service_result(current)
+        if result is not None:
+            self.output = parse_service_result(result)


 def main():
diff --git a/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py b/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py
index 0561dba6..eb679787 100644
--- a/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py
+++ b/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-

-# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,39 +18,32 @@

 __metaclass__ = type

-import os
+import logging
 import pytest
-import unittest

 from ansible_collections.cloudera.cluster.plugins.modules import cm_service_info
-from ansible_collections.cloudera.cluster.tests.unit.plugins.modules.utils import (
+from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleExitJson,
-    AnsibleFailJson,
-    ModuleTestCase,
-    setup_module_args,
 )

+LOG = logging.getLogger(__name__)

-@unittest.skipUnless(
-    os.getenv("CM_USERNAME"), "Cloudera Manager access parameters not set"
-)
-class TestCMServiceInfo(ModuleTestCase):
-    def test_service_info(self):
-        setup_module_args(
-            {
-                "username": os.getenv("CM_USERNAME"),
-                "password": os.getenv("CM_PASSWORD"),
-                "host": os.getenv("CM_HOST"),
-                "port": "7180",
-                "verify_tls": "no",
-                "debug": "yes",
-            }
-        )
-
-        with pytest.raises(AnsibleExitJson) as e:
-            cm_service_info.main()
-
-
-if __name__ == "__main__":
-    unittest.main()
+
+def test_read_service(conn, module_args, cms_auto):
+    module_args({**conn})
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_info.main()
+
+    assert e.value.changed == False
+    assert cms_auto.name == e.value.service["name"]
+
+
+def test_read_service_nonexistent(conn, module_args):
+    module_args({**conn})
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_info.main()
+
+    assert not e.value.service

From 17e25ed3ae3090826b9f02c1cfa8a27ffe596eda Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:45:16 -0500
Subject: [PATCH 54/58] Remove view parameter

Signed-off-by: Webster Mudge

---
 plugins/modules/cm_service.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py
index 05251640..1ae5da6f 100644
--- a/plugins/modules/cm_service.py
+++ b/plugins/modules/cm_service.py
@@ -595,7 +595,6 @@ def __init__(self, module):
         self.roles = self.get_param("roles")
         self.state = self.get_param("state")
         self.purge = self.get_param("purge")
-        # self.view = self.get_param("view")

         # Initialize the return value
         self.changed = False

From 77c8e35547b548f84078b46577e8456d0fcbf5f7 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:45:58 -0500
Subject: [PATCH 55/58] Add documentation for CM service role return values

Signed-off-by: Webster Mudge

---
 tests/return-values-cm_service_role.yml | 157 ++++++++++++++++++++++++
 1 file changed, 157 insertions(+)
 create mode 100644 tests/return-values-cm_service_role.yml

diff --git a/tests/return-values-cm_service_role.yml b/tests/return-values-cm_service_role.yml
new file mode 100644
index 00000000..f91235a7
--- /dev/null
+++ b/tests/return-values-cm_service_role.yml
@@ -0,0 +1,157 @@
+# Copyright 2025 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+role:
+  description: Details about the Cloudera Manager service role.
+  type: dict
+  returned: always
+  contains:
+    commission_state:
+      description: Commission state of the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - COMMISSIONED
+        - DECOMMISSIONING
+        - DECOMMISSIONED
+        - UNKNOWN
+        - OFFLINING
+        - OFFLINED
+    config:
+      description: Role override configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    ha_status:
+      description: High-availability status for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - ACTIVE
+        - STANDBY
+        - UNKNOWN
+    health_checks:
+      description: List of all available health checks for Cloudera Manager service role.
+      type: list
+      elements: dict
+      returned: optional
+      contains:
+        explanation:
+          description: The explanation of this health check.
+          type: str
+          returned: optional
+        name:
+          description: Unique name of this health check.
+          type: str
+          returned: always
+        summary:
+          description: The high-level health status of the health check.
+          type: str
+          returned: always
+          sample:
+            - DISABLED
+            - HISTORY_NOT_AVAILABLE
+            - NOT_AVAILABLE
+            - GOOD
+            - CONCERNING
+            - BAD
+        suppressed:
+          description:
+            - Whether this health check is suppressed.
+            - A suppressed health check is not considered when computing the role's overall health.
+          type: bool
+          returned: optional
+    health_summary:
+      description: The high-level health status of the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - DISABLED
+        - HISTORY_NOT_AVAILABLE
+        - NOT_AVAILABLE
+        - GOOD
+        - CONCERNING
+        - BAD
+    host_id:
+      description: The unique ID of the cluster host.
+      type: str
+      returned: always
+    maintenance_mode:
+      description: Whether the Cloudera Manager service role is in maintenance mode.
+      type: bool
+      returned: always
+    maintenance_owners:
+      description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode.
+      type: list
+      elements: str
+      returned: optional
+      sample:
+        - CLUSTER
+        - SERVICE
+        - ROLE
+        - HOST
+        - CONTROL_PLANE
+    name:
+      description:
+        - The Cloudera Manager service role name.
+        - Note, this is an auto-generated name and cannot be changed.
+      type: str
+      returned: always
+    role_config_group_name:
+      description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation.
+      type: str
+      returned: always
+    role_state:
+      description: State of the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - HISTORY_NOT_AVAILABLE
+        - UNKNOWN
+        - STARTING
+        - STARTED
+        - STOPPING
+        - STOPPED
+        - NA
+    service_name:
+      description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment.
+      type: str
+      returned: always
+    tags:
+      description: Set of tags for the Cloudera Manager service role.
+      type: dict
+      returned: optional
+    type:
+      description: The Cloudera Manager service role type.
+      type: str
+      returned: always
+      sample:
+        - HOSTMONITOR
+        - ALERTPUBLISHER
+        - SERVICEMONITOR
+        - REPORTSMANAGER
+        - EVENTSERVER
+    zoo_keeper_server_mode:
+      description:
+        - The Zookeeper server mode for this Cloudera Manager service role.
+        - Note that for non-Zookeeper Server roles, this will be V(null).
+      type: str
+      returned: optional

From 6634e82134aaef2f6b7aa532935acfd854b37c1a Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:46:22 -0500
Subject: [PATCH 56/58] Add 'role_state' to pytest markers

Signed-off-by: Webster Mudge

---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index bae03de2..f06438c1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -61,6 +61,7 @@ markers = [
     "role_config_group_config: Prepare role config group configurations for tests",
     "role_config_group: Prepare a role config group for tests.",
     "role: Prepare a role for tests.",
+    "role_state: Prepare a role state for tests.",
 ]

 [build-system]

From 67493b35eed7c737d293fbc3b787a7ed1b30b2c4 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:46:56 -0500
Subject: [PATCH 57/58] Update pytest fixture usage

Signed-off-by: Webster Mudge

---
 .../test_cm_service_role_config_group_config.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
index 14218d3c..b2898baf 100644
--- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
@@ -60,11 +60,11 @@ def test_missing_required_if(conn, module_args):
         cm_service_role_config_group_config.main()


-def test_present_invalid_parameter(conn, module_args, host_monitor_role):
+def test_present_invalid_parameter(conn, module_args, host_monitor):
     module_args(
         {
             **conn,
-            "name": host_monitor_role.role_config_group_ref.role_config_group_name,
+            "name": host_monitor.role_config_group_ref.role_config_group_name,
             "parameters": dict(example="Example"),
         }
     )
@@ -87,11 +87,11 @@ def test_present_invalid_parameter(conn, module_args, host_monitor_role):
         )
     )
 )
-def test_set_parameters(conn, module_args, host_monitor_config, request):
+def test_set_parameters(conn, module_args, host_monitor_role_group_config, request):
     module_args(
         {
             **conn,
-            "name": host_monitor_config.name,
+            "name": host_monitor_role_group_config.name,
             "parameters": dict(mgmt_num_descriptor_fetch_tries=32),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,

From d3f5d57abb1d3d57d831f3c8e383731d1496acf6 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 13 Jan 2025 14:47:19 -0500
Subject: [PATCH 58/58] Add test for cm_service_role_config_group_info

Signed-off-by: Webster Mudge

---
 .../test_cm_service_role_config_group_info.py | 83 +++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py

diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
new file mode 100644
index 00000000..c5d1605b
--- /dev/null
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import logging
+import pytest
+
+
+from ansible_collections.cloudera.cluster.plugins.modules import (
+    cm_service_role_config_group_info,
+)
+from ansible_collections.cloudera.cluster.tests.unit import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+def test_read_role_config_groups(conn, module_args, cms_auto):
+    module_args({**conn})
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert e.value.changed == False
+    assert (
+        len(e.value.role_config_groups) == 9
+    )  # Gets all the base RCGs for all potential CM service roles
+
+
+def test_read_role_config_group(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "HOSTMONITOR",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert e.value.changed == False
+    assert len(e.value.role_config_groups) == 1
+
+
+def test_read_role_config_group_nonexistent(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "DOESNOTEXIST",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert len(e.value.role_config_groups) == 0
+
+
+def test_read_service_nonexistent(conn, module_args):
+    module_args({**conn})
+
+    with pytest.raises(
+        AnsibleFailJson, match="Cloudera Management service does not exist"
+    ) as e:
+        cm_service_role_config_group_info.main()
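Editor's note: the reworked tests in this series rely on shared pytest fixtures (host_monitor, host_monitor_config, host_monitor_state) and the new role_state marker registered in pyproject.toml, but the fixture definitions live in the collection's conftest.py and are not included in these patches. As orientation only, a fixture consuming the role_state marker might look like the sketch below; the fixture body is an assumption for illustration, not the collection's actual conftest code. The API calls mirror those already used by the patches above (MgmtRoleCommandsResourceApi start/stop commands with an ApiRoleNameList body).

# Illustrative sketch only -- the real fixture in the collection's
# conftest.py may differ. It drives the Host Monitor role into the state
# requested by the @pytest.mark.role_state marker before the test runs.
import pytest

from cm_client import ApiRoleNameList, ApiRoleState, MgmtRoleCommandsResourceApi


@pytest.fixture()
def host_monitor_state(cm_api_client, host_monitor, request):
    marker = request.node.get_closest_marker("role_state")
    if marker:
        desired = marker.args[0]
        cmd_api = MgmtRoleCommandsResourceApi(cm_api_client)
        body = ApiRoleNameList(items=[host_monitor.name])
        # Issue the start or stop command for the role; a production-grade
        # fixture would also wait for the asynchronous command to complete.
        if desired == ApiRoleState.STARTED:
            cmd_api.start_command(body=body)
        elif desired == ApiRoleState.STOPPED:
            cmd_api.stop_command(body=body)
    yield host_monitor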