diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index cdf5a0e2e6..4162199d8b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -55,7 +55,8 @@ jobs: name: Integration test charm needs: - lint - - unit-test + # TODO: re-enable unit-tests to run integration tests + # - unit-test - build uses: ./.github/workflows/integration_test.yaml with: diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v1/postgresql.py similarity index 69% rename from lib/charms/postgresql_k8s/v0/postgresql.py rename to lib/charms/postgresql_k8s/v1/postgresql.py index 7e6a9d7631..01029a5019 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v1/postgresql.py @@ -31,17 +31,24 @@ LIBID = "24ee217a54e840a598ff21a079c3e678" # Increment this major API version when introducing breaking changes -LIBAPI = 0 +LIBAPI = 1 # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 51 +LIBPATCH = 0 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" ACCESS_GROUP_INTERNAL = "internal_access" ACCESS_GROUP_RELATION = "relation_access" +ROLE_STATS = "charmed_stats" +ROLE_READ = "charmed_read" +ROLE_DML = "charmed_dml" +ROLE_BACKUP = "charmed_backup" +ROLE_DBA = "charmed_dba" +ROLE_INSTANCE_ADMIN = "charmed_instance_admin" + # List of access groups to filter role assignments by ACCESS_GROUPS = [ ACCESS_GROUP_IDENTITY, @@ -125,6 +132,17 @@ class PostgreSQLUpdateUserPasswordError(Exception): """Exception raised when updating a user password fails.""" +class PostgreSQLCreatePredefinedRolesError(Exception): + """Exception raised when creating predefined roles.""" + +class PostgreSQLGrantDatabasePrivilegesToUserError(Exception): + """Exception raised when granting database privileges to user.""" + + +class PostgreSQLSetUpDDLRoleError(Exception): + """Exception raised when setting up DDL role for a database.""" + + class PostgreSQL: """Class to encapsulate all operations related to interacting with PostgreSQL instance.""" @@ -202,129 +220,243 @@ def create_access_groups(self) -> None: if connection is not None: connection.close() - def create_database( - self, - database: str, - user: str, - plugins: Optional[List[str]] = None, - client_relations: Optional[List[Relation]] = None, - ) -> None: + def create_database(self, database: str) -> bool: """Creates a new database and grant privileges to a user on it. Args: - database: database to be created. - user: user that will have access to the database. - plugins: extensions to enable in the new database. - client_relations: current established client relations. 
+ database: database to be created + + Returns: + boolean indicating whether a database was created """ - plugins = plugins if plugins else [] - client_relations = client_relations if client_relations else [] try: connection = self._connect_to_database() cursor = connection.cursor() + cursor.execute( SQL("SELECT datname FROM pg_database WHERE datname={};").format(Literal(database)) ) - if cursor.fetchone() is None: - cursor.execute(SQL("CREATE DATABASE {};").format(Identifier(database))) - cursor.execute( - SQL("REVOKE ALL PRIVILEGES ON DATABASE {} FROM PUBLIC;").format( - Identifier(database) - ) - ) - for user_to_grant_access in [user, PERMISSIONS_GROUP_ADMIN, *self.system_users]: + + if cursor.fetchone() is not None: + return False + + cursor.execute(SQL("CREATE DATABASE {};").format(Identifier(database))) + cursor.execute(SQL("REVOKE ALL PRIVILEGES ON DATABASE {} FROM PUBLIC;").format(Identifier(database))) + + cursor.execute(SQL("CREATE ROLE {} NOSUPERUSER NOCREATEDB NOCREATEROLE NOLOGIN NOREPLICATION;").format(Identifier(f"{database}_owner"))) + cursor.execute(SQL("ALTER DATABASE {} OWNER TO {}").format(Identifier(database), Identifier(f"{database}_owner"))) + + cursor.execute(SQL("CREATE ROLE {} NOSUPERUSER NOCREATEDB NOCREATEROLE NOLOGIN NOREPLICATION NOINHERIT IN ROLE {};").format(Identifier(f"{database}_admin"), Identifier(f"{database}_owner"))) + cursor.execute(SQL("GRANT CONNECT ON DATABASE {} TO {};").format(Identifier(database), Identifier(f"{database}_admin"))) + + for user_to_grant_access in [*self.system_users, ROLE_INSTANCE_ADMIN, ROLE_DBA]: cursor.execute( SQL("GRANT ALL PRIVILEGES ON DATABASE {} TO {};").format( Identifier(database), Identifier(user_to_grant_access) ) ) - relations_accessing_this_database = 0 - for relation in client_relations: - for data in relation.data.values(): - if data.get("database") == database: - relations_accessing_this_database += 1 - with self._connect_to_database(database=database) as conn, conn.cursor() as curs: - curs.execute( - "SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT LIKE 'pg_%' and schema_name <> 'information_schema';" - ) - schemas = [row[0] for row in curs.fetchall()] - statements = self._generate_database_privileges_statements( - relations_accessing_this_database, schemas, user - ) - for statement in statements: - curs.execute(statement) + + with self._connect_to_database(database=database) as database_connection, database_connection.cursor() as database_cursor: + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user_u(text) FROM {};").format(Identifier(f"{database}_owner"))) + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user_u(text) FROM {};").format(Identifier(f"{database}_admin"))) + + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user(text) FROM {};").format(Identifier(f"{database}_owner"))) + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user(text) FROM {};").format(Identifier(f"{database}_admin"))) + + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user(text, text) FROM {};").format(Identifier(f"{database}_owner"))) + database_cursor.execute(SQL("REVOKE EXECUTE ON FUNCTION set_user(text, text) FROM {};").format(Identifier(f"{database}_admin"))) + + database_cursor.execute(SQL("ALTER DEFAULT PRIVILEGES FOR ROLE {} GRANT SELECT ON TABLES TO {};").format(Identifier(f"{database}_owner"), Identifier(f"{database}_admin"))) + database_cursor.execute(SQL("ALTER DEFAULT PRIVILEGES FOR ROLE {} GRANT EXECUTE ON FUNCTIONS TO 
{};").format(Identifier(f"{database}_owner"), Identifier(f"{database}_admin"))) + database_cursor.execute(SQL("ALTER DEFAULT PRIVILEGES FOR ROLE {} GRANT SELECT ON SEQUENCES TO {};").format(Identifier(f"{database}_owner"), Identifier(f"{database}_admin"))) + + database_cursor.execute(SQL("""CREATE OR REPLACE FUNCTION login_hook.login() RETURNS VOID AS $$ +DECLARE + ex_state TEXT; + ex_message TEXT; + ex_detail TEXT; + ex_hint TEXT; + ex_context TEXT; + cur_user TEXT; + db_admin_role TEXT; + db_name TEXT; + db_owner_role TEXT; + is_user_admin BOOLEAN; +BEGIN + IF NOT login_hook.is_executing_login_hook() + THEN + RAISE EXCEPTION 'The login_hook.login() function should only be invoked by the login_hook code'; + END IF; + + cur_user := (SELECT current_user); + + EXECUTE 'SELECT current_database()' INTO db_name; + db_admin_role = db_name || '_admin'; + + EXECUTE format('SELECT EXISTS(SELECT * FROM pg_auth_members a, pg_roles b, pg_roles c WHERE a.roleid = b.oid AND a.member = c.oid AND b.rolname = %L and c.rolname = %L)', db_admin_role, cur_user) INTO is_user_admin; + + BEGIN + IF is_user_admin THEN + db_owner_role = db_name || '_owner'; + EXECUTE format('SET ROLE %L', db_owner_role); + END IF; + EXCEPTION + WHEN OTHERS THEN + GET STACKED DIAGNOSTICS ex_state = RETURNED_SQLSTATE + , ex_message = MESSAGE_TEXT + , ex_detail = PG_EXCEPTION_DETAIL + , ex_hint = PG_EXCEPTION_HINT + , ex_context = PG_EXCEPTION_CONTEXT; + RAISE LOG e'Error in login_hook.login()\nsqlstate: %\nmessage : %\ndetail : %\nhint : %\ncontext : %' + , ex_state + , ex_message + , ex_detail + , ex_hint + , ex_context; + END; +END +$$ LANGUAGE plpgsql; +""")) + database_cursor.execute(SQL("GRANT EXECUTE ON FUNCTION login_hook.login() TO PUBLIC;")) + + return True except psycopg2.Error as e: logger.error(f"Failed to create database: {e}") raise PostgreSQLCreateDatabaseError() from e - - # Enable preset extensions - self.enable_disable_extensions(dict.fromkeys(plugins, True), database) + finally: + cursor.close() + connection.close() def create_user( self, user: str, password: Optional[str] = None, - admin: bool = False, - extra_user_roles: Optional[List[str]] = None, + roles: Optional[List[str]] = None, + database: Optional[str] = None, ) -> None: """Creates a database user. Args: user: user to be created. password: password to be assigned to the user. - admin: whether the user should have additional admin privileges. - extra_user_roles: additional privileges and/or roles to be assigned to the user. + roles: roles to be assigned to the user. """ - try: - # Separate roles and privileges from the provided extra user roles. 
- admin_role = False - roles = privileges = None - if extra_user_roles: - admin_role = PERMISSIONS_GROUP_ADMIN in extra_user_roles - valid_privileges, valid_roles = self.list_valid_privileges_and_roles() - roles = [ - role - for role in extra_user_roles - if role in valid_roles and role != PERMISSIONS_GROUP_ADMIN - ] - privileges = { - extra_user_role - for extra_user_role in extra_user_roles - if extra_user_role not in roles and extra_user_role != PERMISSIONS_GROUP_ADMIN - } - invalid_privileges = [ - privilege for privilege in privileges if privilege not in valid_privileges - ] - if len(invalid_privileges) > 0: - logger.error(f"Invalid extra user roles: {', '.join(privileges)}") - raise PostgreSQLCreateUserError(INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE) + createdb_enabled, createrole_enabled = False, False + if "pgbouncer" in roles: + createdb_enabled, createrole_enabled = True, True + roles.remove("pgbouncer") + + if "admin" in roles: + createdb_enabled = True + roles.append(ROLE_DML) + roles.append(ROLE_INSTANCE_ADMIN) + roles.remove("admin") + + if "createdb" in roles: + createdb_enabled = True + roles.remove("createdb") + + if "createrole" in roles: + createrole_enabled = True + roles.remove("createrole") + + if "superuser" in roles: + logger.warning("superuser privileges not allowed via extra-user-roles") + roles.remove("superuser") + + try: + existing_roles = self.list_roles() + invalid_roles = [role for role in roles if role not in existing_roles] + if invalid_roles: + logger.error(f"Invalid roles: {', '.join(invalid_roles)}") + raise PostgreSQLCreateUserError(INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE) + with self._connect_to_database() as connection, connection.cursor() as cursor: - # Create or update the user. cursor.execute( SQL("SELECT TRUE FROM pg_roles WHERE rolname={};").format(Literal(user)) ) if cursor.fetchone() is not None: - user_definition = "ALTER ROLE {}" + user_query = "ALTER ROLE {} " else: - user_definition = "CREATE ROLE {}" - user_definition += f"WITH {'NOLOGIN' if user == 'admin' else 'LOGIN'}{' SUPERUSER' if admin else ''} ENCRYPTED PASSWORD '{password}'{'IN ROLE admin CREATEDB' if admin_role else ''}" - if privileges: - user_definition += f" {' '.join(privileges)}" + user_query = "CREATE ROLE {} " + user_query += f"WITH LOGIN{' CREATEDB' if createdb_enabled else ''}{' CREATEROLE' if createrole_enabled else ''} ENCRYPTED PASSWORD '{password}'{' IN ROLE ' if roles else ''}" + if roles: + user_query += f"{', '.join(roles)}" + cursor.execute(SQL("BEGIN;")) cursor.execute(SQL("SET LOCAL log_statement = 'none';")) - cursor.execute(SQL(f"{user_definition};").format(Identifier(user))) + cursor.execute(SQL(f"{user_query};").format(Identifier(user))) cursor.execute(SQL("COMMIT;")) - # Add extra user roles to the new user. 
- if roles: - for role in roles: - cursor.execute( - SQL("GRANT {} TO {};").format(Identifier(role), Identifier(user)) - ) + # TODO: revoke createdb and/or createrole attributes when no longer needed (in delete_user) + if createdb_enabled and database: + cursor.execute(SQL("ALTER ROLE {} WITH CREATEDB;").format(Identifier(f"{database}_owner"))) + + if createrole_enabled and database: + cursor.execute(SQL("ALTER ROLE {} WITH CREATEROLE;").format(Identifier(f"{database}_owner"))) except psycopg2.Error as e: logger.error(f"Failed to create user: {e}") raise PostgreSQLCreateUserError() from e + + def create_predefined_roles(self) -> None: + """Create predefined roles.""" + role_to_queries = { + ROLE_STATS: [ + f"CREATE ROLE {ROLE_STATS} NOSUPERUSER NOCREATEDB NOCREATEROLE NOREPLICATION NOLOGIN IN ROLE pg_monitor", + ], + ROLE_READ: [ + f"CREATE ROLE {ROLE_READ} NOSUPERUSER NOCREATEDB NOCREATEROLE NOREPLICATION NOLOGIN IN ROLE pg_read_all_data", + ], + ROLE_DML: [ + f"CREATE ROLE {ROLE_DML} NOSUPERUSER NOCREATEDB NOCREATEROLE NOREPLICATION NOLOGIN IN ROLE pg_write_all_data", + ], + ROLE_BACKUP: [ + f"CREATE ROLE {ROLE_BACKUP} NOSUPERUSER NOCREATEDB NOCREATEROLE NOREPLICATION NOLOGIN IN ROLE pg_checkpoint", + f"GRANT {ROLE_STATS} TO {ROLE_BACKUP}", + f"GRANT execute ON FUNCTION pg_backup_start TO {ROLE_BACKUP}", + f"GRANT execute ON FUNCTION pg_backup_stop TO {ROLE_BACKUP}", + f"GRANT execute ON FUNCTION pg_create_restore_point TO {ROLE_BACKUP}", + f"GRANT execute ON FUNCTION pg_switch_wal TO {ROLE_BACKUP}", + ], + ROLE_DBA: [ + f"CREATE ROLE {ROLE_DBA} NOSUPERUSER CREATEDB NOCREATEROLE NOLOGIN NOREPLICATION", + f"GRANT execute ON FUNCTION set_user(text) TO {ROLE_DBA}", + f"GRANT execute ON FUNCTION set_user(text, text) TO {ROLE_DBA}", + f"GRANT execute ON FUNCTION set_user_u(text) TO {ROLE_DBA}", + f"GRANT connect ON DATABASE postgres TO {ROLE_DBA}", + ], + ROLE_INSTANCE_ADMIN: [ + f"CREATE ROLE {ROLE_INSTANCE_ADMIN} NOSUPERUSER NOCREATEDB NOCREATEROLE NOLOGIN NOREPLICATION", + f"GRANT connect ON DATABASE postgres TO {ROLE_INSTANCE_ADMIN}", + ] + } + + existing_roles = self.list_roles() + + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + for role, queries in role_to_queries.items(): + if role in existing_roles: + logger.debug(f"Role {role} already exists") + continue + + logger.info(f"Creating predefined role {role}") + + for query in queries: + cursor.execute(SQL(query)) + except psycopg2.Error as e: + logger.error(f"Failed to create predefined roles: {e}") + raise PostgreSQLCreatePredefinedRolesError() from e + + def grant_database_privileges_to_user(self, user: str, database: str, privileges: list[str]) -> None: + """Grant the specified privileges on the provided database to the user.""" + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + # Privileges are SQL keywords (e.g. CONNECT), not identifiers, so compose them as raw SQL. + cursor.execute(SQL("GRANT {} ON DATABASE {} TO {};").format(SQL(", ").join([SQL(privilege) for privilege in privileges]), Identifier(database), Identifier(user))) + except psycopg2.Error as e: + logger.error(f"Failed to grant privileges to user: {e}") + raise PostgreSQLGrantDatabasePrivilegesToUserError() from e def delete_user(self, user: str) -> None: """Deletes a database user. @@ -337,23 +469,17 @@ def delete_user(self, user: str) -> None: if user not in users: return - # List all databases. 
+ roles = self.list_roles() + try: with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT datname FROM pg_database WHERE datistemplate = false;") + cursor.execute(SQL("SELECT datname FROM pg_database WHERE datistemplate = false;")) databases = [row[0] for row in cursor.fetchall()] - # Existing objects need to be reassigned in each database - # before the user can be deleted. for database in databases: - with self._connect_to_database( - database - ) as connection, connection.cursor() as cursor: - cursor.execute( - SQL("REASSIGN OWNED BY {} TO {};").format( - Identifier(user), Identifier(self.user) - ) - ) + with self._connect_to_database(database) as connection, connection.cursor() as cursor: + inheriting_role = f"{database}_owner" if f"{database}_owner" in roles else self.user + cursor.execute(SQL("REASSIGN OWNED BY {} TO {};").format(Identifier(user), Identifier(inheriting_role))) cursor.execute(SQL("DROP OWNED BY {};").format(Identifier(user))) # Delete the user. @@ -426,7 +552,8 @@ def enable_disable_extensions( else: # Retrieve all the databases. with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT datname FROM pg_database WHERE NOT datistemplate;") + # template0 is meant to be unmodifiable + cursor.execute("SELECT datname FROM pg_database WHERE datname <> 'template0';") databases = {database[0] for database in cursor.fetchall()} ordered_extensions = OrderedDict() @@ -437,6 +564,28 @@ def enable_disable_extensions( self._configure_pgaudit(False) + if "set_user" in ordered_extensions: + for database in databases: + with self._connect_to_database(database=database) as connection, connection.cursor() as cursor: + try: + cursor.execute(SQL("CREATE EXTENSION IF NOT EXISTS set_user;")) + except psycopg2.Error: + logger.exception(f"Unable to create extension set_user in {database}") + del ordered_extensions["set_user"] + + if "login_hook" in ordered_extensions: + for database in databases: + if database == "postgres": + logger.info("Skipping creating extension login_hook in database postgres") + continue + + with self._connect_to_database(database=database) as connection, connection.cursor() as cursor: + try: + cursor.execute(SQL("CREATE EXTENSION IF NOT EXISTS login_hook;")) + except psycopg2.Error: + logger.exception(f"Unable to create extension login_hook in {database}") + del ordered_extensions["login_hook"] + # Enable/disabled the extension in each database. 
for database in databases: with self._connect_to_database( @@ -459,72 +608,6 @@ def enable_disable_extensions( if connection is not None: connection.close() - def _generate_database_privileges_statements( - self, relations_accessing_this_database: int, schemas: List[str], user: str - ) -> List[Composed]: - """Generates a list of databases privileges statements.""" - statements = [] - if relations_accessing_this_database == 1: - statements.append( - SQL( - """DO $$ -DECLARE r RECORD; -BEGIN - FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '."' || tablename ||'" OWNER TO {};' AS statement -FROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema') -UNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '."' || sequence_name ||'" OWNER TO {};' AS statement -FROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema') -UNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'f' -UNION SELECT 4 AS index,'ALTER PROCEDURE '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'p' -UNION SELECT 5 AS index,'ALTER AGGREGATE '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'a' -UNION SELECT 6 AS index,'ALTER VIEW '|| schemaname || '."' || viewname ||'" OWNER TO {};' AS statement -FROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP - EXECUTE format(r.statement); - END LOOP; -END; $$;""" - ).format( - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - ) - ) - statements.append( - SQL( - "UPDATE pg_catalog.pg_largeobject_metadata\n" - "SET lomowner = (SELECT oid FROM pg_roles WHERE rolname = {})\n" - "WHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = {});" - ).format(Literal(user), Literal(self.user)) - ) - for schema in schemas: - statements.append( - SQL("ALTER SCHEMA {} OWNER TO {};").format( - Identifier(schema), Identifier(user) - ) - ) - else: - for schema in schemas: - schema = Identifier(schema) - statements.extend([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT USAGE ON SCHEMA {} TO {};").format(schema, Identifier(user)), - SQL("GRANT CREATE ON SCHEMA {} TO {};").format(schema, Identifier(user)), - ]) - return statements - def get_last_archived_wal(self) -> str: """Get the name of the last archived wal for the current PostgreSQL cluster.""" try: @@ -672,7 +755,7 @@ def list_users_from_relation(self) -> Set[str]: try: with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute( - "SELECT 
usename FROM pg_catalog.pg_user WHERE usename LIKE 'relation_id_%';" + "SELECT usename FROM pg_catalog.pg_user WHERE usename LIKE 'relation-%';" ) usernames = cursor.fetchall() return {username[0] for username in usernames} @@ -683,30 +766,21 @@ def list_users_from_relation(self) -> Set[str]: if connection is not None: connection.close() - def list_valid_privileges_and_roles(self) -> Tuple[Set[str], Set[str]]: - """Returns two sets with valid privileges and roles. + def list_roles(self) -> Set[str]: + """Returns valid roles in the database. Returns: - Tuple containing two sets: the first with valid privileges - and the second with valid roles. + A set containing the existing roles in the database. """ with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute("SELECT rolname FROM pg_roles;") - return { - "createdb", - "createrole", - "superuser", - }, {role[0] for role in cursor.fetchall() if role[0]} + return {role[0] for role in cursor.fetchall() if role[0]} def set_up_database(self) -> None: """Set up postgres database with the right permissions.""" connection = None try: with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") - if cursor.fetchone() is not None: - return - # Allow access to the postgres database only to the system users. cursor.execute("REVOKE ALL PRIVILEGES ON DATABASE postgres FROM PUBLIC;") cursor.execute("REVOKE CREATE ON SCHEMA public FROM PUBLIC;") @@ -716,11 +790,6 @@ def set_up_database(self) -> None: Identifier(user) ) ) - self.create_user( - PERMISSIONS_GROUP_ADMIN, - extra_user_roles=["pg_read_all_data", "pg_write_all_data"], - ) - cursor.execute("GRANT CONNECT ON DATABASE postgres TO admin;") except psycopg2.Error as e: logger.error(f"Failed to set up databases: {e}") raise PostgreSQLDatabasesSetupError() from e diff --git a/src/backups.py b/src/backups.py index 4c414d75e8..dd9ca2a91e 100644 --- a/src/backups.py +++ b/src/backups.py @@ -27,7 +27,6 @@ from jinja2 import Template from ops.charm import ActionEvent, HookEvent from ops.framework import Object -from ops.jujuversion import JujuVersion from ops.model import ActiveStatus, MaintenanceStatus from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed @@ -882,12 +881,11 @@ def _on_create_backup_action(self, event) -> None: # Test uploading metadata to S3 to test credentials before backup. 
datetime_backup_requested = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ") - juju_version = JujuVersion.from_environ() metadata = f"""Date Backup Requested: {datetime_backup_requested} Model Name: {self.model.name} Application Name: {self.model.app.name} Unit Name: {self.charm.unit.name} -Juju Version: {juju_version!s} +Juju Version: {self.charm.model.juju_version!s} """ if not self._upload_content_to_s3( metadata, diff --git a/src/charm.py b/src/charm.py index 3cd071e251..9eb397baec 100755 --- a/src/charm.py +++ b/src/charm.py @@ -23,21 +23,26 @@ from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_agent.v0.cos_agent import COSAgentProvider, charm_tracing_config from charms.operator_libs_linux.v2 import snap -from charms.postgresql_k8s.v0.postgresql import ( +from charms.postgresql_k8s.v0.postgresql_tls import PostgreSQLTLS +from charms.postgresql_k8s.v1.postgresql import ( ACCESS_GROUP_IDENTITY, ACCESS_GROUPS, REQUIRED_PLUGINS, + ROLE_BACKUP, + ROLE_DBA, + ROLE_STATS, PostgreSQL, + PostgreSQLCreatePredefinedRolesError, PostgreSQLCreateUserError, PostgreSQLEnableDisableExtensionError, PostgreSQLGetCurrentTimelineError, + PostgreSQLGrantDatabasePrivilegesToUserError, PostgreSQLListUsersError, PostgreSQLUpdateUserPasswordError, ) -from charms.postgresql_k8s.v0.postgresql_tls import PostgreSQLTLS from charms.rolling_ops.v0.rollingops import RollingOpsManager, RunWithLock from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm -from ops import JujuVersion, main +from ops import main from ops.charm import ( ActionEvent, HookEvent, @@ -78,6 +83,8 @@ BACKUP_USER, DATABASE_DEFAULT_NAME, DATABASE_PORT, + DBA_PASSWORD_KEY, + DBA_USER, METRICS_PORT, MONITORING_PASSWORD_KEY, MONITORING_SNAP_SERVICE, @@ -178,9 +185,7 @@ def __init__(self, *args): deleted_label=SECRET_DELETED_LABEL, ) - juju_version = JujuVersion.from_environ() - run_cmd = "/usr/bin/juju-exec" if juju_version.major > 2 else "/usr/bin/juju-run" - self._observer = ClusterTopologyObserver(self, run_cmd) + self._observer = ClusterTopologyObserver(self, "/usr/bin/juju-exec") self._rotate_logs = RotateLogs(self) self.framework.observe(self.on.cluster_topology_change, self._on_cluster_topology_change) self.framework.observe(self.on.install, self._on_install) @@ -414,7 +419,7 @@ def primary_endpoint(self) -> str | None: # TODO figure out why peer data is not available if primary_endpoint and len(self._units_ips) == 1 and len(self._peers.units) > 1: logger.warning( - "Possibly incoplete peer data: Will not map primary IP to unit IP" + "Possibly incomplete peer data: Will not map primary IP to unit IP" ) return primary_endpoint logger.debug("primary endpoint early exit: Primary IP not in cached peer list.") @@ -1126,6 +1131,7 @@ def _on_leader_elected(self, event: LeaderElectedEvent) -> None: # noqa: C901 MONITORING_PASSWORD_KEY, RAFT_PASSWORD_KEY, PATRONI_PASSWORD_KEY, + DBA_PASSWORD_KEY, ): if self.get_secret(APP_SCOPE, key) is None: if key in system_user_passwords: @@ -1225,7 +1231,8 @@ def enable_disable_extensions(self, database: str | None = None) -> None: logger.debug("Early exit enable_disable_extensions: standby cluster") return original_status = self.unit.status - extensions = {} + # Always want set_user and login_hook to be enabled + extensions = {"set_user": True, "login_hook": True} # collect extensions for plugin in self.config.plugin_keys(): enable = self.config[plugin] @@ -1428,7 +1435,7 @@ def _setup_ldap_sync(self, postgres_snap: snap.Snap | None = None) -> None: 
logger.debug("Starting LDAP sync service") postgres_snap.restart(services=["ldap-sync"]) - def _start_primary(self, event: StartEvent) -> None: + def _start_primary(self, event: StartEvent) -> None: # noqa: C901 """Bootstrap the cluster.""" # Set some information needed by Patroni to bootstrap the cluster. if not self._patroni.bootstrap_cluster(): @@ -1442,24 +1449,63 @@ def _start_primary(self, event: StartEvent) -> None: event.defer() return + if not self._can_connect_to_postgresql: + logger.debug("Deferring on_start: awaiting for database to start") + self.unit.status = WaitingStatus("awaiting for database to start") + event.defer() + return + + if not self.primary_endpoint: + logger.debug("Deferrring on_start: awaitng start of the primary") + self.unit.status = WaitingStatus("awaiting start of the primary") + event.defer() + return + + try: + # Needed to create predefined roles with set_user execution privileges + self.postgresql.enable_disable_extensions({"set_user": True}, database="postgres") + except Exception as e: + logger.exception(e) + self.unit.status = BlockedStatus("Failed to enable set-user extension") + return + + try: + self.postgresql.create_predefined_roles() + except PostgreSQLCreatePredefinedRolesError as e: + logger.exception(e) + self.unit.status = BlockedStatus("Failed to create pre-defined roles") + return + # Create the default postgres database user that is needed for some # applications (not charms) like Landscape Server. try: # This event can be run on a replica if the machines are restarted. # For that case, check whether the postgres user already exits. users = self.postgresql.list_users() - if "postgres" not in users: - self.postgresql.create_user("postgres", new_password(), admin=True) - # Create the backup user. + # Create the dba user. + if DBA_USER not in users: + self.postgresql.create_user( + DBA_USER, + self.get_secret(APP_SCOPE, DBA_PASSWORD_KEY), + roles=[ROLE_DBA], + ) + # TODO: move predefined roles into constants if BACKUP_USER not in users: - self.postgresql.create_user(BACKUP_USER, new_password(), admin=True) + self.postgresql.create_user(BACKUP_USER, new_password(), roles=[ROLE_BACKUP]) + self.postgresql.grant_database_privileges_to_user( + BACKUP_USER, "postgres", ["connect"] + ) if MONITORING_USER not in users: # Create the monitoring user. 
self.postgresql.create_user( MONITORING_USER, self.get_secret(APP_SCOPE, MONITORING_PASSWORD_KEY), - extra_user_roles=["pg_monitor"], + roles=[ROLE_STATS], ) + except PostgreSQLGrantDatabasePrivilegesToUserError as e: + logger.exception(e) + self.unit.status = BlockedStatus("Failed to grant database privileges to user") + return except PostgreSQLCreateUserError as e: logger.exception(e) self.unit.status = BlockedStatus("Failed to create postgres user") @@ -1540,13 +1586,15 @@ def _update_admin_password(self, admin_secret_id: str) -> None: return try: + updateable_users = [*SYSTEM_USERS, BACKUP_USER, DBA_USER] + # get the secret content and check each user configured there # only SYSTEM_USERS with changed passwords are processed, all others ignored updated_passwords = self.get_secret_from_id(secret_id=admin_secret_id) for user, password in list(updated_passwords.items()): - if user not in SYSTEM_USERS: + if user not in updateable_users: logger.error( - f"Can only update system users: {', '.join(SYSTEM_USERS)} not {user}" + f"Can only update users: {', '.join(updateable_users)} not {user}" ) updated_passwords.pop(user) continue @@ -2253,20 +2301,6 @@ def log_pitr_last_transaction_time(self) -> None: else: logger.error("Can't tell last completed transaction time") - def get_plugins(self) -> list[str]: - """Return a list of installed plugins.""" - plugins = [ - "_".join(plugin.split("_")[1:-1]) - for plugin in self.config.plugin_keys() - if self.config[plugin] - ] - plugins = [PLUGIN_OVERRIDES.get(plugin, plugin) for plugin in plugins] - if "spi" in plugins: - plugins.remove("spi") - for ext in SPI_MODULE: - plugins.append(ext) - return plugins - def get_ldap_parameters(self) -> dict: """Returns the LDAP configuration to use.""" if not self.is_cluster_initialised: diff --git a/src/cluster.py b/src/cluster.py index d2a2dad49c..83c9a44098 100644 --- a/src/cluster.py +++ b/src/cluster.py @@ -17,6 +17,7 @@ import psutil import requests from charms.operator_libs_linux.v2 import snap +from charms.postgresql_k8s.v1.postgresql import ROLE_DBA from jinja2 import Template from ops import BlockedStatus from pysyncobj.utility import TcpUtility, UtilityException @@ -696,6 +697,7 @@ def render_patroni_yml_file( raft_password=self.raft_password, ldap_parameters=self._dict_to_hba_string(ldap_params), patroni_password=self.patroni_password, + dba_role=ROLE_DBA, ) self.render_file(f"{PATRONI_CONF_PATH}/patroni.yaml", rendered, 0o600) diff --git a/src/constants.py b/src/constants.py index 6b957ccb84..aecef2f35c 100644 --- a/src/constants.py +++ b/src/constants.py @@ -15,6 +15,7 @@ BACKUP_USER = "backup" REPLICATION_USER = "replication" REWIND_USER = "rewind" +DBA_USER = "dba" TLS_KEY_FILE = "key.pem" TLS_CA_FILE = "ca.pem" TLS_CERT_FILE = "cert.pem" @@ -24,7 +25,7 @@ PATRONI_SERVICE_NAME = "snap.charmed-postgresql.patroni.service" PATRONI_SERVICE_DEFAULT_PATH = f"/etc/systemd/system/{PATRONI_SERVICE_NAME}" # List of system usernames needed for correct work of the charm/workload. -SYSTEM_USERS = [BACKUP_USER, REPLICATION_USER, REWIND_USER, USER, MONITORING_USER] +SYSTEM_USERS = [REPLICATION_USER, REWIND_USER, USER, MONITORING_USER] # Snap constants. 
PGBACKREST_EXECUTABLE = "charmed-postgresql.pgbackrest" @@ -32,7 +33,7 @@ SNAP_PACKAGES = [ ( POSTGRESQL_SNAP_NAME, - {"revision": {"aarch64": "169", "x86_64": "170"}}, + {"revision": {"aarch64": "181", "x86_64": "182"}}, ) ] @@ -66,6 +67,7 @@ MONITORING_PASSWORD_KEY = "monitoring-password" # noqa: S105 RAFT_PASSWORD_KEY = "raft-password" # noqa: S105 PATRONI_PASSWORD_KEY = "patroni-password" # noqa: S105 +DBA_PASSWORD_KEY = "dba-password" # noqa: S105 SECRET_INTERNAL_LABEL = "internal-secret" # noqa: S105 SECRET_DELETED_LABEL = "None" # noqa: S105 SYSTEM_USERS_PASSWORD_CONFIG = "system-users" # noqa: S105 @@ -82,7 +84,7 @@ TRACING_PROTOCOL = "otlp_http" BACKUP_TYPE_OVERRIDES = {"full": "full", "differential": "diff", "incremental": "incr"} -PLUGIN_OVERRIDES = {"audit": "pgaudit", "uuid_ossp": '"uuid-ossp"'} +PLUGIN_OVERRIDES = {"audit": "pgaudit", "uuid_ossp": '"uuid-ossp"', "set_user": '"set_user"'} SPI_MODULE = ["refint", "autoinc", "insert_username", "moddatetime"] diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index ef0dec90dc..f8345c203d 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -9,7 +9,7 @@ DatabaseProvides, DatabaseRequestedEvent, ) -from charms.postgresql_k8s.v0.postgresql import ( +from charms.postgresql_k8s.v1.postgresql import ( ACCESS_GROUP_RELATION, ACCESS_GROUPS, INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE, @@ -18,6 +18,7 @@ PostgreSQLDeleteUserError, PostgreSQLGetPostgreSQLVersionError, PostgreSQLListUsersError, + PostgreSQLSetUpDDLRoleError, ) from ops.charm import CharmBase, RelationBrokenEvent, RelationChangedEvent from ops.framework import Object @@ -102,17 +103,24 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: extra_user_roles = self._sanitize_extra_roles(event.extra_user_roles) extra_user_roles.append(ACCESS_GROUP_RELATION) + if self.check_for_invalid_extra_user_roles(event.relation.id): + self.charm.unit.status = BlockedStatus(INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE) + return + try: # Creates the user and the database for this specific relation. user = f"relation-{event.relation.id}" password = new_password() - self.charm.postgresql.create_user(user, password, extra_user_roles=extra_user_roles) - plugins = self.charm.get_plugins() - self.charm.postgresql.create_database( - database, user, plugins=plugins, client_relations=self.charm.client_relations + database_created = self.charm.postgresql.create_database(database) + + self.charm.postgresql.create_user( + user, password, roles=[*extra_user_roles, f"{database}_admin"], database=database ) + if database_created: + self.charm.update_config() + # Share the credentials with the application. 
self.database_provides.set_credentials(event.relation.id, user, password) @@ -132,12 +140,13 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: PostgreSQLCreateDatabaseError, PostgreSQLCreateUserError, PostgreSQLGetPostgreSQLVersionError, + PostgreSQLSetUpDDLRoleError, ) as e: logger.exception(e) self.charm.unit.status = BlockedStatus( e.message if issubclass(type(e), PostgreSQLCreateUserError) and e.message is not None - else f"Failed to initialize {self.relation_name} relation" + else f"Failed to initialize relation {self.relation_name}" ) def _on_relation_broken(self, event: RelationBrokenEvent) -> None: @@ -273,6 +282,12 @@ def _update_unit_status(self, relation: Relation) -> None: ) and not self.check_for_invalid_extra_user_roles(relation.id): self.charm.unit.status = ActiveStatus() + if ( + self.charm.is_blocked + and "Failed to initialize relation" in self.charm.unit.status.message + ): + self.charm.unit.status = ActiveStatus() + self._update_unit_status_on_blocking_endpoint_simultaneously() def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: @@ -300,7 +315,13 @@ def check_for_invalid_extra_user_roles(self, relation_id: int) -> bool: Args: relation_id: current relation to be skipped. """ - valid_privileges, valid_roles = self.charm.postgresql.list_valid_privileges_and_roles() + valid_roles = [ + *self.charm.postgresql.list_roles(), + "pgbouncer", + "admin", + "createdb", + "createrole", + ] for relation in self.charm.model.relations.get(self.relation_name, []): if relation.id == relation_id: continue @@ -308,9 +329,6 @@ def check_for_invalid_extra_user_roles(self, relation_id: int) -> bool: extra_user_roles = data.get("extra-user-roles") extra_user_roles = self._sanitize_extra_roles(extra_user_roles) for extra_user_role in extra_user_roles: - if ( - extra_user_role not in valid_privileges - and extra_user_role not in valid_roles - ): + if extra_user_role not in valid_roles: return True return False diff --git a/src/upgrade.py b/src/upgrade.py index a8ba18a8d1..a7ade00175 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -12,7 +12,7 @@ DependencyModel, UpgradeGrantedEvent, ) -from charms.postgresql_k8s.v0.postgresql import ACCESS_GROUPS +from charms.postgresql_k8s.v1.postgresql import ACCESS_GROUPS, ROLE_STATS from ops.model import MaintenanceStatus, RelationDataContent, WaitingStatus from pydantic import BaseModel from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed @@ -152,8 +152,8 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: self.charm.unit.status = MaintenanceStatus("refreshing the snap") self.charm._install_snap_packages(packages=SNAP_PACKAGES, refresh=True) - if not self.charm._patroni.start_patroni(): - logger.error("failed to start the database") + if not self.charm._patroni.restart_patroni(): + logger.error("failed to restart the database") self.set_unit_failed() return @@ -185,17 +185,18 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: f" Retry {attempt.retry_state.attempt_number}/6" ) raise Exception() - - self.set_unit_completed() - - # Ensures leader gets its own relation-changed when it upgrades - if self.charm.unit.is_leader(): - self.on_upgrade_changed(event) except RetryError: logger.debug( "Defer on_upgrade_granted: member not ready or not joined the cluster yet" ) event.defer() + return + + self.set_unit_completed() + + # Ensures leader gets its own relation-changed when it upgrades + if self.charm.unit.is_leader(): + self.on_upgrade_changed(event) 
@override def pre_upgrade_check(self) -> None: @@ -245,11 +246,25 @@ def _prepare_upgrade_from_legacy(self) -> None: self.charm.postgresql.create_user( MONITORING_USER, self.charm.get_secret(APP_SCOPE, MONITORING_PASSWORD_KEY), - extra_user_roles="pg_monitor", + roles=[ROLE_STATS], ) self.charm.postgresql.set_up_database() self._set_up_new_access_roles_for_legacy() + try: + # Needed to create predefined roles with set_user execution privileges + self.charm.postgresql.enable_disable_extensions( + {"set_user": True}, database="postgres" + ) + except Exception as e: + logger.exception(e) + return + + try: + self.charm.postgresql.create_predefined_roles() + except Exception as e: + logger.exception(e) + def _set_up_new_access_roles_for_legacy(self) -> None: """Create missing access groups and their memberships.""" access_groups = self.charm.postgresql.list_access_groups() diff --git a/templates/patroni.yml.j2 b/templates/patroni.yml.j2 index 11dd0dbd48..6407133969 100644 --- a/templates/patroni.yml.j2 +++ b/templates/patroni.yml.j2 @@ -102,7 +102,6 @@ bootstrap: log_truncate_on_rotation: 'on' logging_collector: 'on' wal_level: logical - shared_preload_libraries: 'timescaledb,pgaudit' {%- if restoring_backup %} method: pgbackrest @@ -135,7 +134,8 @@ postgresql: bin_dir: /snap/charmed-postgresql/current/usr/lib/postgresql/{{ version }}/bin data_dir: {{ data_path }} parameters: - shared_preload_libraries: 'timescaledb,pgaudit' + shared_preload_libraries: 'timescaledb,pgaudit,set_user' + session_preload_libraries: 'login_hook' {%- if enable_pgbackrest_archiving %} archive_command: 'pgbackrest {{ pgbackrest_configuration_file }} --stanza={{ stanza }} archive-push %p' {% else %} @@ -153,6 +153,9 @@ postgresql: {{key}}: {{value}} {%- endfor -%} {% endif %} + set_user.block_log_statement: on + set_user.exit_on_error: on + set_user.superuser_allowlist: '{{ dba_role }}' pgpass: /tmp/pgpass pg_hba: - local all backup peer map=operator diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index 0c56054775..c3de49059c 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -1068,11 +1068,24 @@ async def get_any_deatached_storage(ops_test: OpsTest) -> str: async def check_password_auth(ops_test: OpsTest, unit_name: str) -> bool: """Checks if "operator" password is valid for current postgresql db.""" - stdout = await run_command_on_unit( - ops_test, + complete_command = [ + "exec", + "--unit", unit_name, - """grep -E 'password authentication failed for user' /var/snap/charmed-postgresql/common/var/log/postgresql/postgresql*""", - ) + "--", + "grep", + "-E", + "'password'", + "/var/snap/charmed-postgresql/common/var/log/postgresql/postgresql*", + ] + return_code, stdout, _ = await ops_test.juju(*complete_command) + if return_code != 0: + raise Exception( + "Expected command %s to succeed. 
Instead it failed: %s with code: ", + complete_command, + stdout, + return_code, + ) return 'password authentication failed for user "operator"' not in stdout diff --git a/tests/integration/ha_tests/test_smoke.py b/tests/integration/ha_tests/test_smoke.py index b160d38f2b..2e8b9518dd 100644 --- a/tests/integration/ha_tests/test_smoke.py +++ b/tests/integration/ha_tests/test_smoke.py @@ -11,8 +11,8 @@ from tenacity import Retrying, stop_after_delay, wait_fixed from ..helpers import ( - APPLICATION_NAME, CHARM_BASE, + DATABASE_APP_NAME, ) from .helpers import ( add_unit_with_storage, @@ -26,7 +26,7 @@ ) TEST_DATABASE_NAME = "test_database" -DUP_APPLICATION_NAME = "postgres-test-dup" +DUP_DATABASE_APP_NAME = "postgres-test-dup" logger = logging.getLogger(__name__) @@ -39,7 +39,7 @@ async def test_app_force_removal(ops_test: OpsTest, charm: str): logger.info("deploying charm") await ops_test.model.deploy( charm, - application_name=APPLICATION_NAME, + application_name=DATABASE_APP_NAME, num_units=1, base=CHARM_BASE, storage={"pgdata": {"pool": "lxd-btrfs", "size": 8046}}, @@ -47,10 +47,10 @@ async def test_app_force_removal(ops_test: OpsTest, charm: str): ) logger.info("waiting for idle") - await ops_test.model.wait_for_idle(apps=[APPLICATION_NAME], status="active", timeout=1500) - assert ops_test.model.applications[APPLICATION_NAME].units[0].workload_status == "active" + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=1500) + assert ops_test.model.applications[DATABASE_APP_NAME].units[0].workload_status == "active" - primary_name = ops_test.model.applications[APPLICATION_NAME].units[0].name + primary_name = ops_test.model.applications[DATABASE_APP_NAME].units[0].name logger.info("waiting for postgresql") for attempt in Retrying(stop=stop_after_delay(15 * 3), wait=wait_fixed(3), reraise=True): @@ -61,18 +61,18 @@ async def test_app_force_removal(ops_test: OpsTest, charm: str): storage_id_str = storage_id(ops_test, primary_name) # Check if storage exists after application deployed - logger.info("werifing is storage exists") + logger.info("verifying if storage exists") for attempt in Retrying(stop=stop_after_delay(15 * 3), wait=wait_fixed(3), reraise=True): with attempt: assert await is_storage_exists(ops_test, storage_id_str) # Create test database to check there is no resources conflicts logger.info("creating db") - await create_db(ops_test, APPLICATION_NAME, TEST_DATABASE_NAME) + await create_db(ops_test, DATABASE_APP_NAME, TEST_DATABASE_NAME) # Check that test database is not exists for new unit logger.info("checking db") - assert await check_db(ops_test, APPLICATION_NAME, TEST_DATABASE_NAME) + assert await check_db(ops_test, DATABASE_APP_NAME, TEST_DATABASE_NAME) # Destroy charm logger.info("force removing charm") @@ -98,9 +98,9 @@ async def test_charm_garbage_ignorance(ops_test: OpsTest, charm: str): garbage_storage = await get_any_deatached_storage(ops_test) logger.info("add unit with attached storage") - await add_unit_with_storage(ops_test, APPLICATION_NAME, garbage_storage) + await add_unit_with_storage(ops_test, DATABASE_APP_NAME, garbage_storage) - primary_name = ops_test.model.applications[APPLICATION_NAME].units[0].name + primary_name = ops_test.model.applications[DATABASE_APP_NAME].units[0].name logger.info("waiting for postgresql") for attempt in Retrying(stop=stop_after_delay(15 * 3), wait=wait_fixed(3), reraise=True): @@ -120,7 +120,7 @@ async def test_charm_garbage_ignorance(ops_test: OpsTest, charm: str): # Check that test database exists 
for new unit logger.info("checking db") - assert await check_db(ops_test, APPLICATION_NAME, TEST_DATABASE_NAME) + assert await check_db(ops_test, DATABASE_APP_NAME, TEST_DATABASE_NAME) logger.info("removing charm") await ops_test.model.destroy_unit(primary_name) @@ -139,7 +139,7 @@ async def test_app_resources_conflicts_v3(ops_test: OpsTest, charm: str): logger.info("deploying duplicate application with attached storage") await ops_test.model.deploy( charm, - application_name=DUP_APPLICATION_NAME, + application_name=DUP_DATABASE_APP_NAME, num_units=1, base=CHARM_BASE, attach_storage=[tag.storage(garbage_storage)], @@ -149,16 +149,18 @@ async def test_app_resources_conflicts_v3(ops_test: OpsTest, charm: str): # Reducing the update status frequency to speed up the triggering of deferred events. await ops_test.model.set_config({"update-status-hook-interval": "10s"}) - logger.info("waiting for duplicate application to be blocked") + logger.info("waiting for duplicate application to be waiting") try: await ops_test.model.wait_for_idle( - apps=[DUP_APPLICATION_NAME], timeout=1000, status="blocked" + apps=[DUP_DATABASE_APP_NAME], timeout=60, idle_period=30, status="waiting" ) except asyncio.TimeoutError: - logger.info("Application is not in blocked state. Checking logs...") + logger.info("Application is not in waiting state. Checking logs...") - # Since application have postgresql db in storage from external application it should not be able to connect due to new password - logger.info("checking operator password auth") - assert not await check_password_auth( - ops_test, ops_test.model.applications[DUP_APPLICATION_NAME].units[0].name - ) + for attempt in Retrying(stop=stop_after_delay(60 * 10), wait=wait_fixed(3), reraise=True): + with attempt: + # Since application have postgresql db in storage from external application it should not be able to connect due to new password + logger.info("checking operator password auth") + assert not await check_password_auth( + ops_test, ops_test.model.applications[DUP_DATABASE_APP_NAME].units[0].name + ) diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py index e639b8ff60..407e87dd8a 100644 --- a/tests/integration/ha_tests/test_upgrade.py +++ b/tests/integration/ha_tests/test_upgrade.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) -TIMEOUT = 600 +TIMEOUT = 30 * 60 @pytest.mark.abort_on_fail diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py index 679c9a639f..be54b8fa60 100644 --- a/tests/integration/ha_tests/test_upgrade_from_stable.py +++ b/tests/integration/ha_tests/test_upgrade_from_stable.py @@ -20,7 +20,7 @@ logger = logging.getLogger(__name__) -TIMEOUT = 900 +TIMEOUT = 25 * 60 @pytest.mark.abort_on_fail diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index e3fead648b..d123edf474 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -276,18 +276,22 @@ def count_switchovers(ops_test: OpsTest, unit_name: str) -> int: return len(switchover_history_info.json()) -def db_connect(host: str, password: str) -> psycopg2.extensions.connection: +def db_connect( + host: str, password: str, database: str = "postgres", user: str = "operator" +) -> psycopg2.extensions.connection: """Returns psycopg2 connection object linked to postgres db in the given host. 
Args: host: the IP of the postgres host password: operator user password + database: the database to connect to + user: the user to connect with Returns: psycopg2 connection object linked to postgres db, under "operator" user. """ return psycopg2.connect( - f"dbname='postgres' user='operator' host='{host}' password='{password}' connect_timeout=10" + f"dbname='{database}' user='{user}' host='{host}' password='{password}' connect_timeout=10" ) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index f9914f5284..e226b67dcd 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -462,7 +462,9 @@ async def test_relation_data_is_updated_correctly_when_scaling(ops_test: OpsTest f"{DATABASE_APP_NAME}:database", f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION_NAME}", ) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=1000) + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], status="active", timeout=1000, idle_period=30 + ) with pytest.raises(psycopg2.OperationalError): psycopg2.connect(primary_connection_string) diff --git a/tests/integration/test_predefined_roles.py b/tests/integration/test_predefined_roles.py new file mode 100644 index 0000000000..87abf072da --- /dev/null +++ b/tests/integration/test_predefined_roles.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python3 +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +import asyncio +import logging + +import psycopg2 +import pytest +from pytest_operator.plugin import OpsTest + +from .helpers import ( + CHARM_BASE, + DATABASE_APP_NAME, +) +from .new_relations.helpers import build_connection_string + +logger = logging.getLogger(__name__) + +TIMEOUT = 15 * 60 +DATA_INTEGRATOR_APP_NAME = "data-integrator" + +# NOTE: We are unable to test set_user_u('operator') as dba user because psycopg2 +# runs every query in a transaction and running set_user() is not supported in transactions. 
+ + +@pytest.mark.abort_on_fail +@pytest.mark.skip_if_deployed +async def test_deploy(ops_test: OpsTest, charm: str): + """Deploy the postgresql charm.""" + async with ops_test.fast_forward("10s"): + await asyncio.gather( + ops_test.model.deploy( + charm, + application_name=DATABASE_APP_NAME, + num_units=2, + base=CHARM_BASE, + config={"profile": "testing"}, + ), + ops_test.model.deploy( + DATA_INTEGRATOR_APP_NAME, + base=CHARM_BASE, + ), + ) + + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT + ) + assert ops_test.model.applications[DATABASE_APP_NAME].units[0].workload_status == "active" + + await ops_test.model.wait_for_idle( + apps=[DATA_INTEGRATOR_APP_NAME], status="blocked", timeout=(5 * 60) + ) + + +@pytest.mark.abort_on_fail +async def test_charmed_read_role(ops_test: OpsTest): + """Test the charmed_read predefined role.""" + await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ + "database-name": "charmed_read_database", + "extra-user-roles": "charmed_read", + }) + await ops_test.model.add_relation(DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME) + await ops_test.model.wait_for_idle( + apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active" + ) + + connection_string = await build_connection_string( + ops_test, + DATA_INTEGRATOR_APP_NAME, + "postgresql", + database="charmed_read_database", + ) + + with psycopg2.connect(connection_string) as connection: + connection.autocommit = True + + with connection.cursor() as cursor: + cursor.execute("CREATE TABLE test_table (id SERIAL PRIMARY KEY, data TEXT);") + cursor.execute("INSERT INTO test_table (data) VALUES ('test_data'), ('test_data_2');") + + # reset role from charmed_read_database_owner to relation user first + cursor.execute("RESET ROLE;") + + cursor.execute( + "SELECT table_name FROM information_schema.tables WHERE table_name NOT LIKE 'pg_%' AND table_name NOT LIKE 'sql_%' AND table_type <> 'VIEW';" + ) + tables = [row[0] for row in cursor.fetchall()] + assert tables == ["test_table"], "Unexpected tables in the database" + + cursor.execute("SELECT data FROM test_table;") + data = sorted([row[0] for row in cursor.fetchall()]) + assert data == sorted(["test_data", "test_data_2"]), ( + "Unexpected data in charmed_read_database with charmed_read role" + ) + + with psycopg2.connect(connection_string) as connection: + connection.autocommit = True + + with connection.cursor() as cursor: + try: + # reset role from charmed_read_database_owner to relation user first + cursor.execute("RESET ROLE;") + + cursor.execute("CREATE TABLE test_table_2 (id SERIAL PRIMARY KEY, data TEXT);") + assert False, "Able to write to charmed_read_database with charmed_read role" + except psycopg2.errors.InsufficientPrivilege: + pass + + await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( + f"{DATABASE_APP_NAME}:database", f"{DATA_INTEGRATOR_APP_NAME}:postgresql" + ) + await ops_test.model.wait_for_idle(apps=[DATA_INTEGRATOR_APP_NAME], status="blocked") + + +@pytest.mark.abort_on_fail +async def test_charmed_dml_role(ops_test: OpsTest): + """Test the charmed_dml role.""" + await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ + "database-name": "charmed_dml_database", + "extra-user-roles": "charmed_dml", + }) + await ops_test.model.add_relation(DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME) + await ops_test.model.wait_for_idle( + apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active" + ) + + connection_string = await 
build_connection_string( + ops_test, + DATA_INTEGRATOR_APP_NAME, + "postgresql", + database="charmed_dml_database", + ) + + with psycopg2.connect(connection_string) as connection: + connection.autocommit = True + + with connection.cursor() as cursor: + cursor.execute("CREATE TABLE test_table (id SERIAL PRIMARY KEY, data TEXT);") + + # reset role from charmed_dml_database_owner to relation user first + cursor.execute("RESET ROLE;") + + cursor.execute("INSERT INTO test_table (data) VALUES ('test_data'), ('test_data_2');") + + cursor.execute("SELECT data FROM test_table;") + data = sorted([row[0] for row in cursor.fetchall()]) + assert data == sorted(["test_data", "test_data_2"]), ( + "Unexpected data in charmed_dml_database with charmed_dml role" + ) + + await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( + f"{DATABASE_APP_NAME}:database", f"{DATA_INTEGRATOR_APP_NAME}:postgresql" + ) + await ops_test.model.wait_for_idle(apps=[DATA_INTEGRATOR_APP_NAME], status="blocked") + + +@pytest.mark.abort_on_fail +async def test_can_read_data_on_replica(ops_test: OpsTest): + """Test to ensure a relation user can read from a replica. + + login_hook does not activate on a replica, and thus db_admin has read privileges + to the tables in the database. This test ensure this privilege exists + """ + await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ + "database-name": "read_on_replica", + }) + await ops_test.model.add_relation(DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME) + await ops_test.model.wait_for_idle( + apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active" + ) + + connection_string = await build_connection_string( + ops_test, + DATA_INTEGRATOR_APP_NAME, + "postgresql", + database="read_on_replica", + ) + + with psycopg2.connect(connection_string) as connection: + connection.autocommit = True + + with connection.cursor() as cursor: + cursor.execute("CREATE TABLE test_table (id SERIAL PRIMARY KEY, data TEXT);") + cursor.execute("INSERT INTO test_table (data) VALUES ('test_data'), ('test_data_2');") + + cursor.execute("SELECT data FROM test_table;") + data = sorted([row[0] for row in cursor.fetchall()]) + assert data == sorted(["test_data", "test_data_2"]), ( + "Unexpected data in read_on_replica database" + ) + + replica_connection_string = await build_connection_string( + ops_test, + DATA_INTEGRATOR_APP_NAME, + "postgresql", + database="read_on_replica", + read_only_endpoint=True, + ) + + with psycopg2.connect(replica_connection_string) as connection: + connection.autocommit = True + + with connection.cursor() as cursor: + cursor.execute("SELECT data FROM test_table;") + data = sorted([row[0] for row in cursor.fetchall()]) + assert data == sorted(["test_data", "test_data_2"]), ( + "Unexpected data in read_on_replica database" + ) + + try: + cursor.execute("CREATE TABLE test_table_2 (id SERIAL PRIMARY KEY, data TEXT);") + assert False, "Able to create a table on a replica" + except psycopg2.errors.ReadOnlySqlTransaction: + pass + + await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( + f"{DATABASE_APP_NAME}:database", f"{DATA_INTEGRATOR_APP_NAME}:postgresql" + ) + await ops_test.model.wait_for_idle(apps=[DATA_INTEGRATOR_APP_NAME], status="blocked") diff --git a/tests/spread/test_predefined_roles.py/task.yaml b/tests/spread/test_predefined_roles.py/task.yaml new file mode 100644 index 0000000000..bbafb2a6bf --- /dev/null +++ b/tests/spread/test_predefined_roles.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_predefined_roles.py 
+environment:
+  TEST_MODULE: test_predefined_roles.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 479919633d..f1e306822e 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -8,10 +8,10 @@
 
 
 # This causes every test defined in this file to run 2 times, each with
-# charm.JujuVersion.has_secrets set as True or as False
+# ops.JujuVersion.has_secrets set as True or as False
 @pytest.fixture(autouse=True)
 def _has_secrets(request, monkeypatch):
-    monkeypatch.setattr("charm.JujuVersion.has_secrets", PropertyMock(return_value=True))
+    monkeypatch.setattr("ops.JujuVersion.has_secrets", PropertyMock(return_value=True))
 
 
 @pytest.fixture(autouse=True)
diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py
index 982302941d..a945c87127 100644
--- a/tests/unit/test_backups.py
+++ b/tests/unit/test_backups.py
@@ -1164,7 +1164,6 @@ def test_on_create_backup_action(harness):
         ) as _is_primary,
         patch("charm.PostgreSQLBackups._upload_content_to_s3") as _upload_content_to_s3,
         patch("backups.datetime") as _datetime,
-        patch("ops.JujuVersion.from_environ") as _from_environ,
         patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters,
         patch("charm.PostgreSQLBackups._can_unit_perform_backup") as _can_unit_perform_backup,
     ):
@@ -1199,13 +1198,12 @@ def test_on_create_backup_action(harness):
             [],
         )
         _datetime.now.return_value.strftime.return_value = "2023-01-01T09:00:00Z"
-        _from_environ.return_value = "test-juju-version"
         _upload_content_to_s3.return_value = False
         expected_metadata = f"""Date Backup Requested: 2023-01-01T09:00:00Z
 Model Name: {harness.charm.model.name}
 Application Name: {harness.charm.model.app.name}
 Unit Name: {harness.charm.unit.name}
-Juju Version: test-juju-version
+Juju Version: 0.0.0
 """
         harness.charm.backup._on_create_backup_action(mock_event)
         _upload_content_to_s3.assert_called_once_with(
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index c426ad1d41..3d8af12ebc 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -10,7 +10,7 @@
 import psycopg2
 import pytest
 from charms.operator_libs_linux.v2 import snap
-from charms.postgresql_k8s.v0.postgresql import (
+from charms.postgresql_k8s.v1.postgresql import (
     PostgreSQLCreateUserError,
     PostgreSQLEnableDisableExtensionError,
 )
@@ -601,6 +601,16 @@ def test_on_start(harness):
             "charm.PostgresqlOperatorCharm._is_storage_attached",
             side_effect=[False, True, True, True, True, True],
         ) as _is_storage_attached,
+        patch(
+            "charm.PostgresqlOperatorCharm._can_connect_to_postgresql",
+            new_callable=PropertyMock,
+            return_value=True,
+        ),
+        patch(
+            "charm.PostgresqlOperatorCharm.primary_endpoint",
+            new_callable=PropertyMock,
+            return_value=True,
+        ),
     ):
         _get_postgresql_version.return_value = "16.6"
@@ -646,6 +656,9 @@ def test_on_start(harness):
         _restart_services_after_reboot.reset_mock()
         _member_started.return_value = True
         harness.charm.on.start.emit()
+        _postgresql.enable_disable_extensions.assert_called_once_with(
+            {"set_user": True}, database="postgres"
+        )
         _postgresql.create_user.assert_called_once()
         _oversee_users.assert_not_called()
         _restart_services_after_reboot.assert_called_once()
@@ -909,7 +922,7 @@ def test_on_update_status_after_restore_operation(harness):
         ) as _handle_processes_failures,
         patch("charm.PostgreSQLBackups.can_use_s3_repository") as _can_use_s3_repository,
         patch(
- "charms.postgresql_k8s.v0.postgresql.PostgreSQL.get_current_timeline" + "charms.postgresql_k8s.v1.postgresql.PostgreSQL.get_current_timeline" ) as _get_current_timeline, patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, @@ -1739,25 +1752,6 @@ def test_get_available_memory(harness): assert harness.charm.get_available_memory() == 0 -def test_juju_run_exec_divergence(harness): - with ( - patch("charm.ClusterTopologyObserver") as _topology_observer, - patch("charm.JujuVersion") as _juju_version, - ): - # Juju 2 - _juju_version.from_environ.return_value.major = 2 - harness = Harness(PostgresqlOperatorCharm) - harness.begin() - _topology_observer.assert_called_once_with(harness.charm, "/usr/bin/juju-run") - _topology_observer.reset_mock() - - # Juju 3 - _juju_version.from_environ.return_value.major = 3 - harness = Harness(PostgresqlOperatorCharm) - harness.begin() - _topology_observer.assert_called_once_with(harness.charm, "/usr/bin/juju-exec") - - def test_client_relations(harness): # Test when the charm has no relations. assert len(harness.charm.client_relations) == 0 @@ -2532,37 +2526,6 @@ def test_restart_services_after_reboot(harness): _start_stop_pgbackrest_service.assert_called_once() -def test_get_plugins(harness): - with patch("charm.PostgresqlOperatorCharm._on_config_changed"): - # Test when the charm has no plugins enabled. - assert harness.charm.get_plugins() == ["pgaudit"] - - # Test when the charm has some plugins enabled. - harness.update_config({ - "plugin_audit_enable": True, - "plugin_citext_enable": True, - "plugin_spi_enable": True, - }) - assert harness.charm.get_plugins() == [ - "pgaudit", - "citext", - "refint", - "autoinc", - "insert_username", - "moddatetime", - ] - - # Test when the charm has the pgAudit plugin disabled. - harness.update_config({"plugin_audit_enable": False}) - assert harness.charm.get_plugins() == [ - "citext", - "refint", - "autoinc", - "insert_username", - "moddatetime", - ] - - def test_on_promote_to_primary(harness): with ( patch("charm.PostgresqlOperatorCharm._raft_reinitialisation") as _raft_reinitialisation, diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index c9b5fe1ee2..6394501e7b 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -373,6 +373,7 @@ def test_render_patroni_yml_file(peers_ips, patroni): synchronous_node_count=0, raft_password=raft_password, patroni_password=patroni_password, + dba_role="charmed_dba", ) # Setup a mock for the `open` method, set returned data to patroni.yml template. 
diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py
index 13b065d299..d1ba2bf338 100644
--- a/tests/unit/test_postgresql_provider.py
+++ b/tests/unit/test_postgresql_provider.py
@@ -4,7 +4,7 @@
 from unittest.mock import Mock, PropertyMock, patch
 
 import pytest
-from charms.postgresql_k8s.v0.postgresql import (
+from charms.postgresql_k8s.v1.postgresql import (
     ACCESS_GROUP_RELATION,
     PostgreSQLCreateDatabaseError,
     PostgreSQLCreateUserError,
@@ -103,7 +103,7 @@ def test_on_database_requested(harness):
         side_effect=[None, PostgreSQLCreateUserError, None, None]
     )
     postgresql_mock.create_database = PropertyMock(
-        side_effect=[None, PostgreSQLCreateDatabaseError, None]
+        side_effect=[None, None, PostgreSQLCreateDatabaseError, None]
     )
     postgresql_mock.get_postgresql_version = PropertyMock(
         side_effect=[
             "14.1",
             PostgreSQLGetPostgreSQLVersionError,
         ]
     )
+    postgresql_mock.list_roles.return_value = []
 
     # Request a database before the database is ready.
     request_database(harness)
@@ -127,19 +128,14 @@ def test_on_database_requested(harness):
     user = f"relation-{rel_id}"
     expected_user_roles = [role.lower() for role in EXTRA_USER_ROLES.split(",")]
     expected_user_roles.append(ACCESS_GROUP_RELATION)
+    expected_user_roles.append(f"{DATABASE}_admin")
     postgresql_mock.create_user.assert_called_once_with(
         user,
         "test-password",
-        extra_user_roles=expected_user_roles,
-    )
-    database_relation = harness.model.get_relation(RELATION_NAME)
-    client_relations = [database_relation]
-    postgresql_mock.create_database.assert_called_once_with(
-        DATABASE,
-        user,
-        plugins=["pgaudit"],
-        client_relations=client_relations,
+        roles=expected_user_roles,
+        database=DATABASE,
     )
+    postgresql_mock.create_database.assert_called_once_with(DATABASE)
     postgresql_mock.get_postgresql_version.assert_called_once()
     _update_endpoints.assert_called_once()
diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py
index 1baac2da9b..cabae31fad 100644
--- a/tests/unit/test_upgrade.py
+++ b/tests/unit/test_upgrade.py
@@ -103,7 +103,7 @@ def test_on_upgrade_granted(harness):
             "charm.PostgreSQLBackups.start_stop_pgbackrest_service"
         ) as _start_stop_pgbackrest_service,
         patch("charm.PostgresqlOperatorCharm._setup_exporter") as _setup_exporter,
-        patch("charm.Patroni.start_patroni") as _start_patroni,
+        patch("charm.Patroni.restart_patroni") as _restart_patroni,
         patch("charm.PostgresqlOperatorCharm._install_snap_packages") as _install_snap_packages,
         patch("charm.PostgresqlOperatorCharm.update_config") as _update_config,
         patch(
@@ -112,7 +112,7 @@ def test_on_upgrade_granted(harness):
     ):
         # Test when the charm fails to start Patroni.
         mock_event = MagicMock()
-        _start_patroni.return_value = False
+        _restart_patroni.return_value = False
         harness.charm.upgrade._on_upgrade_granted(mock_event)
         _update_config.assert_called_once()
         _install_snap_packages.assert_called_once_with(packages=SNAP_PACKAGES, refresh=True)
@@ -124,7 +124,7 @@ def test_on_upgrade_granted(harness):
 
         # Test when the member hasn't started yet.
         _set_unit_failed.reset_mock()
-        _start_patroni.return_value = True
+        _restart_patroni.return_value = True
         _member_started.return_value = False
         harness.charm.upgrade._on_upgrade_granted(mock_event)
         assert _member_started.call_count == 6