Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,13 @@
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
Expand Down Expand Up @@ -314,8 +316,9 @@ private static void setupNodeDirectories(File baseDirectory,
private final Map<Integer, BrokerServer> brokers;
private final File baseDirectory;
private final SimpleFaultHandlerFactory faultHandlerFactory;
private final PreboundSocketFactoryManager socketFactoryManager;
private PreboundSocketFactoryManager socketFactoryManager;
private final String controllerListenerName;
private Map<Integer, Set<String>> nodeIdToListeners = new HashMap<>();

private KafkaClusterTestKit(
TestKitNodes nodes,
Expand Down Expand Up @@ -437,6 +440,130 @@ public void startup() throws ExecutionException, InterruptedException {
}
}

public void shutdown() throws Exception {
List<Entry<String, Future<?>>> futureEntries = new ArrayList<>();
try {
// Note the shutdown order here is chosen to be consistent with
// `KafkaRaftServer`. See comments in that class for an explanation.
for (Entry<Integer, BrokerServer> entry : brokers.entrySet()) {
int brokerId = entry.getKey();
BrokerServer broker = entry.getValue();
nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
Set<String> listeners = nodeIdToListeners.get(brokerId);
broker.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
Comment on lines +448 to +455
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Resource Leak Risk

Multiple HashSet allocations occur when nodeIdToListeners.get() is called immediately after computeIfAbsent. This creates unnecessary object allocation and increases GC pressure during cluster restarts.

Suggested change
for (Entry<Integer, BrokerServer> entry : brokers.entrySet()) {
int brokerId = entry.getKey();
BrokerServer broker = entry.getValue();
nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
Set<String> listeners = nodeIdToListeners.get(brokerId);
broker.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
for (Entry<Integer, BrokerServer> entry : brokers.entrySet()) {
int brokerId = entry.getKey();
BrokerServer broker = entry.getValue();
Set<String> listeners = nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
broker.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
Standards
  • ISO-IEC-25010-Performance-Resource-Utilization
  • Algorithm-Opt-Object-Reuse
  • Java-Memory-Efficiency

if (!broker.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
listeners.add(broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
broker.socketServer().controlPlaneAcceptorOpt().get().localPort());
}
Comment on lines +451 to +460

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Consider extracting this logic for collecting listeners into a separate, well-named method to improve readability and maintainability. This would also reduce code duplication, as the same logic is repeated for brokers and controllers.

private Set<String> collectListeners(SharedServer server, int nodeId) {
    Set<String> listeners = nodeIdToListeners.computeIfAbsent(nodeId, __ -> new HashSet<>());
    server.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
        listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
    });
    if (!server.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
        listeners.add(server.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
            server.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
            server.socketServer().controlPlaneAcceptorOpt().get().localPort());
    }
    return listeners;
}

// Usage in shutdown method:
int brokerId = entry.getKey();
BrokerServer broker = entry.getValue();
Set<String> listeners = collectListeners(broker.sharedServer(), brokerId);
nodeIdToListeners.put(brokerId, listeners);

nodeIdToListeners.put(brokerId, listeners);
futureEntries.add(new SimpleImmutableEntry<>("broker" + brokerId,
executorService.submit((Runnable) broker::shutdown)));
}
waitForAllFutures(futureEntries);
futureEntries.clear();
for (Entry<Integer, ControllerServer> entry : controllers.entrySet()) {
int controllerId = entry.getKey();
ControllerServer controller = entry.getValue();
nodeIdToListeners.computeIfAbsent(controllerId, __ -> new HashSet<>());
Set<String> listeners = nodeIdToListeners.get(controllerId);
controller.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
if (!controller.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
listeners.add(controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
controller.socketServer().controlPlaneAcceptorOpt().get().localPort());
}
nodeIdToListeners.put(controllerId, listeners);
futureEntries.add(new SimpleImmutableEntry<>("controller" + controllerId,
Comment on lines +452 to +481
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Duplicated Listener Collection

Identical listener collection logic is duplicated for both brokers and controllers. This violates DRY principle and increases maintenance burden when modifying listener collection logic.

Suggested change
Set<String> listeners = nodeIdToListeners.get(brokerId);
broker.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
if (!broker.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
listeners.add(broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
broker.socketServer().controlPlaneAcceptorOpt().get().localPort());
}
nodeIdToListeners.put(brokerId, listeners);
futureEntries.add(new SimpleImmutableEntry<>("broker" + brokerId,
executorService.submit((Runnable) broker::shutdown)));
}
waitForAllFutures(futureEntries);
futureEntries.clear();
for (Entry<Integer, ControllerServer> entry : controllers.entrySet()) {
int controllerId = entry.getKey();
ControllerServer controller = entry.getValue();
nodeIdToListeners.computeIfAbsent(controllerId, __ -> new HashSet<>());
Set<String> listeners = nodeIdToListeners.get(controllerId);
controller.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
if (!controller.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
listeners.add(controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
controller.socketServer().controlPlaneAcceptorOpt().get().localPort());
}
nodeIdToListeners.put(controllerId, listeners);
futureEntries.add(new SimpleImmutableEntry<>("controller" + controllerId,
private void collectListeners(int nodeId, SocketServer socketServer) {
Set<String> listeners = nodeIdToListeners.computeIfAbsent(nodeId, __ -> new HashSet<>());
socketServer.dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
});
if (!socketServer.controlPlaneAcceptorOpt().isEmpty()) {
listeners.add(socketServer.controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
socketServer.controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
socketServer.controlPlaneAcceptorOpt().get().localPort());
}
}
// Then replace the duplicated code in shutdown() with:
// In brokers loop:
collectListeners(brokerId, broker.socketServer());
// In controllers loop:
collectListeners(controllerId, controller.socketServer());
Standards
  • Clean-Code-DRY
  • Refactoring-Extract-Method
  • SOLID-SRP

executorService.submit(controller::shutdown)));
}
waitForAllFutures(futureEntries);
futureEntries.clear();
socketFactoryManager.close();
Comment on lines +443 to +486
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

shutdown() assumes acceptors are present – restart will break if the cluster was never started

broker.socketServer().dataPlaneAcceptors() and the control-plane accessor are empty until startup() has bound the ports.
When they are empty, nodeIdToListeners.get(id) remains null, and the subsequent String.join(",", null) in restart() throws an NPE.

Either bail out early when the servers were never started, or initialise nodeIdToListeners with the original listener strings from the config:

-                nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
+                nodeIdToListeners
+                        .computeIfAbsent(brokerId, __ ->
+                                new HashSet<>(List.of(broker.config()
+                                    .originals()
+                                    .getOrDefault(SocketServerConfigs.LISTENERS_CONFIG, "")
+                                    .toString().split(","))));

A similar fix is needed for the controller loop below.
Without this, any test that formats the cluster but calls restart() before startup() will consistently fail.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Resource Leak Risk

socketFactoryManager.close() called without error handling. If close() throws an exception, the shutdown process terminates prematurely, potentially leaving resources unclosed and causing resource leaks.

Suggested change
socketFactoryManager.close();
try {
socketFactoryManager.close();
} catch (Exception e) {
log.error("Error closing socketFactoryManager", e);
}
Standards
  • ISO-IEC-25010-Reliability-Fault-Tolerance
  • ISO-IEC-25010-Functional-Correctness-Appropriateness
  • SRE-Error-Handling

} catch (Exception e) {
for (Entry<String, Future<?>> entry : futureEntries) {
entry.getValue().cancel(true);
}
throw e;
}
}

public void restart(Map<Integer, Map<String, Object>> perServerOverriddenConfig) throws Exception {

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

It's crucial to ensure that the shutdown() method is robust and handles all possible exceptions gracefully. Consider adding more specific exception handling and logging to identify and address any potential issues during shutdown.

    public void restart(Map<Integer, Map<String, Object>> perServerOverriddenConfig) throws Exception {
        try {
            shutdown();
        } catch (Exception e) {
            log.error("Exception during shutdown: {}", e.getMessage(), e);
            throw e; // Re-throw the exception to prevent restart
        }

shutdown();

Comment on lines +495 to +497
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Resource Cleanup Risk

If shutdown() throws an exception, the restart method will abort without completing initialization of new servers. This creates a state where the system is partially shut down but not restarted, potentially causing test failures.

Suggested change
public void restart(Map<Integer, Map<String, Object>> perServerOverriddenConfig) throws Exception {
shutdown();
try {
shutdown();
} catch (Exception e) {
log.error("Error during shutdown phase of restart", e);
throw new RuntimeException("Failed to shutdown cluster during restart", e);
}
Map<Integer, SharedServer> jointServers = new HashMap<>();
Standards
  • ISO-IEC-25010-Reliability-Recoverability
  • ISO-IEC-25010-Functional-Correctness-Appropriateness
  • SRE-Error-Handling

Comment on lines +496 to +497
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unvalidated Configuration Override

Configuration overrides are applied without validation, allowing potentially insecure settings. An attacker could inject malicious configurations that compromise security controls or expose sensitive information.

Suggested change
shutdown();
private static final Set<String> SECURITY_SENSITIVE_CONFIGS = Set.of(
"ssl.keystore.password", "ssl.key.password", "ssl.truststore.password",
"sasl.jaas.config", "listeners.security.protocol.map"
);
private void validateAndApplyConfigs(Map<String, Object> config, Map<Integer, Map<String, Object>> overrides, int id) {
Map<String, Object> defaultOverrides = overrides.getOrDefault(-1, Collections.emptyMap());
Map<String, Object> nodeOverrides = overrides.getOrDefault(id, Collections.emptyMap());
// Validate security-sensitive configurations
Stream.concat(defaultOverrides.keySet().stream(), nodeOverrides.keySet().stream())
.filter(SECURITY_SENSITIVE_CONFIGS::contains)
.findAny()
.ifPresent(key -> {
throw new IllegalArgumentException("Cannot override security-sensitive configuration: " + key);
});
config.putAll(defaultOverrides);
config.putAll(nodeOverrides);
}
Standards
  • CWE-15
  • OWASP-A05

Map<Integer, SharedServer> jointServers = new HashMap<>();
Comment on lines +496 to +498
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Resource Leak Risk

The restart() method creates a new socketFactoryManager without properly closing the previous one. If an exception occurs during restart, the old socketFactoryManager resources may leak. The shutdown() method closes it but exception paths don't ensure closure.

Suggested change
shutdown();
Map<Integer, SharedServer> jointServers = new HashMap<>();
PreboundSocketFactoryManager oldSocketFactoryManager = socketFactoryManager;
socketFactoryManager = new PreboundSocketFactoryManager();
try {
controllers.forEach((id, controller) -> {
Map<String, Object> config = controller.config().originals();
// rest of the method...
});
// rest of the method...
startup();
} catch (Exception e) {
Utils.closeQuietly(socketFactoryManager, "new socketFactoryManager");
socketFactoryManager = oldSocketFactoryManager;
throw e;
} finally {
Utils.closeQuietly(oldSocketFactoryManager, "old socketFactoryManager");
}
Standards
  • Logic-Verification-Resource-Management
  • Algorithm-Correctness-Exception-Safety
  • Business-Rule-Resource-Cleanup


socketFactoryManager = new PreboundSocketFactoryManager();

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

Reinitializing socketFactoryManager here might lead to resource leaks if the old socket factories are not properly closed. Ensure that the old socket factories are closed before creating new ones.

        if (socketFactoryManager != null) {
            try {
                socketFactoryManager.close();
            } catch (Exception e) {
                log.warn("Exception while closing socketFactoryManager: {}", e.getMessage(), e);
            }
        }
        socketFactoryManager = new PreboundSocketFactoryManager();

controllers.forEach((id, controller) -> {
Map<String, Object> config = controller.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
Comment on lines +502 to +504
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Missing Null Check

Missing null check for perServerConfigOverrides parameter in controller configuration. If null is passed to restart(), a NullPointerException will occur when attempting to call getOrDefault(), breaking cluster restart functionality.

Suggested change
Map<String, Object> config = controller.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
Map<String, Object> config = controller.config().originals();
if (perServerConfigOverrides != null) {
config.putAll(perServerConfigOverrides.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerConfigOverrides.getOrDefault(id, Collections.emptyMap()));
}
Standards
  • Logic-Verification-Null-Safety
  • Business-Rule-Parameter-Validation
  • Algorithm-Correctness-Defensive-Programming

config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

This line uses String.join to create a comma-separated string of listeners. Ensure that the nodeIdToListeners map contains the correct listener information for each node before joining them. Also, consider adding a check to ensure that the listeners are not empty.

            Set<String> listenersForNode = nodeIdToListeners.get(id);
            if (listenersForNode == null || listenersForNode.isEmpty()) {
                log.warn("No listeners found for node {}", id);
                // Handle the case where there are no listeners, possibly by using a default listener
            }
            config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", listenersForNode != null ? listenersForNode : Collections.emptySet()));


Comment on lines +495 to +506
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Port reservations are lost on restart – risk of “Address already in use”

restart() creates a fresh PreboundSocketFactoryManager, but the new manager is not told to re-use the previously
bound ports captured in nodeIdToListeners.
If another process grabs one of those ports between shutdown and restart, the cluster start-up will fail.

Consider feeding the cached listener URIs back into the new manager before constructing servers, e.g.:

socketFactoryManager = new PreboundSocketFactoryManager();
nodeIdToListeners.forEach((id, listeners) ->
        listeners.forEach(l -> socketFactoryManager.reserve(id, l)));

(or expose a helper in PreboundSocketFactoryManager).

TestKitNode node = nodes.controllerNodes().get(id);
KafkaConfig nodeConfig = new KafkaConfig(config, false);
SharedServer sharedServer = new SharedServer(
nodeConfig,
node.initialMetaPropertiesEnsemble(),
Time.SYSTEM,
new Metrics(),
CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(nodeConfig.quorumConfig().voters())),
Collections.emptyList(),
faultHandlerFactory,
socketFactoryManager.getOrCreateSocketFactory(node.id())
);
try {
controller = new ControllerServer(
sharedServer,
KafkaRaftServer.configSchema(),
nodes.bootstrapMetadata());
} catch (Throwable e) {
log.error("Error creating controller {}", node.id(), e);
Utils.swallow(log, Level.WARN, "sharedServer.stopForController error", sharedServer::stopForController);
throw e;
Comment on lines +524 to +527

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The error handling here uses Utils.swallow. While this prevents the exception from propagating, it might mask important information about why the controller creation failed. Consider re-throwing a more specific exception or logging additional details to aid debugging.

            } catch (Throwable e) {
                log.error("Error creating controller {}", node.id(), e);
                try {
                    sharedServer.stopForController();
                } catch (Throwable e2) {
                    log.warn("sharedServer.stopForController error", e2);
                }
                throw new RuntimeException("Error creating controller " + node.id(), e);
            }

}
controllers.put(node.id(), controller);
jointServers.put(node.id(), sharedServer);
});

brokers.forEach((id, broker) -> {
Map<String, Object> config = broker.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
Comment on lines +533 to +535
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Missing Null Check

Missing null check for perServerConfigOverrides parameter. If null is passed to restart(), a NullPointerException will occur when attempting to call getOrDefault(), breaking cluster restart functionality.

Suggested change
brokers.forEach((id, broker) -> {
Map<String, Object> config = broker.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
Map<String, Object> config = broker.config().originals();
if (perServerConfigOverrides != null) {
config.putAll(perServerConfigOverrides.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerConfigOverrides.getOrDefault(id, Collections.emptyMap()));
}
Standards
  • Logic-Verification-Null-Safety
  • Business-Rule-Parameter-Validation
  • Algorithm-Correctness-Defensive-Programming

config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));

TestKitNode node = nodes.brokerNodes().get(id);
KafkaConfig nodeConfig = new KafkaConfig(config);
SharedServer sharedServer = jointServers.computeIfAbsent(
node.id(),
Comment on lines +533 to +542
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Mutating the live config map may have side-effects

controller.config().originals() and broker.config().originals() return the live backing map of the old
KafkaConfig. Mutating it after shutdown is risky (if another thread still holds a reference) and obscures intent.

Use a defensive copy before modifications:

-            Map<String, Object> config = broker.config().originals();
+            Map<String, Object> config = new HashMap<>(broker.config().originals());

Apply the same change to the controller block above.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
brokers.forEach((id, broker) -> {
Map<String, Object> config = broker.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));
TestKitNode node = nodes.brokerNodes().get(id);
KafkaConfig nodeConfig = new KafkaConfig(config);
SharedServer sharedServer = jointServers.computeIfAbsent(
node.id(),
brokers.forEach((id, broker) -> {
- Map<String, Object> config = broker.config().originals();
+ Map<String, Object> config = new HashMap<>(broker.config().originals());
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));
TestKitNode node = nodes.brokerNodes().get(id);
KafkaConfig nodeConfig = new KafkaConfig(config);
SharedServer sharedServer = jointServers.computeIfAbsent(
node.id(),

nodeId -> new SharedServer(
nodeConfig,
node.initialMetaPropertiesEnsemble(),
Time.SYSTEM,
new Metrics(),
CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(nodeConfig.quorumConfig().voters())),
Collections.emptyList(),
faultHandlerFactory,
socketFactoryManager.getOrCreateSocketFactory(node.id())
)
);
try {
broker = new BrokerServer(sharedServer);
} catch (Throwable e) {
log.error("Error creating broker {}", node.id(), e);
Utils.swallow(log, Level.WARN, "sharedServer.stopForBroker error", sharedServer::stopForBroker);
Comment on lines +524 to +558
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unchecked Exception Handling

Raw exceptions are rethrown without wrapping in both controller and broker creation. This loses context about the failure source and can lead to confusing error messages during test failures.

Suggested change
} catch (Throwable e) {
log.error("Error creating controller {}", node.id(), e);
Utils.swallow(log, Level.WARN, "sharedServer.stopForController error", sharedServer::stopForController);
throw e;
}
controllers.put(node.id(), controller);
jointServers.put(node.id(), sharedServer);
});
brokers.forEach((id, broker) -> {
Map<String, Object> config = broker.config().originals();
config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));
TestKitNode node = nodes.brokerNodes().get(id);
KafkaConfig nodeConfig = new KafkaConfig(config);
SharedServer sharedServer = jointServers.computeIfAbsent(
node.id(),
nodeId -> new SharedServer(
nodeConfig,
node.initialMetaPropertiesEnsemble(),
Time.SYSTEM,
new Metrics(),
CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(nodeConfig.quorumConfig().voters())),
Collections.emptyList(),
faultHandlerFactory,
socketFactoryManager.getOrCreateSocketFactory(node.id())
)
);
try {
broker = new BrokerServer(sharedServer);
} catch (Throwable e) {
log.error("Error creating broker {}", node.id(), e);
Utils.swallow(log, Level.WARN, "sharedServer.stopForBroker error", sharedServer::stopForBroker);
// For controller creation error (line 524):
throw new RuntimeException("Failed to create controller " + node.id(), e);
// For broker creation error (line 558):
throw new RuntimeException("Failed to create broker " + node.id(), e);
Standards
  • ISO-IEC-25010-Reliability-Maturity
  • ISO-IEC-25010-Functional-Correctness-Appropriateness
  • SRE-Error-Handling

throw e;
Comment on lines +556 to +559

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Similar to the controller creation, the error handling here uses Utils.swallow. Consider re-throwing a more specific exception or logging additional details to aid debugging.

            } catch (Throwable e) {
                log.error("Error creating broker {}", node.id(), e);
                try {
                    sharedServer.stopForBroker();
                } catch (Throwable e2) {
                    log.warn("sharedServer.stopForBroker error", e2);
                }
                throw new RuntimeException("Error creating broker " + node.id(), e);
            }

}
brokers.put(node.id(), broker);
});

startup();
}

/**
* Wait for a controller to mark all the brokers as ready (registered and unfenced).
* And also wait for the metadata cache up-to-date in each broker server.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,12 @@ default SocketServer anyControllerSocketServer() {
.orElseThrow(() -> new RuntimeException("No controller SocketServers found"));
}

/**
 * Restarts every node in the cluster without changing any configuration.
 *
 * <p>Equivalent to calling {@link #restart(Map)} with an empty override map.
 *
 * @throws Exception if shutting down or restarting any server fails
 */
default void restart() throws Exception {
    restart(Map.of());
}

void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception;

String clusterId();

//---------------------------[producer/consumer/admin]---------------------------//
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,11 @@ public void stop() {
}
}

@Override
public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
    // Delegate to the underlying test kit, which shuts down all brokers and
    // controllers and then recreates them with the overridden configs applied.
    clusterTestKit.restart(perServerConfigOverrides);
}
Comment on lines +196 to +199
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Guard against restarting an instance that has never been started

If a test calls clusterInstance.restart() before start(), the underlying KafkaClusterTestKit#shutdown tries to gather listener
information from SocketServer acceptors that are not yet created, resulting in NPEs and an empty LISTENERS config on restart.

Add a fast-fail guard:

@@
     @Override
     public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
-        clusterTestKit.restart(perServerConfigOverrides);
+        if (!started.get()) {
+            throw new IllegalStateException("Cannot restart a cluster that has not been started");
+        }
+        clusterTestKit.restart(perServerConfigOverrides);
     }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
@Override
public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
clusterTestKit.restart(perServerConfigOverrides);
}
@Override
public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
if (!started.get()) {
throw new IllegalStateException("Cannot restart a cluster that has not been started");
}
clusterTestKit.restart(perServerConfigOverrides);
}


@Override
public void shutdownBroker(int brokerId) {
findBrokerOrThrow(brokerId).shutdown();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -331,4 +331,32 @@ public void testControllerListenerName(ClusterInstance cluster) throws Execution
assertEquals(1, admin.describeMetadataQuorum().quorumInfo().get().nodes().size());
}
}

@ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, serverProperties = {
    @ClusterConfigProperty(key = "offset.storage.replication.factor", value = "1"),
})
public void testRestartWithOverriddenConfig(ClusterInstance clusterInstance) throws Exception {
    // Key -1 applies the override to every node in the cluster.
    final int overriddenReplicationFactor = 2;
    clusterInstance.restart(Collections.singletonMap(-1,
        Collections.singletonMap("default.replication.factor", overriddenReplicationFactor)));
    clusterInstance.waitForReadyBrokers();
    // After the restart, each broker and each controller must report the overridden value.
    clusterInstance.brokers().values().forEach(brokerServer ->
        Assertions.assertEquals(overriddenReplicationFactor,
            brokerServer.config().getInt("default.replication.factor")));
    clusterInstance.controllers().values().forEach(controllerServer ->
        Assertions.assertEquals(overriddenReplicationFactor,
            controllerServer.config().getInt("default.replication.factor")));
}

@ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, serverProperties = {
    @ClusterConfigProperty(key = "offset.storage.replication.factor", value = "1"),
})
public void testRestartWithoutOverriddenConfig(ClusterInstance clusterInstance) throws Exception {
    // Restart with no overrides: every node must come back with its original configuration.
    clusterInstance.restart();
    clusterInstance.waitForReadyBrokers();
    final int originalReplicationFactor = 1;
    clusterInstance.brokers().values().forEach(brokerServer ->
        Assertions.assertEquals(originalReplicationFactor,
            brokerServer.config().getInt("default.replication.factor")));
    clusterInstance.controllers().values().forEach(controllerServer ->
        Assertions.assertEquals(originalReplicationFactor,
            controllerServer.config().getInt("default.replication.factor")));
}
}
Loading