Skip to content

Commit 1eb704c

Browse files
swap ordering of logic in server
1 parent ebb7b13 commit 1eb704c

File tree

1 file changed

+34
-25
lines changed

1 file changed

+34
-25
lines changed

src/sdam/server.ts

Lines changed: 34 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -401,37 +401,46 @@ export class Server extends TypedEventEmitter<ServerEvents> {
401401
const isNetworkTimeoutBeforeHandshakeError =
402402
error instanceof MongoNetworkError && error.beforeHandshake;
403403
const isAuthHandshakeError = error.hasErrorLabel(MongoErrorLabel.HandshakeError);
404-
if (isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || isAuthHandshakeError) {
405-
// In load balanced mode we never mark the server as unknown and always
406-
// clear for the specific service id.
404+
405+
// TODO: considering parse errors as SDAM unrecoverable errors seems
406+
// questionable. What if the parse error only comes from an application connection,
407+
// indicating some bytes were lost in transmission? It seems overkill to completely
408+
// kill the server.
409+
// Parse errors from monitoring connections are already handled because the
410+
// error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the
411+
// server unknown and clear the pool. Can we remove this?
412+
if (isStateChangeError(error) || error instanceof MongoParseError) {
413+
const shouldClearPool = isNodeShuttingDownError(error);
414+
415+
// from the SDAM spec: The driver MUST synchronize clearing the pool with updating the topology.
416+
// In load balanced mode: there is no monitoring, so there is no topology to update. We simply clear the pool.
417+
// For other topologies: the `ResetPool` label instructs the topology to clear the server's pool in `updateServer()`.
418+
if (!this.loadBalanced) {
419+
if (shouldClearPool) {
420+
error.addErrorLabel(MongoErrorLabel.ResetPool);
421+
}
422+
markServerUnknown(this, error);
423+
process.nextTick(() => this.requestCheck());
424+
return;
425+
}
426+
427+
if (connection && shouldClearPool) {
428+
this.pool.clear({ serviceId: connection.serviceId });
429+
}
430+
} else if (
431+
isNetworkNonTimeoutError ||
432+
isNetworkTimeoutBeforeHandshakeError ||
433+
isAuthHandshakeError
434+
) {
435+
// from the SDAM spec: The driver MUST synchronize clearing the pool with updating the topology.
436+
// In load balanced mode: there is no monitoring, so there is no topology to update. We simply clear the pool.
437+
// For other topologies: the `ResetPool` label instructs the topology to clear the server's pool in `updateServer()`.
407438
if (!this.loadBalanced) {
408439
error.addErrorLabel(MongoErrorLabel.ResetPool);
409440
markServerUnknown(this, error);
410441
} else if (connection) {
411442
this.pool.clear({ serviceId: connection.serviceId });
412443
}
413-
} else {
414-
// TODO: considering parse errors as SDAM unrecoverable errors seems
415-
// questionable. What if the parse error only comes from an application connection,
416-
// indicating some bytes were lost in transmission? It seems overkill to completely
417-
// kill the server.
418-
// Parse errors from monitoring connections are already handled because the
419-
// error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the
420-
// server unknown and clear the pool. Can we remove this?
421-
if (isStateChangeError(error) || error instanceof MongoParseError) {
422-
const shouldClearPool = isNodeShuttingDownError(error);
423-
if (this.loadBalanced && connection && shouldClearPool) {
424-
this.pool.clear({ serviceId: connection.serviceId });
425-
}
426-
427-
if (!this.loadBalanced) {
428-
if (shouldClearPool) {
429-
error.addErrorLabel(MongoErrorLabel.ResetPool);
430-
}
431-
markServerUnknown(this, error);
432-
process.nextTick(() => this.requestCheck());
433-
}
434-
}
435444
}
436445
}
437446

0 commit comments

Comments
 (0)