
Commit 694667e

Avoid using unfortunate terms in code comments and log messages
We switched all doc guides to use "mirror" or "secondary replica" years ago, but these comments and log messages were never updated. Renaming functions and record/HTTP API fields (including CLI tools) would be a major breaking change, so those will be aliased or renamed in the future, after a much more extensive review.

(cherry picked from commit b76bd6d)
1 parent 870f893 commit 694667e

13 files changed: 85 additions, 85 deletions

src/rabbit_amqqueue.erl

Lines changed: 5 additions & 5 deletions
@@ -679,7 +679,7 @@ with(#resource{} = Name, F, E, RetriesLeft) ->
 {ok, Q} when ?amqqueue_state_is(Q, crashed) ->
 E({absent, Q, crashed});
 %% The queue process has been stopped by a supervisor.
-%% In that case a synchronised slave can take over
+%% In that case a synchronised mirror can take over
 %% so we should retry.
 {ok, Q} when ?amqqueue_state_is(Q, stopped) ->
 %% The queue process was stopped by the supervisor

@@ -715,7 +715,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
 QState = amqqueue:get_state(Q),
 case {QState, is_replicated(Q)} of
 %% We don't want to repeat an operation if
-%% there are no slaves to migrate to
+%% there are no mirrors to migrate to
 {stopped, false} ->
 E({absent, Q, stopped});
 _ ->

@@ -1869,7 +1869,7 @@ forget_node_for_queue(DeadNode, Q) ->
 forget_node_for_queue(DeadNode, RS, Q).

 forget_node_for_queue(_DeadNode, [], Q) ->
-%% No slaves to recover from, queue is gone.
+%% No mirrors to recover from, queue is gone.
 %% Don't process_deletions since that just calls callbacks and we
 %% are not really up.
 Name = amqqueue:get_name(Q),

@@ -1986,7 +1986,7 @@ maybe_clear_recoverable_node(Node, Q) ->
 %% by the incoming slave node and this function, called
 %% by the master node. If this function is executed after
 %% record_synchronised/1, the node is erroneously removed
-%% from the recoverable slaves list.
+%% from the recoverable mirrors list.
 %%
 %% We check if the slave node's queue PID is alive. If it is
 %% the case, then this function is executed after. In this

@@ -2134,7 +2134,7 @@ deliver(Qs, Delivery = #delivery{flow = Flow,
 noflow -> ok
 end,

-%% We let slaves know that they were being addressed as slaves at
+%% We let mirrors know that they were being addressed as mirrors at
 %% the time - if they receive such a message from the channel
 %% after they have become master they should mark the message as
 %% 'delivered' since they do not know what the master may have
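The retry_wait/4 hunk above hinges on one decision: fail immediately when the queue is stopped and unreplicated, otherwise back off and retry in the hope that a synchronised mirror takes over. A minimal, hypothetical sketch of that decision with an assumed sleep-and-retry loop (the real rabbit_amqqueue code differs in detail):

%% Illustrative sketch only, not the actual rabbit_amqqueue implementation.
retry_wait_sketch(Q, F, E, RetriesLeft) ->
    case {amqqueue:get_state(Q), is_replicated(Q)} of
        %% No mirrors to migrate to, so give up straight away.
        {stopped, false} ->
            E({absent, Q, stopped});
        %% A synchronised mirror may still be promoted; back off and retry.
        _ ->
            timer:sleep(30),
            with(amqqueue:get_name(Q), F, E, RetriesLeft - 1)
    end.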

src/rabbit_mirror_queue_coordinator.erl

Lines changed: 34 additions & 34 deletions
@@ -48,7 +48,7 @@
 %% +----------+ +-------+--------------+-----------...etc...
 %% | | |
 %% V V V
-%% amqqueue_process---+ slave-----+ slave-----+ ...etc...
+%% amqqueue_process---+ mirror-----+ mirror-----+ ...etc...
 %% | BQ = master----+ | | BQ = vq | | BQ = vq |
 %% | | BQ = vq | | +-+-------+ +-+-------+
 %% | +-+-------+ | | |
@@ -63,50 +63,50 @@
 %% consumers
 %%
 %% The master is merely an implementation of bq, and thus is invoked
-%% through the normal bq interface by the amqqueue_process. The slaves
+%% through the normal bq interface by the amqqueue_process. The mirrors
 %% meanwhile are processes in their own right (as is the
-%% coordinator). The coordinator and all slaves belong to the same gm
+%% coordinator). The coordinator and all mirrors belong to the same gm
 %% group. Every member of a gm group receives messages sent to the gm
 %% group. Because the master is the bq of amqqueue_process, it doesn't
 %% have sole control over its mailbox, and as a result, the master
 %% itself cannot be passed messages directly (well, it could by via
 %% the amqqueue:run_backing_queue callback but that would induce
 %% additional unnecessary loading on the master queue process), yet it
-%% needs to react to gm events, such as the death of slaves. Thus the
+%% needs to react to gm events, such as the death of mirrors. Thus the
 %% master creates the coordinator, and it is the coordinator that is
 %% the gm callback module and event handler for the master.
 %%
 %% Consumers are only attached to the master. Thus the master is
-%% responsible for informing all slaves when messages are fetched from
+%% responsible for informing all mirrors when messages are fetched from
 %% the bq, when they're acked, and when they're requeued.
 %%
-%% The basic goal is to ensure that all slaves performs actions on
+%% The basic goal is to ensure that all mirrors performs actions on
 %% their bqs in the same order as the master. Thus the master
 %% intercepts all events going to its bq, and suitably broadcasts
-%% these events on the gm. The slaves thus receive two streams of
+%% these events on the gm. The mirrors thus receive two streams of
 %% events: one stream is via the gm, and one stream is from channels
 %% directly. Whilst the stream via gm is guaranteed to be consistently
-%% seen by all slaves, the same is not true of the stream via
+%% seen by all mirrors, the same is not true of the stream via
 %% channels. For example, in the event of an unexpected death of a
 %% channel during a publish, only some of the mirrors may receive that
 %% publish. As a result of this problem, the messages broadcast over
-%% the gm contain published content, and thus slaves can operate
+%% the gm contain published content, and thus mirrors can operate
 %% successfully on messages that they only receive via the gm.
 %%
 %% The key purpose of also sending messages directly from the channels
-%% to the slaves is that without this, in the event of the death of
+%% to the mirrors is that without this, in the event of the death of
 %% the master, messages could be lost until a suitable slave is
 %% promoted. However, that is not the only reason. A slave cannot send
 %% confirms for a message until it has seen it from the
 %% channel. Otherwise, it might send a confirm to a channel for a
 %% message that it might *never* receive from that channel. This can
-%% happen because new slaves join the gm ring (and thus receive
+%% happen because new mirrors join the gm ring (and thus receive
 %% messages from the master) before inserting themselves in the
 %% queue's mnesia record (which is what channels look at for routing).
 %% As it turns out, channels will simply ignore such bogus confirms,
 %% but relying on that would introduce a dangerously tight coupling.
 %%
-%% Hence the slaves have to wait until they've seen both the publish
+%% Hence the mirrors have to wait until they've seen both the publish
 %% via gm, and the publish via the channel before they issue the
 %% confirm. Either form of publish can arrive first, and a slave can
 %% be upgraded to the master at any point during this
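The dual event stream described in this hunk means a mirror must remember which stream each publish has arrived on and only confirm once it has seen both copies. A minimal, hypothetical sketch of that bookkeeping with two sets of msg_ids (the real rabbit_mirror_queue_slave state is richer than this):

%% Illustrative only: decide whether a mirror may confirm MsgId yet.
confirm_state(MsgId, SeenViaGm, SeenViaChannel) ->
    case {sets:is_element(MsgId, SeenViaGm),
          sets:is_element(MsgId, SeenViaChannel)} of
        {true, true} -> confirm;  %% both the gm copy and the channel copy arrived
        _            -> wait      %% keep waiting for the other stream
    end.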
@@ -116,7 +116,7 @@
 %% amqqueue API. However, it does not need to implement all parts: for
 %% example, no ack or consumer-related message can arrive directly at
 %% a slave from a channel: it is only publishes that pass both
-%% directly to the slaves and go via gm.
+%% directly to the mirrors and go via gm.
 %%
 %% Slaves can be added dynamically. When this occurs, there is no
 %% attempt made to sync the current contents of the master with the

@@ -144,18 +144,18 @@
 %% the master queue but can't go back in the slave, since we don't
 %% want "holes" in the slave queue. Note that the depth, and the
 %% length likewise, must always be shorter on the slave - we assert
-%% that in various places. In case slaves are joined to an empty queue
+%% that in various places. In case mirrors are joined to an empty queue
 %% which only goes on to receive publishes, they start by asking the
-%% master to broadcast its depth. This is enough for slaves to always
+%% master to broadcast its depth. This is enough for mirrors to always
 %% be able to work out when their head does not differ from the master
 %% (and is much simpler and cheaper than getting the master to hang on
 %% to the guid of the msg at the head of its queue). When a slave is
 %% promoted to a master, it unilaterally broadcasts its depth, in
-%% order to solve the problem of depth requests from new slaves being
+%% order to solve the problem of depth requests from new mirrors being
 %% unanswered by a dead master.
 %%
 %% Obviously, due to the async nature of communication across gm, the
-%% slaves can fall behind. This does not matter from a sync pov: if
+%% mirrors can fall behind. This does not matter from a sync pov: if
 %% they fall behind and the master dies then a) no publishes are lost
 %% because all publishes go to all mirrors anyway; b) the worst that
 %% happens is that acks get lost and so messages come back to
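The depth handshake described in the hunk above amounts to two gm broadcasts: a newly joined mirror asks for the depth, and a master (including a freshly promoted one, unprompted) announces it. A hedged sketch of those calls; the message tags request_depth and {depth, D} are assumptions made for illustration, not necessarily the exact terms the module uses:

%% Illustrative only: a mirror joining an empty queue asks for the depth...
request_depth(GM) ->
    ok = gm:broadcast(GM, request_depth).

%% ...and the master (or a newly promoted master, unprompted) answers it, so
%% depth requests are never left unanswered by a dead master.
broadcast_depth(GM, BQ, BQS) ->
    ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}).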
@@ -164,12 +164,12 @@
 %% but close enough for jazz).
 %%
 %% Because acktags are issued by the bq independently, and because
-%% there is no requirement for the master and all slaves to use the
+%% there is no requirement for the master and all mirrors to use the
 %% same bq, all references to msgs going over gm is by msg_id. Thus
 %% upon acking, the master must convert the acktags back to msg_ids
 %% (which happens to be what bq:ack returns), then sends the msg_ids
-%% over gm, the slaves must convert the msg_ids to acktags (a mapping
-%% the slaves themselves must maintain).
+%% over gm, the mirrors must convert the msg_ids to acktags (a mapping
+%% the mirrors themselves must maintain).
 %%
 %% When the master dies, a slave gets promoted. This will be the
 %% eldest slave, and thus the hope is that that slave is most likely
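Because only msg_ids travel over gm, each mirror keeps its own msg_id-to-acktag mapping for its local BQ, as the hunk above explains. A minimal sketch of that translation using a map; the function names and state shape are illustrative, not the real rabbit_mirror_queue_slave record:

%% Illustrative only: remember the acktag the local BQ issued for a msg_id...
record_acktag(MsgId, AckTag, MsgIdToAck) ->
    maps:put(MsgId, AckTag, MsgIdToAck).

%% ...and, when the master broadcasts acks as msg_ids, translate them back
%% into the acktags this mirror's BQ understands.
local_acktags(MsgIds, MsgIdToAck) ->
    [maps:get(MsgId, MsgIdToAck) || MsgId <- MsgIds, maps:is_key(MsgId, MsgIdToAck)].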
@@ -196,9 +196,9 @@
 %% mirrors to be able to detect this and tidy up as necessary to avoid
 %% leaks. If we just had the master monitoring all senders then we
 %% would have the possibility that a sender appears and only sends the
-%% message to a few of the slaves before dying. Those slaves would
+%% message to a few of the mirrors before dying. Those mirrors would
 %% then hold on to the message, assuming they'll receive some
-%% instruction eventually from the master. Thus we have both slaves
+%% instruction eventually from the master. Thus we have both mirrors
 %% and the master monitor all senders they become aware of. But there
 %% is a race: if the slave receives a DOWN of a sender, how does it
 %% know whether or not the master is going to send it instructions

@@ -209,8 +209,8 @@
 %% coordinator receives a DOWN message from a sender, it informs the
 %% master via a callback. This allows the master to do any tidying
 %% necessary, but more importantly allows the master to broadcast a
-%% sender_death message to all the slaves, saying the sender has
-%% died. Once the slaves receive the sender_death message, they know
+%% sender_death message to all the mirrors, saying the sender has
+%% died. Once the mirrors receive the sender_death message, they know
 %% that they're not going to receive any more instructions from the gm
 %% regarding that sender. However, it is possible that the coordinator
 %% receives the DOWN and communicates that to the master before the

@@ -230,11 +230,11 @@
 %% received the sender_death message from the master via gm already,
 %% then it will wait 20 seconds before broadcasting a request for
 %% confirmation from the master that the sender really has died.
-%% Should a sender have only sent a publish to slaves, this allows
-%% slaves to inform the master of the previous existence of the
+%% Should a sender have only sent a publish to mirrors, this allows
+%% mirrors to inform the master of the previous existence of the
 %% sender. The master will thus monitor the sender, receive the DOWN,
 %% and subsequently broadcast the sender_death message, allowing the
-%% slaves to tidy up. This process can repeat for the same sender:
+%% mirrors to tidy up. This process can repeat for the same sender:
 %% consider one slave receives the publication, then the DOWN, then
 %% asks for confirmation of death, then the master broadcasts the
 %% sender_death message. Only then does another slave receive the

@@ -248,7 +248,7 @@
 %% When the 20 second timer expires, the slave first checks to see
 %% whether it still needs confirmation of the death before requesting
 %% it. This prevents unnecessary traffic on gm as it allows one
-%% broadcast of the sender_death message to satisfy many slaves.
+%% broadcast of the sender_death message to satisfy many mirrors.
 %%
 %% If we consider the promotion of a slave at this point, we have two
 %% possibilities: that of the slave that has received the DOWN and is

@@ -257,14 +257,14 @@
 %% DOWN. In the first case, in the act of promotion to master, the new
 %% master will monitor again the dead sender, and after it has
 %% finished promoting itself, it should find another DOWN waiting,
-%% which it will then broadcast. This will allow slaves to tidy up as
+%% which it will then broadcast. This will allow mirrors to tidy up as
 %% normal. In the second case, we have the possibility that
 %% confirmation-of-sender-death request has been broadcast, but that
 %% it was broadcast before the master failed, and that the slave being
 %% promoted does not know anything about that sender, and so will not
 %% monitor it on promotion. Thus a slave that broadcasts such a
 %% request, at the point of broadcasting it, recurses, setting another
-%% 20 second timer. As before, on expiry of the timer, the slaves
+%% 20 second timer. As before, on expiry of the timer, the mirrors
 %% checks to see whether it still has not received a sender_death
 %% message for the dead sender, and if not, broadcasts a death
 %% confirmation request. Thus this ensures that even when a master
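The 20-second confirmation-of-death cycle described above re-arms itself until a sender_death message has been seen. A hedged sketch of that loop built on erlang:send_after/3; the message shapes and the seen_sender_death/2 helper are assumptions made purely for illustration, not the module's actual protocol:

%% Illustrative only: ask the master to confirm a sender's death, then check
%% again in 20 seconds in case the master itself died before answering.
request_death_confirmation(GM, SenderPid) ->
    ok = gm:broadcast(GM, {check_sender_death, SenderPid}),
    erlang:send_after(20000, self(), {check_sender_death_timeout, SenderPid}).

%% On timer expiry, re-broadcast only if no sender_death has arrived yet, so
%% that one broadcast from the master can satisfy many mirrors at once.
handle_info({check_sender_death_timeout, SenderPid}, State = #state{gm = GM}) ->
    case seen_sender_death(SenderPid, State) of
        true  -> {noreply, State};
        false -> request_death_confirmation(GM, SenderPid),
                 {noreply, State}
    end.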
@@ -273,12 +273,12 @@
 %% dead sender, receive the DOWN and broadcast the sender_death
 %% message.
 %%
-%% The preceding commentary deals with the possibility of slaves
+%% The preceding commentary deals with the possibility of mirrors
 %% receiving publications from senders which the master does not, and
 %% the need to prevent memory leaks in such scenarios. The inverse is
 %% also possible: a partial publication may cause only the master to
 %% receive a publication. It will then publish the message via gm. The
-%% slaves will receive it via gm, will publish it to their BQ and will
+%% mirrors will receive it via gm, will publish it to their BQ and will
 %% set up monitoring on the sender. They will then receive the DOWN
 %% message and the master will eventually publish the corresponding
 %% sender_death message. The slave will then be able to tidy up its

@@ -419,7 +419,7 @@ handle_pre_hibernate(State = #state { gm = GM }) ->
 %% timely notification of slave death if policy changes when
 %% everything is idle. So cause some activity just before we
 %% sleep. This won't cause us to go into perpetual motion as the
-%% heartbeat does not wake up coordinator or slaves.
+%% heartbeat does not wake up coordinator or mirrors.
 gm:broadcast(GM, hibernate_heartbeat),
 {hibernate, State}.

@@ -446,7 +446,7 @@ handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
 %% actually delivered. Then it calls handle_terminate/2 below so the
 %% coordinator is stopped.
 %%
-%% If we stop the coordinator right now, remote slaves could see the
+%% If we stop the coordinator right now, remote mirrors could see the
 %% coordinator DOWN before delete_and_terminate was delivered to all
 %% GMs. One of those GM would be promoted as the master, and this GM
 %% would hang forever, waiting for other GMs to stop.

src/rabbit_mirror_queue_master.erl

Lines changed: 3 additions & 3 deletions
@@ -118,9 +118,9 @@ init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
 {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
 %% We need synchronous add here (i.e. do not return until the
 %% slave is running) so that when queue declaration is finished
-%% all slaves are up; we don't want to end up with unsynced slaves
+%% all mirrors are up; we don't want to end up with unsynced mirrors
 %% just by declaring a new queue. But add can't be synchronous all
-%% the time as it can be called by slaves and that's
+%% the time as it can be called by mirrors and that's
 %% deadlock-prone.
 rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
 #state{name = QName,

@@ -207,7 +207,7 @@ terminate(Reason,
 true -> %% Remove the whole queue to avoid data loss
 rabbit_mirror_queue_misc:log_warning(
 QName, "Stopping all nodes on master shutdown since no "
-"synchronised slave is available~n", []),
+"synchronised mirror (replica) is available~n", []),
 stop_all_slaves(Reason, State);
 false -> %% Just let some other slave take over.
 ok

src/rabbit_mirror_queue_misc.erl

Lines changed: 12 additions & 12 deletions
@@ -317,11 +317,11 @@ store_updated_slaves(Q0) when ?is_amqqueue(Q0) ->

 %% Recoverable nodes are those which we could promote if the whole
 %% cluster were to suddenly stop and we then lose the master; i.e. all
-%% nodes with running slaves, and all stopped nodes which had running
-%% slaves when they were up.
+%% nodes with running mirrors, and all stopped nodes which had running
+%% mirrors when they were up.
 %%
-%% Therefore we aim here to add new nodes with slaves, and remove
-%% running nodes without slaves, We also try to keep the order
+%% Therefore we aim here to add new nodes with mirrors, and remove
+%% running nodes without mirrors, We also try to keep the order
 %% constant, and similar to the live SPids field (i.e. oldest
 %% first). That's not necessarily optimal if nodes spend a long time
 %% down, but we don't have a good way to predict what the optimal is

@@ -337,10 +337,10 @@ update_recoverable(SPids, RS) ->
 stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
 PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
 ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
-%% It's possible that we could be partitioned from some slaves
+%% It's possible that we could be partitioned from some mirrors
 %% between the lookup and the broadcast, in which case we could
 %% monitor them but they would not have received the GM
-%% message. So only wait for slaves which are still
+%% message. So only wait for mirrors which are still
 %% not-partitioned.
 PendingSlavePids = lists:foldl(fun({Pid, MRef}, Acc) ->
 case rabbit_mnesia:on_running_node(Pid) of
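stop_all_slaves/5 above monitors the GM and every mirror, broadcasts delete_and_terminate, and then only waits for pids whose nodes are still reachable, since partitioned mirrors can never have received the broadcast. A hedged sketch of that waiting step; the receive loop below is a simplification written for illustration, not a copy of the real code:

%% Illustrative only: wait for DOWN from each monitored pid that is still on
%% a running node; demonitor pids we are partitioned from instead of waiting.
await_stopped(PidsMRefs, WaitTimeout) ->
    [case rabbit_mnesia:on_running_node(Pid) of
         true ->
             receive
                 {'DOWN', MRef, process, Pid, _Reason} -> ok
             after WaitTimeout ->
                 ok %% still not down; give up rather than block forever
             end;
         false ->
             erlang:demonitor(MRef, [flush])
     end || {Pid, MRef} <- PidsMRefs],
    ok.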
@@ -365,7 +365,7 @@ stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
 [Q0] = mnesia:read({rabbit_queue, QName}),
 Q1 = amqqueue:set_gm_pids(Q0, []),
 Q2 = amqqueue:set_slave_pids(Q1, []),
-%% Restarted slaves on running nodes can
+%% Restarted mirrors on running nodes can
 %% ensure old incarnations are stopped using
 %% the pending slave pids.
 Q3 = amqqueue:set_slave_pids_pending_shutdown(Q2, PendingSlavePids),

@@ -534,10 +534,10 @@ update_mirrors(Q) when ?is_amqqueue(Q) ->
 OldNodes = [OldMNode | OldSNodes],
 NewNodes = [NewMNode | NewSNodes],
 %% When a mirror dies, remove_from_queue/2 might have to add new
-%% slaves (in "exactly" mode). It will check mnesia to see which
-%% slaves there currently are. If drop_mirror/2 is invoked first
+%% mirrors (in "exactly" mode). It will check mnesia to see which
+%% mirrors there currently are. If drop_mirror/2 is invoked first
 %% then when we end up in remove_from_queue/2 it will not see the
-%% slaves that add_mirror/2 will add, and also want to add them
+%% mirrors that add_mirror/2 will add, and also want to add them
 %% (even though we are not responding to the death of a
 %% mirror). Breakage ensues.
 add_mirrors(QName, NewNodes -- OldNodes, async),

@@ -589,7 +589,7 @@ wait_for_new_master(QName, Destination, N) ->

 %% The arrival of a newly synced slave may cause the master to die if
 %% the policy does not want the master but it has been kept alive
-%% because there were no synced slaves.
+%% because there were no synced mirrors.
 %%
 %% We don't just call update_mirrors/2 here since that could decide to
 %% start a slave for some other reason, and since we are the slave ATM

@@ -608,7 +608,7 @@ maybe_drop_master_after_sync(Q) when ?is_amqqueue(Q) ->
 end,
 ok.
 %% [0] ASSERTION - if the policy wants the master to change, it has
-%% not just shuffled it into the slaves. All our modes ensure this
+%% not just shuffled it into the mirrors. All our modes ensure this
 %% does not happen, but we should guard against a misbehaving plugin.

 %%----------------------------------------------------------------------------
