
Commit 81d5b1e

Avoid using unfortunate terms in more places

We switched all doc guides to use "mirror" or "secondary" years ago, but these code comments were never updated. Renaming functions and record/HTTP API fields (including CLI tools) would be a major breaking change, so those will be aliased or renamed in the future, with much more extensive review. (cherry picked from commit bd3c189)
1 parent 694667e commit 81d5b1e

11 files changed (+82 −82 lines)

src/rabbit_amqqueue.erl

Lines changed: 9 additions & 9 deletions

@@ -688,7 +688,7 @@ with(#resource{} = Name, F, E, RetriesLeft) ->
 fun () -> F(Q) end);
 %% The queue is supposed to be active.
 %% The master node can go away or queue can be killed
-%% so we retry, waiting for a slave to take over.
+%% so we retry, waiting for a mirror to take over.
 {ok, Q} when ?amqqueue_state_is(Q, live) ->
 %% We check is_process_alive(QPid) in case we receive a
 %% nodedown (for example) in F() that has nothing to do
@@ -1526,7 +1526,7 @@ wait_for_promoted_or_stopped(Q0) ->
 true ->
 timer:sleep(100),
 wait_for_promoted_or_stopped(Q);
-%% All slave pids are stopped.
+%% All mirror pids are stopped.
 %% No process left for the queue
 false -> {stopped, Q}
 end
@@ -1857,8 +1857,8 @@ forget_all_durable(Node) ->
 end),
 ok.
 
-%% Try to promote a slave while down - it should recover as a
-%% master. We try to take the oldest slave here for best chance of
+%% Try to promote a mirror while down - it should recover as a
+%% master. We try to take the oldest mirror here for best chance of
 %% recovery.
 forget_node_for_queue(DeadNode, Q)
 when ?amqqueue_is_quorum(Q) ->
@@ -1983,12 +1983,12 @@ maybe_clear_recoverable_node(Node, Q) ->
 true ->
 %% There is a race with
 %% rabbit_mirror_queue_slave:record_synchronised/1 called
-%% by the incoming slave node and this function, called
+%% by the incoming mirror node and this function, called
 %% by the master node. If this function is executed after
 %% record_synchronised/1, the node is erroneously removed
 %% from the recoverable mirrors list.
 %%
-%% We check if the slave node's queue PID is alive. If it is
+%% We check if the mirror node's queue PID is alive. If it is
 %% the case, then this function is executed after. In this
 %% situation, we don't touch the queue record, it is already
 %% correct.
@@ -2120,9 +2120,9 @@ deliver(Qs, Delivery = #delivery{flow = Flow,
 confirm = Confirm}, QueueState0) ->
 {Quorum, MPids, SPids} = qpids(Qs),
 QPids = MPids ++ SPids,
-%% We use up two credits to send to a slave since the message
-%% arrives at the slave from two directions. We will ack one when
-%% the slave receives the message direct from the channel, and the
+%% We use up two credits to send to a mirror since the message
+%% arrives at the mirror from two directions. We will ack one when
+%% the mirror receives the message direct from the channel, and the
 %% other when it receives it via GM.
 
 case Flow of
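
The last hunk above describes the flow-control rule for mirrored-queue publishes: a channel spends two credits per mirror because every message reaches a mirror twice, once directly from the channel and once via GM, and each arrival acks one credit back. A toy sketch of that accounting (illustrative only; this is not RabbitMQ's actual credit_flow module):

-module(double_credit_sketch).
-export([demo/0]).

%% Publish one message to one mirror and track the sender's credits.
demo() ->
    Credits0 = 10,
    Credits1 = Credits0 - 2,  %% two credits consumed up front per mirror
    Credits2 = Credits1 + 1,  %% one acked when the direct publish arrives
    Credits3 = Credits2 + 1,  %% one acked when the same message arrives via GM
    Credits3 = Credits0,      %% asserts net zero once both paths delivered
    ok.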

src/rabbit_amqqueue_process.erl

Lines changed: 3 additions & 3 deletions

@@ -1554,7 +1554,7 @@ handle_cast({deliver,
 end,
 State1 = State#q{senders = Senders1},
 noreply(maybe_deliver_or_enqueue(Delivery, SlaveWhenPublished, State1));
-%% [0] The second ack is since the channel thought we were a slave at
+%% [0] The second ack is since the channel thought we were a mirror at
 %% the time it published this message, so it used two credits (see
 %% rabbit_amqqueue:deliver/2).
 
@@ -1656,7 +1656,7 @@ handle_cast(notify_decorators, State) ->
 handle_cast(policy_changed, State = #q{q = Q0}) ->
 Name = amqqueue:get_name(Q0),
 %% We depend on the #q.q field being up to date at least WRT
-%% policy (but not slave pids) in various places, so when it
+%% policy (but not mirror pids) in various places, so when it
 %% changes we go and read it from Mnesia again.
 %%
 %% This also has the side effect of waking us up so we emit a
@@ -1666,7 +1666,7 @@ handle_cast(policy_changed, State = #q{q = Q0}) ->
 
 handle_cast({sync_start, _, _}, State = #q{q = Q}) ->
 Name = amqqueue:get_name(Q),
-%% Only a slave should receive this, it means we are a duplicated master
+%% Only a mirror should receive this, it means we are a duplicated master
 rabbit_mirror_queue_misc:log_warning(
 Name, "Stopping after receiving sync_start from another master", []),
 stop(State).

src/rabbit_mirror_queue_coordinator.erl

Lines changed: 39 additions & 39 deletions

@@ -95,8 +95,8 @@
 %%
 %% The key purpose of also sending messages directly from the channels
 %% to the mirrors is that without this, in the event of the death of
-%% the master, messages could be lost until a suitable slave is
-%% promoted. However, that is not the only reason. A slave cannot send
+%% the master, messages could be lost until a suitable mirror is
+%% promoted. However, that is not the only reason. A mirror cannot send
 %% confirms for a message until it has seen it from the
 %% channel. Otherwise, it might send a confirm to a channel for a
 %% message that it might *never* receive from that channel. This can
@@ -108,48 +108,48 @@
 %%
 %% Hence the mirrors have to wait until they've seen both the publish
 %% via gm, and the publish via the channel before they issue the
-%% confirm. Either form of publish can arrive first, and a slave can
+%% confirm. Either form of publish can arrive first, and a mirror can
 %% be upgraded to the master at any point during this
 %% process. Confirms continue to be issued correctly, however.
 %%
-%% Because the slave is a full process, it impersonates parts of the
+%% Because the mirror is a full process, it impersonates parts of the
 %% amqqueue API. However, it does not need to implement all parts: for
 %% example, no ack or consumer-related message can arrive directly at
-%% a slave from a channel: it is only publishes that pass both
+%% a mirror from a channel: it is only publishes that pass both
 %% directly to the mirrors and go via gm.
 %%
 %% Slaves can be added dynamically. When this occurs, there is no
 %% attempt made to sync the current contents of the master with the
-%% new slave, thus the slave will start empty, regardless of the state
-%% of the master. Thus the slave needs to be able to detect and ignore
+%% new slave, thus the mirror will start empty, regardless of the state
+%% of the master. Thus the mirror needs to be able to detect and ignore
 %% operations which are for messages it has not received: because of
 %% the strict FIFO nature of queues in general, this is
-%% straightforward - all new publishes that the new slave receives via
+%% straightforward - all new publishes that the new mirror receives via
 %% gm should be processed as normal, but fetches which are for
-%% messages the slave has never seen should be ignored. Similarly,
-%% acks for messages the slave never fetched should be
+%% messages the mirror has never seen should be ignored. Similarly,
+%% acks for messages the mirror never fetched should be
 %% ignored. Similarly, we don't republish rejected messages that we
 %% haven't seen. Eventually, as the master is consumed from, the
 %% messages at the head of the queue which were there before the slave
-%% joined will disappear, and the slave will become fully synced with
+%% joined will disappear, and the mirror will become fully synced with
 %% the state of the master.
 %%
 %% The detection of the sync-status is based on the depth of the BQs,
 %% where the depth is defined as the sum of the length of the BQ (as
 %% per BQ:len) and the messages pending an acknowledgement. When the
-%% depth of the slave is equal to the master's, then the slave is
+%% depth of the mirror is equal to the master's, then the mirror is
 %% synchronised. We only store the difference between the two for
 %% simplicity. Comparing the length is not enough since we need to
 %% take into account rejected messages which will make it back into
 %% the master queue but can't go back in the slave, since we don't
-%% want "holes" in the slave queue. Note that the depth, and the
-%% length likewise, must always be shorter on the slave - we assert
+%% want "holes" in the mirror queue. Note that the depth, and the
+%% length likewise, must always be shorter on the mirror - we assert
 %% that in various places. In case mirrors are joined to an empty queue
 %% which only goes on to receive publishes, they start by asking the
 %% master to broadcast its depth. This is enough for mirrors to always
 %% be able to work out when their head does not differ from the master
 %% (and is much simpler and cheaper than getting the master to hang on
-%% to the guid of the msg at the head of its queue). When a slave is
+%% to the guid of the msg at the head of its queue). When a mirror is
 %% promoted to a master, it unilaterally broadcasts its depth, in
 %% order to solve the problem of depth requests from new mirrors being
 %% unanswered by a dead master.
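
The depth rule in the hunk above is simple enough to state directly: depth = queue length (as per BQ:len) plus messages pending acknowledgement, and a mirror counts as synchronised when its depth difference to the master is zero. A minimal sketch under those definitions (function names here are assumptions, not the real rabbit_mirror_queue_* API):

-module(depth_sketch).
-export([depth/2, is_synchronised/2]).

%% Depth as defined in the comment: BQ length plus pending acks.
depth(Len, PendingAcks) when Len >= 0, PendingAcks >= 0 ->
    Len + PendingAcks.

%% The real code stores only the difference; the guard mirrors the
%% invariant that depth is never greater on the mirror.
is_synchronised(MasterDepth, MirrorDepth) when MasterDepth >= MirrorDepth ->
    MasterDepth - MirrorDepth =:= 0.
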
@@ -171,23 +171,23 @@
 %% over gm, the mirrors must convert the msg_ids to acktags (a mapping
 %% the mirrors themselves must maintain).
 %%
-%% When the master dies, a slave gets promoted. This will be the
-%% eldest slave, and thus the hope is that that slave is most likely
+%% When the master dies, a mirror gets promoted. This will be the
+%% eldest slave, and thus the hope is that that mirror is most likely
 %% to be sync'd with the master. The design of gm is that the
 %% notification of the death of the master will only appear once all
 %% messages in-flight from the master have been fully delivered to all
-%% members of the gm group. Thus at this point, the slave that gets
+%% members of the gm group. Thus at this point, the mirror that gets
 %% promoted cannot broadcast different events in a different order
 %% than the master for the same msgs: there is no possibility for the
 %% same msg to be processed by the old master and the new master - if
 %% it was processed by the old master then it will have been processed
-%% by the slave before the slave was promoted, and vice versa.
+%% by the mirror before the mirror was promoted, and vice versa.
 %%
 %% Upon promotion, all msgs pending acks are requeued as normal, the
-%% slave constructs state suitable for use in the master module, and
+%% mirror constructs state suitable for use in the master module, and
 %% then dynamically changes into an amqqueue_process with the master
 %% as the bq, and the slave's bq as the master's bq. Thus the very
-%% same process that was the slave is now a full amqqueue_process.
+%% same process that was the mirror is now a full amqqueue_process.
 %%
 %% It is important that we avoid memory leaks due to the death of
 %% senders (i.e. channels) and partial publications. A sender
@@ -200,7 +200,7 @@
 %% then hold on to the message, assuming they'll receive some
 %% instruction eventually from the master. Thus we have both mirrors
 %% and the master monitor all senders they become aware of. But there
-%% is a race: if the slave receives a DOWN of a sender, how does it
+%% is a race: if the mirror receives a DOWN of a sender, how does it
 %% know whether or not the master is going to send it instructions
 %% regarding those messages?
 %%
@@ -221,12 +221,12 @@
 %% master will ask the coordinator to set up a new monitor, and
 %% will continue to process the messages normally. Slaves may thus
 %% receive publishes via gm from previously declared "dead" senders,
-%% but again, this is fine: should the slave have just thrown out the
+%% but again, this is fine: should the mirror have just thrown out the
 %% message it had received directly from the sender (due to receiving
 %% a sender_death message via gm), it will be able to cope with the
 %% publication purely from the master via gm.
 %%
-%% When a slave receives a DOWN message for a sender, if it has not
+%% When a mirror receives a DOWN message for a sender, if it has not
 %% received the sender_death message from the master via gm already,
 %% then it will wait 20 seconds before broadcasting a request for
 %% confirmation from the master that the sender really has died.
@@ -235,40 +235,40 @@
 %% sender. The master will thus monitor the sender, receive the DOWN,
 %% and subsequently broadcast the sender_death message, allowing the
 %% mirrors to tidy up. This process can repeat for the same sender:
-%% consider one slave receives the publication, then the DOWN, then
+%% consider one mirror receives the publication, then the DOWN, then
 %% asks for confirmation of death, then the master broadcasts the
-%% sender_death message. Only then does another slave receive the
+%% sender_death message. Only then does another mirror receive the
 %% publication and thus set up its monitoring. Eventually that slave
 %% too will receive the DOWN, ask for confirmation and the master will
 %% monitor the sender again, receive another DOWN, and send out
 %% another sender_death message. Given the 20 second delay before
 %% requesting death confirmation, this is highly unlikely, but it is a
 %% possibility.
 %%
-%% When the 20 second timer expires, the slave first checks to see
+%% When the 20 second timer expires, the mirror first checks to see
 %% whether it still needs confirmation of the death before requesting
 %% it. This prevents unnecessary traffic on gm as it allows one
 %% broadcast of the sender_death message to satisfy many mirrors.
 %%
-%% If we consider the promotion of a slave at this point, we have two
-%% possibilities: that of the slave that has received the DOWN and is
+%% If we consider the promotion of a mirror at this point, we have two
+%% possibilities: that of the mirror that has received the DOWN and is
 %% thus waiting for confirmation from the master that the sender
-%% really is down; and that of the slave that has not received the
+%% really is down; and that of the mirror that has not received the
 %% DOWN. In the first case, in the act of promotion to master, the new
 %% master will monitor again the dead sender, and after it has
 %% finished promoting itself, it should find another DOWN waiting,
 %% which it will then broadcast. This will allow mirrors to tidy up as
 %% normal. In the second case, we have the possibility that
 %% confirmation-of-sender-death request has been broadcast, but that
-%% it was broadcast before the master failed, and that the slave being
+%% it was broadcast before the master failed, and that the mirror being
 %% promoted does not know anything about that sender, and so will not
-%% monitor it on promotion. Thus a slave that broadcasts such a
+%% monitor it on promotion. Thus a mirror that broadcasts such a
 %% request, at the point of broadcasting it, recurses, setting another
 %% 20 second timer. As before, on expiry of the timer, the mirrors
 %% checks to see whether it still has not received a sender_death
 %% message for the dead sender, and if not, broadcasts a death
 %% confirmation request. Thus this ensures that even when a master
-%% dies and the new slave has no knowledge of the dead sender, it will
+%% dies and the new mirror has no knowledge of the dead sender, it will
 %% eventually receive a death confirmation request, shall monitor the
 %% dead sender, receive the DOWN and broadcast the sender_death
 %% message.
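
The sender-death protocol in the hunks above boils down to a timer loop: on a sender DOWN, wait 20 seconds, and only broadcast a confirmation request if no sender_death arrived via gm in the meantime, re-arming the timer each time in case the master itself died. A self-contained toy version of that loop (message shapes and names are assumptions, not the coordinator's real code):

-module(sender_death_sketch).
-export([start/0]).

-define(DEATH_TIMEOUT_MS, 20000).

%% Spawns a toy mirror process: on {down, Sender} it arms a 20-second
%% timer; when the timer fires it only requests confirmation if no
%% {sender_death, Sender} arrived in the meantime, then re-arms.
start() ->
    spawn(fun() -> loop(sets:new()) end).

loop(ConfirmedDead) ->
    receive
        {down, Sender} ->
            erlang:send_after(?DEATH_TIMEOUT_MS, self(), {check, Sender}),
            loop(ConfirmedDead);
        {sender_death, Sender} ->
            %% Death confirmed by the master via gm: remember it.
            loop(sets:add_element(Sender, ConfirmedDead));
        {check, Sender} ->
            case sets:is_element(Sender, ConfirmedDead) of
                true ->
                    ok; %% already confirmed; no extra gm traffic needed
                false ->
                    io:format("would broadcast death query for ~p~n", [Sender]),
                    %% Recurse: a fresh timer covers the case where the
                    %% master died before it could answer.
                    erlang:send_after(?DEATH_TIMEOUT_MS, self(), {check, Sender})
            end,
            loop(ConfirmedDead)
    end.
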
@@ -281,17 +281,17 @@
 %% mirrors will receive it via gm, will publish it to their BQ and will
 %% set up monitoring on the sender. They will then receive the DOWN
 %% message and the master will eventually publish the corresponding
-%% sender_death message. The slave will then be able to tidy up its
+%% sender_death message. The mirror will then be able to tidy up its
 %% state as normal.
 %%
 %% Recovery of mirrored queues is straightforward: as nodes die, the
 %% remaining nodes record this, and eventually a situation is reached
 %% in which only one node is alive, which is the master. This is the
 %% only node which, upon recovery, will resurrect a mirrored queue:
-%% nodes which die and then rejoin as a slave will start off empty as
+%% nodes which die and then rejoin as a mirror will start off empty as
 %% if they have no mirrored content at all. This is not surprising: to
 %% achieve anything more sophisticated would require the master and
-%% recovering slave to be able to check to see whether they agree on
+%% recovering mirror to be able to check to see whether they agree on
 %% the last seen state of the queue: checking depth alone is not
 %% sufficient in this case.
 %%
@@ -361,8 +361,8 @@ handle_cast({gm_deaths, DeadGMPids}, State = #state{q = Q}) when ?amqqueue_pid_r
 noreply(State);
 {ok, _MPid0, DeadPids, _ExtraNodes} ->
 %% see rabbitmq-server#914;
-%% Different slave is now master, stop current coordinator normally.
-%% Initiating queue is now slave and the least we could do is report
+%% Different mirror is now master, stop current coordinator normally.
+%% Initiating queue is now mirror and the least we could do is report
 %% deaths which we 'think' we saw.
 %% NOTE: Reported deaths here, could be inconsistent.
 rabbit_mirror_queue_misc:report_deaths(MPid, false, QueueName,
@@ -416,7 +416,7 @@ code_change(_OldVsn, State, _Extra) ->
 
 handle_pre_hibernate(State = #state { gm = GM }) ->
 %% Since GM notifications of deaths are lazy we might not get a
-%% timely notification of slave death if policy changes when
+%% timely notification of mirror death if policy changes when
 %% everything is idle. So cause some activity just before we
 %% sleep. This won't cause us to go into perpetual motion as the
 %% heartbeat does not wake up coordinator or mirrors.

src/rabbit_mirror_queue_master.erl

Lines changed: 5 additions & 5 deletions

@@ -117,7 +117,7 @@ init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
 ok = rabbit_misc:execute_mnesia_transaction(Fun),
 {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
 %% We need synchronous add here (i.e. do not return until the
-%% slave is running) so that when queue declaration is finished
+%% mirror is running) so that when queue declaration is finished
 %% all mirrors are up; we don't want to end up with unsynced mirrors
 %% just by declaring a new queue. But add can't be synchronous all
 %% the time as it can be called by mirrors and that's
@@ -209,7 +209,7 @@ terminate(Reason,
 QName, "Stopping all nodes on master shutdown since no "
 "synchronised mirror (replica) is available~n", []),
 stop_all_slaves(Reason, State);
-false -> %% Just let some other slave take over.
+false -> %% Just let some other mirror take over.
 ok
 end,
 State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
@@ -262,7 +262,7 @@ batch_publish(Publishes, ChPid, Flow,
 MsgSizes),
 BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS),
 ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
-%% [0] When the slave process handles the publish command, it sets the
+%% [0] When the mirror process handles the publish command, it sets the
 %% IsDelivered flag to true, so to avoid iterating over the messages
 %% again at the slave, we do it here.
 
@@ -464,7 +464,7 @@ is_duplicate(Message = #basic_message { id = MsgId },
 {Result, BQS1} = BQ:is_duplicate(Message, BQS),
 {Result, State #state { backing_queue_state = BQS1 }};
 {ok, published} ->
-%% It already got published when we were a slave and no
+%% It already got published when we were a mirror and no
 %% confirmation is waiting. amqqueue_process will have, in
 %% its msg_id_to_channel mapping, the entry for dealing
 %% with the confirm when that comes back in (it's added
@@ -474,7 +474,7 @@ is_duplicate(Message = #basic_message { id = MsgId },
 {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS) }};
 {ok, Disposition}
 when Disposition =:= confirmed
-%% It got published when we were a slave via gm, and
+%% It got published when we were a mirror via gm, and
 %% confirmed some time after that (maybe even after
 %% promotion), but before we received the publish from the
 %% channel, so couldn't previously know what the
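
The is_duplicate hunks above describe a promoted ex-mirror consulting its seen_status map to decide whether a publish arriving late from a channel duplicates something it already processed via gm. A hypothetical, heavily simplified version of that decision (the real function also threads backing-queue state and confirm handling):

-module(seen_status_sketch).
-export([is_duplicate/2]).

%% SeenStatus maps MsgId => published | confirmed.
%% Returns {false, SeenStatus} for a fresh message, or
%% {{true, drop}, SeenStatus1} when the gm copy was already processed.
is_duplicate(MsgId, SeenStatus) ->
    case maps:find(MsgId, SeenStatus) of
        error ->
            %% Never seen via gm while we were a mirror: a fresh publish.
            {false, SeenStatus};
        {ok, published} ->
            %% Already published via gm; drop the channel copy and let
            %% the msg_id_to_channel entry deal with the confirm.
            {{true, drop}, maps:remove(MsgId, SeenStatus)};
        {ok, confirmed} ->
            %% Published and confirmed via gm before the channel copy
            %% arrived; drop it and forget the entry.
            {{true, drop}, maps:remove(MsgId, SeenStatus)}
    end.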
