Codebase list rabbitmq-server / 97e022a
Imported Upstream version 3.5.7 James Page 8 years ago
119 changed file(s) with 3004 addition(s) and 750 deletion(s).
4747 BASIC_PLT=basic.plt
4848 RABBIT_PLT=rabbit.plt
4949
50 ifndef USE_SPECS
51 # our type specs rely on dict:dict/0 etc, which are only available in 17.0
52 # upwards.
53 USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,11]), halt().')
54 endif
55
5650 ifndef USE_PROPER_QC
5751 # PropEr needs to be installed for property checking
5852 # http://proper.softlab.ntua.gr/
59 USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
53 USE_PROPER_QC=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
6054 endif
6155
6256 #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests
63 ERLC_OPTS=-I $(INCLUDE_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
57 ERLC_OPTS=-I $(INCLUDE_DIR) -Wall +warn_export_vars -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
58
59 # Our type specs rely on dict:dict/0 etc, which are only available in
60 # 17.0 upwards.
61 define compare_version
62 $(shell awk 'BEGIN {
63 split("$(1)", v1, "\.");
64 version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
65
66 split("$(2)", v2, "\.");
67 version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
68
69 if (version1 $(3) version2) {
70 print "true";
71 } else {
72 print "false";
73 }
74 }')
75 endef
76
77 ERTS_VER = $(shell erl -version 2>&1 | sed -E 's/.* version //')
78 USE_SPECS_MIN_ERTS_VER = 5.11
79 ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true)
80 ERLC_OPTS += -Duse_specs
81 endif
6482
6583 ifdef INSTRUMENT_FOR_QC
6684 ERLC_OPTS += -DINSTR_MOD=gm_qc
269287 $(ERL_CALL)
270288
271289 stop-node:
272 -$(ERL_CALL) -q
290 -( \
291 pid=$$(./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) eval 'os:getpid().') && \
292 $(ERL_CALL) -q && \
293 while ps -p $$pid >/dev/null 2>&1; do sleep 1; done \
294 )
273295
274296 # code coverage will be created for subdirectory "ebin" of COVER_DIR
275297 COVER_DIR=.
00 all:
1 echo "Please select a target from the Makefile."
1 @echo "Please select a target from the Makefile."
22
33 clean:
44 rm -f *.pyc
99 ##
1010 ## The Original Code is RabbitMQ.
1111 ##
12 ## The Initial Developer of the Original Code is GoPivotal, Inc.
12 ## The Initial Developer of the Original Code is Pivotal Software, Inc.
1313 ## Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
1414 ##
1515
104104 %%
105105 %% The Original Code is RabbitMQ.
106106 %%
107 %% The Initial Developer of the Original Code is GoPivotal, Inc.
107 %% The Initial Developer of the Original Code is Pivotal Software, Inc.
108108 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
109109 %%"""
110110
181181 %%
182182 %% {vm_memory_high_watermark, 0.4},
183183
184 %% Alternatively, we can set a limit (in bytes) of RAM used by the node.
185 %%
186 %% {vm_memory_high_watermark, {absolute, 1073741824}},
187
184188 %% Fraction of the high watermark limit at which queues start to
185189 %% page message out to disc in order to free up memory.
186190 %%
721721 </varlistentry>
722722
723723 <varlistentry>
724 <term>
725 <cmdsynopsis>
726 <command>authenticate_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg>
727 </cmdsynopsis>
728 </term>
729 <listitem>
730 <variablelist>
731 <varlistentry>
732 <term>username</term>
733 <listitem><para>The name of the user.</para></listitem>
734 </varlistentry>
735 <varlistentry>
736 <term>password</term>
737 <listitem><para>The password of the user.</para></listitem>
738 </varlistentry>
739 </variablelist>
740 <para role="example-prefix">For example:</para>
741 <screen role="example">rabbitmqctl authenticate_user tonyg verifyit</screen>
742 <para role="example">
743 This command instructs the RabbitMQ broker to authenticate the
744 user named <command>tonyg</command> with password
745 <command>verifyit</command>.
746 </para>
747 </listitem>
748 </varlistentry>
749
750 <varlistentry>
724751 <term><cmdsynopsis><command>set_user_tags</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>tag</replaceable> ...</arg></cmdsynopsis></term>
725752 <listitem>
726753 <variablelist>
12291256 queue is non-exclusive.</para></listitem>
12301257 </varlistentry>
12311258 <varlistentry>
1259 <term>exclusive</term>
1260 <listitem><para>True if queue is exclusive (i.e. has
1261 owner_pid), false otherwise</para></listitem>
1262 </varlistentry>
1263 <varlistentry>
12321264 <term>exclusive_consumer_pid</term>
12331265 <listitem><para>Id of the Erlang process representing the channel of the
12341266 exclusive consumer subscribed to this queue. Empty if
19201952 </variablelist>
19211953 </listitem>
19221954 </varlistentry>
1955 <varlistentry>
1956 <term><cmdsynopsis><command>set_vm_memory_high_watermark absolute</command> <arg choice="req"><replaceable>memory_limit_in_bytes</replaceable></arg></cmdsynopsis></term>
1957 <listitem>
1958 <variablelist>
1959 <varlistentry>
1960 <term>memory_limit_in_bytes</term>
1961 <listitem><para>
1962 The new memory limit at which flow control is
1963 triggered, expressed in bytes as an integer number
1964 greater than or equal to 0.
1965 </para></listitem>
1966 </varlistentry>
1967 </variablelist>
1968 </listitem>
1969 </varlistentry>
19231970 </variablelist>
19241971 </refsect2>
19251972 </refsect1>
00 {application, rabbit, %% -*- erlang -*-
11 [{description, "RabbitMQ"},
22 {id, "RabbitMQ"},
3 {vsn, "3.5.4"},
3 {vsn, "3.5.7"},
44 {modules, []},
55 {registered, [rabbit_amqqueue_sup,
66 rabbit_log,
2525 %% breaks the QPid Java client
2626 {frame_max, 131072},
2727 {channel_max, 0},
28 {heartbeat, 580},
28 {heartbeat, 60},
2929 {msg_store_file_size_limit, 16777216},
30 {queue_index_max_journal_entries, 65536},
30 {fhc_write_buffering, true},
31 {fhc_read_buffering, true},
32 {queue_index_max_journal_entries, 32768},
3133 {queue_index_embed_msgs_below, 4096},
3234 {default_user, <<"guest">>},
3335 {default_pass, <<"guest">>},
8082 gen_fsm, ssl]},
8183 {ssl_apps, [asn1, crypto, public_key, ssl]},
8284 %% see rabbitmq-server#114
83 {mirroring_flow_control, true}
85 {mirroring_flow_control, true},
86 %% see rabbitmq-server#227 and related tickets.
87 %% msg_store_credit_disc_bound only takes effect when
88 %% messages are persisted to the message store. If messages
89 %% are embedded on the queue index, then modifying this
90 %% setting has no effect because credit_flow is not used when
91 %% writing to the queue index. See the setting
92 %% queue_index_embed_msgs_below above.
93 {msg_store_credit_disc_bound, {2000, 500}},
94 {msg_store_io_batch_size, 2048},
95 %% see rabbitmq-server#143
96 {credit_flow_default_credit, {200, 50}}
8497 ]}]}.
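For reference, these message-store and credit-flow settings are ordinary rabbit application environment keys, so they can be overridden per node in the configuration file. A minimal, purely illustrative rabbitmq.config sketch follows; the values are arbitrary examples, not recommendations:

%% Illustrative rabbitmq.config overriding the defaults above
%% (example values only).
[
 {rabbit, [{msg_store_credit_disc_bound, {4000, 1000}},
           {msg_store_io_batch_size,     4096},
           {credit_flow_default_credit,  {400, 100}}]}
].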
99 %%
1010 %% The Original Code is RabbitMQ.
1111 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
12 %% The Initial Developer of the Original Code is Pivotal Software, Inc.
1313 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
1414 %%
1515
99 %%
1010 %% The Original Code is RabbitMQ.
1111 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
12 %% The Initial Developer of the Original Code is Pivotal Software, Inc.
1313 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
1414 %%
1515
121121 -define(HIBERNATE_AFTER_MIN, 1000).
122122 -define(DESIRED_HIBERNATE, 10000).
123123 -define(CREDIT_DISC_BOUND, {2000, 500}).
124 %% When we discover that we should write some indices to disk for some
125 %% betas, IO_BATCH_SIZE sets the minimum number of betas whose indices
126 %% must be due for writing before we do any work at all.
127 -define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND
124128
125129 -define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
126130 -define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
99 %%
1010 %% The Original Code is RabbitMQ.
1111 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
12 %% The Initial Developer of the Original Code is Pivotal Software, Inc.
1313 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
1414 %%
1515
99 %%
1010 %% The Original Code is RabbitMQ.
1111 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
12 %% The Initial Developer of the Original Code is Pivotal Software, Inc.
1313 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
1414 %%
1515
8787 Count = length(List),
8888 Compound = lists:map(fun generate/1, List),
8989 S = iolist_size(Compound),
90 %% S < 256 -> Count < 256
91 if S > 255 -> [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound];
92 true -> [<<16#c0, (S + 1):8/unsigned, Count:8/unsigned>>, Compound]
90 %% If the list contains less than (256 - 1) elements and if the
91 %% encoded size (including the encoding of "Count", thus S + 1
92 %% in the test) is less than 256 bytes, we use the short form.
93 %% Otherwise, we use the large form.
94 if Count >= (256 - 1) orelse (S + 1) >= 256 ->
95 [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound];
96 true ->
97 [<<16#c0, (S + 1):8/unsigned, Count:8/unsigned>>, Compound]
9398 end;
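As a worked illustration of the boundary above (a sketch, not part of the codec): a list of Count = 3 elements whose encodings total S = 10 bytes has S + 1 = 11 < 256 and Count < 255, so the single-byte list8 form (16#c0) is chosen; a 300-element list always needs the list32 form (16#d0). The same test in isolation:

%% Sketch only: Count is the element count, S the size in bytes of the
%% encoded elements; returns true when the 32-bit (large) form is required.
needs_long_form(Count, S) ->
    Count >= (256 - 1) orelse (S + 1) >= 256.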
9499
95100 generate({map, ListOfPairs}) ->
99104 (generate(Val))]
100105 end, ListOfPairs),
101106 S = iolist_size(Compound),
102 if S > 255 -> [<<16#d1,(S + 4):32,Count:32>>, Compound];
103 true -> [<<16#c1,(S + 1):8,Count:8>>, Compound]
107 %% See generate({list, ...}) for an explanation of this test.
108 if Count >= (256 - 1) orelse (S + 1) >= 256 ->
109 [<<16#d1, (S + 4):32, Count:32>>, Compound];
110 true ->
111 [<<16#c1, (S + 1):8, Count:8>>, Compound]
104112 end;
105113
106114 generate({array, Type, List}) ->
108116 Body = iolist_to_binary(
109117 [constructor(Type), [generate(Type, I) || I <- List]]),
110118 S = size(Body),
111 %% S < 256 -> Count < 256
112 if S > 255 -> [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body];
113 true -> [<<16#e0, (S + 1):8/unsigned, Count:8/unsigned>>, Body]
119 %% See generate({list, ...}) for an explanation of this test.
120 if Count >= (256 - 1) orelse (S + 1) >= 256 ->
121 [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body];
122 true ->
123 [<<16#e0, (S + 1):8/unsigned, Count:8/unsigned>>, Body]
114124 end;
115125
116126 generate({as_is, TypeCode, Bin}) ->
347347 handle_1_0_connection_frame(#'v1_0.open'{ max_frame_size = ClientFrameMax,
348348 channel_max = ClientChannelMax,
349349 idle_time_out = IdleTimeout,
350 hostname = Hostname,
351 properties = Props },
350 hostname = Hostname },
352351 State = #v1{
353352 connection_state = starting,
354353 connection = Connection,
355354 throttle = Throttle,
356355 helper_sup = HelperSupPid,
357356 sock = Sock}) ->
358 ClientProps = case Props of
359 undefined -> [];
360 {map, Ps} -> Ps
361 end,
362357 ClientHeartbeatSec = case IdleTimeout of
363358 undefined -> 0;
364359 {uint, Interval} -> Interval div 1000
366361 FrameMax = case ClientFrameMax of
367362 undefined -> unlimited;
368363 {_, FM} -> FM
369 end,
370 ChannelMax = case ClientChannelMax of
371 undefined -> unlimited;
372 {_, CM} -> CM
373364 end,
374365 {ok, HeartbeatSec} = application:get_env(rabbit, heartbeat),
375366 State1 =
221221 %% content records. However, that's already been handled for us, we're
222222 %% just sending a chunk, so from this perspective it's just a binary.
223223
224 assemble_frames(Channel, Performative, Content, FrameMax,
224 assemble_frames(Channel, Performative, Content, _FrameMax,
225225 rabbit_amqp1_0_framing) ->
226226 ?DEBUG("Channel ~p <-~n~p~n followed by ~p bytes of content~n~n",
227227 [Channel, rabbit_amqp1_0_framing:pprint(Performative),
0 sudo: true
1 language: erlang
2 notifications:
3 email:
4 - alerts@rabbitmq.com
5 addons:
6 apt:
7 packages:
8 - slapd
9 - ldap-utils
10 - xsltproc
11 otp_release:
12 - "R16B03-1"
13 - "17.5"
14 - "18.0"
15 install:
16 - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi
17 - cd $HOME/rabbitmq-public-umbrella
18 - make co
19 - make up
20 services:
21 - slapd
22 before_script:
23 - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG"
24 - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]}
25 - rm -rf ${TEST_DIR}
26 - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR}
27 - cd ${TEST_DIR}
28 - ./example/setup.sh
29 script: make test
30 before_cache:
31 - rm -rf ${TEST_DIR}
32 - cd $HOME
33 cache:
34 apt: true
35 directories:
36 - $HOME/rabbitmq-public-umbrella
00 %% -*- erlang -*-
1 [{rabbit, [{auth_backends, [rabbit_auth_backend_ldap]},
2 {default_vhost, <<"test">>}]},
1 [{rabbit, [{default_vhost, <<"test">>}]},
32 {rabbitmq_auth_backend_ldap,
43 [ {servers, ["localhost"]},
54 {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
3635 {'not', {equals, "${username}", "Mike Bridgen"}}]}
3736 ]}}
3837 ]}}
39 ]}}
38 ]}},
39 {tag_queries, [{administrator, {constant, false}},
40 {management, {constant, false}}]}
4041 ]}
4142 ].
6969
7070 user_login_authorization(Username) ->
7171 case user_login_authentication(Username, []) of
72 {ok, #auth_user{impl = Impl}} -> {ok, Impl};
73 Else -> Else
72 {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags};
73 Else -> Else
7474 end.
7575
7676 check_vhost_access(User = #auth_user{username = Username,
1818 -include_lib("eunit/include/eunit.hrl").
1919 -include_lib("amqp_client/include/amqp_client.hrl").
2020
21 -define(SIMON, #amqp_params_network{username = <<"Simon MacMullen">>,
21 -define(SIMON_NAME, "Simon MacMullen").
22 -define(MIKEB_NAME, "Mike Bridgen").
23 -define(VHOST, "test").
24
25 -define(SIMON, #amqp_params_network{username = << ?SIMON_NAME >>,
2226 password = <<"password">>,
23 virtual_host = <<"test">>}).
24
25 -define(MIKEB, #amqp_params_network{username = <<"Mike Bridgen">>,
27 virtual_host = << ?VHOST >>}).
28
29 -define(MIKEB, #amqp_params_network{username = << ?MIKEB_NAME >>,
2630 password = <<"password">>,
27 virtual_host = <<"test">>}).
28
29 %%--------------------------------------------------------------------
30
31 login_test_() ->
31 virtual_host = << ?VHOST >>}).
32
33 %%--------------------------------------------------------------------
34
35 ldap_only_test_() ->
36 { setup,
37 fun () -> ok = application:set_env(rabbit, auth_backends,
38 [rabbit_auth_backend_ldap]) end,
39 fun (_) -> ok = application:unset_env(rabbit, auth_backends) end,
40 [ {"LDAP Login", login()},
41 {"LDAP In group", in_group()},
42 {"LDAP Constant", const()},
43 {"LDAP String match", string_match()},
44 {"LDAP Boolean check", boolean_logic()},
45 {"LDAP Tags", tag_check([])}
46 ]}.
47
48 ldap_and_internal_test_() ->
49 { setup,
50 fun () ->
51 ok = application:set_env(rabbit, auth_backends,
52 [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]),
53 ok = control_action(add_user, [ ?SIMON_NAME, ""]),
54 ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
55 ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]),
56 ok = control_action(add_user, [ ?MIKEB_NAME, ""]),
57 ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""])
58 end,
59 fun (_) ->
60 ok = application:unset_env(rabbit, auth_backends),
61 ok = control_action(delete_user, [ ?SIMON_NAME ]),
62 ok = control_action(delete_user, [ ?MIKEB_NAME ])
63 end,
64 [ {"LDAP&Internal Login", login()},
65 {"LDAP&Internal Permissions", permission_match()},
66 {"LDAP&Internal Tags", tag_check([management, foo])}
67 ]}.
68
69 internal_followed_ldap_and_internal_test_() ->
70 { setup,
71 fun () ->
72 ok = application:set_env(rabbit, auth_backends,
73 [rabbit_auth_backend_internal, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]),
74 ok = control_action(add_user, [ ?SIMON_NAME, ""]),
75 ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
76 ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]),
77 ok = control_action(add_user, [ ?MIKEB_NAME, ""]),
78 ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""])
79 end,
80 fun (_) ->
81 ok = application:unset_env(rabbit, auth_backends),
82 ok = control_action(delete_user, [ ?SIMON_NAME ]),
83 ok = control_action(delete_user, [ ?MIKEB_NAME ])
84 end,
85 [ {"Internal, LDAP&Internal Login", login()},
86 {"Internal, LDAP&Internal Permissions", permission_match()},
87 {"Internal, LDAP&Internal Tags", tag_check([management, foo])}
88 ]}.
89
90
91 %%--------------------------------------------------------------------
92
93 login() ->
3294 [test_login(Env, L, case {LGood, EnvGood} of
3395 {good, good} -> fun succ/1;
3496 _ -> fun fail/1
89151
90152 %%--------------------------------------------------------------------
91153
92 in_group_test_() ->
154 in_group() ->
93155 X = [#'exchange.declare'{exchange = <<"test">>}],
94156 test_resource_funs([{?SIMON, X, ok},
95157 {?MIKEB, X, fail}]).
96158
97 const_test_() ->
159 const() ->
98160 Q = [#'queue.declare'{queue = <<"test">>}],
99161 test_resource_funs([{?SIMON, Q, ok},
100162 {?MIKEB, Q, fail}]).
101163
102 string_match_test_() ->
164 string_match() ->
103165 B = fun(N) ->
104166 [#'exchange.declare'{exchange = N},
105167 #'queue.declare'{queue = <<"test">>},
109171 {?SIMON, B(<<"abc123">>), fail},
110172 {?SIMON, B(<<"xch-Someone Else-abc123">>), fail}]).
111173
112 boolean_logic_test_() ->
174 boolean_logic() ->
113175 Q1 = [#'queue.declare'{queue = <<"test1">>},
114176 #'basic.consume'{queue = <<"test1">>}],
115177 Q2 = [#'queue.declare'{queue = <<"test2">>},
118180 {?SIMON, Q2, ok},
119181 {?MIKEB, Q1, fail},
120182 {?MIKEB, Q2, fail}]].
183
184 permission_match() ->
185 B = fun(N) ->
186 [#'exchange.declare'{exchange = N},
187 #'queue.declare'{queue = <<"prefix-test">>},
188 #'queue.bind'{exchange = N, queue = <<"prefix-test">>}]
189 end,
190 test_resource_funs([{?SIMON, B(<<"prefix-abc123">>), ok},
191 {?SIMON, B(<<"abc123">>), fail},
192 {?SIMON, B(<<"xch-Simon MacMullen-abc123">>), fail}]).
193
194 tag_check(Tags) ->
195 fun() ->
196 {ok, User} = rabbit_access_control:check_user_pass_login(
197 << ?SIMON_NAME >>, <<"password">>),
198 ?assertEqual(Tags, User#user.tags)
199 end.
200
201
202 %%--------------------------------------------------------------------
121203
122204 test_resource_funs(PTRs) -> [test_resource_fun(PTR) || PTR <- PTRs].
123205
134216 end)
135217 end.
136218
137 %%--------------------------------------------------------------------
219 control_action(Command, Args) ->
220 control_action(Command, node(), Args, default_options()).
221
222 control_action(Command, Args, NewOpts) ->
223 control_action(Command, node(), Args,
224 expand_options(default_options(), NewOpts)).
225
226 control_action(Command, Node, Args, Opts) ->
227 case catch rabbit_control_main:action(
228 Command, Node, Args, Opts,
229 fun (Format, Args1) ->
230 io:format(Format ++ " ...~n", Args1)
231 end) of
232 ok ->
233 io:format("done.~n"),
234 ok;
235 Other ->
236 io:format("failed.~n"),
237 Other
238 end.
239
240 default_options() -> [{"-p", ?VHOST}, {"-q", "false"}].
241
242 expand_options(As, Bs) ->
243 lists:foldl(fun({K, _}=A, R) ->
244 case proplists:is_defined(K, R) of
245 true -> R;
246 false -> [A | R]
247 end
248 end, Bs, As).
249
0 sudo: false
1 language: erlang
2 addons:
3 apt:
4 packages:
5 - xsltproc
6 otp_release:
7 - R16B03-1
8 - 17.5
9 - 18.0
10 install:
11 - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi
12 - cd $HOME/rabbitmq-public-umbrella
13 - make co
14 - make up
15 before_script:
16 - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG"
17 - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]}
18 - rm -rf ${TEST_DIR}
19 - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR}
20 - cd ${TEST_DIR}
21 script: make test
22 before_cache:
23 - rm -rf ${TEST_DIR}
24 - cd $HOME
25 cache:
26 apt: true
27 directories:
28 - $HOME/rabbitmq-public-umbrella
29 notifications:
30 email:
31 - alerts@rabbitmq.com
8585
8686 validate(_X) -> ok.
8787
88 validate_binding(_X, _B) -> ok.
88 validate_binding(_X, #binding { key = K }) ->
89 try
90 V = list_to_integer(binary_to_list(K)),
91 case V < 1 of
92 true -> {error, {binding_invalid, "The binding key must be greater than 0", []}};
93 false -> ok
94 end
95 catch error:badarg ->
96 {error, {binding_invalid, "The binding key must be an integer: ~p", [K]}}
97 end.
8998
9099 create(_Tx, _X) -> ok.
91100
1616 -module(rabbit_exchange_type_consistent_hash_test).
1717 -export([test/0]).
1818 -include_lib("amqp_client/include/amqp_client.hrl").
19 -include_lib("eunit/include/eunit.hrl").
1920
2021 %% Because the routing is probabilistic, we can't really test a great
2122 %% deal here.
2829 t(Qs) ->
2930 ok = test_with_rk(Qs),
3031 ok = test_with_header(Qs),
32 ok = test_binding_with_negative_routing_key(),
33 ok = test_binding_with_non_numeric_routing_key(),
3134 ok.
3235
3336 test_with_rk(Qs) ->
6265 type = <<"x-consistent-hash">>,
6366 auto_delete = true,
6467 arguments = DeclareArgs
65 }),
68 }),
6669 [#'queue.declare_ok'{} =
6770 amqp_channel:call(Chan, #'queue.declare' {
68 queue = Q, exclusive = true }) || Q <- Queues],
71 queue = Q, exclusive = true}) || Q <- Queues],
6972 [#'queue.bind_ok'{} =
70 amqp_channel:call(Chan, #'queue.bind' { queue = Q,
73 amqp_channel:call(Chan, #'queue.bind' {queue = Q,
7174 exchange = <<"e">>,
72 routing_key = <<"10">> })
75 routing_key = <<"10">>})
7376 || Q <- [Q1, Q2]],
7477 [#'queue.bind_ok'{} =
75 amqp_channel:call(Chan, #'queue.bind' { queue = Q,
78 amqp_channel:call(Chan, #'queue.bind' {queue = Q,
7679 exchange = <<"e">>,
77 routing_key = <<"20">> })
80 routing_key = <<"20">>})
7881 || Q <- [Q3, Q4]],
7982 #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}),
8083 [amqp_channel:call(Chan,
8588 [begin
8689 #'queue.declare_ok'{message_count = M} =
8790 amqp_channel:call(Chan, #'queue.declare' {queue = Q,
88 exclusive = true }),
91 exclusive = true}),
8992 M
9093 end || Q <- Queues],
9194 Count = lists:sum(Counts), %% All messages got routed
9295 [true = C > 0.01 * Count || C <- Counts], %% We are not *grossly* unfair
93 amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e">> }),
94 [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
96 amqp_channel:call(Chan, #'exchange.delete' {exchange = <<"e">>}),
97 [amqp_channel:call(Chan, #'queue.delete' {queue = Q}) || Q <- Queues],
9598 amqp_channel:close(Chan),
9699 amqp_connection:close(Conn),
97100 ok.
101
102 test_binding_with_negative_routing_key() ->
103 {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
104 {ok, Chan} = amqp_connection:open_channel(Conn),
105 Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>,
106 type = <<"x-consistent-hash">>},
107 #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
108 Q = <<"test-queue">>,
109 Declare2 = #'queue.declare'{queue = Q},
110 #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2),
111 process_flag(trap_exit, true),
112 Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
113 routing_key = <<"-1">>},
114 ?assertExit(_, amqp_channel:call(Chan, Cmd)),
115 {ok, Ch2} = amqp_connection:open_channel(Conn),
116 amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
117 amqp_connection:close(Conn),
118 ok.
119
120 test_binding_with_non_numeric_routing_key() ->
121 {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
122 {ok, Chan} = amqp_connection:open_channel(Conn),
123 Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>,
124 type = <<"x-consistent-hash">>},
125 #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
126 Q = <<"test-queue">>,
127 Declare2 = #'queue.declare'{queue = Q},
128 #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2),
129 process_flag(trap_exit, true),
130 Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
131 routing_key = <<"not-a-number">>},
132 ?assertExit(_, amqp_channel:call(Chan, Cmd)),
133 {ok, Ch2} = amqp_connection:open_channel(Conn),
134 amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
135 amqp_connection:close(Conn),
136 ok.
2828 port = undefined,
2929 channel_max = 0,
3030 frame_max = 0,
31 heartbeat = 0,
31 heartbeat = 10,
3232 connection_timeout = infinity,
3333 ssl_options = none,
3434 auth_mechanisms =
3636 rabbit_queue_collector,
3737 rabbit_queue_decorator,
3838 rabbit_amqqueue,
39 supervisor2
39 ssl_compat,
40 supervisor2,
41 time_compat
4042 ]},
4143 {registered, []},
4244 {env, []},
+0
-4
plugins-src/rabbitmq-federation/README
0 Generic build instructions are at:
1 http://www.rabbitmq.com/plugin-development.html
2
3 See http://www.rabbitmq.com/federation.html
0 ## RabbitMQ Federation
1
2 RabbitMQ federation offers a group of features for loosely
3 coupled and WAN-friendly distributed RabbitMQ setups. Note that
4 this is not an alternative to queue mirroring.
5
6
7 ## Supported RabbitMQ Versions
8
9 This plugin ships with RabbitMQ, there is no need to
10 install it separately.
11
12
13 ## Documentation
14
15 See [RabbitMQ federation plugin](http://www.rabbitmq.com/federation.html) on rabbitmq.com.
16
17
18 ## License and Copyright
19
20 Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
21
22 2007-2015 (c) Pivotal Software Inc.
252252 %% routing key the first time a message gets
253253 %% forwarded; after that it's known that they were
254254 %% <<>> and QueueName respectively.
255 {rabbit_misc:set_table_value(
256 rabbit_misc:set_table_value(
257 Headers, <<"x-original-exchange">>, longstr, X),
258 <<"x-original-routing-key">>, longstr, K), 0};
255 {init_x_original_source_headers(Headers, X, K), 0};
259256 {array, Been} ->
260 {Found, Been1} = lists:partition(
261 fun (I) -> visit_match(I, Table) end,
262 Been),
263 C = case Found of
264 [] -> 0;
265 [{table, T}] -> case rabbit_misc:table_lookup(
266 T, <<"visit-count">>) of
267 {_, I} when is_number(I) -> I;
268 _ -> 0
269 end
270 end,
271 {rabbit_misc:set_table_value(
272 Headers, ?ROUTING_HEADER, array, Been1), C}
257 update_visit_count(Table, Been, Headers);
258 %% this means the header comes from the client
259 %% which re-published the message, most likely unintentionally.
260 %% We can't assume much about the value, so we simply ignore it.
261 _Other ->
262 {init_x_original_source_headers(Headers, X, K), 0}
273263 end,
274264 rabbit_basic:prepend_table_header(
275265 ?ROUTING_HEADER, Table ++ [{<<"redelivered">>, bool, Redelivered},
276266 {<<"visit-count">>, long, Count + 1}],
277267 swap_cc_header(Headers1)).
268
269 init_x_original_source_headers(Headers, X, K) ->
270 rabbit_misc:set_table_value(
271 rabbit_misc:set_table_value(
272 Headers, <<"x-original-exchange">>, longstr, X),
273 <<"x-original-routing-key">>, longstr, K).
274
275 update_visit_count(Table, Been, Headers) ->
276 {Found, Been1} = lists:partition(
277 fun(I) -> visit_match(I, Table) end,
278 Been),
279 C = case Found of
280 [] -> 0;
281 [{table, T}] -> case rabbit_misc:table_lookup(
282 T, <<"visit-count">>) of
283 {_, I} when is_number(I) -> I;
284 _ -> 0
285 end
286 end,
287 {rabbit_misc:set_table_value(
288 Headers, ?ROUTING_HEADER, array, Been1), C}.
278289
279290 swap_cc_header(Table) ->
280291 [{case K of
121121 exchange_name = bget(exchange, US, U, name(XorQ)),
122122 queue_name = bget(queue, US, U, name(XorQ)),
123123 prefetch_count = bget('prefetch-count', US, U, ?DEF_PREFETCH),
124 reconnect_delay = bget('reconnect-delay', US, U, 1),
124 reconnect_delay = bget('reconnect-delay', US, U, 5),
125125 max_hops = bget('max-hops', US, U, 1),
126126 expires = bget(expires, US, U, none),
127127 message_ttl = bget('message-ttl', US, U, none),
4949 'Maximum number of unacknowledged messages that may be in flight over a federation link at one time. Defaults to 1000 if not set.';
5050
5151 HELP['federation-reconnect'] =
52 'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 1 if not set.';
52 'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 5 if not set.';
5353
5454 HELP['federation-ack-mode'] =
5555 '<dl>\
0 sudo: false
1 language: erlang
2 notifications:
3 email:
4 - alerts@rabbitmq.com
5 addons:
6 apt:
7 packages:
8 - xsltproc
9 - python3
10 otp_release:
11 - "R16B03-1"
12 - "17.5"
13 - "18.0"
14 install:
15 - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi
16 - cd $HOME/rabbitmq-public-umbrella
17 - make co
18 - make up
19 before_script:
20 - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG"
21 - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]}
22 - rm -rf ${TEST_DIR}
23 - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR}
24 - cd ${TEST_DIR}
25 script: make test
26 before_cache:
27 - rm -rf ${TEST_DIR}
28 - cd $HOME
29 cache:
30 apt: true
31 directories:
32 - $HOME/rabbitmq-public-umbrella
366366 (options.node, options.config, error))
367367 else:
368368 for key, val in new_conf.items():
369 setattr(options, key, val)
369 if key == 'ssl':
370 setattr(options, key, val == "True")
371 else:
372 setattr(options, key, val)
370373
371374 return (options, args)
372375
204204 if (num == undefined) return UNKNOWN_REPR;
205205 else if (num < 1) return num.toFixed(2);
206206 else if (num < 10) return num.toFixed(1);
207 else return fmt_num_thousands(num.toFixed(0));
207 else return fmt_num_thousands(num);
208208 }
209209
210210 function fmt_num_thousands(num) {
211 if (num == undefined) return UNKNOWN_REPR;
212 num = '' + num;
213 if (num.length < 4) return num;
214 return fmt_num_thousands(num.slice(0, -3)) + ',' + num.slice(-3);
211 var conv_num = parseFloat(num); // to avoid errors if someone calls fmt_num_thousands(someNumber.toFixed(0))
212 return fmt_num_thousands_unfixed(conv_num.toFixed(0));
213 }
214
215 function fmt_num_thousands_unfixed(num) {
216 if (num == undefined) return UNKNOWN_REPR;
217 num = '' + num;
218 if (num.length < 4) return num;
219 var res = fmt_num_thousands_unfixed(num.slice(0, -3)) + ',' + num.slice(-3);
220 return res;
215221 }
216222
217223 function fmt_percent(num) {
727733 var prefix = '';
728734 if (current_sort == sort) {
729735 prefix = '<span class="arrow">' +
730 (current_sort_reverse ? '&#9650; ' : '&#9660; ') +
736 (current_sort_reverse ? '&#9660; ' : '&#9650; ') +
731737 '</span>';
732738 }
733739 return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>';
2323 -export([to_amqp_table/1, listener/1, properties/1, basic_properties/1]).
2424 -export([record/2, to_basic_properties/1]).
2525 -export([addr/1, port/1]).
26 -export([format_nulls/1]).
2627
2728 -import(rabbit_misc, [pget/2, pset/3]).
2829
99100 xmerl_ucs:from_utf8(V),
100101 V
101102 catch exit:{ucs, _} ->
102 Enc = base64:encode(V),
103 <<"Invalid UTF-8, base64 is: ", Enc/binary>>
103 Enc = split_lines(base64:encode(V)),
104 <<"Not UTF-8, base64 is: ", Enc/binary>>
104105 end.
106
107 % MIME limits lines of base64-encoded data to 76 characters.
108 split_lines(<<Text:76/binary, Rest/binary>>) ->
109 <<Text/binary, $\n, (split_lines(Rest))/binary>>;
110 split_lines(Text) ->
111 Text.
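For illustration (a sketch assuming the local split_lines/1 above): 100 input bytes encode to 136 base64 characters, so the encoded value is split into a 76-character line, a newline, and the remaining 60 characters.

%% Illustrative only.
Enc   = base64:encode(crypto:strong_rand_bytes(100)), %% 136 characters
Split = split_lines(Enc).
%% byte_size(Split) =:= 137, with $\n after the first 76 characters.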
105112
106113 parameter(P) -> pset(value, rabbit_misc:term_to_json(pget(value, P)), P).
107114
318325 ]);
319326
320327 strip_pids(Items) -> [strip_pids(I) || I <- Items].
328
329 %% Format for JSON replies. Transforms '' into null
330 format_nulls(Items) when is_list(Items) ->
331 lists:foldr(fun (Pair, Acc) ->
332 [format_null_item(Pair) | Acc]
333 end, [], Items);
334 format_nulls(Item) ->
335 format_null_item(Item).
336
337 format_null_item({Key, ''}) ->
338 {Key, null};
339 format_null_item({Key, Value}) when is_list(Value) ->
340 {Key, format_nulls(Value)};
341 format_null_item({Key, {struct, Struct}}) ->
342 {Key, {struct, format_nulls(Struct)}};
343 format_null_item({Key, {array, Struct}}) ->
344 {Key, {array, format_nulls(Struct)}};
345 format_null_item({Key, Value}) ->
346 {Key, Value};
347 format_null_item([{_K, _V} | _T] = L) ->
348 format_nulls(L);
349 format_null_item(Value) ->
350 Value.
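A minimal usage sketch (assuming the exported format_nulls/1 above): the atom '' becomes null so that mochijson2 can encode the proplist.

%% Illustrative only.
rabbit_mgmt_format:format_nulls(
  [{name, <<"test0">>},
   {consumer_utilisation, ''},
   {exclusive_consumer_tag, ''}]).
%% => [{name, <<"test0">>},
%%     {consumer_utilisation, null},
%%     {exclusive_consumer_tag, null}]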
5656 %%----------------------------------------------------------------------------
5757
5858 init([]) ->
59 {ok, {{one_for_one, 0, 1}, [sup()]}}.
59 %% see above as well as https://github.com/rabbitmq/rabbitmq-management/pull/84.
60 %% we send a message to ourselves so that if there's a conflict
61 %% with the mirrored supervisor already being started on another node,
62 %% we fail and let the other node win in a way that doesn't
63 %% prevent rabbitmq_management and, in turn, the entire
64 %% node from starting.
65 timer:apply_after(0, ?MODULE, start_child, []),
66 {ok, {{one_for_one, 0, 1}, []}}.
6067
6168 sup() ->
6269 {rabbit_mgmt_sup, {rabbit_mgmt_sup, start_link, []},
177177 reply0(Facts, ReqData, Context) ->
178178 ReqData1 = set_resp_header("Cache-Control", "no-cache", ReqData),
179179 try
180 {mochijson2:encode(Facts), ReqData1, Context}
180 {mochijson2:encode(rabbit_mgmt_format:format_nulls(Facts)), ReqData1,
181 Context}
181182 catch exit:{json_encode, E} ->
182183 Error = iolist_to_binary(
183184 io_lib:format("JSON encode error: ~p", [E])),
288289 Json = {struct, [{error, Type},
289290 {reason, rabbit_mgmt_format:tuple(Reason)}]},
290291 ReqData1 = wrq:append_to_response_body(mochijson2:encode(Json), ReqData),
291 {{halt, Code}, ReqData1, Context}.
292 {{halt, Code}, set_resp_header(
293 "Content-Type", "application/json", ReqData1), Context}.
292294
293295 id(Key, ReqData) when Key =:= exchange;
294296 Key =:= source;
869869 http_delete("/vhosts/vh1", ?NO_CONTENT),
870870 ok.
871871
872 format_output_test() ->
873 QArgs = [],
874 PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
875 http_put("/vhosts/vh1", none, ?NO_CONTENT),
876 http_put("/permissions/vh1/guest", PermArgs, ?NO_CONTENT),
877 http_put("/queues/%2f/test0", QArgs, ?NO_CONTENT),
878 assert_list([[{name, <<"test0">>},
879 {consumer_utilisation, null},
880 {exclusive_consumer_tag, null},
881 {recoverable_slaves, null}]], http_get("/queues", ?OK)),
882 http_delete("/queues/%2f/test0", ?NO_CONTENT),
883 http_delete("/vhosts/vh1", ?NO_CONTENT),
884 ok.
885
872886 columns_test() ->
873887 http_put("/queues/%2f/test", [{arguments, [{<<"foo">>, <<"bar">>}]}],
874888 ?NO_CONTENT),
11781192 rabbit_runtime_parameters_test:unregister(),
11791193 ok.
11801194
1195 issue67_test() ->
1196 {ok, {{_, 401, _}, Headers, _}} = req(get, "/queues",
1197 [auth_header("user_no_access", "password_no_access")]),
1198 ?assertEqual("application/json",
1199 proplists:get_value("content-type",Headers)),
1200 ok.
11811201
11821202 extensions_test() ->
11831203 [[{javascript,<<"dispatcher.js">>}]] = http_get("/extensions", ?OK),
162162 self.assert_table([exp_msg('test', 0, False, 'test_3')], ['get', 'queue=test', 'requeue=false'])
163163 self.run_success(['publish', 'routing_key=test'], stdin=b'test_4')
164164 filename = '/tmp/rabbitmq-test/get.txt'
165 ensure_dir(filename)
165166 self.run_success(['get', 'queue=test', 'requeue=false', 'payload_file=' + filename])
166167 with open(filename) as f:
167168 self.assertEqual('test_4', f.read())
241242 # routing_key, exchange, message_count, payload, payload_bytes, payload_encoding, properties, redelivered
242243 return [key, '', str(count), payload, str(len(payload)), 'string', '', str(redelivered)]
243244
245 def ensure_dir(f):
246 d = os.path.dirname(f)
247 if not os.path.exists(d):
248 os.makedirs(d)
249
244250 if __name__ == '__main__':
245251 print("\nrabbitmqadmin tests\n===================\n")
246252 suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQAdmin)
1818 -behaviour(rabbit_mgmt_extension).
1919
2020 -export([dispatcher/0, web_ui/0]).
21
2122 dispatcher() -> [{["all"], rabbit_mgmt_wm_all, []},
2223 {["all", vhost], rabbit_mgmt_wm_all, []}].
2324 web_ui() -> [{javascript, <<"visualiser.js">>}].
00 RELEASABLE:=true
11 DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test
2 STANDALONE_TEST_COMMANDS:=eunit:test(rabbit_mqtt_util)
23 WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/test.sh
34 WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test
45 WITH_BROKER_SETUP_SCRIPTS:=$(PACKAGE_DIR)/test/setup-rabbit-test.sh
1112 mkdir -p $(PACKAGE_DIR)/test/ebin
1213 sed -E -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config
1314 $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=bunnychow DIR=$(abspath $(PACKAGE_DIR))/test/certs
15 cp $(PACKAGE_DIR)/test/src/rabbitmq_mqtt_standalone.app.src $(PACKAGE_DIR)/test/ebin/rabbitmq_mqtt.app
1416
1517 $(PACKAGE_DIR)+clean::
1618 rm -rf $(PACKAGE_DIR)/test/certs
372372 DefaultPass = rabbit_mqtt_util:env(default_pass),
373373 {ok, Anon} = application:get_env(?APP, allow_anonymous),
374374 {ok, TLSAuth} = application:get_env(?APP, ssl_cert_login),
375 U = case {User =/= undefined, is_binary(DefaultUser),
376 Anon =:= true, (TLSAuth andalso SSLLoginName =/= none)} of
375 U = case {User =/= undefined,
376 is_binary(DefaultUser),
377 Anon =:= true,
378 (TLSAuth andalso SSLLoginName =/= none)} of
379 %% username provided
377380 {true, _, _, _} -> list_to_binary(User);
381 %% anonymous, default user is configured, no TLS
382 {false, true, true, false} -> DefaultUser;
383 %% no username provided, TLS certificate is present,
384 %% rabbitmq_mqtt.ssl_cert_login is true
378385 {false, _, _, true} -> SSLLoginName;
379 {false, true, true, false} -> DefaultUser;
380386 _ -> nocreds
381387 end,
382388 case U of
383389 nocreds ->
384390 nocreds;
385391 _ ->
386 case {Pass =/= undefined, is_binary(DefaultPass), Anon =:= true, SSLLoginName == U} of
392 case {Pass =/= undefined,
393 is_binary(DefaultPass),
394 Anon =:= true,
395 TLSAuth} of
396 %% password provided
387397 {true, _, _, _} -> {U, list_to_binary(Pass)};
388 {false, _, _, _} -> {U, none};
398 %% password not provided, TLS certificate is present,
399 %% rabbitmq_mqtt.ssl_cert_login is true
400 {false, _, _, true} -> {U, none};
401 %% anonymous, default password is configured
389402 {false, true, true, _} -> {U, DefaultPass};
390403 _ -> {U, none}
391404 end
428441 {QueueQ1,
429442 #'queue.declare'{ queue = QueueQ1,
430443 durable = true,
444 %% Clean session means a transient connection,
445 %% translating into auto-delete.
446 %%
447 %% see rabbitmq/rabbitmq-mqtt#37
431448 auto_delete = CleanSess,
432449 arguments = Qos1Args },
433450 #'basic.consume'{ queue = QueueQ1,
4242
4343 env(Key) ->
4444 case application:get_env(rabbitmq_mqtt, Key) of
45 {ok, Val} -> Val;
45 {ok, Val} -> coerce_env_value(Key, Val);
4646 undefined -> undefined
4747 end.
48
49 coerce_env_value(default_pass, Val) -> to_binary(Val);
50 coerce_env_value(default_user, Val) -> to_binary(Val);
51 coerce_env_value(exchange, Val) -> to_binary(Val);
52 coerce_env_value(vhost, Val) -> to_binary(Val);
53 coerce_env_value(_, Val) -> Val.
54
55 to_binary(Val) when is_list(Val) -> list_to_binary(Val);
56 to_binary(Val) -> Val.
4857
4958 table_lookup(undefined, _Key) ->
5059 undefined;
1616 package com.rabbitmq.mqtt.test;
1717
1818 import com.rabbitmq.client.*;
19 import junit.framework.Assert;
2019 import junit.framework.TestCase;
20 import org.junit.Assert;
2121 import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken;
2222 import org.eclipse.paho.client.mqttv3.MqttCallback;
2323 import org.eclipse.paho.client.mqttv3.MqttClient;
3838 import java.net.Socket;
3939 import java.util.ArrayList;
4040 import java.util.Arrays;
41 import java.util.HashMap;
4142 import java.util.List;
43 import java.util.Map;
4244 import java.util.concurrent.TimeoutException;
4345
4446 /***
6466 private long lastReceipt;
6567 private boolean expectConnectionFailure;
6668
67 private ConnectionFactory connectionFactory;
6869 private Connection conn;
6970 private Channel ch;
7071
8990 client2 = new MqttClient(brokerUrl, clientId2, null);
9091 conOpt = new MyConnOpts();
9192 setConOpts(conOpt);
92 receivedMessages = new ArrayList();
93 receivedMessages = new ArrayList<MqttMessage>();
9394 expectConnectionFailure = false;
9495 }
9596
100101 client = new MqttClient(brokerUrl, clientId, null);
101102 try {
102103 client.connect(conOpt);
103 client.disconnect();
104 } catch (Exception _) {}
104 client.disconnect(3000);
105 } catch (Exception ignored) {}
105106
106107 client2 = new MqttClient(brokerUrl, clientId2, null);
107108 try {
108109 client2.connect(conOpt);
109 client2.disconnect();
110 } catch (Exception _) {}
110 client2.disconnect(3000);
111 } catch (Exception ignored) {}
111112 }
112113
113114 private void setUpAmqp() throws IOException, TimeoutException {
114 connectionFactory = new ConnectionFactory();
115 connectionFactory.setHost(host);
116 conn = connectionFactory.newConnection();
115 ConnectionFactory cf = new ConnectionFactory();
116 cf.setHost(host);
117 conn = cf.newConnection();
117118 ch = conn.createChannel();
118119 }
119120
120121 private void tearDownAmqp() throws IOException {
121 conn.close();
122 if(conn.isOpen()) {
123 conn.close();
124 }
122125 }
123126
124127 private void setConOpts(MqttConnectOptions conOpts) {
139142 mqttOut.flush();
140143 mqttIn.readMqttWireMessage();
141144 fail("Error expected if CONNECT is not first packet");
142 } catch (IOException _) {}
145 } catch (IOException ignored) {}
143146 }
144147
145148 public void testInvalidUser() throws MqttException {
149152 fail("Authentication failure expected");
150153 } catch (MqttException ex) {
151154 Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
155 }
156 }
157
158 // rabbitmq/rabbitmq-mqtt#37: QoS 1, clean session = false
159 public void testQos1AndCleanSessionUnset()
160 throws MqttException, IOException, TimeoutException, InterruptedException {
161 testQueuePropertiesWithCleanSessionUnset("qos1-no-clean-session", 1, true, false);
162 }
163
164 protected void testQueuePropertiesWithCleanSessionSet(String cid, int qos, boolean durable, boolean autoDelete)
165 throws IOException, MqttException, TimeoutException, InterruptedException {
166 testQueuePropertiesWithCleanSession(true, cid, qos, durable, autoDelete);
167 }
168
169 protected void testQueuePropertiesWithCleanSessionUnset(String cid, int qos, boolean durable, boolean autoDelete)
170 throws IOException, MqttException, TimeoutException, InterruptedException {
171 testQueuePropertiesWithCleanSession(false, cid, qos, durable, autoDelete);
172 }
173
174 protected void testQueuePropertiesWithCleanSession(boolean cleanSession, String cid, int qos,
175 boolean durable, boolean autoDelete)
176 throws MqttException, IOException, TimeoutException, InterruptedException {
177 MqttClient c = new MqttClient(brokerUrl, cid, null);
178 MqttConnectOptions opts = new MyConnOpts();
179 opts.setCleanSession(cleanSession);
180 c.connect(opts);
181
182 setUpAmqp();
183 Channel tmpCh = conn.createChannel();
184
185 String q = "mqtt-subscription-" + cid + "qos" + String.valueOf(qos);
186
187 c.subscribe(topic, qos);
188 // there is no server-sent notification about subscription
189 // success so we inject a delay
190 Thread.sleep(testDelay);
191
192 // ensure the queue is declared with the arguments we expect
193 // e.g. mqtt-subscription-client-3aqos0
194 try {
195 // first ensure the queue exists
196 tmpCh.queueDeclarePassive(q);
197 // then assert on properties
198 Map<String, Object> args = new HashMap<String, Object>();
199 args.put("x-expires", 1800000);
200 tmpCh.queueDeclare(q, durable, autoDelete, false, args);
201 } finally {
202 if(c.isConnected()) {
203 c.disconnect(3000);
204 }
205
206 Channel tmpCh2 = conn.createChannel();
207 tmpCh2.queueDelete(q);
208 tmpCh2.close();
209 tearDownAmqp();
152210 }
153211 }
154212
9191 try {
9292 client.connect(conOpt);
9393 client.disconnect();
94 } catch (Exception _) {
94 } catch (Exception ignored) {
9595 }
9696
9797 client2 = new MqttClient(brokerUrl, clientId2, null);
9898 try {
9999 client2.connect(conOpt);
100100 client2.disconnect();
101 } catch (Exception _) {
101 } catch (Exception ignored) {
102102 }
103103 }
104104
0 %% The contents of this file are subject to the Mozilla Public License
1 %% Version 1.1 (the "License"); you may not use this file except in
2 %% compliance with the License. You may obtain a copy of the License
3 %% at http://www.mozilla.org/MPL/
4 %%
5 %% Software distributed under the License is distributed on an "AS IS"
6 %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
7 %% the License for the specific language governing rights and
8 %% limitations under the License.
9 %%
10 %% The Original Code is RabbitMQ.
11 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
13 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
14 %%
15
16 -module(rabbit_mqtt_util_tests).
17
18 -include_lib("eunit/include/eunit.hrl").
19
20 all_test_() ->
21 {setup,
22 fun setup/0,
23 [fun coerce_exchange/0,
24 fun coerce_vhost/0,
25 fun coerce_default_user/0,
26 fun coerce_default_pass/0]}.
27
28 setup() ->
29 application:load(rabbitmq_mqtt).
30
31 coerce_exchange() ->
32 ?assertEqual(<<"amq.topic">>, rabbit_mqtt_util:env(exchange)).
33
34 coerce_vhost() ->
35 ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)).
36
37 coerce_default_user() ->
38 ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)).
39
40 coerce_default_pass() ->
41 ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)).
0 {application, rabbitmq_mqtt,
1 [{description, "RabbitMQ MQTT Adapter"},
2 {vsn, "%%VSN%%"},
3 {modules, []},
4 {registered, []},
5 {mod, {rabbit_mqtt, []}},
6 {env, [{default_user, "guest_user"},
7 {default_pass, "guest_pass"},
8 {ssl_cert_login,false},
9 {allow_anonymous, true},
10 {vhost, "/"},
11 {exchange, "amq.topic"},
12 {subscription_ttl, 1800000}, % 30 min
13 {prefetch, 10},
14 {ssl_listeners, []},
15 {tcp_listeners, [1883]},
16 {tcp_listen_options, [binary,
17 {packet, raw},
18 {reuseaddr, true},
19 {backlog, 128},
20 {nodelay, true}]}]},
21 {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
0 sudo: false
1 language: erlang
2 notifications:
3 email:
4 - alerts@rabbitmq.com
5 addons:
6 apt:
7 packages:
8 - xsltproc
9 otp_release:
10 - R16B03-1
11 - "17.5"
12 - "18.0"
13 install:
14 - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi
15 - cd $HOME/rabbitmq-public-umbrella
16 - make co
17 - make up
18 before_script:
19 - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG"
20 - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]}
21 - rm -rf ${TEST_DIR}
22 - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR}
23 - cd ${TEST_DIR}
24 script: make test
25 before_cache:
26 - rm -rf ${TEST_DIR}
27 - cd $HOME
28 cache:
29 apt: true
30 directories:
31 - $HOME/rabbitmq-public-umbrella
0 UPSTREAM_GIT=https://github.com/pika/pika.git
1 REVISION=0.9.14
2
3 LIB_DIR=pika
4 CHECKOUT_DIR=pika-git
5
6 TARGETS=$(LIB_DIR)
7
8 all: $(TARGETS)
9
10 clean:
11 rm -rf $(LIB_DIR)
12
13 distclean: clean
14 rm -rf $(CHECKOUT_DIR)
15
16 $(LIB_DIR) : $(CHECKOUT_DIR)
17 rm -rf $@
18 cp -R $< $@
19
20 $(CHECKOUT_DIR):
21 git clone $(UPSTREAM_GIT) $@
22 (cd $@ && git checkout $(REVISION)) || rm -rf $@
23
24 echo-revision:
25 @echo $(REVISION)
26
3333 -define(HEADER_PREFETCH_COUNT, "prefetch-count").
3434 -define(HEADER_PRIORITY, "priority").
3535 -define(HEADER_RECEIPT, "receipt").
36 -define(HEADER_REDELIVERED, "redelivered").
3637 -define(HEADER_REPLY_TO, "reply-to").
3738 -define(HEADER_SERVER, "server").
3839 -define(HEADER_SESSION, "session").
4243 -define(HEADER_TYPE, "type").
4344 -define(HEADER_USER_ID, "user-id").
4445 -define(HEADER_VERSION, "version").
46 -define(HEADER_X_DEAD_LETTER_EXCHANGE, "x-dead-letter-exchange").
47 -define(HEADER_X_DEAD_LETTER_ROUTING_KEY, "x-dead-letter-routing-key").
48 -define(HEADER_X_EXPIRES, "x-expires").
49 -define(HEADER_X_MAX_LENGTH, "x-max-length").
50 -define(HEADER_X_MAX_LENGTH_BYTES, "x-max-length-bytes").
51 -define(HEADER_X_MAX_PRIORITY, "x-max-priority").
52 -define(HEADER_X_MESSAGE_TTL, "x-message-ttl").
53 -define(HEADER_X_QUEUE_NAME, "x-queue-name").
4554
4655 -define(MESSAGE_ID_SEPARATOR, "@@").
4756
4857 -define(HEADERS_NOT_ON_SEND, [?HEADER_MESSAGE_ID]).
4958
5059 -define(TEMP_QUEUE_ID_PREFIX, "/temp-queue/").
60
61 -define(HEADER_ARGUMENTS, [
62 ?HEADER_X_DEAD_LETTER_EXCHANGE,
63 ?HEADER_X_DEAD_LETTER_ROUTING_KEY,
64 ?HEADER_X_EXPIRES,
65 ?HEADER_X_MAX_LENGTH,
66 ?HEADER_X_MAX_LENGTH_BYTES,
67 ?HEADER_X_MAX_PRIORITY,
68 ?HEADER_X_MESSAGE_TTL
69 ]).
00 RELEASABLE:=true
11 DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test
2 #STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
2 STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
33 WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/test.py $(PACKAGE_DIR)/test/src/test_connect_options.py $(PACKAGE_DIR)/test/src/test_ssl.py
4 #WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests()
4 WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests()
55 WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test
66
77 define package_rules
1313 sed -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config
1414 $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=test DIR=$(abspath $(PACKAGE_DIR))/test/certs
1515 $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy
16 $(MAKE) -C $(PACKAGE_DIR)/deps/pika
1617
1718 $(PACKAGE_DIR)+clean::
1819 rm -rf $(PACKAGE_DIR)/test/certs
1920
2021 $(PACKAGE_DIR)+clean-with-deps::
2122 $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy distclean
23 $(MAKE) -C $(PACKAGE_DIR)/deps/pika distclean
2224
2325 endef
2626 boolean_header/2, boolean_header/3,
2727 integer_header/2, integer_header/3,
2828 binary_header/2, binary_header/3]).
29 -export([serialize/1]).
29 -export([serialize/1, serialize/2]).
3030
3131 initial_state() -> none.
3232
221221
222222 binary_header(F, K, D) -> default_value(binary_header(F, K), D).
223223
224 serialize(Frame) ->
225 serialize(Frame, true).
226
227 %% second argument controls whether a trailing linefeed
228 %% character should be added, see rabbitmq/rabbitmq-stomp#39.
229 serialize(Frame, true) ->
230 serialize(Frame, false) ++ [?LF];
224231 serialize(#stomp_frame{command = Command,
225232 headers = Headers,
226 body_iolist = BodyFragments}) ->
233 body_iolist = BodyFragments}, false) ->
227234 Len = iolist_size(BodyFragments),
228235 [Command, ?LF,
229236 lists:map(fun serialize_header/1,
232239 Len > 0 -> [?HEADER_CONTENT_LENGTH ++ ":", integer_to_list(Len), ?LF];
233240 true -> []
234241 end,
235 ?LF, BodyFragments, 0, ?LF].
242 ?LF, BodyFragments, 0].
236243
237244 serialize_header({K, V}) when is_integer(V) -> hdr(escape(K), integer_to_list(V));
245 serialize_header({K, V}) when is_boolean(V) -> hdr(escape(K), boolean_to_list(V));
238246 serialize_header({K, V}) when is_list(V) -> hdr(escape(K), escape(V)).
247
248 boolean_to_list(true) -> "true";
249 boolean_to_list(_) -> "false".
239250
240251 hdr(K, V) -> [K, ?COLON, V, ?LF].
241252
2929 -record(state, {session_id, channel, connection, subscriptions,
3030 version, start_heartbeat_fun, pending_receipts,
3131 config, route_state, reply_queues, frame_transformer,
32 adapter_info, send_fun, ssl_login_name, peer_addr}).
32 adapter_info, send_fun, ssl_login_name, peer_addr,
33 %% see rabbitmq/rabbitmq-stomp#39
34 trailing_lf}).
3335
3436 -record(subscription, {dest_hdr, ack_mode, multi_ack, description}).
3537
7072 config = Configuration,
7173 route_state = rabbit_routing_util:init_state(),
7274 reply_queues = dict:new(),
73 frame_transformer = undefined},
75 frame_transformer = undefined,
76 trailing_lf = rabbit_misc:get_env(rabbitmq_stomp, trailing_lf, true)},
7477 hibernate,
7578 {backoff, 1000, 1000, 10000}
7679 }.
448451 ?HEADER_PERSISTENT, false) of
449452 true ->
450453 {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
451 QName = rabbit_stomp_util:subscription_queue_name(Name, Id),
454 QName = rabbit_stomp_util:subscription_queue_name(Name, Id, Frame),
452455 amqp_channel:call(Channel,
453456 #'queue.delete'{queue = list_to_binary(QName),
454457 nowait = false}),
591594 _ -> amqp_channel:call(
592595 Channel, #'basic.qos'{prefetch_count = Prefetch})
593596 end,
594 ExchangeAndKey = rabbit_routing_util:parse_routing(Destination),
595 try
596 amqp_channel:subscribe(Channel,
597 #'basic.consume'{
598 queue = Queue,
599 consumer_tag = ConsumerTag,
600 no_local = false,
601 no_ack = (AckMode == auto),
602 exclusive = false,
603 arguments = []},
604 self()),
605 ok = rabbit_routing_util:ensure_binding(
606 Queue, ExchangeAndKey, Channel)
607 catch exit:Err ->
608 %% it's safe to delete this queue, it was server-named
609 %% and declared by us
610 case Destination of
611 {exchange, _} ->
612 ok = maybe_clean_up_queue(Queue, State);
613 {topic, _} ->
614 ok = maybe_clean_up_queue(Queue, State);
615 _ ->
616 ok
597 case dict:find(ConsumerTag, Subs) of
598 {ok, _} ->
599 Message = "Duplicated subscription identifier",
600 Detail = "A subscription identified by '~s' already exists.",
601 error(Message, Detail, [ConsumerTag], State),
602 send_error(Message, Detail, [ConsumerTag], State),
603 {stop, normal, close_connection(State)};
604 error ->
605 ExchangeAndKey =
606 rabbit_routing_util:parse_routing(Destination),
607 try
608 amqp_channel:subscribe(Channel,
609 #'basic.consume'{
610 queue = Queue,
611 consumer_tag = ConsumerTag,
612 no_local = false,
613 no_ack = (AckMode == auto),
614 exclusive = false,
615 arguments = []},
616 self()),
617 ok = rabbit_routing_util:ensure_binding(
618 Queue, ExchangeAndKey, Channel)
619 catch exit:Err ->
620 %% it's safe to delete this queue, it
621 %% was server-named and declared by us
622 case Destination of
623 {exchange, _} ->
624 ok = maybe_clean_up_queue(Queue, State);
625 {topic, _} ->
626 ok = maybe_clean_up_queue(Queue, State);
627 _ ->
628 ok
629 end,
630 exit(Err)
617631 end,
618 exit(Err)
619 end,
620 ok(State#state{subscriptions =
621 dict:store(
622 ConsumerTag,
623 #subscription{dest_hdr = DestHdr,
624 ack_mode = AckMode,
625 multi_ack = IsMulti,
626 description = Description},
627 Subs),
628 route_state = RouteState1});
632 ok(State#state{subscriptions =
633 dict:store(
634 ConsumerTag,
635 #subscription{dest_hdr = DestHdr,
636 ack_mode = AckMode,
637 multi_ack = IsMulti,
638 description = Description},
639 Subs),
640 route_state = RouteState1})
641 end;
629642 {error, _} = Err ->
630643 Err
631644 end.
972985 ensure_endpoint(_Direction, {queue, []}, _Frame, _Channel, _State) ->
973986 {error, {invalid_destination, "Destination cannot be blank"}};
974987
975 ensure_endpoint(source, EndPoint, Frame, Channel, State) ->
988 ensure_endpoint(source, EndPoint, {_, _, Headers, _} = Frame, Channel, State) ->
976989 Params =
977 case rabbit_stomp_frame:boolean_header(
978 Frame, ?HEADER_PERSISTENT, false) of
979 true ->
980 [{subscription_queue_name_gen,
981 fun () ->
982 {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
983 {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
984 list_to_binary(
985 rabbit_stomp_util:subscription_queue_name(Name,
986 Id))
987 end},
988 {durable, true}];
989 false ->
990 [{subscription_queue_name_gen,
991 fun () ->
992 Id = rabbit_guid:gen_secure(),
993 {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
994 list_to_binary(
995 rabbit_stomp_util:subscription_queue_name(Name,
996 Id))
997 end},
998 {durable, false}]
999 end,
1000 rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint, Params, State);
1001
1002 ensure_endpoint(Direction, Endpoint, _Frame, Channel, State) ->
1003 rabbit_routing_util:ensure_endpoint(Direction, Channel, Endpoint, State).
990 [{subscription_queue_name_gen,
991 fun () ->
992 Id = build_subscription_id(Frame),
993 {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
994 list_to_binary(rabbit_stomp_util:subscription_queue_name(Name, Id, Frame))
995 end},
996 {durable, rabbit_stomp_frame:boolean_header(Frame, ?HEADER_PERSISTENT, false)}],
997 Arguments = rabbit_stomp_util:build_arguments(Headers),
998 rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint,
999 [Arguments | Params], State);
1000
1001 ensure_endpoint(Direction, Endpoint, {_, _, Headers, _}, Channel, State) ->
1002 Arguments = rabbit_stomp_util:build_arguments(Headers),
1003 rabbit_routing_util:ensure_endpoint(Direction, Channel, Endpoint,
1004 [Arguments], State).
1005
1006 build_subscription_id(Frame) ->
1007 case rabbit_stomp_frame:boolean_header(Frame, ?HEADER_PERSISTENT, false) of
1008 true ->
1009 {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
1010 Id;
1011 false ->
1012 rabbit_guid:gen_secure()
1013 end.
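%% A minimal illustrative sketch (not part of the module) of the two
%% branches above, assuming hypothetical #stomp_frame{} values and the
%% usual "persistent" / "id" header names:
%%
%%   Durable   = #stomp_frame{headers = [{"persistent", "true"}, {"id", "my-sub"}]},
%%   "my-sub"  = build_subscription_id(Durable),    %% client id reused across reconnects
%%
%%   Transient = #stomp_frame{headers = []},
%%   _GuidId   = build_subscription_id(Transient).  %% fresh secure GUID each time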
10041014
10051015 %%----------------------------------------------------------------------------
10061016 %% Success/error handling
10501060 body_iolist = BodyFragments},
10511061 State).
10521062
1053 send_frame(Frame, State = #state{send_fun = SendFun}) ->
1054 SendFun(async, rabbit_stomp_frame:serialize(Frame)),
1063 send_frame(Frame, State = #state{send_fun = SendFun,
1064 trailing_lf = TrailingLF}) ->
1065 SendFun(async, rabbit_stomp_frame:serialize(Frame, TrailingLF)),
10551066 State.
10561067
10571068 send_error_frame(Message, ExtraHeaders, Format, Args, State) ->
1515
1616 -module(rabbit_stomp_util).
1717
18 -export([parse_message_id/1, subscription_queue_name/2]).
18 -export([parse_message_id/1, subscription_queue_name/3]).
1919 -export([longstr_field/2]).
2020 -export([ack_mode/1, consumer_tag_reply_to/1, consumer_tag/1, message_headers/1,
2121 headers_post_process/1, headers/5, message_properties/1, tag_to_id/1,
22 msg_header_name/1, ack_header_name/1]).
22 msg_header_name/1, ack_header_name/1, build_arguments/1]).
2323 -export([negotiate_version/2]).
2424 -export([trim_headers/1]).
2525
121121 #'basic.deliver'{consumer_tag = ConsumerTag,
122122 delivery_tag = DeliveryTag,
123123 exchange = ExchangeBin,
124 routing_key = RoutingKeyBin}) ->
124 routing_key = RoutingKeyBin,
125 redelivered = Redelivered}) ->
125126 case tag_to_id(ConsumerTag) of
126127 {ok, {internal, Id}} -> [{?HEADER_SUBSCRIPTION, Id}];
127128 _ -> []
130131 format_destination(binary_to_list(ExchangeBin),
131132 binary_to_list(RoutingKeyBin))},
132133 {?HEADER_MESSAGE_ID,
133 create_message_id(ConsumerTag, SessionId, DeliveryTag)}] ++
134 create_message_id(ConsumerTag, SessionId, DeliveryTag)},
135 {?HEADER_REDELIVERED, Redelivered}] ++
134136 case AckMode == client andalso Version == "1.2" of
135137 true -> [{?HEADER_ACK,
136138 create_message_id(ConsumerTag, SessionId, DeliveryTag)}];
259261 msg_header_name("1.1") -> ?HEADER_MESSAGE_ID;
260262 msg_header_name("1.0") -> ?HEADER_MESSAGE_ID.
261263
264 build_arguments(Headers) ->
265 Arguments =
266 lists:foldl(fun({K, V}, Acc) ->
267 case lists:member(K, ?HEADER_ARGUMENTS) of
268 true -> [build_argument(K, V) | Acc];
269 false -> Acc
270 end
271 end,
272 [],
273 Headers),
274 {arguments, Arguments}.
275
276 %% build the actual value through pattern matching
277 build_argument(?HEADER_X_DEAD_LETTER_EXCHANGE, Val) ->
278 {list_to_binary(?HEADER_X_DEAD_LETTER_EXCHANGE), longstr,
279 list_to_binary(string:strip(Val))};
280 build_argument(?HEADER_X_DEAD_LETTER_ROUTING_KEY, Val) ->
281 {list_to_binary(?HEADER_X_DEAD_LETTER_ROUTING_KEY), longstr,
282 list_to_binary(string:strip(Val))};
283 build_argument(?HEADER_X_EXPIRES, Val) ->
284 {list_to_binary(?HEADER_X_EXPIRES), long,
285 list_to_integer(string:strip(Val))};
286 build_argument(?HEADER_X_MAX_LENGTH, Val) ->
287 {list_to_binary(?HEADER_X_MAX_LENGTH), long,
288 list_to_integer(string:strip(Val))};
289 build_argument(?HEADER_X_MAX_LENGTH_BYTES, Val) ->
290 {list_to_binary(?HEADER_X_MAX_LENGTH_BYTES), long,
291 list_to_integer(string:strip(Val))};
292 build_argument(?HEADER_X_MAX_PRIORITY, Val) ->
293 {list_to_binary(?HEADER_X_MAX_PRIORITY), long,
294 list_to_integer(string:strip(Val))};
295 build_argument(?HEADER_X_MESSAGE_TTL, Val) ->
296 {list_to_binary(?HEADER_X_MESSAGE_TTL), long,
297 list_to_integer(string:strip(Val))}.
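%% A minimal illustrative sketch of build_arguments/1, assuming the
%% ?HEADER_* macros expand to the usual "x-..." STOMP header names; headers
%% not listed in ?HEADER_ARGUMENTS are ignored:
%%
%%   Headers = [{"x-message-ttl", "60000"},
%%              {"x-dead-letter-exchange", "dlx"},
%%              {"content-type", "text/plain"}],
%%   {arguments, [{<<"x-dead-letter-exchange">>, longstr, <<"dlx">>},
%%                {<<"x-message-ttl">>, long, 60000}]} = build_arguments(Headers).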
298
262299 %%--------------------------------------------------------------------
263300 %% Destination Formatting
264301 %%--------------------------------------------------------------------
276313 %% Destination Parsing
277314 %%--------------------------------------------------------------------
278315
279 subscription_queue_name(Destination, SubscriptionId) ->
280 %% We need a queue name that a) can be derived from the
281 %% Destination and SubscriptionId, and b) meets the constraints on
282 %% AMQP queue names. It doesn't need to be secure; we use md5 here
283 %% simply as a convenient means to bound the length.
284 rabbit_guid:string(
285 erlang:md5(term_to_binary({Destination, SubscriptionId})),
286 "stomp-subscription").
316 subscription_queue_name(Destination, SubscriptionId, Frame) ->
317 case rabbit_stomp_frame:header(Frame, ?HEADER_X_QUEUE_NAME, undefined) of
318 undefined ->
319 %% We need a queue name that a) can be derived from the
320 %% Destination and SubscriptionId, and b) meets the constraints on
321 %% AMQP queue names. It doesn't need to be secure; we use md5 here
322 %% simply as a convenient means to bound the length.
323 rabbit_guid:string(
324 erlang:md5(term_to_binary({Destination, SubscriptionId})),
325 "stomp-subscription");
326 Name ->
327 Name
328 end.
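%% A minimal illustrative sketch of the two paths above; frame values and the
%% queue name are hypothetical, and ?HEADER_X_QUEUE_NAME is assumed to expand
%% to "x-queue-name":
%%
%%   %% no x-queue-name header: the name is derived from destination + id via md5
%%   _Generated = subscription_queue_name("/topic/alerts", "sub-1", PlainFrame),
%%
%%   %% x-queue-name:my-queue supplied by the client: used verbatim
%%   "my-queue" = subscription_queue_name("/topic/alerts", "sub-1", NamedFrame).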
287329
288330 %% ---- Helpers ----
289331
1515 {packet, raw},
1616 {reuseaddr, true},
1717 {backlog, 128},
18 {nodelay, true}]}]},
18 {nodelay, true}]},
19 %% see rabbitmq/rabbitmq-stomp#39
20 {trailing_lf, true}]},
1921 {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
215215
216216 self.conn.send_frame("NACK", {self.ack_id_header: message_id, "requeue": False})
217217 self.assertFalse(self.listener.await(4), "Received message after NACK with requeue = False")
218
219 class TestAck11(TestAck):
220
221 def create_connection_obj(self, version='1.1', vhost='/', heartbeats=(0, 0)):
222 conn = stomp.StompConnection11(vhost=vhost,
223 heartbeats=heartbeats)
224 self.ack_id_source_header = 'message-id'
225 self.ack_id_header = 'message-id'
226 return conn
227
228 def test_version(self):
229 self.assertEquals('1.1', self.conn.version)
230
231 class TestAck12(TestAck):
232
233 def create_connection_obj(self, version='1.2', vhost='/', heartbeats=(0, 0)):
234 conn = stomp.StompConnection12(vhost=vhost,
235 heartbeats=heartbeats)
236 self.ack_id_source_header = 'ack'
237 self.ack_id_header = 'id'
238 return conn
239
240 def test_version(self):
241 self.assertEquals('1.2', self.conn.version)
498498 self.__subscribe(destination, conn2, "other.id")
499499
500500 for l in [self.listener, listener2]:
501 self.assertTrue(l.await(20))
502 self.assertEquals(100, len(l.messages))
501 self.assertTrue(l.await(15))
502 self.assertTrue(len(l.messages) >= 90)
503 self.assertTrue(len(l.messages) <= 100)
503504
504505 finally:
505506 conn2.disconnect()
11 import stomp
22 import base
33 import time
4
5 class TestErrorsAndCloseConnection(base.BaseTest):
6 def __test_duplicate_consumer_tag_with_headers(self, destination, headers):
7 self.subscribe_dest(self.conn, destination, None,
8 headers = headers)
9
10 self.subscribe_dest(self.conn, destination, None,
11 headers = headers)
12
13 self.assertTrue(self.listener.await())
14
15 self.assertEquals(1, len(self.listener.errors))
16 errorReceived = self.listener.errors[0]
17 self.assertEquals("Duplicated subscription identifier", errorReceived['headers']['message'])
18 self.assertEquals("A subscription identified by 'T_1' already exists.", errorReceived['message'])
19 time.sleep(2)
20 self.assertFalse(self.conn.is_connected())
21
22
23 def test_duplicate_consumer_tag_with_transient_destination(self):
24 destination = "/exchange/amq.direct/duplicate-consumer-tag-test1"
25 self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1})
26
27 def test_duplicate_consumer_tag_with_durable_destination(self):
28 destination = "/queue/duplicate-consumer-tag-test2"
29 self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1,
30 'persistent': True})
31
432
533 class TestErrors(base.BaseTest):
634
6391 self.assertEquals("'" + content + "' is not a valid " +
6492 dtype + " destination\n",
6593 err['message'])
66
8383 resp = ('MESSAGE\n'
8484 'destination:/exchange/amq.fanout\n'
8585 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
86 'redelivered:false\n'
8687 'content-type:text/plain\n'
8788 'content-length:6\n'
8889 '\n'
101102 resp = ('MESSAGE\n'
102103 'destination:/exchange/amq.fanout\n'
103104 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
105 'redelivered:false\n'
104106 'content-length:6\n'
105107 '\n'
106108 'hello\n\0')
120122 resp = ('MESSAGE\n'
121123 'destination:/exchange/amq.fanout\n'
122124 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
125 'redelivered:false\n'
123126 'content-length:'+str(len(msg))+'\n'
124127 '\n'
125128 + msg + '\0')
138141 resp = ('MESSAGE\n'
139142 'destination:/exchange/amq.fanout\n'
140143 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
144 'redelivered:false\n'
141145 'content-type:text/plain\n'
142146 'content-length:6\n'
143147 '\n'
187191 'subscription:(.*)\n'
188192 'destination:/topic/da9d4779\n'
189193 'message-id:(.*)\n'
194 'redelivered:false\n'
190195 'content-type:text/plain\n'
191196 'content-length:8\n'
192197 '\n'
226231 'subscription:(.*)\n' # 14 + subscription
227232 +resp_dest+ # 44
228233 'message-id:(.*)\n' # 12 + message-id
234 'redelivered:false\n' # 18
229235 'content-type:text/plain\n' # 24
230236 'content-length:%i\n' # 16 + 4==len('1024')
231237 '\n' # 1
232238 '(.*)$' # prefix of body+null (potentially)
233239 % len(message) )
234 headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1)
240 headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1)
235241
236242 headbuf = self.recv_atleast(headlen)
237243 self.assertFalse(len(headbuf) == 0)
285291 'subscription:(.*)\n' # 14 + subscription
286292 +resp_dest+ # 44
287293 'message-id:(.*)\n' # 12 + message-id
294 'redelivered:false\n' # 18
288295 'content-type:text/plain\n' # 24
289296 'content-length:%i\n' # 16 + 4==len('1024')
290297 '\n' # 1
291298 '(.*)$' # prefix of body+null (potentially)
292299 % len(message) )
293 headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1)
300 headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1)
294301
295302 headbuf = self.recv_atleast(headlen)
296303 self.assertFalse(len(headbuf) == 0)
0 import unittest
1 import stomp
2 import pika
3 import base
4 import time
5
6 class TestQueueProperties(base.BaseTest):
7
8 def test_subscribe(self):
9 destination = "/queue/queue-properties-subscribe-test"
10
11 # subscribe
12 self.subscribe_dest(self.conn, destination, None,
13 headers={
14 'x-message-ttl': 60000,
15 'x-expires': 70000,
16 'x-max-length': 10,
17 'x-max-length-bytes': 20000,
18 'x-dead-letter-exchange': 'dead-letter-exchange',
19 'x-dead-letter-routing-key': 'dead-letter-routing-key',
20 'x-max-priority': 6,
21 })
22
23 # now try to declare the queue using pika
24 # if the properties are the same we should
25 # not get any error
26 connection = pika.BlockingConnection(pika.ConnectionParameters(
27 host='localhost'))
28 channel = connection.channel()
29 channel.queue_declare(queue='queue-properties-subscribe-test',
30 durable=True,
31 arguments={
32 'x-message-ttl': 60000,
33 'x-expires': 70000,
34 'x-max-length': 10,
35 'x-max-length-bytes': 20000,
36 'x-dead-letter-exchange': 'dead-letter-exchange',
37 'x-dead-letter-routing-key': 'dead-letter-routing-key',
38 'x-max-priority': 6,
39 })
40
41 self.conn.disconnect()
42 connection.close()
43
44 def test_send(self):
45 destination = "/queue/queue-properties-send-test"
46
47 # send
48 self.conn.send(destination, "test1",
49 headers={
50 'x-message-ttl': 60000,
51 'x-expires': 70000,
52 'x-max-length': 10,
53 'x-max-length-bytes': 20000,
54 'x-dead-letter-exchange': 'dead-letter-exchange',
55 'x-dead-letter-routing-key': 'dead-letter-routing-key',
56 'x-max-priority': 6,
57 })
58
59 # now try to declare the queue using pika
60 # if the properties are the same we should
61 # not get any error
62 connection = pika.BlockingConnection(pika.ConnectionParameters(
63 host='localhost'))
64 channel = connection.channel()
65 channel.queue_declare(queue='queue-properties-send-test',
66 durable=True,
67 arguments={
68 'x-message-ttl': 60000,
69 'x-expires': 70000,
70 'x-max-length': 10,
71 'x-max-length-bytes': 20000,
72 'x-dead-letter-exchange': 'dead-letter-exchange',
73 'x-dead-letter-routing-key': 'dead-letter-routing-key',
74 'x-max-priority': 6,
75 })
76
77 self.conn.disconnect()
78 connection.close()
7272 case rabbit_stomp_frame:parse(Payload, FrameState) of
7373 {ok, Frame, <<>>} ->
7474 recv({Sock, lists:reverse([Frame | FramesRev])});
75 {ok, Frame, <<"\n">>} ->
76 recv({Sock, lists:reverse([Frame | FramesRev])});
7577 {ok, Frame, Rest} ->
7678 parse(Rest, {Sock, [Frame | FramesRev]},
7779 rabbit_stomp_frame:initial_state(), Length);
4040 rabbit_stomp_client:send(
4141 Client, "LOL", [{"", ""}])
4242 end,
43 lists:seq(1, 1000)),
43 lists:seq(1, 100)),
4444 timer:sleep(5000),
4545 N = count_connections(),
4646 ok.
3838 {Key, Value} <- Headers],
3939 #stomp_frame{body_iolist = Body} = Frame,
4040 ?assertEqual(<<"Body Content">>, iolist_to_binary(Body)).
41
42 parse_simple_frame_with_null_test() ->
43 Headers = [{"header1", "value1"}, {"header2", "value2"},
44 {?HEADER_CONTENT_LENGTH, "12"}],
45 Content = frame_string("COMMAND",
46 Headers,
47 "Body\0Content"),
48 {"COMMAND", Frame, _State} = parse_complete(Content),
49 [?assertEqual({ok, Value},
50 rabbit_stomp_frame:header(Frame, Key)) ||
51 {Key, Value} <- Headers],
52 #stomp_frame{body_iolist = Body} = Frame,
53 ?assertEqual(<<"Body\0Content">>, iolist_to_binary(Body)).
54
55 parse_large_content_frame_with_nulls_test() ->
56 BodyContent = string:copies("012345678\0", 1024),
57 Headers = [{"header1", "value1"}, {"header2", "value2"},
58 {?HEADER_CONTENT_LENGTH, integer_to_list(string:len(BodyContent))}],
59 Content = frame_string("COMMAND",
60 Headers,
61 BodyContent),
62 {"COMMAND", Frame, _State} = parse_complete(Content),
63 [?assertEqual({ok, Value},
64 rabbit_stomp_frame:header(Frame, Key)) ||
65 {Key, Value} <- Headers],
66 #stomp_frame{body_iolist = Body} = Frame,
67 ?assertEqual(list_to_binary(BodyContent), iolist_to_binary(Body)).
6841
6942 parse_command_only_test() ->
7043 {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("COMMAND\n\n\0").
165138 headers = [{"header", "val:ue"}],
166139 body_iolist = []}).
167140
168 headers_escaping_roundtrip_test() ->
169 Content = "COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0",
170 {ok, Frame, _} = parse(Content),
141 test_frame_serialization(Expected, TrailingLF) ->
142 {ok, Frame, _} = parse(Expected),
171143 {ok, Val} = rabbit_stomp_frame:header(Frame, "head\r:\ner"),
172144 ?assertEqual(":\n\r\\", Val),
173 Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame)),
174 ?assertEqual(Content, rabbit_misc:format("~s", [Serialized])).
145 Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame, TrailingLF)),
146 ?assertEqual(Expected, rabbit_misc:format("~s", [Serialized])).
147
148 headers_escaping_roundtrip_test() ->
149 test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0\n", true).
150
151 headers_escaping_roundtrip_without_trailing_lf_test() ->
152 test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0", false).
175153
176154 parse(Content) ->
177155 parse(Content, rabbit_stomp_frame:initial_state()).
188166 frame_string(Command, Headers, BodyContent, Term) ->
189167 HeaderString =
190168 lists:flatten([Key ++ ":" ++ Value ++ Term || {Key, Value} <- Headers]),
191 Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0".
169 Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0" ++ "\n".
192170
0 import unittest
1 import stomp
2 import base
3 import time
4
5 class TestRedelivered(base.BaseTest):
6
7 def test_redelivered(self):
8 destination = "/queue/redelivered-test"
9
10 # subscribe and send message
11 self.subscribe_dest(self.conn, destination, None, ack='client')
12 self.conn.send(destination, "test1")
13 self.assertTrue(self.listener.await(4), "initial message not received")
14 self.assertEquals(1, len(self.listener.messages))
15 self.assertEquals('false', self.listener.messages[0]['headers']['redelivered'])
16
17 # disconnect with no ack
18 self.conn.disconnect()
19
20 # now reconnect
21 conn2 = self.create_connection()
22 try:
23 listener2 = base.WaitableListener()
24 listener2.reset(1)
25 conn2.set_listener('', listener2)
26 self.subscribe_dest(conn2, destination, None, ack='client')
27 self.assertTrue(listener2.await(), "message not received again")
28 self.assertEquals(1, len(listener2.messages))
29 self.assertEquals('true', listener2.messages[0]['headers']['redelivered'])
30 finally:
31 conn2.disconnect()
33
44 if __name__ == '__main__':
55 modules = [
6 'ack',
7 'destinations',
8 'errors',
9 'lifecycle',
610 'parsing',
7 'destinations',
8 'lifecycle',
11 'queue_properties',
12 'redelivered',
13 'reliability',
914 'transactions',
10 'ack',
11 'errors',
12 'reliability',
15 'x_queue_name',
1316 ]
1417 test_runner.run_unittests(modules)
1518
66 def add_deps_to_path():
77 deps_dir = os.path.realpath(os.path.join(__file__, "..", "..", "..", "deps"))
88 sys.path.append(os.path.join(deps_dir, "stomppy", "stomppy"))
9 sys.path.append(os.path.join(deps_dir, "pika", "pika"))
910
1011 def run_unittests(modules):
1112 add_deps_to_path()
0 import unittest
1 import stomp
2 import pika
3 import base
4 import time
5
6 class TestUserGeneratedQueueName(base.BaseTest):
7
8 def test_exchange_dest(self):
9 queueName='my-user-generated-queue-name-exchange'
10
11 # subscribe
12 self.subscribe_dest(
13 self.conn,
14 '/exchange/amq.direct/test',
15 None,
16 headers={ 'x-queue-name': queueName }
17 )
18
19 connection = pika.BlockingConnection(
20 pika.ConnectionParameters( host='localhost'))
21 channel = connection.channel()
22
23 # publish a message to the named queue
24 channel.basic_publish(
25 exchange='',
26 routing_key=queueName,
27 body='Hello World!')
28
29 # check if we receive the message from the STOMP subscription
30 self.assertTrue(self.listener.await(2), "initial message not received")
31 self.assertEquals(1, len(self.listener.messages))
32
33 self.conn.disconnect()
34 connection.close()
35
36 def test_topic_dest(self):
37 queueName='my-user-generated-queue-name-topic'
38
39 # subscribe
40 self.subscribe_dest(
41 self.conn,
42 '/topic/test',
43 None,
44 headers={ 'x-queue-name': queueName }
45 )
46
47 connection = pika.BlockingConnection(
48 pika.ConnectionParameters( host='localhost'))
49 channel = connection.channel()
50
51 # publish a message to the named queue
52 channel.basic_publish(
53 exchange='',
54 routing_key=queueName,
55 body='Hello World!')
56
57 # check if we receive the message from the STOMP subscription
58 self.assertTrue(self.listener.await(2), "initial message not received")
59 self.assertEquals(1, len(self.listener.messages))
60
61 self.conn.disconnect()
62 connection.close()
173173 RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
174174 stop-rabbit-on-node ${COVER_STOP} stop-node
175175
176 define compare_version
177 $(shell awk 'BEGIN {
178 split("$(1)", v1, "\.");
179 version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
180
181 split("$(2)", v2, "\.");
182 version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
183
184 if (version1 $(3) version2) {
185 print "true";
186 } else {
187 print "false";
188 }
189 }')
190 endef
191
192 ERLANG_SSL_VER = $(shell erl -noshell -eval '\
193 ok = application:load(ssl), \
194 {ok, VSN} = application:get_key(ssl, vsn), \
195 io:format("~s~n", [VSN]), \
196 halt(0).')
197 MINIMUM_ERLANG_SSL_VER = 5.3
198
199 ifeq ($(call compare_version,$(ERLANG_SSL_VER),$(MINIMUM_ERLANG_SSL_VER),>=),true)
176200 create_ssl_certs:
177201 $(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean all
178
202 else
203 create_ssl_certs:
204 @# Skip SSL certs if Erlang is older than R16B01 (ssl 5.3).
205 $(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean
206 @echo "WARNING: Skipping SSL certs creation; Erlang's SSL application is too" \
207 "old ($(ERLANG_SSL_VER) < $(MINIMUM_ERLANG_SSL_VER)) and SSL support" \
208 "is disabled in RabbitMQ"
209 endif
11 FILTER:=all
22 COVER:=false
33 WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\")
4 STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none)
54
65 ## Require R15B to compile inet_proxy_dist since it requires includes
76 ## introduced there.
87 ifeq ($(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().'),true)
8 STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none)
99 PACKAGE_ERLC_OPTS+=-Derlang_r15b_or_later
1010 endif
504504 wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
505505
506506 wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
507 error({cluster_status_max_tries_failed,
508 [{nodes, Nodes},
509 {expected_status, Status},
510 {max_tried, Max}]});
507 erlang:error({cluster_status_max_tries_failed,
508 [{nodes, Nodes},
509 {expected_status, Status},
510 {max_tried, Max}]});
511511 wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
512512 case lists:all(fun (Node) ->
513513 verify_status_equal(Node, Status, AllNodes)
153153 wait_for_sync_status(0, Max, Status, pget(node, Cfg), Queue).
154154
155155 wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
156 error({sync_status_max_tries_failed,
157 [{queue, Queue},
158 {node, Node},
159 {expected_status, Status},
160 {max_tried, Max}]});
156 erlang:error({sync_status_max_tries_failed,
157 [{queue, Queue},
158 {node, Node},
159 {expected_status, Status},
160 {max_tried, Max}]});
161161 wait_for_sync_status(N, Max, Status, Node, Queue) ->
162162 Synced = length(slave_pids(Node, Queue)) =:= 1,
163163 case Synced =:= Status of
3232
3333 REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
3434 if "!RABBITMQ_CONF_ENV_FILE!"=="" (
35 set CONF_ENV_FILE=!APPDATA!\RabbitMQ\rabbitmq-env-conf.bat
35 set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat
3636 )
3434 REM ## Get configuration variables from the configure environment file
3535 REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
3636 if exist "!RABBITMQ_CONF_ENV_FILE!" (
37 call !RABBITMQ_CONF_ENV_FILE!
37 call "!RABBITMQ_CONF_ENV_FILE!"
3838 )
3939
4040 REM Check for the short names here too
8383 REM )
8484 REM )
8585
86 REM DOUBLE CHECK THIS LOGIC
8786 if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
88 if "!NODE_IP_ADDRESS!"=="" (
89 set RABBITMQ_NODE_IP_ADDRESS=auto
90 ) else (
87 if not "!NODE_IP_ADDRESS!"=="" (
9188 set RABBITMQ_NODE_IP_ADDRESS=!NODE_IP_ADDRESS!
9289 )
9390 )
9491
9592 if "!RABBITMQ_NODE_PORT!"=="" (
96 if "!NODE_PORT!"=="" (
97 set RABBITMQ_NODE_PORT=5672
98 ) else (
93 if not "!NODE_PORT!"=="" (
9994 set RABBITMQ_NODE_PORT=!NODE_PORT!
10095 )
96 )
97
98 if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
99 if not "!RABBITMQ_NODE_PORT!"=="" (
100 set RABBITMQ_NODE_IP_ADDRESS=auto
101 )
102 ) else (
103 if "!RABBITMQ_NODE_PORT!"=="" (
104 set RABBITMQ_NODE_PORT=5672
105 )
101106 )
102107
103108 REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
197202 REM [ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
198203 if "!RABBITMQ_PLUGINS_DIR!"=="" (
199204 if "!PLUGINS_DIR!"=="" (
200 set RABBITMQ_PLUGINS_DIR=!RABBITMQ_BASE!\plugins
205 set RABBITMQ_PLUGINS_DIR=!RABBITMQ_HOME!\plugins
201206 ) else (
202207 set RABBITMQ_PLUGINS_DIR=!PLUGINS_DIR!
203208 )
208213 REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
209214 if "!RABBITMQ_LOGS!"=="" (
210215 if "!LOGS!"=="" (
211 set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
212 ) else (
213 set LOGS=!LOGS!
216 set RABBITMQ_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
217 ) else (
218 set RABBITMQ_LOGS=!LOGS!
214219 )
215220 )
216221
218223 REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
219224 if "!RABBITMQ_SASL_LOGS!"=="" (
220225 if "!SASL_LOGS!"=="" (
221 set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
222 ) else (
223 set SASL_LOGS=!SASL_LOGS!
226 set RABBITMQ_SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
227 ) else (
228 set RABBITMQ_SASL_LOGS=!SASL_LOGS!
224229 )
225230 )
226231
2323 -pa "${RABBITMQ_HOME}/ebin" \
2424 -noinput \
2525 -hidden \
26 ${RABBITMQ_PLUGINS_ERL_ARGS} \
26 ${RABBITMQ_CTL_ERL_ARGS} \
2727 -boot "${CLEAN_BOOT_FILE}" \
2828 -s rabbit_plugins_main \
2929 -enabled_plugins_file "$RABBITMQ_ENABLED_PLUGINS_FILE" \
9898 # there is no other way of preventing their expansion.
9999 set -f
100100
101 RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
102 exec ${ERL_DIR}erl \
103 -pa ${RABBITMQ_EBIN_ROOT} \
104 ${RABBITMQ_START_RABBIT} \
105 ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
106 -boot "${SASL_BOOT_FILE}" \
107 ${RABBITMQ_CONFIG_ARG} \
108 +W w \
109 +A ${RABBITMQ_IO_THREAD_POOL_SIZE} \
110 ${RABBITMQ_SERVER_ERL_ARGS} \
111 +K true \
112 -kernel inet_default_connect_options "[{nodelay,true}]" \
113 ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
114 ${RABBITMQ_LISTEN_ARG} \
115 -sasl errlog_type error \
116 -sasl sasl_error_logger "$SASL_ERROR_LOGGER" \
117 -rabbit error_logger "$RABBIT_ERROR_LOGGER" \
118 -rabbit sasl_error_logger "$RABBIT_SASL_ERROR_LOGGER" \
119 -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \
120 -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \
121 -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \
122 -os_mon start_cpu_sup false \
123 -os_mon start_disksup false \
124 -os_mon start_memsup false \
125 -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \
126 ${RABBITMQ_SERVER_START_ARGS} \
127 ${RABBITMQ_DIST_ARG} \
128 "$@"
101 start_rabbitmq_server() {
102 RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
103 exec ${ERL_DIR}erl \
104 -pa ${RABBITMQ_EBIN_ROOT} \
105 ${RABBITMQ_START_RABBIT} \
106 ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
107 -boot "${SASL_BOOT_FILE}" \
108 ${RABBITMQ_CONFIG_ARG} \
109 +W w \
110 +A ${RABBITMQ_IO_THREAD_POOL_SIZE} \
111 ${RABBITMQ_SERVER_ERL_ARGS} \
112 +K true \
113 -kernel inet_default_connect_options "[{nodelay,true}]" \
114 ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
115 ${RABBITMQ_LISTEN_ARG} \
116 -sasl errlog_type error \
117 -sasl sasl_error_logger "$SASL_ERROR_LOGGER" \
118 -rabbit error_logger "$RABBIT_ERROR_LOGGER" \
119 -rabbit sasl_error_logger "$RABBIT_SASL_ERROR_LOGGER" \
120 -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \
121 -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \
122 -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \
123 -os_mon start_cpu_sup false \
124 -os_mon start_disksup false \
125 -os_mon start_memsup false \
126 -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \
127 ${RABBITMQ_SERVER_START_ARGS} \
128 ${RABBITMQ_DIST_ARG} \
129 "$@"
130 }
131
132 stop_rabbitmq_server() {
133 RABBITMQCTL="$(dirname "$0")/rabbitmqctl"
134
135 if ${RABBITMQCTL} -n ${RABBITMQ_NODENAME} status >/dev/null 2>&1; then
136 ${RABBITMQCTL} -n ${RABBITMQ_NODENAME} stop
137 fi
138 }
139
140 if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then
141 # When RabbitMQ runs in the foreground but the Erlang shell is
142 # disabled, we set up signal handlers to stop RabbitMQ properly. This
143 # is at least useful in the case of Docker.
144
145 # The Erlang VM should ignore SIGINT.
146 RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} +B i"
147
148 # Signal handlers. They all stop RabbitMQ properly (using
149 # rabbitmqctl stop). Depending on the signal, this script exits
150 # with either 0 or a non-zero error code:
151 # SIGHUP SIGTERM SIGTSTP
152 # These are considered a normal process termination, so the script
153 # exits with 0.
154 # SIGINT
155 # This is considered an abnormal process termination; the script
156 # exits with the job exit code.
157 trap "stop_rabbitmq_server; exit 0" HUP TERM TSTP
158 trap "stop_rabbitmq_server" INT
159
160 start_rabbitmq_server "$@" &
161
162 # Block until RabbitMQ exits or a signal is caught.
163 # Waits for last command (which is start_rabbitmq_server)
164 wait $!
165 else
166 start_rabbitmq_server "$@"
167 fi
9595 !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
9696 -sasl errlog_type error ^
9797 -sasl sasl_error_logger false ^
98 -rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
99 -rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
98 -rabbit error_logger {file,\""!RABBITMQ_LOGS:\=/!"\"} ^
99 -rabbit sasl_error_logger {file,\""!RABBITMQ_SASL_LOGS:\=/!"\"} ^
100100 -rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
101101 -rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
102102 -rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
145145
146146 set RABBITMQ_START_RABBIT=
147147 if "!RABBITMQ_NODE_ONLY!"=="" (
148 set RABBITMQ_START_RABBIT=-s rabbit boot
148 set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot
149149 )
150150
151151 if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (
160160 +W w ^
161161 +A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^
162162 +P 1048576 ^
163 -kernel inet_default_connect_options "[{nodelay,true}]" ^
164163 !RABBITMQ_LISTEN_ARG! ^
165164 !RABBITMQ_SERVER_ERL_ARGS! ^
165 -kernel inet_default_connect_options "[{nodelay,true}]" ^
166 !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
166167 -sasl errlog_type error ^
167168 -sasl sasl_error_logger false ^
168 -rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
169 -rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
169 -rabbit error_logger {file,\""!RABBITMQ_LOGS:\=/!"\"} ^
170 -rabbit sasl_error_logger {file,\""!RABBITMQ_SASL_LOGS:\=/!"\"} ^
170171 -rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
171172 -rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
172173 -rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
176177 -os_mon start_memsup false ^
177178 -mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
178179 !RABBITMQ_SERVER_START_ARGS! ^
179 !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
180180 !RABBITMQ_DIST_ARG! ^
181181 !STARVAR!
182182
190190 -stopaction "rabbit:stop_and_halt()." ^
191191 !RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^
192192 !CONSOLE_FLAG! ^
193 -comment "A robust and scalable messaging broker" ^
193 -comment "Multi-protocol open source messaging broker" ^
194194 -args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
195195
196196 goto END
2323 set STAR=%*
2424 setlocal enabledelayedexpansion
2525
26 REM Get default settings with user overrides for (RABBITMQ_)<var_name>
27 REM Non-empty defaults should be set in rabbitmq-env
28 call "%TDP0%\rabbitmq-env.bat"
29
2630 if not exist "!ERLANG_HOME!\bin\erl.exe" (
2731 echo.
2832 echo ******************************
3438 echo.
3539 exit /B 1
3640 )
37
38 REM Get default settings with user overrides for (RABBITMQ_)<var_name>
39 REM Non-empty defaults should be set in rabbitmq-env
40 call "%TDP0%\rabbitmq-env.bat"
4141
4242 "!ERLANG_HOME!\bin\erl.exe" ^
4343 -pa "!TDP0!..\ebin" ^
2626 %% receiver it will not grant any more credit to its senders when it
2727 %% is itself blocked - thus the only processes that need to check
2828 %% blocked/0 are ones that read from network sockets.
29 %%
30 %% Credit flows left to right when processes send messages down the
31 %% chain, starting at the rabbit_reader, ending at the msg_store:
32 %% reader -> channel -> queue_process -> msg_store.
33 %%
34 %% If the message store has a backlog, then it will block the
35 %% queue_process, which will block the channel, and finally the reader
36 %% will be blocked, throttling down publishers.
37 %%
38 %% Once a process is unblocked, it will grant credits up the chain,
39 %% possibly unblocking other processes:
40 %% reader <--grant channel <--grant queue_process <--grant msg_store.
41 %%
42 %% Grepping the project files for `credit_flow` will reveal the places
43 %% where this module is currently used, with extra comments on what's
44 %% going on at each instance. Note that credit flow between mirrors
45 %% synchronization has not been documented, since this doesn't affect
46 %% client publishes.
2947
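%% A minimal illustrative sketch of the handshake described above, using only
%% functions exported by this module; the process names are hypothetical:
%%
%%   %% sender side (e.g. a reader), before forwarding a message to QPid:
%%   credit_flow:send(QPid),
%%   QPid ! {deliver, Msg, self()},
%%
%%   %% receiver side, once the message from From has been processed:
%%   credit_flow:ack(From),
%%
%%   %% sender side: when the receiver grants more credit it sends a bump
%%   %% message, which must be handed to:
%%   credit_flow:handle_bump_msg(BumpMsg),
%%
%%   %% a socket reader should stop reading while credit_flow:blocked()
%%   %% returns true, throttling upstream publishers.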
30 -define(DEFAULT_CREDIT, {200, 50}).
48 -define(DEFAULT_INITIAL_CREDIT, 200).
49 -define(DEFAULT_MORE_CREDIT_AFTER, 50).
50
51 -define(DEFAULT_CREDIT,
52 case get(credit_flow_default_credit) of
53 undefined ->
54 Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit,
55 {?DEFAULT_INITIAL_CREDIT,
56 ?DEFAULT_MORE_CREDIT_AFTER}),
57 put(credit_flow_default_credit, Val),
58 Val;
59 Val -> Val
60 end).
3161
3262 -export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]).
3363 -export([peer_down/1]).
6090 %% We deliberately allow Var to escape from the case here
6191 %% to be used in Expr. Any temporary var we introduced
6292 %% would also escape, and might conflict.
63 case get(Key) of
64 undefined -> Var = Default;
65 Var -> ok
93 Var = case get(Key) of
94 undefined -> Default;
95 V -> V
6696 end,
6797 put(Key, Expr)
6898 end).
158188 case blocked() of
159189 false -> case erase(credit_deferred) of
160190 undefined -> ok;
161 Credits -> [To ! Msg || {To, Msg} <- Credits]
191 Credits -> _ = [To ! Msg || {To, Msg} <- Credits],
192 ok
162193 end;
163194 true -> ok
164195 end.
130130 end, {[], BadPids}, ResultsNoNode).
131131
132132 invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
133 safe_invoke(Pid, FunOrMFA), %% we don't care about any error
133 _ = safe_invoke(Pid, FunOrMFA), %% we don't care about any error
134134 ok;
135135 invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) ->
136136 invoke_no_result([Pid], FunOrMFA);
138138 invoke_no_result([], _FunOrMFA) -> %% optimisation
139139 ok;
140140 invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
141 safe_invoke(Pid, FunOrMFA), %% must not die
141 _ = safe_invoke(Pid, FunOrMFA), %% must not die
142142 ok;
143143 invoke_no_result(Pids, FunOrMFA) when is_list(Pids) ->
144144 {LocalPids, Grouped} = group_pids_by_node(Pids),
148148 RemoteNodes, delegate(self(), RemoteNodes),
149149 {invoke, FunOrMFA, Grouped})
150150 end,
151 safe_invoke(LocalPids, FunOrMFA), %% must not die
151 _ = safe_invoke(LocalPids, FunOrMFA), %% must not die
152152 ok.
153153
154154 monitor(process, Pid) when node(Pid) =:= node() ->
246246 {noreply, State#state{monitors = Monitors1}, hibernate};
247247
248248 handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) ->
249 safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA),
249 _ = safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA),
250250 {noreply, State, hibernate}.
251251
252252 handle_info({'DOWN', Ref, process, Pid, Info},
6868 %% primary key.
6969 insert(PK, [], V, {P, S}) ->
7070 %% dummy insert to force error if PK exists
71 gb_trees:insert(PK, {gb_sets:empty(), V}, P),
71 _ = gb_trees:insert(PK, {gb_sets:empty(), V}, P),
7272 {P, S};
7373 insert(PK, SKs, V, {P, S}) ->
7474 {gb_trees:insert(PK, {gb_sets:from_list(SKs), V}, P),
342342 [Ref], keep,
343343 fun ([#handle { is_read = false }]) ->
344344 {error, not_open_for_reading};
345 ([#handle{read_buffer_size_limit = 0,
346 hdl = Hdl, offset = Offset} = Handle]) ->
347 %% The read buffer is disabled. This is just an
348 %% optimization: the clauses below can handle this case.
349 case prim_file_read(Hdl, Count) of
350 {ok, Data} -> {{ok, Data},
351 [Handle#handle{offset = Offset+size(Data)}]};
352 eof -> {eof, [Handle #handle { at_eof = true }]};
353 Error -> {Error, Handle}
354 end;
345355 ([Handle = #handle{read_buffer = Buf,
346356 read_buffer_pos = BufPos,
347357 read_buffer_rem = BufRem,
583593 info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
584594
585595 clear_read_cache() ->
586 gen_server2:cast(?SERVER, clear_read_cache),
587 clear_vhost_read_cache(rabbit_vhost:list()).
596 case application:get_env(rabbit, fhc_read_buffering) of
597 {ok, true} ->
598 gen_server2:cast(?SERVER, clear_read_cache),
599 clear_vhost_read_cache(rabbit_vhost:list());
600 _ -> %% undefined or {ok, false}
601 ok
602 end.
588603
589604 clear_vhost_read_cache([]) ->
590605 ok;
601616 %% process because the read buffer is stored in the process
602617 %% dictionary.
603618 Fun = fun(_, State) ->
604 clear_process_read_cache(),
619 _ = clear_process_read_cache(),
605620 State
606621 end,
607622 [rabbit_amqqueue:run_backing_queue(Pid, rabbit_variable_queue, Fun)
659674 end,
660675 case Fun(Handles) of
661676 {Result, Handles1} when is_list(Handles1) ->
662 lists:zipwith(fun put_handle/2, Refs, Handles1),
677 _ = lists:zipwith(fun put_handle/2, Refs, Handles1),
663678 Result;
664679 Result ->
665680 Result
801816 case gb_trees:is_empty(Tree) of
802817 true -> Tree;
803818 false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
804 gen_server2:cast(?SERVER, {update, self(), Oldest})
805 end,
806 Tree
819 gen_server2:cast(?SERVER, {update, self(), Oldest}),
820 Tree
821 end
807822 end).
808823
809824 oldest(Tree, DefaultFun) ->
815830
816831 new_closed_handle(Path, Mode, Options) ->
817832 WriteBufferSize =
818 case proplists:get_value(write_buffer, Options, unbuffered) of
819 unbuffered -> 0;
820 infinity -> infinity;
821 N when is_integer(N) -> N
833 case application:get_env(rabbit, fhc_write_buffering) of
834 {ok, false} -> 0;
835 {ok, true} ->
836 case proplists:get_value(write_buffer, Options, unbuffered) of
837 unbuffered -> 0;
838 infinity -> infinity;
839 N when is_integer(N) -> N
840 end
822841 end,
823842 ReadBufferSize =
824 case proplists:get_value(read_buffer, Options, unbuffered) of
825 unbuffered -> 0;
826 N2 when is_integer(N2) -> N2
843 case application:get_env(rabbit, fhc_read_buffering) of
844 {ok, false} -> 0;
845 {ok, true} ->
846 case proplists:get_value(read_buffer, Options, unbuffered) of
847 unbuffered -> 0;
848 N2 when is_integer(N2) -> N2
849 end
827850 end,
828851 Ref = make_ref(),
829852 put({Ref, fhc_handle}, #handle { hdl = closed,
10481071 %%----------------------------------------------------------------------------
10491072
10501073 init([AlarmSet, AlarmClear]) ->
1051 file_handle_cache_stats:init(),
1074 _ = file_handle_cache_stats:init(),
10521075 Limit = case application:get_env(file_handles_high_watermark) of
10531076 {ok, Watermark} when (is_integer(Watermark) andalso
10541077 Watermark > 0) ->
11871210 State)))};
11881211
11891212 handle_cast(clear_read_cache, State) ->
1190 clear_process_read_cache(),
1213 _ = clear_process_read_cache(),
11911214 {noreply, State}.
11921215
11931216 handle_info(check_counts, State) ->
119119 handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) ->
120120 NewForks = Forks - 1,
121121 NewBlocked = case NewForks of
122 0 -> [gen_server2:reply(From, empty) ||
123 From <- queue:to_list(Blocked)],
122 0 -> _ = [gen_server2:reply(From, empty) ||
123 From <- queue:to_list(Blocked)],
124124 queue:new();
125125 _ -> Blocked
126126 end,
632632 %%% The MAIN loop.
633633 %%% ---------------------------------------------------
634634 loop(GS2State = #gs2_state { time = hibernate,
635 timeout_state = undefined }) ->
636 pre_hibernate(GS2State);
635 timeout_state = undefined,
636 queue = Queue }) ->
637 case priority_queue:is_empty(Queue) of
638 true ->
639 pre_hibernate(GS2State);
640 false ->
641 process_next_msg(GS2State)
642 end;
643
637644 loop(GS2State) ->
638645 process_next_msg(drain(GS2State)).
639646
419419 broadcast_buffer,
420420 broadcast_buffer_sz,
421421 broadcast_timer,
422 txn_executor
422 txn_executor,
423 shutting_down
423424 }).
424425
425426 -record(gm_group, { name, version, members }).
550551 init([GroupName, Module, Args, TxnFun]) ->
551552 put(process_name, {?MODULE, GroupName}),
552553 {MegaSecs, Secs, MicroSecs} = now(),
553 random:seed(MegaSecs, Secs, MicroSecs),
554 _ = random:seed(MegaSecs, Secs, MicroSecs),
554555 Self = make_member(GroupName),
555556 gen_server2:cast(self(), join),
556557 {ok, #state { self = Self,
566567 broadcast_buffer = [],
567568 broadcast_buffer_sz = 0,
568569 broadcast_timer = undefined,
569 txn_executor = TxnFun }, hibernate,
570 txn_executor = TxnFun,
571 shutting_down = false }, hibernate,
570572 {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
571573
574
575 handle_call({confirmed_broadcast, _Msg}, _From,
576 State = #state { shutting_down = {true, _} }) ->
577 reply(shutting_down, State);
572578
573579 handle_call({confirmed_broadcast, _Msg}, _From,
574580 State = #state { members_state = undefined }) ->
643649 handle_callback_result(
644650 if_callback_success(
645651 Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1));
652
653 handle_cast({broadcast, _Msg, _SizeHint},
654 State = #state { shutting_down = {true, _} }) ->
655 noreply(State);
646656
647657 handle_cast({broadcast, _Msg, _SizeHint},
648658 State = #state { members_state = undefined }) ->
741751 end.
742752
743753
744 terminate(Reason, State = #state { module = Module,
745 callback_args = Args }) ->
746 flush_broadcast_buffer(State),
754 terminate(Reason, #state { module = Module, callback_args = Args }) ->
747755 Module:handle_terminate(Args, Reason).
748756
749757
892900 State;
893901 ensure_broadcast_timer(State = #state { broadcast_buffer = [],
894902 broadcast_timer = TRef }) ->
895 erlang:cancel_timer(TRef),
903 _ = erlang:cancel_timer(TRef),
896904 State #state { broadcast_timer = undefined };
897905 ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
898906 TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
14261434 activity_false(Result, _Activity, State) ->
14271435 {Result, State}.
14281436
1429 if_callback_success(ok, True, _False, Arg, State) ->
1437 if_callback_success(Result, True, False, Arg, State) ->
1438 {NewResult, NewState} = maybe_stop(Result, State),
1439 if_callback_success1(NewResult, True, False, Arg, NewState).
1440
1441 if_callback_success1(ok, True, _False, Arg, State) ->
14301442 True(ok, Arg, State);
1431 if_callback_success(
1443 if_callback_success1(
14321444 {become, Module, Args} = Result, True, _False, Arg, State) ->
14331445 True(Result, Arg, State #state { module = Module,
14341446 callback_args = Args });
1435 if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) ->
1447 if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) ->
14361448 False(Result, Arg, State).
1449
1450 maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) ->
1451 ShuttingDown = {true, Reason},
1452 case has_pending_messages(State) of
1453 true -> {ok, State #state{ shutting_down = ShuttingDown }};
1454 false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }}
1455 end;
1456 maybe_stop(Result, #state{ shutting_down = false } = State) ->
1457 {Result, State};
1458 maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) ->
1459 case has_pending_messages(State) of
1460 true -> {Result, State};
1461 false -> {{stop, Reason}, State}
1462 end.
1463
1464 has_pending_messages(#state{ broadcast_buffer = Buffer })
1465 when Buffer =/= [] ->
1466 true;
1467 has_pending_messages(#state{ members_state = MembersState }) ->
1468 [] =/= [M || {_, #member{last_pub = LP, last_ack = LA} = M}
1469 <- MembersState,
1470 LP =/= LA].
14371471
14381472 maybe_confirm(_Self, _Id, Confirms, []) ->
14391473 Confirms;
14511485 Confirms.
14521486
14531487 purge_confirms(Confirms) ->
1454 [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
1488 _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
14551489 queue:new().
14561490
14571491
346346 {noreply, State};
347347
348348 handle_cast({die, Reason}, State = #state{group = Group}) ->
349 tell_all_peers_to_die(Group, Reason),
349 _ = tell_all_peers_to_die(Group, Reason),
350350 {stop, Reason, State};
351351
352352 handle_cast(Msg, State) ->
363363 %%
364364 %% Therefore if we get here we know we need to cause the entire
365365 %% mirrored sup to shut down, not just fail over.
366 tell_all_peers_to_die(Group, Reason),
366 _ = tell_all_peers_to_die(Group, Reason),
367367 {stop, Reason, State};
368368
369369 handle_info({'DOWN', _Ref, process, Pid, _Reason},
410410
411411 check_start(Group, Overall, Delegate, ChildSpec) ->
412412 case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
413 [] -> write(Group, Overall, ChildSpec),
413 [] -> _ = write(Group, Overall, ChildSpec),
414414 start;
415415 [S] -> #mirrored_sup_childspec{key = {Group, Id},
416416 mirroring_pid = Pid} = S,
417417 case Overall of
418418 Pid -> child(Delegate, Id);
419419 _ -> case supervisor(Pid) of
420 dead -> write(Group, Overall, ChildSpec),
420 dead -> _ = write(Group, Overall, ChildSpec),
421421 start;
422422 Delegate0 -> child(Delegate0, Id)
423423 end
6464
6565 handle_info(timeout, #state{waiting = Waiting} = State) ->
6666 ok = disk_log:sync(latest_log),
67 [gen_server:reply(From, ok) || From <- Waiting],
67 _ = [gen_server:reply(From, ok) || From <- Waiting],
6868 {noreply, State#state{waiting = []}};
6969 handle_info(Message, State) ->
7070 {stop, {unhandled_info, Message}, State}.
5050 -spec create(term()) -> 'ok'.
5151
5252 create(Name) ->
53 ensure_started(),
53 _ = ensure_started(),
5454 case ets:member(pg2_fixed_table, {group, Name}) of
5555 false ->
5656 global:trans({{?MODULE, Name}, self()},
6767 -spec delete(name()) -> 'ok'.
6868
6969 delete(Name) ->
70 ensure_started(),
70 _ = ensure_started(),
7171 global:trans({{?MODULE, Name}, self()},
7272 fun() ->
7373 gen_server:multi_call(?MODULE, {delete, Name})
7777 -spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}.
7878
7979 join(Name, Pid) when is_pid(Pid) ->
80 ensure_started(),
80 _ = ensure_started(),
8181 case ets:member(pg2_fixed_table, {group, Name}) of
8282 false ->
8383 {error, {no_such_group, Name}};
9393 -spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}.
9494
9595 leave(Name, Pid) when is_pid(Pid) ->
96 ensure_started(),
96 _ = ensure_started(),
9797 case ets:member(pg2_fixed_table, {group, Name}) of
9898 false ->
9999 {error, {no_such_group, Name}};
109109 -type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}.
110110
111111 -spec get_members(name()) -> get_members_ret().
112
112
113113 get_members(Name) ->
114 ensure_started(),
114 _ = ensure_started(),
115115 case ets:member(pg2_fixed_table, {group, Name}) of
116116 true ->
117117 group_members(Name);
122122 -spec get_local_members(name()) -> get_members_ret().
123123
124124 get_local_members(Name) ->
125 ensure_started(),
125 _ = ensure_started(),
126126 case ets:member(pg2_fixed_table, {group, Name}) of
127127 true ->
128128 local_group_members(Name);
133133 -spec which_groups() -> [name()].
134134
135135 which_groups() ->
136 ensure_started(),
136 _ = ensure_started(),
137137 all_groups().
138138
139139 -type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}.
168168
169169 init([]) ->
170170 Ns = nodes(),
171 net_kernel:monitor_nodes(true),
171 _ = net_kernel:monitor_nodes(true),
172172 lists:foreach(fun(N) ->
173173 {?MODULE, N} ! {new_pg2_fixed, node()},
174174 self() ! {nodeup, N}
181181 | {'join', name(), pid()}
182182 | {'leave', name(), pid()}.
183183
184 -spec handle_call(call(), _, #state{}) ->
184 -spec handle_call(call(), _, #state{}) ->
185185 {'reply', 'ok', #state{}}.
186186
187187 handle_call({create, Name}, _From, S) ->
189189 {reply, ok, S};
190190 handle_call({join, Name, Pid}, _From, S) ->
191191 case ets:member(pg2_fixed_table, {group, Name}) of
192 true -> join_group(Name, Pid);
192 true -> _ = join_group(Name, Pid),
193 ok;
193194 _ -> ok
194195 end,
195196 {reply, ok, S};
204205 {reply, ok, S};
205206 handle_call(Request, From, S) ->
206207 error_logger:warning_msg("The pg2_fixed server received an unexpected message:\n"
207 "handle_call(~p, ~p, _)\n",
208 "handle_call(~p, ~p, _)\n",
208209 [Request, From]),
209210 {noreply, S}.
210211
264265 %%% Pid is a member of group Name.
265266
266267 store(List) ->
267 _ = [case assure_group(Name) of
268 true ->
269 [join_group(Name, P) || P <- Members -- group_members(Name)];
270 _ ->
271 ok
272 end || [Name, Members] <- List],
268 _ = [assure_group(Name)
269 andalso
270 [join_group(Name, P) || P <- Members -- group_members(Name)] ||
271 [Name, Members] <- List],
273272 ok.
274273
275274 assure_group(Name) ->
284283 member_died(Ref) ->
285284 [{{ref, Ref}, Pid}] = ets:lookup(pg2_fixed_table, {ref, Ref}),
286285 Names = member_groups(Pid),
287 _ = [leave_group(Name, P) ||
286 _ = [leave_group(Name, P) ||
288287 Name <- Names,
289288 P <- member_in_group(Pid, Name)],
290289 %% Kept for backward compatibility with links. Can be removed, eventually.
293292 ok.
294293
295294 join_group(Name, Pid) ->
296 Ref_Pid = {ref, Pid},
295 Ref_Pid = {ref, Pid},
297296 try _ = ets:update_counter(pg2_fixed_table, Ref_Pid, {4, +1})
298297 catch _:_ ->
299298 {RPid, Ref} = do_monitor(Pid),
313312 Member_Name_Pid = {member, Name, Pid},
314313 try ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, -1, 0, 0}) of
315314 N ->
316 if
315 if
317316 N =:= 0 ->
318317 true = ets:delete(pg2_fixed_table, {pid, Pid, Name}),
319318 _ = [ets:delete(pg2_fixed_table, {local_member, Name, Pid}) ||
322321 true ->
323322 ok
324323 end,
325 Ref_Pid = {ref, Pid},
324 Ref_Pid = {ref, Pid},
326325 case ets:update_counter(pg2_fixed_table, Ref_Pid, {4, -1}) of
327326 0 ->
328327 [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_fixed_table, Ref_Pid),
341340 [[G, group_members(G)] || G <- all_groups()].
342341
343342 group_members(Name) ->
344 [P ||
343 [P ||
345344 [P, N] <- ets:match(pg2_fixed_table, {{member, Name, '$1'},'$2'}),
346345 _ <- lists:seq(1, N)].
347346
348347 local_group_members(Name) ->
349 [P ||
348 [P ||
350349 [Pid] <- ets:match(pg2_fixed_table, {{local_member, Name, '$1'}}),
351350 P <- member_in_group(Pid, Name)].
352351
388387 %% Assume the node is still up
389388 {Pid, erlang:monitor(process, Pid)};
390389 false ->
391 F = fun() ->
390 F = fun() ->
392391 Ref = erlang:monitor(process, Pid),
393 receive
392 receive
394393 {'DOWN', Ref, process, Pid, _Info} ->
395394 exit(normal)
396395 end
1515 %% All modifications are (C) 2010-2013 GoPivotal, Inc.
1616
1717 %% %CopyrightBegin%
18 %%
18 %%
1919 %% Copyright Ericsson AB 1997-2009. All Rights Reserved.
20 %%
20 %%
2121 %% The contents of this file are subject to the Erlang Public License,
2222 %% Version 1.1, (the "License"); you may not use this file except in
2323 %% compliance with the License. You should have received a copy of the
2424 %% Erlang Public License along with this software. If not, it can be
2525 %% retrieved online at http://www.erlang.org/.
26 %%
26 %%
2727 %% Software distributed under the License is distributed on an "AS IS"
2828 %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
2929 %% the License for the specific language governing rights and limitations
3030 %% under the License.
31 %%
31 %%
3232 %% %CopyrightEnd%
3333 %%
3434 -module(pg_local).
7070 ensure_started().
7171
7272 join(Name, Pid) when is_pid(Pid) ->
73 ensure_started(),
73 _ = ensure_started(),
7474 gen_server:cast(?MODULE, {join, Name, Pid}).
7575
7676 leave(Name, Pid) when is_pid(Pid) ->
77 ensure_started(),
77 _ = ensure_started(),
7878 gen_server:cast(?MODULE, {leave, Name, Pid}).
7979
8080 get_members(Name) ->
81 ensure_started(),
81 _ = ensure_started(),
8282 group_members(Name).
8383
8484 in_group(Name, Pid) ->
85 ensure_started(),
85 _ = ensure_started(),
8686 %% The join message is a cast and thus can race, but we want to
8787 %% keep it that way to be fast in the common case.
8888 case member_present(Name, Pid) of
9292 end.
9393
9494 sync() ->
95 ensure_started(),
95 _ = ensure_started(),
9696 gen_server:call(?MODULE, sync, infinity).
9797
9898 %%%
110110
111111 handle_call(Request, From, S) ->
112112 error_logger:warning_msg("The pg_local server received an unexpected message:\n"
113 "handle_call(~p, ~p, _)\n",
113 "handle_call(~p, ~p, _)\n",
114114 [Request, From]),
115115 {noreply, S}.
116116
117117 handle_cast({join, Name, Pid}, S) ->
118 join_group(Name, Pid),
118 _ = join_group(Name, Pid),
119119 {noreply, S};
120120 handle_cast({leave, Name, Pid}, S) ->
121121 leave_group(Name, Pid),
154154 member_died(Ref) ->
155155 [{{ref, Ref}, Pid}] = ets:lookup(pg_local_table, {ref, Ref}),
156156 Names = member_groups(Pid),
157 _ = [leave_group(Name, P) ||
157 _ = [leave_group(Name, P) ||
158158 Name <- Names,
159159 P <- member_in_group(Pid, Name)],
160160 ok.
161161
162162 join_group(Name, Pid) ->
163 Ref_Pid = {ref, Pid},
163 Ref_Pid = {ref, Pid},
164164 try _ = ets:update_counter(pg_local_table, Ref_Pid, {3, +1})
165165 catch _:_ ->
166166 Ref = erlang:monitor(process, Pid),
178178 Member_Name_Pid = {member, Name, Pid},
179179 try ets:update_counter(pg_local_table, Member_Name_Pid, {2, -1}) of
180180 N ->
181 if
181 if
182182 N =:= 0 ->
183183 true = ets:delete(pg_local_table, {pid, Pid, Name}),
184184 true = ets:delete(pg_local_table, Member_Name_Pid);
185185 true ->
186186 ok
187187 end,
188 Ref_Pid = {ref, Pid},
188 Ref_Pid = {ref, Pid},
189189 case ets:update_counter(pg_local_table, Ref_Pid, {3, -1}) of
190190 0 ->
191191 [{Ref_Pid,Ref,0}] = ets:lookup(pg_local_table, Ref_Pid),
201201 end.
202202
203203 group_members(Name) ->
204 [P ||
204 [P ||
205205 [P, N] <- ets:match(pg_local_table, {{member, Name, '$1'},'$2'}),
206206 _ <- lists:seq(1, N)].
207207
8383 case dict:find(Item, M) of
8484 {ok, MRef} -> Module:demonitor(MRef),
8585 S#state{dict = dict:erase(Item, M)};
86 error -> M
86 error -> S
8787 end.
8888
8989 is_monitored(Item, #state{dict = M}) -> dict:is_key(Item, M).
2828 %%---------------------------------------------------------------------------
2929 %% Boot steps.
3030 -export([maybe_insert_default_data/0, boot_delegate/0, recover/0]).
31
32 %% for tests
33 -export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]).
3134
3235 -rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}).
3336
519522 print_banner(),
520523 log_banner(),
521524 warn_if_kernel_config_dubious(),
525 warn_if_disc_io_options_dubious(),
522526 run_boot_steps(),
523527 {ok, SupPid};
524528 Error ->
847851 true -> ok
848852 end.
849853
854 warn_if_disc_io_options_dubious() ->
855 %% if these values are not set, it doesn't matter since
856 %% rabbit_variable_queue will pick up the values defined in the
857 %% IO_BATCH_SIZE and CREDIT_DISC_BOUND constants.
858 CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
859 undefined),
860 IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
861 undefined),
862 case catch validate_msg_store_io_batch_size_and_credit_disc_bound(
863 CreditDiscBound, IoBatchSize) of
864 ok -> ok;
865 {error, {Reason, Vars}} ->
866 rabbit_log:warning(Reason, Vars)
867 end.
868
869 validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
870 IoBatchSize) ->
871 case IoBatchSize of
872 undefined ->
873 ok;
874 IoBatchSize when is_integer(IoBatchSize) ->
875 if IoBatchSize < ?IO_BATCH_SIZE ->
876 throw({error,
877 {"io_batch_size of ~b lower than recommended value ~b, "
878 "paging performance may worsen~n",
879 [IoBatchSize, ?IO_BATCH_SIZE]}});
880 true ->
881 ok
882 end;
883 IoBatchSize ->
884 throw({error,
885 {"io_batch_size should be an integer, but ~b given",
886 [IoBatchSize]}})
887 end,
888
889 %% CreditDiscBound = {InitialCredit, MoreCreditAfter}
890 {RIC, RMCA} = ?CREDIT_DISC_BOUND,
891 case CreditDiscBound of
892 undefined ->
893 ok;
894 {IC, MCA} when is_integer(IC), is_integer(MCA) ->
895 if IC < RIC; MCA < RMCA ->
896 throw({error,
897 {"msg_store_credit_disc_bound {~b, ~b} lower than "
898 "recommended value {~b, ~b},"
899 " paging performance may worsen~n",
900 [IC, MCA, RIC, RMCA]}});
901 true ->
902 ok
903 end;
904 {IC, MCA} ->
905 throw({error,
906 {"both msg_store_credit_disc_bound values should be integers, but ~p given",
907 [{IC, MCA}]}});
908 CreditDiscBound ->
909 throw({error,
910 {"invalid msg_store_credit_disc_bound value given: ~p",
911 [CreditDiscBound]}})
912 end,
913
914 case {CreditDiscBound, IoBatchSize} of
915 {undefined, undefined} ->
916 ok;
917 {_CDB, undefined} ->
918 ok;
919 {undefined, _IBS} ->
920 ok;
921 {{InitialCredit, _MCA}, IoBatchSize} ->
922 if IoBatchSize < InitialCredit ->
923 throw(
924 {error,
925 {"msg_store_io_batch_size ~b should be bigger than the initial "
926 "credit value from msg_store_credit_disc_bound ~b,"
927 " paging performance may worsen~n",
928 [IoBatchSize, InitialCredit]}});
929 true ->
930 ok
931 end
932 end.
933
850934 home_dir() ->
851935 case init:get_argument(home) of
852936 {ok, [[Home]]} -> Home;
897981 %% file_handle_cache, we spawn a separate process.
898982 Parent = self(),
899983 TestFun = fun() ->
984 ReadBuf = case application:get_env(rabbit, fhc_read_buffering) of
985 {ok, true} -> "ON";
986 {ok, false} -> "OFF"
987 end,
988 WriteBuf = case application:get_env(rabbit, fhc_write_buffering) of
989 {ok, true} -> "ON";
990 {ok, false} -> "OFF"
991 end,
992 rabbit_log:info(
993 "FHC read buffering: ~s~n"
994 "FHC write buffering: ~s~n", [ReadBuf, WriteBuf]),
900995 Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"),
901996 {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []),
902997 {ok, _} = file_handle_cache:read(Fd, 1),
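The flags logged above are plain application environment settings; a hedged rabbitmq.config fragment (defaults may differ between platforms and releases):

[{rabbit, [{fhc_read_buffering,  false},
           {fhc_write_buffering, true}]}].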
7575 %% it gives us
7676 case try_authenticate(Mod, Username, AuthProps) of
7777 {ok, ModNUser = #auth_user{impl = Impl}} ->
78 user(ModNUser, {ok, [{Mod, Impl}]});
78 user(ModNUser, {ok, [{Mod, Impl}], []});
7979 Else ->
8080 Else
8181 end;
9797
9898 try_authorize(Modules, Username) ->
9999 lists:foldr(
100 fun (Module, {ok, ModsImpls}) ->
100 fun (Module, {ok, ModsImpls, ModsTags}) ->
101101 case Module:user_login_authorization(Username) of
102 {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls]};
102 {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags};
103 {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls], ModsTags};
103104 {error, E} -> {refused, Username,
104105 "~s failed authorizing ~s: ~p~n",
105106 [Module, Username, E]};
107108 end;
108109 (_, {refused, F, A}) ->
109110 {refused, Username, F, A}
110 end, {ok, []}, Modules).
111 end, {ok, [], []}, Modules).
111112
112 user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls}) ->
113 user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls, ModZTags}) ->
113114 {ok, #user{username = Username,
114 tags = Tags,
115 tags = Tags ++ ModZTags,
115116 authz_backends = ModZImpls}};
116117 user(_AuthUser, Error) ->
117118 Error.
872872 %% the slave receives the message direct from the channel, and the
873873 %% other when it receives it via GM.
874874 case Flow of
875 %% Here we are tracking messages sent by the rabbit_channel
876 %% process. We are accessing the rabbit_channel process
877 %% dictionary.
875878 flow -> [credit_flow:send(QPid) || QPid <- QPids],
876879 [credit_flow:send(QPid) || QPid <- SPids];
877880 noflow -> ok
9191 durable,
9292 auto_delete,
9393 arguments,
94 owner_pid
94 owner_pid,
95 exclusive
9596 ]).
9697
9798 -define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]).
661662 exclusive_consumer = Holder,
662663 senders = Senders}) ->
663664 State1 = State#q{senders = case pmon:is_monitored(DownPid, Senders) of
664 false -> Senders;
665 true -> credit_flow:peer_down(DownPid),
666 pmon:demonitor(DownPid, Senders)
665 false ->
666 Senders;
667 true ->
668 %% A rabbit_channel process died. Here credit_flow will take care
669 %% of cleaning up the rabbit_amqqueue_process process dictionary
670 %% with regards to the credit we were tracking for the channel
671 %% process. See handle_cast({deliver, Deliver}, State) in this
672 %% module. In that cast function we process deliveries from the
673 %% channel, which means we credit_flow:ack/1 said
674 %% messages. credit_flow:ack'ing messages means we are increasing
675 %% a counter to know when we need to send MoreCreditAfter. Since
676 %% the process died, the credit_flow flow module will clean up
677 %% that for us.
678 credit_flow:peer_down(DownPid),
679 pmon:demonitor(DownPid, Senders)
667680 end},
668681 case rabbit_queue_consumers:erase_ch(DownPid, Consumers) of
669682 not_found ->
816829 '';
817830 i(owner_pid, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) ->
818831 ExclusiveOwner;
832 i(exclusive, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) ->
833 is_pid(ExclusiveOwner);
819834 i(policy, #q{q = Q}) ->
820835 case rabbit_policy:name(Q) of
821836 none -> '';
11091124 flow = Flow}, SlaveWhenPublished},
11101125 State = #q{senders = Senders}) ->
11111126 Senders1 = case Flow of
1127 %% In both credit_flow:ack/1 calls we are acking messages to the
1128 %% channel process that sent us the message delivery. See
1129 %% handle_ch_down for more info.
11121130 flow -> credit_flow:ack(Sender),
11131131 case SlaveWhenPublished of
11141132 true -> credit_flow:ack(Sender); %% [0]
12881306
12891307 handle_info({bump_credit, Msg}, State = #q{backing_queue = BQ,
12901308 backing_queue_state = BQS}) ->
1309 %% The message_store is granting us more credit. This means the
1310 %% backing queue (for the rabbit_variable_queue case) might
1311 %% continue paging messages to disk if it still needs to. We
1312 %% consume credits from the message_store whenever we need to
1313 %% persist a message to disk. See:
1314 %% rabbit_variable_queue:msg_store_write/4.
12911315 credit_flow:handle_bump_msg(Msg),
12921316 noreply(State#q{backing_queue_state = BQ:resume(BQS)});
12931317
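A hedged sketch of the credit_flow pairing the comments above describe (module and message shapes are illustrative, not part of the broker): the sender debits credit before casting, the receiver acks as it processes, and a blocked sender resumes when a {bump_credit, ...} message arrives.

-module(credit_flow_example). %% hypothetical module, for illustration only

-export([send_with_flow/2, on_deliver/2, on_bump/1]).

%% Sender side: debit credit before casting; this sender may become blocked.
send_with_flow(ReceiverPid, Msg) ->
    credit_flow:send(ReceiverPid),
    gen_server2:cast(ReceiverPid, {deliver, Msg, self(), flow}).

%% Receiver side: ack as the delivery is processed, which may cause the
%% receiver to grant more credit back to the sender.
on_deliver(SenderPid, flow)    -> credit_flow:ack(SenderPid);
on_deliver(_SenderPid, noflow) -> ok.

%% Sender side: a {bump_credit, ...} message unblocks the sender if no
%% other peer is still withholding credit.
on_bump(BumpMsg) -> credit_flow:handle_bump_msg(BumpMsg).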
9191
9292 user_login_authorization(Username) ->
9393 case user_login_authentication(Username, []) of
94 {ok, #auth_user{impl = Impl}} -> {ok, Impl};
95 Else -> Else
94 {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags};
95 Else -> Else
9696 end.
9797
9898 internal_check_user_login(Username, Fun) ->
2828 %%
2929 %% Possible responses:
3030 %% {ok, Impl}
31 %% User authorisation succeeded, and here's the impl field.
31 %% {ok, Impl, Tags}
32 %% User authorisation succeeded; here are the impl field and any extra tags.
3233 %% {error, Error}
3334 %% Something went wrong. Log and die.
3435 %% {refused, Msg, Args}
3536 %% User authorisation failed. Log and die.
3637 -callback user_login_authorization(rabbit_types:username()) ->
3738 {'ok', any()} |
39 {'ok', any(), any()} |
3840 {'refused', string(), [any()]} |
3941 {'error', any()}.
4042
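A hedged sketch of a backend implementing the extended return shape (module name and tag are hypothetical; only the callback changed above is shown, the remaining authz callbacks are omitted):

-module(example_authz_backend). %% hypothetical illustration only

-export([user_login_authorization/1]).

%% Grants access and contributes an extra 'monitoring' tag, which
%% try_authorize/2 appends to the user's existing tags.
user_login_authorization(_Username) ->
    {ok, none, [monitoring]}.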
132132 gen_server2:cast(Pid, {method, Method, Content, noflow}).
133133
134134 do_flow(Pid, Method, Content) ->
135 %% Here we are tracking messages sent by the rabbit_reader
136 %% process. We are accessing the rabbit_reader process dictionary.
135137 credit_flow:send(Pid),
136138 gen_server2:cast(Pid, {method, Method, Content, flow}).
137139
326328 State = #ch{reader_pid = Reader,
327329 virtual_host = VHost}) ->
328330 case Flow of
331 %% We are going to process a message from the rabbit_reader
332 %% process, so here we ack it. In this case we are accessing
333 %% the rabbit_channel process dictionary.
329334 flow -> credit_flow:ack(Reader);
330335 noflow -> ok
331336 end,
434439 noreply_coalesce(record_confirms(MXs, State#ch{unconfirmed = UC1})).
435440
436441 handle_info({bump_credit, Msg}, State) ->
442 %% A rabbit_amqqueue_process is granting credit to our channel. If
443 %% our channel was being blocked by this process, and no other
444 %% process is blocking our channel, then this channel will be
445 %% unblocked. This means that any credit that was deferred will be
446 %% sent to the rabbit_reader process that might be blocked by this
447 %% particular channel.
437448 credit_flow:handle_bump_msg(Msg),
438449 noreply(State);
439450
451462 State1 = handle_publishing_queue_down(QPid, Reason, State),
452463 State3 = handle_consuming_queue_down(QPid, State1),
453464 State4 = handle_delivering_queue_down(QPid, State3),
465 %% A rabbit_amqqueue_process has died. If our channel was being
466 %% blocked by this process, and no other process is blocking our
467 %% channel, then this channel will be unblocked. This means that
468 %% any credit that was deferred will be sent to the rabbit_reader
469 %% process that might be blocked by this particular channel.
454470 credit_flow:peer_down(QPid),
455471 #ch{queue_names = QNames, queue_monitors = QMons} = State4,
456472 case dict:find(QPid, QNames) of
6565 case catch DoFun(Command, Node, Args, Opts) of
6666 ok ->
6767 rabbit_misc:quit(0);
68 {ok, Result} ->
69 rabbit_ctl_misc:print_cmd_result(Command, Result),
70 rabbit_misc:quit(0);
6871 {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> %% < R15
6972 PrintInvalidCommandError(),
7073 usage(UsageMod);
104107 {badrpc_multi, Reason, Nodes} ->
105108 print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
106109 print_badrpc_diagnostics(Nodes),
110 rabbit_misc:quit(2);
111 {refused, Username, _, _} ->
112 print_error("failed to authenticate user \"~s\"", [Username]),
107113 rabbit_misc:quit(2);
108114 Other ->
109115 print_error("~p", [Other]),
5151 delete_user,
5252 change_password,
5353 clear_password,
54 authenticate_user,
5455 set_user_tags,
5556 list_users,
5657
8586 close_connection,
8687 {trace_on, [?VHOST_DEF]},
8788 {trace_off, [?VHOST_DEF]},
88 set_vm_memory_high_watermark
89 set_vm_memory_high_watermark,
90 help
8991 ]).
9092
9193 -define(GLOBAL_QUERIES,
107109 [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs,
108110 join_cluster, change_cluster_node_type, update_cluster_nodes,
109111 forget_cluster_node, rename_cluster_node, cluster_status, status,
110 environment, eval, force_boot]).
112 environment, eval, force_boot, help]).
111113
112114 -define(COMMANDS_WITH_TIMEOUT,
113115 [list_user_permissions, list_policies, list_queues, list_exchanges,
377379 Inform("Clearing password for user \"~s\"", [Username]),
378380 call(Node, {rabbit_auth_backend_internal, clear_password, Args});
379381
382 action(authenticate_user, Node, Args = [Username, _Password], _Opts, Inform) ->
383 Inform("Authenticating user \"~s\"", [Username]),
384 call(Node, {rabbit_access_control, check_user_pass_login, Args});
385
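For reference, a hedged sketch of what the new action amounts to on the target node (helper name and argument handling are illustrative; the ctl wrapper prints a successful result via rabbit_ctl_misc:print_cmd_result/2 and reports a {refused, ...} result as an authentication failure with exit code 2):

%% hypothetical helper, illustration only
authenticate(Node, Username, Password) ->
    rpc:call(Node, rabbit_access_control, check_user_pass_login,
             [Username, Password]).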
380386 action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) ->
381387 Tags = [list_to_atom(T) || T <- TagsStr],
382388 Inform("Setting tags for user \"~s\" to ~p", [Username, Tags]),
408414 end),
409415 Inform("Setting memory threshold on ~p to ~p", [Node, Frac]),
410416 rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]);
417
418 action(set_vm_memory_high_watermark, Node, ["absolute", Arg], _Opts, Inform) ->
419 Limit = list_to_integer(Arg),
420 Inform("Setting memory threshold on ~p to ~p bytes", [Node, Limit]),
421 rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark,
422 [{absolute, Limit}]);
411423
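A hedged sketch of the remote call performed by the new "absolute" clause, e.g. for "rabbitmqctl set_vm_memory_high_watermark absolute 1073741824" (node name and byte count are illustrative):

rpc:call('rabbit@localhost', vm_memory_monitor,
         set_vm_memory_high_watermark, [{absolute, 1073741824}]).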
412424 action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) ->
413425 VHost = proplists:get_value(?VHOST_OPT, Opts),
481493 {error, E, _} ->
482494 {error_string, format_parse_error(E)}
483495 end;
496
497 action(help, _Node, _Args, _Opts, _Inform) ->
498 io:format("~s", [rabbit_ctl_usage:usage()]);
484499
485500 action(Command, Node, Args, Opts, Inform) ->
486501 %% For backward compatibility, run commands accepting a timeout with
661676
662677 become(BecomeNode) ->
663678 error_logger:tty(false),
664 ok = net_kernel:stop(),
665679 case net_adm:ping(BecomeNode) of
666680 pong -> exit({node_running, BecomeNode});
667 pang -> io:format(" * Impersonating node: ~s...", [BecomeNode]),
681 pang -> ok = net_kernel:stop(),
682 io:format(" * Impersonating node: ~s...", [BecomeNode]),
668683 {ok, _} = rabbit_cli:start_distribution(BecomeNode),
669684 io:format(" done~n", []),
670685 Dir = mnesia:system_info(directory),
0 %% The contents of this file are subject to the Mozilla Public License
1 %% Version 1.1 (the "License"); you may not use this file except in
2 %% compliance with the License. You may obtain a copy of the License
3 %% at http://www.mozilla.org/MPL/
4 %%
5 %% Software distributed under the License is distributed on an "AS IS"
6 %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
7 %% the License for the specific language governing rights and
8 %% limitations under the License.
9 %%
10 %% The Original Code is RabbitMQ.
11 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
13 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
14 %%
15
16 -module(rabbit_ctl_misc).
17
18 -export([print_cmd_result/2]).
19
20 %%----------------------------------------------------------------------------
21
22 -ifdef(use_specs).
23
24 -spec(print_cmd_result/2 :: (atom(), term()) -> string()).
25
26 -endif.
27
28 %%----------------------------------------------------------------------------
29
30 print_cmd_result(authenticate_user, _Result) -> io:format("Success~n").
3939 min_interval,
4040 max_interval,
4141 timer,
42 alarmed
42 alarmed,
43 enabled
4344 }).
4445
4546 %%----------------------------------------------------------------------------
9596 State = #state{dir = Dir,
9697 min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL,
9798 max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL,
98 alarmed = false},
99 alarmed = false,
100 enabled = true},
99101 case {catch get_disk_free(Dir),
100102 vm_memory_monitor:get_total_memory()} of
101103 {N1, N2} when is_integer(N1), is_integer(N2) ->
103105 Err ->
104106 rabbit_log:info("Disabling disk free space monitoring "
105107 "on unsupported platform:~n~p~n", [Err]),
106 {stop, unsupported_platform}
108 {ok, State#state{enabled = false}}
107109 end.
108110
109111 handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) ->
110112 {reply, Limit, State};
113
114 handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
115 rabbit_log:info("Cannot set disk free limit: "
116 "disabled disk free space monitoring", []),
117 {reply, ok, State};
111118
112119 handle_call({set_disk_free_limit, Limit}, _From, State) ->
113120 {reply, ok, set_disk_limits(State, Limit)};
2323
2424 -export([safe_handle_event/3]).
2525
26 %% extracted from error_logger_file_h. Since 18.1 the state of the
27 %% error logger module changed. See:
28 %% https://github.com/erlang/otp/commit/003091a1fcc749a182505ef5675c763f71eacbb0#diff-d9a19ba08f5d2b60fadfc3aa1566b324R108
29 %% github issue:
30 %% https://github.com/rabbitmq/rabbitmq-server/issues/324
31 -record(st, {fd,
32 filename,
33 prev_handler,
34 depth = unlimited}).
35
36 %% extracted from error_logger_file_h. See comment above.
37 get_depth() ->
38 case application:get_env(kernel, error_logger_format_depth) of
39 {ok, Depth} when is_integer(Depth) ->
40 erlang:max(10, Depth);
41 undefined ->
42 unlimited
43 end.
44
45 -define(ERTS_NEW_LOGGER_STATE, "7.1").
46
2647 %% rabbit_error_logger_file_h is a wrapper around the error_logger_file_h
2748 %% module because the original's init/1 does not match properly
2849 %% with the result of closing the old handler when swapping handlers.
3354 %% lib/stdlib/src/error_logger_file_h.erl from R14B3 was copied as
3455 %% init_file/2 and changed so that it opens the file in 'append' mode.
3556
36 %% Used only when swapping handlers in log rotation
57 %% Used only when swapping handlers in log rotation, pre OTP 18.1
3758 init({{File, Suffix}, []}) ->
38 case rabbit_file:append_file(File, Suffix) of
39 ok -> file:delete(File),
40 ok;
41 {error, Error} ->
42 rabbit_log:error("Failed to append contents of "
43 "log file '~s' to '~s':~n~p~n",
44 [File, [File, Suffix], Error])
45 end,
59 rotate_logs(File, Suffix),
60 init(File);
61 %% Used only when swapping handlers in log rotation, since OTP 18.1
62 init({{File, Suffix}, ok}) ->
63 rotate_logs(File, Suffix),
4664 init(File);
4765 %% Used only when swapping handlers and the original handler
4866 %% failed to terminate or was never installed
6482
6583 init_file(File, {error_logger, Buf}) ->
6684 case init_file(File, error_logger) of
67 {ok, {Fd, File, PrevHandler}} ->
68 [handle_event(Event, {Fd, File, PrevHandler}) ||
85 {ok, State} ->
86 [handle_event(Event, State) ||
6987 {_, Event} <- lists:reverse(Buf)],
70 {ok, {Fd, File, PrevHandler}};
88 {ok, State};
7189 Error ->
7290 Error
7391 end;
7492 init_file(File, PrevHandler) ->
7593 process_flag(trap_exit, true),
7694 case file:open(File, [append]) of
77 {ok,Fd} -> {ok, {Fd, File, PrevHandler}};
78 Error -> Error
95 {ok, Fd} ->
96 FoundVer = erlang:system_info(version),
97 State =
98 case rabbit_misc:version_compare(
99 ?ERTS_NEW_LOGGER_STATE, FoundVer, lte) of
100 true ->
101 #st{fd = Fd,
102 filename = File,
103 prev_handler = PrevHandler,
104 depth = get_depth()};
105 _ ->
106 {Fd, File, PrevHandler}
107 end,
108 {ok, State};
109 Error -> Error
79110 end.
80111
81112 handle_event(Event, State) ->
133164 %%----------------------------------------------------------------------
134165
135166 t(Term) -> truncate:log_event(Term, ?LOG_TRUNC).
167
168 rotate_logs(File, Suffix) ->
169 case rabbit_file:append_file(File, Suffix) of
170 ok -> file:delete(File),
171 ok;
172 {error, Error} ->
173 rabbit_log:error("Failed to append contents of "
174 "log file '~s' to '~s':~n~p~n",
175 [File, [File, Suffix], Error])
176 end.
7575 rabbit_topic_trie_edge,
7676 rabbit_topic_trie_binding]]
7777 end,
78 [begin
79 Path = [{FinalNode, _} | _] =
80 follow_down_get_path(X, split_topic_key(K)),
81 trie_remove_binding(X, FinalNode, D, Args),
82 remove_path_if_empty(X, Path)
78 [case follow_down_get_path(X, split_topic_key(K)) of
79 {ok, Path = [{FinalNode, _} | _]} ->
80 trie_remove_binding(X, FinalNode, D, Args),
81 remove_path_if_empty(X, Path);
82 {error, _Node, _RestW} ->
83 %% We're trying to remove a binding that no longer exists.
84 %% That's unexpected, but shouldn't be a problem.
85 ok
8386 end || #binding{source = X, key = K, destination = D, args = Args} <- Bs],
8487 ok;
8588 remove_bindings(none, _X, _Bs) ->
136139 follow_down(X, fun (_, Node, _) -> Node end, root, Words).
137140
138141 follow_down_get_path(X, Words) ->
139 {ok, Path} =
140 follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
141 [{root, none}], Words),
142 Path.
142 follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
143 [{root, none}], Words).
143144
144145 follow_down(X, AccFun, Acc0, Words) ->
145146 follow_down(X, root, AccFun, Acc0, Words).
368368 handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
369369 noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
370370
371 handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) ->
372 {stop, normal, State};
371373 handle_cast({delete_and_terminate, Reason}, State) ->
372374 {stop, Reason, State}.
373375
415417 ok = gen_server2:cast(CPid, Msg);
416418 handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
417419 ok = gen_server2:cast(CPid, Msg);
418 handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
419 ok = gen_server2:cast(CPid, Msg),
420 handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
421 %% We tell GM to stop, but we don't instruct the coordinator to
422 %% stop yet. The GM will first make sure all pending messages were
423 %% actually delivered. Then it calls handle_terminate/2 below so the
424 %% coordinator is stopped.
425 %%
426 %% If we stop the coordinator right now, remote slaves could see the
427 %% coordinator DOWN before delete_and_terminate was delivered to all
428 %% GMs. One of those GMs would be promoted as the master, and this GM
429 %% would hang forever, waiting for other GMs to stop.
420430 {stop, {shutdown, ring_shutdown}};
421431 handle_msg([_CPid], _From, _Msg) ->
422432 ok.
423433
424 handle_terminate([_CPid], _Reason) ->
434 handle_terminate([CPid], Reason) ->
435 ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}),
425436 ok.
426437
427438 %% ---------------------------------------------------------------------------
283283 {SPid, SPids}.
284284
285285 initial_queue_node(Q, DefNode) ->
286 {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, all_nodes()),
286 {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, rabbit_nodes:all_running()),
287287 MNode.
288288
289 suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, all_nodes()).
289 suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, rabbit_nodes:all_running()).
290290 suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All).
291291
292292 %% The third argument exists so we can pull a call to
307307 end;
308308 _ -> {MNode, []}
309309 end.
310
311 all_nodes() -> rabbit_mnesia:cluster_nodes(running).
312310
313311 policy(Policy, Q) ->
314312 case rabbit_policy:get(Policy, Q) of
256256 State) ->
257257 %% Asynchronous, non-"mandatory", deliver mode.
258258 case Flow of
259 %% We are acking messages to the channel process that sent us
260 %% the message delivery. See
261 %% rabbit_amqqueue_process:handle_ch_down for more info.
259262 flow -> credit_flow:ack(Sender);
260263 noflow -> ok
261264 end,
100100 ensure_mnesia_running(),
101101 ensure_mnesia_dir(),
102102 case is_virgin_node() of
103 true -> init_from_config();
104 false -> NodeType = node_type(),
105 init_db_and_upgrade(cluster_nodes(all), NodeType,
106 NodeType =:= ram)
103 true ->
104 rabbit_log:info("Database directory at ~s is empty. Initialising from scratch... ~n",
105 [dir()]),
106 init_from_config();
107 false ->
108 NodeType = node_type(),
109 init_db_and_upgrade(cluster_nodes(all), NodeType,
110 NodeType =:= ram)
107111 end,
108112 %% We intuitively expect the global name server to be synced when
109113 %% Mnesia is up. In fact that's not guaranteed to be the case -
7676 %% to callbacks
7777 successfully_recovered, %% boolean: did we recover state?
7878 file_size_limit, %% how big are our files allowed to get?
79 cref_to_msg_ids %% client ref to synced messages mapping
79 cref_to_msg_ids, %% client ref to synced messages mapping
80 credit_disc_bound %% See rabbit.hrl CREDIT_DISC_BOUND
8081 }).
8182
8283 -record(client_msstate,
9091 file_handles_ets,
9192 file_summary_ets,
9293 cur_file_cache_ets,
93 flying_ets
94 flying_ets,
95 credit_disc_bound
9496 }).
9597
9698 -record(file_summary,
133135 file_handles_ets :: ets:tid(),
134136 file_summary_ets :: ets:tid(),
135137 cur_file_cache_ets :: ets:tid(),
136 flying_ets :: ets:tid()}).
138 flying_ets :: ets:tid(),
139 credit_disc_bound :: {pos_integer(), pos_integer()}}).
137140 -type(msg_ref_delta_gen(A) ::
138141 fun ((A) -> 'finished' |
139142 {rabbit_types:msg_id(), non_neg_integer(), A})).
441444 gen_server2:call(
442445 Server, {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun},
443446 infinity),
447 CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
448 ?CREDIT_DISC_BOUND),
444449 #client_msstate { server = Server,
445450 client_ref = Ref,
446451 file_handle_cache = dict:new(),
451456 file_handles_ets = FileHandlesEts,
452457 file_summary_ets = FileSummaryEts,
453458 cur_file_cache_ets = CurFileCacheEts,
454 flying_ets = FlyingEts }.
459 flying_ets = FlyingEts,
460 credit_disc_bound = CreditDiscBound }.
455461
456462 client_terminate(CState = #client_msstate { client_ref = Ref }) ->
457463 close_all_handles(CState),
464470
465471 client_ref(#client_msstate { client_ref = Ref }) -> Ref.
466472
467 write_flow(MsgId, Msg, CState = #client_msstate { server = Server }) ->
468 credit_flow:send(whereis(Server), ?CREDIT_DISC_BOUND),
473 write_flow(MsgId, Msg,
474 CState = #client_msstate {
475 server = Server,
476 credit_disc_bound = CreditDiscBound }) ->
477 %% Here we are tracking messages sent by the
478 %% rabbit_amqqueue_process process via the
479 %% rabbit_variable_queue. We are accessing the
480 %% rabbit_amqqueue_process process dictionary.
481 credit_flow:send(whereis(Server), CreditDiscBound),
469482 client_write(MsgId, Msg, flow, CState).
470483
471484 write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
708721 msg_store = self()
709722 }),
710723
724 CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
725 ?CREDIT_DISC_BOUND),
726
711727 State = #msstate { dir = Dir,
712728 index_module = IndexModule,
713729 index_state = IndexState,
727743 clients = Clients,
728744 successfully_recovered = CleanShutdown,
729745 file_size_limit = FileSizeLimit,
730 cref_to_msg_ids = dict:new()
746 cref_to_msg_ids = dict:new(),
747 credit_disc_bound = CreditDiscBound
731748 },
732749
733750 %% If we didn't recover the msg location index then we need to
811828
812829 handle_cast({write, CRef, MsgId, Flow},
813830 State = #msstate { cur_file_cache_ets = CurFileCacheEts,
814 clients = Clients }) ->
831 clients = Clients,
832 credit_disc_bound = CreditDiscBound }) ->
815833 case Flow of
816834 flow -> {CPid, _, _} = dict:fetch(CRef, Clients),
817 credit_flow:ack(CPid, ?CREDIT_DISC_BOUND);
835 %% We are going to process a message sent by the
836 %% rabbit_amqqueue_process. Now we are accessing the
837 %% msg_store process dictionary.
838 credit_flow:ack(CPid, CreditDiscBound);
818839 noflow -> ok
819840 end,
820841 true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}),
889910 noreply(internal_sync(State));
890911
891912 handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
913 %% similar to what happens in
914 %% rabbit_amqqueue_process:handle_ch_down but with a relation of
915 %% msg_store -> rabbit_amqqueue_process instead of
916 %% rabbit_amqqueue_process -> rabbit_channel.
892917 credit_flow:peer_down(Pid),
893918 noreply(State);
894919
474474 cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).
475475
476476 tcp_opts() ->
477 {ok, Opts} = application:get_env(rabbit, tcp_listen_options),
478 Opts.
477 {ok, ConfigOpts} = application:get_env(rabbit, tcp_listen_options),
478 merge_essential_tcp_listen_options(ConfigOpts).
479
480 -define(ESSENTIAL_LISTEN_OPTIONS,
481 [binary,
482 {active, false},
483 {packet, raw},
484 {reuseaddr, true},
485 {nodelay, true}]).
486
487 merge_essential_tcp_listen_options(Opts) ->
488 lists:foldl(fun ({K, _} = Opt, Acc) ->
489 lists:keystore(K, 1, Acc, Opt);
490 (Opt, Acc) ->
491 [Opt | Acc]
492 end , Opts, ?ESSENTIAL_LISTEN_OPTIONS).
479493
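A worked example of the merge above (the configured options are hypothetical): essential options are added when missing and override conflicting user-supplied values.

%% merge_essential_tcp_listen_options([{backlog, 128}, {nodelay, false}])
%% => [binary, {backlog, 128}, {nodelay, true},
%%     {active, false}, {packet, raw}, {reuseaddr, true}]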
480494 %% inet_parse:address takes care of ip string, like "0.0.0.0"
481495 %% inet:getaddr returns immediately for ip tuple {0,0,0,0},
1717
1818 -export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
1919 is_running/2, is_process_running/2,
20 cluster_name/0, set_cluster_name/1, ensure_epmd/0]).
20 cluster_name/0, set_cluster_name/1, ensure_epmd/0,
21 all_running/0]).
2122
2223 -include_lib("kernel/include/inet.hrl").
2324
4142 -spec(cluster_name/0 :: () -> binary()).
4243 -spec(set_cluster_name/1 :: (binary()) -> 'ok').
4344 -spec(ensure_epmd/0 :: () -> 'ok').
45 -spec(all_running/0 :: () -> [node()]).
4446
4547 -endif.
4648
214216 {Port, {exit_status, _Rc}} -> ok;
215217 {Port, _} -> port_shutdown_loop(Port)
216218 end.
219
220 all_running() -> rabbit_mnesia:cluster_nodes(running).
2626 {enable, [?OFFLINE_DEF, ?ONLINE_DEF]},
2727 {disable, [?OFFLINE_DEF, ?ONLINE_DEF]},
2828 {set, [?OFFLINE_DEF, ?ONLINE_DEF]},
29 {sync, []}]).
29 {sync, []},
30 {help, []}]).
3031
3132 %%----------------------------------------------------------------------------
3233
146147 action_change(Opts, Node, Implicit, NewImplicit, State);
147148
148149 action(sync, Node, [], _Opts, State) ->
149 sync(Node, true, State).
150 sync(Node, true, State);
151
152 action(help, _Node, _Args, _Opts, _State) ->
153 io:format("~s", [rabbit_plugins_usage:usage()]).
150154
151155 %%----------------------------------------------------------------------------
152156
112112 case os:getenv("RABBITMQ_DIST_PORT") of
113113 false -> ok;
114114 PortStr -> Port = list_to_integer(PortStr),
115 case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of
116 {ok, Sock} -> gen_tcp:close(Sock);
117 {error, _} -> dist_port_use_check_fail(Port, NodeHost)
118 end
115 dist_port_use_check_ipv4(NodeHost, Port)
116 end.
117
118 dist_port_use_check_ipv4(NodeHost, Port) ->
119 case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of
120 {ok, Sock} -> gen_tcp:close(Sock);
121 {error, einval} -> dist_port_use_check_ipv6(NodeHost, Port);
122 {error, _} -> dist_port_use_check_fail(Port, NodeHost)
123 end.
124
125 dist_port_use_check_ipv6(NodeHost, Port) ->
126 case gen_tcp:listen(Port, [inet6, {reuseaddr, true}]) of
127 {ok, Sock} -> gen_tcp:close(Sock);
128 {error, _} -> dist_port_use_check_fail(Port, NodeHost)
119129 end.
120130
121131 -ifdef(use_specs).
278278 subtract_acks(TL, Prefix,
279279 orddict:update_counter(CTag, 1, CTagCounts), QTail);
280280 {{value, V}, QTail} ->
281 subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail)
281 subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail);
282 {empty, _} ->
283 subtract_acks([], Prefix, CTagCounts, AckQ)
282284 end.
283285
284286 possibly_unblock(Update, ChPid, State) ->
0 %% The contents of this file are subject to the Mozilla Public License
1 %% Version 1.1 (the "License"); you may not use this file except in
2 %% compliance with the License. You may obtain a copy of the License
3 %% at http://www.mozilla.org/MPL/
4 %%
5 %% Software distributed under the License is distributed on an "AS IS"
6 %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
7 %% the License for the specific language governing rights and
8 %% limitations under the License.
9 %%
10 %% The Original Code is RabbitMQ.
11 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
13 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
14 %%
15
016 -module(rabbit_queue_decorator).
117
218 -include("rabbit.hrl").
2541 -export([behaviour_info/1]).
2642
2743 behaviour_info(callbacks) ->
28 [{description, 0}, {startup, 1}, {shutdown, 1}, {policy_changed, 2},
44 [{startup, 1}, {shutdown, 1}, {policy_changed, 2},
2945 {active_for, 1}, {consumer_state_changed, 3}];
3046 behaviour_info(_Other) ->
3147 undefined.
1515
1616 -module(rabbit_queue_index).
1717
18 -export([erase/1, init/3, recover/6,
18 -export([erase/1, init/3, reset_state/1, recover/6,
1919 terminate/2, delete_and_terminate/1,
20 pre_publish/7, flush_pre_publish_cache/2,
2021 publish/6, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
2122 read/3, next_segment_boundary/1, bounds/1, start/1, stop/0]).
2223
126127 %% binary generation/matching with constant vs variable lengths.
127128
128129 -define(REL_SEQ_BITS, 14).
129 -define(SEGMENT_ENTRY_COUNT, 16384). %% trunc(math:pow(2,?REL_SEQ_BITS))).
130 %% calculated as trunc(math:pow(2, ?REL_SEQ_BITS)).
131 -define(SEGMENT_ENTRY_COUNT, 16384).
130132
131133 %% seq only is binary 01 followed by 14 bits of rel seq id
132134 %% (range: 0 - 16383)
175177
176178 -record(qistate, {dir, segments, journal_handle, dirty_count,
177179 max_journal_entries, on_sync, on_sync_msg,
178 unconfirmed, unconfirmed_msg}).
179
180 -record(segment, {num, path, journal_entries, unacked}).
180 unconfirmed, unconfirmed_msg,
181 pre_publish_cache, delivered_cache}).
182
183 -record(segment, {num, path, journal_entries,
184 entries_to_segment, unacked}).
181185
182186 -include("rabbit.hrl").
183187
192196
193197 -type(hdl() :: ('undefined' | any())).
194198 -type(segment() :: ('undefined' |
195 #segment { num :: non_neg_integer(),
196 path :: file:filename(),
197 journal_entries :: array:array(),
198 unacked :: non_neg_integer()
199 #segment { num :: non_neg_integer(),
200 path :: file:filename(),
201 journal_entries :: array:array(),
202 entries_to_segment :: array:array(),
203 unacked :: non_neg_integer()
199204 })).
200205 -type(seq_id() :: integer()).
201206 -type(seg_dict() :: {dict:dict(), [segment()]}).
208213 on_sync :: on_sync_fun(),
209214 on_sync_msg :: on_sync_fun(),
210215 unconfirmed :: gb_sets:set(),
211 unconfirmed_msg :: gb_sets:set()
216 unconfirmed_msg :: gb_sets:set(),
217 pre_publish_cache :: list(),
218 delivered_cache :: list()
212219 }).
213220 -type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())).
214221 -type(walker(A) :: fun ((A) -> 'finished' |
216223 -type(shutdown_terms() :: [term()] | 'non_clean_shutdown').
217224
218225 -spec(erase/1 :: (rabbit_amqqueue:name()) -> 'ok').
226 -spec(reset_state/1 :: (qistate()) -> qistate()).
219227 -spec(init/3 :: (rabbit_amqqueue:name(),
220228 on_sync_fun(), on_sync_fun()) -> qistate()).
221229 -spec(recover/6 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
253261
254262 erase(Name) ->
255263 #qistate { dir = Dir } = blank_state(Name),
256 case rabbit_file:is_dir(Dir) of
257 true -> rabbit_file:recursive_delete([Dir]);
258 false -> ok
259 end.
264 erase_index_dir(Dir).
265
266 %% used during variable queue purge when there are no pending acks
267 reset_state(#qistate{ dir = Dir,
268 on_sync = OnSyncFun,
269 on_sync_msg = OnSyncMsgFun,
270 journal_handle = JournalHdl }) ->
271 ok = case JournalHdl of
272 undefined -> ok;
273 _ -> file_handle_cache:close(JournalHdl)
274 end,
275 ok = erase_index_dir(Dir),
276 blank_state_dir_funs(Dir, OnSyncFun, OnSyncMsgFun).
260277
261278 init(Name, OnSyncFun, OnSyncMsgFun) ->
262279 State = #qistate { dir = Dir } = blank_state(Name),
286303 {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
287304 ok = rabbit_file:recursive_delete([Dir]),
288305 State1.
306
307 pre_publish(MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, JournalSizeHint,
308 State = #qistate{unconfirmed = UC,
309 unconfirmed_msg = UCM,
310 pre_publish_cache = PPC,
311 delivered_cache = DC}) ->
312 MsgId = case MsgOrId of
313 #basic_message{id = Id} -> Id;
314 Id when is_binary(Id) -> Id
315 end,
316 ?MSG_ID_BYTES = size(MsgId),
317
318 State1 =
319 case {MsgProps#message_properties.needs_confirming, MsgOrId} of
320 {true, MsgId} -> UC1 = gb_sets:add_element(MsgId, UC),
321 State#qistate{unconfirmed = UC1};
322 {true, _} -> UCM1 = gb_sets:add_element(MsgId, UCM),
323 State#qistate{unconfirmed_msg = UCM1};
324 {false, _} -> State
325 end,
326
327 {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
328
329 PPC1 =
330 [[<<(case IsPersistent of
331 true -> ?PUB_PERSIST_JPREFIX;
332 false -> ?PUB_TRANS_JPREFIX
333 end):?JPREFIX_BITS,
334 SeqId:?SEQ_BITS, Bin/binary,
335 (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin], PPC],
336
337 DC1 =
338 case IsDelivered of
339 true ->
340 [SeqId | DC];
341 false ->
342 DC
343 end,
344
345 add_to_journal(SeqId, {IsPersistent, Bin, MsgBin},
346 maybe_flush_pre_publish_cache(
347 JournalSizeHint,
348 State1#qistate{pre_publish_cache = PPC1,
349 delivered_cache = DC1})).
350
351 %% pre_publish_cache is the cache with the most elements when compared
352 %% to delivered_cache, so we only check the former in the guard.
353 maybe_flush_pre_publish_cache(JournalSizeHint,
354 #qistate{pre_publish_cache = PPC} = State)
355 when length(PPC) >= ?SEGMENT_ENTRY_COUNT ->
356 flush_pre_publish_cache(JournalSizeHint, State);
357 maybe_flush_pre_publish_cache(_JournalSizeHint, State) ->
358 State.
359
360 flush_pre_publish_cache(JournalSizeHint, State) ->
361 State1 = flush_pre_publish_cache(State),
362 State2 = flush_delivered_cache(State1),
363 maybe_flush_journal(JournalSizeHint, State2).
364
365 flush_pre_publish_cache(#qistate{pre_publish_cache = []} = State) ->
366 State;
367 flush_pre_publish_cache(State = #qistate{pre_publish_cache = PPC}) ->
368 {JournalHdl, State1} = get_journal_handle(State),
369 file_handle_cache_stats:update(queue_index_journal_write),
370 ok = file_handle_cache:append(JournalHdl, lists:reverse(PPC)),
371 State1#qistate{pre_publish_cache = []}.
372
373 flush_delivered_cache(#qistate{delivered_cache = []} = State) ->
374 State;
375 flush_delivered_cache(State = #qistate{delivered_cache = DC}) ->
376 State1 = deliver(lists:reverse(DC), State),
377 State1#qistate{delivered_cache = []}.
289378
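A hedged usage sketch from the caller's perspective (the helper and the publish-tuple shape are illustrative; the real caller is rabbit_variable_queue): publishes are accumulated with pre_publish/7 and then written to the journal in a single append via flush_pre_publish_cache/2.

batch_publish(Publishes, JournalSizeHint, QI0) -> %% hypothetical helper
    QI1 = lists:foldl(
            fun ({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}, QI) ->
                    pre_publish(MsgId, SeqId, MsgProps, IsPersistent,
                                IsDelivered, JournalSizeHint, QI)
            end, QI0, Publishes),
    flush_pre_publish_cache(JournalSizeHint, QI1).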
290379 publish(MsgOrId, SeqId, MsgProps, IsPersistent, JournalSizeHint,
291380 State = #qistate{unconfirmed = UC,
427516 %% startup and shutdown
428517 %%----------------------------------------------------------------------------
429518
519 erase_index_dir(Dir) ->
520 case rabbit_file:is_dir(Dir) of
521 true -> rabbit_file:recursive_delete([Dir]);
522 false -> ok
523 end.
524
430525 blank_state(QueueName) ->
431526 blank_state_dir(
432527 filename:join(queues_dir(), queue_name_to_dir_name(QueueName))).
433528
434529 blank_state_dir(Dir) ->
530 blank_state_dir_funs(Dir,
531 fun (_) -> ok end,
532 fun (_) -> ok end).
533
534 blank_state_dir_funs(Dir, OnSyncFun, OnSyncMsgFun) ->
435535 {ok, MaxJournal} =
436536 application:get_env(rabbit, queue_index_max_journal_entries),
437537 #qistate { dir = Dir,
439539 journal_handle = undefined,
440540 dirty_count = 0,
441541 max_journal_entries = MaxJournal,
442 on_sync = fun (_) -> ok end,
443 on_sync_msg = fun (_) -> ok end,
542 on_sync = OnSyncFun,
543 on_sync_msg = OnSyncMsgFun,
444544 unconfirmed = gb_sets:new(),
445 unconfirmed_msg = gb_sets:new() }.
545 unconfirmed_msg = gb_sets:new(),
546 pre_publish_cache = [],
547 delivered_cache = [] }.
446548
447549 init_clean(RecoveredCounts, State) ->
448550 %% Load the journal. Since this is a clean recovery this (almost)
648750
649751 add_to_journal(RelSeq, Action,
650752 Segment = #segment { journal_entries = JEntries,
753 entries_to_segment = EToSeg,
651754 unacked = UnackedCount }) ->
755
756 {Fun, Entry} = action_to_entry(RelSeq, Action, JEntries),
757
758 {JEntries1, EToSeg1} =
759 case Fun of
760 set ->
761 {array:set(RelSeq, Entry, JEntries),
762 array:set(RelSeq, entry_to_segment(RelSeq, Entry, []),
763 EToSeg)};
764 reset ->
765 {array:reset(RelSeq, JEntries),
766 array:reset(RelSeq, EToSeg)}
767 end,
768
652769 Segment #segment {
653 journal_entries = add_to_journal(RelSeq, Action, JEntries),
770 journal_entries = JEntries1,
771 entries_to_segment = EToSeg1,
654772 unacked = UnackedCount + case Action of
655773 ?PUB -> +1;
656774 del -> 0;
657775 ack -> -1
658 end};
659
660 add_to_journal(RelSeq, Action, JEntries) ->
776 end}.
777
778 action_to_entry(RelSeq, Action, JEntries) ->
661779 case array:get(RelSeq, JEntries) of
662780 undefined ->
663 array:set(RelSeq,
664 case Action of
665 ?PUB -> {Action, no_del, no_ack};
666 del -> {no_pub, del, no_ack};
667 ack -> {no_pub, no_del, ack}
668 end, JEntries);
781 {set,
782 case Action of
783 ?PUB -> {Action, no_del, no_ack};
784 del -> {no_pub, del, no_ack};
785 ack -> {no_pub, no_del, ack}
786 end};
669787 ({Pub, no_del, no_ack}) when Action == del ->
670 array:set(RelSeq, {Pub, del, no_ack}, JEntries);
788 {set, {Pub, del, no_ack}};
671789 ({no_pub, del, no_ack}) when Action == ack ->
672 array:set(RelSeq, {no_pub, del, ack}, JEntries);
790 {set, {no_pub, del, ack}};
673791 ({?PUB, del, no_ack}) when Action == ack ->
674 array:reset(RelSeq, JEntries)
792 {reset, none}
675793 end.
676794
677795 maybe_flush_journal(State) ->
702820 notify_sync(State1 #qistate { dirty_count = 0 }).
703821
704822 append_journal_to_segment(#segment { journal_entries = JEntries,
823 entries_to_segment = EToSeg,
705824 path = Path } = Segment) ->
706825 case array:sparse_size(JEntries) of
707826 0 -> Segment;
708 _ -> Seg = array:sparse_foldr(
709 fun entry_to_segment/3, [], JEntries),
710 file_handle_cache_stats:update(queue_index_write),
711
712 {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
713 [{write_buffer, infinity}]),
714 file_handle_cache:append(Hdl, Seg),
715 ok = file_handle_cache:close(Hdl),
716 Segment #segment { journal_entries = array_new() }
827 _ ->
828 file_handle_cache_stats:update(queue_index_write),
829
830 {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
831 [{write_buffer, infinity}]),
832 %% the file_handle_cache also does a list reverse, so this
833 %% might not be required here, but since a sparse_foldr was used
834 %% before, a lists:reverse/1 seems to be the correct thing to do
835 %% for now.
836 file_handle_cache:append(Hdl, lists:reverse(array:to_list(EToSeg))),
837 ok = file_handle_cache:close(Hdl),
838 Segment #segment { journal_entries = array_new(),
839 entries_to_segment = array_new([]) }
717840 end.
718841
719842 get_journal_handle(State = #qistate { journal_handle = undefined,
746869 Segments1 =
747870 segment_map(
748871 fun (Segment = #segment { journal_entries = JEntries,
872 entries_to_segment = EToSeg,
749873 unacked = UnackedCountInJournal }) ->
750874 %% We want to keep ack'd entries in so that we can
751875 %% remove them if duplicates are in the journal. The
752876 %% counts here are purely from the segment itself.
753877 {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
754 {JEntries1, UnackedCountDuplicates} =
755 journal_minus_segment(JEntries, SegEntries),
878 {JEntries1, EToSeg1, UnackedCountDuplicates} =
879 journal_minus_segment(JEntries, EToSeg, SegEntries),
756880 Segment #segment { journal_entries = JEntries1,
881 entries_to_segment = EToSeg1,
757882 unacked = (UnackedCountInJournal +
758883 UnackedCountInSeg -
759884 UnackedCountDuplicates) }
840965 {ok, Segment} -> Segment;
841966 error -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
842967 Path = filename:join(Dir, SegName),
843 #segment { num = Seg,
844 path = Path,
845 journal_entries = array_new(),
846 unacked = 0 }
968 #segment { num = Seg,
969 path = Path,
970 journal_entries = array_new(),
971 entries_to_segment = array_new([]),
972 unacked = 0 }
847973 end.
848974
849975 segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
8831009 segments_new() ->
8841010 {dict:new(), []}.
8851011
886 entry_to_segment(_RelSeq, {?PUB, del, ack}, Buf) ->
887 Buf;
888 entry_to_segment(RelSeq, {Pub, Del, Ack}, Buf) ->
1012 entry_to_segment(_RelSeq, {?PUB, del, ack}, Initial) ->
1013 Initial;
1014 entry_to_segment(RelSeq, {Pub, Del, Ack}, Initial) ->
8891015 %% NB: we are assembling the segment in reverse order here, so
8901016 %% del/ack comes first.
8911017 Buf1 = case {Del, Ack} of
8921018 {no_del, no_ack} ->
893 Buf;
1019 Initial;
8941020 _ ->
8951021 Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
8961022 RelSeq:?REL_SEQ_BITS>>,
8971023 case {Del, Ack} of
898 {del, ack} -> [[Binary, Binary] | Buf];
899 _ -> [Binary | Buf]
1024 {del, ack} -> [[Binary, Binary] | Initial];
1025 _ -> [Binary | Initial]
9001026 end
9011027 end,
9021028 case Pub of
9851111 end.
9861112
9871113 array_new() ->
988 array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
1114 array_new(undefined).
1115
1116 array_new(Default) ->
1117 array:new([{default, Default}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
9891118
9901119 bool_to_int(true ) -> 1;
9911120 bool_to_int(false) -> 0.
10311160 %% Remove from the journal entries for a segment, items that are
10321161 %% duplicates of entries found in the segment itself. Used on start up
10331162 %% to clean up the journal.
1034 journal_minus_segment(JEntries, SegEntries) ->
1163 %%
1164 %% We need to update the entries_to_segment since they are just a
1165 %% cache of what's on the journal.
1166 journal_minus_segment(JEntries, EToSeg, SegEntries) ->
10351167 array:sparse_foldl(
1036 fun (RelSeq, JObj, {JEntriesOut, UnackedRemoved}) ->
1168 fun (RelSeq, JObj, {JEntriesOut, EToSegOut, UnackedRemoved}) ->
10371169 SegEntry = array:get(RelSeq, SegEntries),
10381170 {Obj, UnackedRemovedDelta} =
10391171 journal_minus_segment1(JObj, SegEntry),
1040 {case Obj of
1041 keep -> JEntriesOut;
1042 undefined -> array:reset(RelSeq, JEntriesOut);
1043 _ -> array:set(RelSeq, Obj, JEntriesOut)
1044 end,
1045 UnackedRemoved + UnackedRemovedDelta}
1046 end, {JEntries, 0}, JEntries).
1172 {JEntriesOut1, EToSegOut1} =
1173 case Obj of
1174 keep ->
1175 {JEntriesOut, EToSegOut};
1176 undefined ->
1177 {array:reset(RelSeq, JEntriesOut),
1178 array:reset(RelSeq, EToSegOut)};
1179 _ ->
1180 {array:set(RelSeq, Obj, JEntriesOut),
1181 array:set(RelSeq, entry_to_segment(RelSeq, Obj, []),
1182 EToSegOut)}
1183 end,
1184 {JEntriesOut1, EToSegOut1, UnackedRemoved + UnackedRemovedDelta}
1185 end, {JEntries, EToSeg, 0}, JEntries).
10471186
10481187 %% Here, the result is a tuple with the first element containing the
10491188 %% item we are adding to or modifying in the (initially fresh) journal
477477 handle_other(emit_stats, State) ->
478478 emit_stats(State);
479479 handle_other({bump_credit, Msg}, State) ->
480 %% Here we are receiving credit by some channel process.
480481 credit_flow:handle_bump_msg(Msg),
481482 control_throttle(State);
482483 handle_other(Other, State) ->
10421043 validate_negotiated_integer_value(Field, Min, ClientValue) ->
10431044 ServerValue = get_env(Field),
10441045 if ClientValue /= 0 andalso ClientValue < Min ->
1045 fail_negotiation(Field, min, ServerValue, ClientValue);
1046 fail_negotiation(Field, min, Min, ClientValue);
10461047 ServerValue /= 0 andalso (ClientValue =:= 0 orelse
10471048 ClientValue > ServerValue) ->
10481049 fail_negotiation(Field, max, ServerValue, ClientValue);
188188
189189 check(Fun) ->
190190 case [Error || {Tab, TabDef} <- definitions(),
191 case Fun(Tab, TabDef) of
192 ok -> Error = none, false;
193 {error, Error} -> true
191 begin
192 {Ret, Error} = case Fun(Tab, TabDef) of
193 ok -> {false, none};
194 {error, E} -> {true, E}
195 end,
196 Ret
194197 end] of
195198 [] -> ok;
196199 Errors -> {error, Errors}
272272 msg_store_clients,
273273 durable,
274274 transient_threshold,
275 qi_embed_msgs_below,
275276
276277 len, %% w/o unacked
277278 bytes, %% w/o unacked
296297 %% Unlike the other counters these two do not feed into
297298 %% #rates{} and get reset
298299 disk_read_count,
299 disk_write_count
300 disk_write_count,
301
302 io_batch_size
300303 }).
301304
302305 -record(rates, { in, out, ack_in, ack_out, timestamp }).
319322 end_seq_id %% end_seq_id is exclusive
320323 }).
321324
322 %% When we discover that we should write some indices to disk for some
323 %% betas, the IO_BATCH_SIZE sets the number of betas that we must be
324 %% due to write indices for before we do any work at all.
325 -define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND
326325 -define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
327326 -define(PERSISTENT_MSG_STORE, msg_store_persistent).
328327 -define(TRANSIENT_MSG_STORE, msg_store_transient).
372371 {any(), binary()}},
373372 durable :: boolean(),
374373 transient_threshold :: non_neg_integer(),
374 qi_embed_msgs_below :: non_neg_integer(),
375375
376376 len :: non_neg_integer(),
377377 bytes :: non_neg_integer(),
395395 ack_out_counter :: non_neg_integer(),
396396 ack_in_counter :: non_neg_integer(),
397397 disk_read_count :: non_neg_integer(),
398 disk_write_count :: non_neg_integer() }).
398 disk_write_count :: non_neg_integer(),
399
400 io_batch_size :: pos_integer()}).
399401 %% Duplicated from rabbit_backing_queue
400402 -spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
401403
530532 %% the only difference between purge and delete is that delete also
531533 %% needs to delete everything that's been delivered and not ack'd.
532534 delete_and_terminate(_Reason, State) ->
533 %% TODO: there is no need to interact with qi at all - which we do
534 %% as part of 'purge' and 'purge_pending_ack', other than
535 %% deleting it.
536 {_PurgeCount, State1} = purge(State),
537 State2 = #vqstate { index_state = IndexState,
538 msg_store_clients = {MSCStateP, MSCStateT} } =
539 purge_pending_ack(false, State1),
540 IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
535 %% Normally when we purge messages we interact with the qi by
536 %% issues delivers and acks for every purged message. In this case
537 %% we don't need to do that, so we just delete the qi.
538 State1 = purge_and_index_reset(State),
539 State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
540 purge_pending_ack_delete_and_terminate(State1),
541541 case MSCStateP of
542542 undefined -> ok;
543543 _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
544544 end,
545545 rabbit_msg_store:client_delete_and_terminate(MSCStateT),
546 a(State2 #vqstate { index_state = IndexState1,
547 msg_store_clients = undefined }).
546 a(State2 #vqstate { msg_store_clients = undefined }).
548547
549548 delete_crashed(#amqqueue{name = QName}) ->
550549 ok = rabbit_queue_index:erase(QName).
551550
552 purge(State = #vqstate { q4 = Q4,
553 len = Len }) ->
554 %% TODO: when there are no pending acks, which is a common case,
555 %% we could simply wipe the qi instead of issuing delivers and
556 %% acks for all the messages.
557 State1 = remove_queue_entries(Q4, State),
558
559 State2 = #vqstate { q1 = Q1 } =
560 purge_betas_and_deltas(State1 #vqstate { q4 = ?QUEUE:new() }),
561
562 State3 = remove_queue_entries(Q1, State2),
563
564 {Len, a(State3 #vqstate { q1 = ?QUEUE:new() })}.
551 purge(State = #vqstate { len = Len }) ->
552 case is_pending_ack_empty(State) of
553 true ->
554 {Len, purge_and_index_reset(State)};
555 false ->
556 {Len, purge_when_pending_acks(State)}
557 end.
565558
566559 purge_acks(State) -> a(purge_pending_ack(false, State)).
567560
569562 MsgProps = #message_properties { needs_confirming = NeedsConfirming },
570563 IsDelivered, _ChPid, _Flow,
571564 State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
572 next_seq_id = SeqId,
573 in_counter = InCount,
574 durable = IsDurable,
575 unconfirmed = UC }) ->
565 qi_embed_msgs_below = IndexMaxSize,
566 next_seq_id = SeqId,
567 in_counter = InCount,
568 durable = IsDurable,
569 unconfirmed = UC }) ->
576570 IsPersistent1 = IsDurable andalso IsPersistent,
577 MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps),
571 MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
578572 {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
579573 State2 = case ?QUEUE:is_empty(Q3) of
580574 false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
593587 MsgProps = #message_properties {
594588 needs_confirming = NeedsConfirming },
595589 _ChPid, _Flow,
596 State = #vqstate { next_seq_id = SeqId,
597 out_counter = OutCount,
598 in_counter = InCount,
599 durable = IsDurable,
600 unconfirmed = UC }) ->
590 State = #vqstate { qi_embed_msgs_below = IndexMaxSize,
591 next_seq_id = SeqId,
592 out_counter = OutCount,
593 in_counter = InCount,
594 durable = IsDurable,
595 unconfirmed = UC }) ->
601596 IsPersistent1 = IsDurable andalso IsPersistent,
602 MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps),
597 MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
603598 {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
604599 State2 = record_pending_ack(m(MsgStatus1), State1),
605600 UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
620615 end.
621616
622617 dropwhile(Pred, State) ->
623 case queue_out(State) of
624 {empty, State1} ->
625 {undefined, a(State1)};
626 {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
627 case Pred(MsgProps) of
628 true -> {_, State2} = remove(false, MsgStatus, State1),
629 dropwhile(Pred, State2);
630 false -> {MsgProps, a(in_r(MsgStatus, State1))}
631 end
632 end.
618 {MsgProps, State1} =
619 remove_by_predicate(Pred, State),
620 {MsgProps, a(State1)}.
633621
634622 fetchwhile(Pred, Fun, Acc, State) ->
635 case queue_out(State) of
636 {empty, State1} ->
637 {undefined, Acc, a(State1)};
638 {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
639 case Pred(MsgProps) of
640 true -> {Msg, State2} = read_msg(MsgStatus, State1),
641 {AckTag, State3} = remove(true, MsgStatus, State2),
642 fetchwhile(Pred, Fun, Fun(Msg, AckTag, Acc), State3);
643 false -> {MsgProps, Acc, a(in_r(MsgStatus, State1))}
644 end
645 end.
623 {MsgProps, Acc1, State1} =
624 fetch_by_predicate(Pred, Fun, Acc, State),
625 {MsgProps, Acc1, a(State1)}.
646626
647627 fetch(AckRequired, State) ->
648628 case queue_out(State) of
700680 {accumulate_ack(MsgStatus, Acc), State3}
701681 end, {accumulate_ack_init(), State}, AckTags),
702682 IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
703 [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
704 || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
683 remove_msgs_by_id(MsgIdsByStore, MSCState),
705684 {lists:reverse(AllMsgIds),
706685 a(State1 #vqstate { index_state = IndexState1,
707686 ack_out_counter = AckOutCount + length(AckTags) })}.
749728
750729 is_empty(State) -> 0 == len(State).
751730
752 depth(State = #vqstate { ram_pending_ack = RPA,
753 disk_pending_ack = DPA,
754 qi_pending_ack = QPA }) ->
755 len(State) + gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
731 depth(State) ->
732 len(State) + count_pending_acks(State).
756733
757734 set_ram_duration_target(
758735 DurationTarget, State = #vqstate {
967944 gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set).
968945
969946 msg_status(IsPersistent, IsDelivered, SeqId,
970 Msg = #basic_message {id = MsgId}, MsgProps) ->
947 Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) ->
971948 #msg_status{seq_id = SeqId,
972949 msg_id = MsgId,
973950 msg = Msg,
975952 is_delivered = IsDelivered,
976953 msg_in_store = false,
977954 index_on_disk = false,
978 persist_to = determine_persist_to(Msg, MsgProps),
955 persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize),
979956 msg_props = MsgProps}.
980957
981958 beta_msg_status({Msg = #basic_message{id = MsgId},
10671044 maybe_write_delivered(true, SeqId, IndexState) ->
10681045 rabbit_queue_index:deliver([SeqId], IndexState).
10691046
1070 betas_from_index_entries(List, TransientThreshold, RPA, DPA, QPA, IndexState) ->
1047 betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) ->
10711048 {Filtered, Delivers, Acks, RamReadyCount, RamBytes} =
10721049 lists:foldr(
10731050 fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
10791056 false -> MsgStatus = m(beta_msg_status(M)),
10801057 HaveMsg = msg_in_ram(MsgStatus),
10811058 Size = msg_size(MsgStatus),
1082 case (gb_trees:is_defined(SeqId, RPA) orelse
1083 gb_trees:is_defined(SeqId, DPA) orelse
1084 gb_trees:is_defined(SeqId, QPA)) of
1059 case is_msg_in_pending_acks(SeqId, State) of
10851060 false -> {?QUEUE:in_r(MsgStatus, Filtered1),
10861061 Delivers1, Acks1,
10871062 RRC + one_if(HaveMsg),
10901065 end
10911066 end
10921067 end, {?QUEUE:new(), [], [], 0, 0}, List),
1093 {Filtered, RamReadyCount, RamBytes,
1094 rabbit_queue_index:ack(
1095 Acks, rabbit_queue_index:deliver(Delivers, IndexState))}.
1068 {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State)}.
10961069 %% [0] We don't increase RamBytes here, even though it pertains to
10971070 %% unacked messages too, since if HaveMsg then the message must have
10981071 %% been stored in the QI, thus the message must have been in
10991072 %% qi_pending_ack, thus it must already have been in RAM.
1073
1074 is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA,
1075 disk_pending_ack = DPA,
1076 qi_pending_ack = QPA }) ->
1077 (gb_trees:is_defined(SeqId, RPA) orelse
1078 gb_trees:is_defined(SeqId, DPA) orelse
1079 gb_trees:is_defined(SeqId, QPA)).
11001080
11011081 expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
11021082 d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
11341114 end_seq_id = NextSeqId })
11351115 end,
11361116 Now = now(),
1117 IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
1118 ?IO_BATCH_SIZE),
1119
1120 {ok, IndexMaxSize} = application:get_env(
1121 rabbit, queue_index_embed_msgs_below),
11371122 State = #vqstate {
11381123 q1 = ?QUEUE:new(),
11391124 q2 = ?QUEUE:new(),
11481133 msg_store_clients = {PersistentClient, TransientClient},
11491134 durable = IsDurable,
11501135 transient_threshold = NextSeqId,
1136 qi_embed_msgs_below = IndexMaxSize,
11511137
11521138 len = DeltaCount1,
11531139 persistent_count = DeltaCount1,
11701156 ack_out_counter = 0,
11711157 ack_in_counter = 0,
11721158 disk_read_count = 0,
1173 disk_write_count = 0 },
1159 disk_write_count = 0,
1160
1161 io_batch_size = IoBatchSize },
11741162 a(maybe_deltas_to_betas(State)).
11751163
11761164 blank_rates(Now) ->
12671255
12681256 msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.
12691257
1270 remove(AckRequired, MsgStatus = #msg_status {
1271 seq_id = SeqId,
1272 msg_id = MsgId,
1273 is_persistent = IsPersistent,
1274 is_delivered = IsDelivered,
1275 msg_in_store = MsgInStore,
1276 index_on_disk = IndexOnDisk },
1258 %% first param: AckRequired
1259 remove(true, MsgStatus = #msg_status {
1260 seq_id = SeqId,
1261 is_delivered = IsDelivered,
1262 index_on_disk = IndexOnDisk },
1263 State = #vqstate {out_counter = OutCount,
1264 index_state = IndexState}) ->
1265 %% Mark it delivered if necessary
1266 IndexState1 = maybe_write_delivered(
1267 IndexOnDisk andalso not IsDelivered,
1268 SeqId, IndexState),
1269
1270 State1 = record_pending_ack(
1271 MsgStatus #msg_status {
1272 is_delivered = true }, State),
1273
1274 State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, State1),
1275
1276 {SeqId, maybe_update_rates(
1277 State2 #vqstate {out_counter = OutCount + 1,
1278 index_state = IndexState1})};
1279
1280 %% This function body has the same behaviour as remove_queue_entries/3
1281 %% but instead of removing messages based on a ?QUEUE, this removes
1282 %% just one message, the one referenced by the MsgStatus provided.
1283 remove(false, MsgStatus = #msg_status {
1284 seq_id = SeqId,
1285 msg_id = MsgId,
1286 is_persistent = IsPersistent,
1287 is_delivered = IsDelivered,
1288 msg_in_store = MsgInStore,
1289 index_on_disk = IndexOnDisk },
12771290 State = #vqstate {out_counter = OutCount,
12781291 index_state = IndexState,
12791292 msg_store_clients = MSCState}) ->
1280 %% 1. Mark it delivered if necessary
1293 %% Mark it delivered if necessary
12811294 IndexState1 = maybe_write_delivered(
12821295 IndexOnDisk andalso not IsDelivered,
12831296 SeqId, IndexState),
12841297
1285 %% 2. Remove from msg_store and queue index, if necessary
1286 Rem = fun () ->
1287 ok = msg_store_remove(MSCState, IsPersistent, [MsgId])
1288 end,
1289 Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end,
1290 IndexState2 = case {AckRequired, MsgInStore, IndexOnDisk} of
1291 {false, true, false} -> Rem(), IndexState1;
1292 {false, true, true} -> Rem(), Ack();
1293 {false, false, true} -> Ack();
1294 _ -> IndexState1
1295 end,
1296
1297 %% 3. If an ack is required, add something sensible to PA
1298 {AckTag, State1} = case AckRequired of
1299 true -> StateN = record_pending_ack(
1300 MsgStatus #msg_status {
1301 is_delivered = true }, State),
1302 {SeqId, StateN};
1303 false -> {undefined, State}
1304 end,
1305 State2 = case AckRequired of
1306 false -> stats({-1, 0}, {MsgStatus, none}, State1);
1307 true -> stats({-1, 1}, {MsgStatus, MsgStatus}, State1)
1308 end,
1309 {AckTag, maybe_update_rates(
1310 State2 #vqstate {out_counter = OutCount + 1,
1311 index_state = IndexState2})}.
1312
1313 purge_betas_and_deltas(State = #vqstate { q3 = Q3 }) ->
1298 %% Remove from msg_store and queue index, if necessary
1299 case MsgInStore of
1300 true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
1301 false -> ok
1302 end,
1303
1304 IndexState2 =
1305 case IndexOnDisk of
1306 true -> rabbit_queue_index:ack([SeqId], IndexState1);
1307 false -> IndexState1
1308 end,
1309
1310 State1 = stats({-1, 0}, {MsgStatus, none}, State),
1311
1312 {undefined, maybe_update_rates(
1313 State1 #vqstate {out_counter = OutCount + 1,
1314 index_state = IndexState2})}.
1315
1316 %% This function exists as a way to improve dropwhile/2
1317 %% performance. The idea is to optimise calls
1318 %% to rabbit_queue_index by batching delivers and acks, instead of
1319 %% sending them one by one.
1320 %%
1321 %% Instead of removing every message as they are popped from the
1322 %% queue, it first accumulates them and then removes them by calling
1323 %% remove_queue_entries/3, since the behaviour of
1324 %% remove_queue_entries/3 when used with
1325 %% process_delivers_and_acks_fun(deliver_and_ack) is the same as
1326 %% calling remove(false, MsgStatus, State).
1327 %%
1328 %% remove/3 also updates the out_counter in every call, but here we do
1329 %% it just once at the end.
1330 remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
1331 {MsgProps, QAcc, State1} =
1332 collect_by_predicate(Pred, ?QUEUE:new(), State),
1333 State2 =
1334 remove_queue_entries(
1335 QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
1336 %% maybe_update_rates/1 is called in remove/2 for every
1337 %% message. Since we update out_counter only once, we call it just
1338 %% there.
1339 {MsgProps, maybe_update_rates(
1340 State2 #vqstate {
1341 out_counter = OutCount + ?QUEUE:len(QAcc)})}.
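As a rough, self-contained illustration of the batching idea above (hypothetical module and names, not the real rabbit_queue_index API): collect the sequence ids while the predicate holds and hand the whole batch to a single flush fun, rather than touching the index once per message.

    %% Minimal sketch: Pred is applied to the Props of each {SeqId, Props}
    %% item; FlushFun receives the accumulated SeqIds exactly once.
    -module(batch_drop_sketch).
    -export([dropwhile/3]).

    dropwhile(Pred, Q, FlushFun) ->
        dropwhile(Pred, Q, FlushFun, []).

    dropwhile(Pred, Q, FlushFun, Acc) ->
        case queue:out(Q) of
            {{value, {SeqId, Props} = Item}, Q1} ->
                case Pred(Props) of
                    true  -> dropwhile(Pred, Q1, FlushFun, [SeqId | Acc]);
                    false -> FlushFun(lists:reverse(Acc)),
                             queue:in_r(Item, Q1)
                end;
            {empty, Q1} ->
                FlushFun(lists:reverse(Acc)),
                Q1
        end.

In the real code the flush step corresponds to a single rabbit_queue_index:deliver/2 plus rabbit_queue_index:ack/2 round trip over the whole batch.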
1342
1343 %% This function exists as a way to improve fetchwhile/4
1344 %% performance. The idea is to optimise calls
1345 %% to rabbit_queue_index by batching delivers, instead of sending them
1346 %% one by one.
1347 %%
1348 %% Fun is the function passed to fetchwhile/4 that's
1349 %% applied to every fetched message and used to build the fetchwhile/4
1350 %% result accumulator FetchAcc.
1351 fetch_by_predicate(Pred, Fun, FetchAcc,
1352 State = #vqstate {
1353 index_state = IndexState,
1354 out_counter = OutCount}) ->
1355 {MsgProps, QAcc, State1} =
1356 collect_by_predicate(Pred, ?QUEUE:new(), State),
1357
1358 {Delivers, FetchAcc1, State2} =
1359 process_queue_entries(QAcc, Fun, FetchAcc, State1),
1360
1361 IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
1362
1363 {MsgProps, FetchAcc1, maybe_update_rates(
1364 State2 #vqstate {
1365 index_state = IndexState1,
1366 out_counter = OutCount + ?QUEUE:len(QAcc)})}.
1367
1368 %% Here we try to do the same as remove(true, MsgStatus, State), but
1369 %% processing several messages at the same time. The idea is to
1370 %% optimise rabbit_queue_index:deliver/2 calls by sending a list of
1371 %% SeqIds instead of one at a time: process_queue_entries1 will
1372 %% accumulate the required deliveries, record_pending_ack for
1373 %% each message and update stats, just as remove/3 does.
1374 %%
1375 %% For the meaning of Fun and FetchAcc arguments see
1376 %% fetch_by_predicate/4 above.
1377 process_queue_entries(Q, Fun, FetchAcc, State) ->
1378 ?QUEUE:foldl(fun (MsgStatus, Acc) ->
1379 process_queue_entries1(MsgStatus, Fun, Acc)
1380 end,
1381 {[], FetchAcc, State}, Q).
1382
1383 process_queue_entries1(
1384 #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
1385 index_on_disk = IndexOnDisk} = MsgStatus,
1386 Fun,
1387 {Delivers, FetchAcc, State}) ->
1388 {Msg, State1} = read_msg(MsgStatus, State),
1389 State2 = record_pending_ack(
1390 MsgStatus #msg_status {
1391 is_delivered = true }, State1),
1392 {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
1393 Fun(Msg, SeqId, FetchAcc),
1394 stats({-1, 1}, {MsgStatus, MsgStatus}, State2)}.
1395
1396 collect_by_predicate(Pred, QAcc, State) ->
1397 case queue_out(State) of
1398 {empty, State1} ->
1399 {undefined, QAcc, State1};
1400 {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
1401 case Pred(MsgProps) of
1402 true -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
1403 State1);
1404 false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
1405 end
1406 end.
1407
1408 %%----------------------------------------------------------------------------
1409 %% Helpers for Public API purge/1 function
1410 %%----------------------------------------------------------------------------
1411
1412 %% The difference between purge_when_pending_acks/1
1413 %% and purge_and_index_reset/1 is that the former issues a deliver
1414 %% and an ack to the queue index for every message that is being
1415 %% removed, while the latter just resets the queue index state.
1416 purge_when_pending_acks(State) ->
1417 State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
1418 a(State1).
1419
1420 purge_and_index_reset(State) ->
1421 State1 = purge1(process_delivers_and_acks_fun(none), State),
1422 a(reset_qi_state(State1)).
1423
1424 %% This function removes messages from each of {q1, q2, q3, q4}.
1425 %%
1426 %% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
1427 %% are specially handled by purge_betas_and_deltas/2.
1428 %%
1429 %% purge_betas_and_deltas/2 loads messages from the queue index,
1430 %% filling up q3 and in some cases moving messages from q2 to q3 while
1431 %% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The
1432 %% messages loaded into q3 are removed by calling
1433 %% remove_queue_entries/3 until there are no more messages to be read
1434 %% from the queue index. Messages are read in batches from the queue
1435 %% index.
1436 purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
1437 State1 = remove_queue_entries(Q4, AfterFun, State),
1438
1439 State2 = #vqstate {q1 = Q1} =
1440 purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
1441
1442 State3 = remove_queue_entries(Q1, AfterFun, State2),
1443
1444 a(State3#vqstate{q1 = ?QUEUE:new()}).
1445
1446 reset_qi_state(State = #vqstate{index_state = IndexState}) ->
1447 State#vqstate{index_state =
1448 rabbit_queue_index:reset_state(IndexState)}.
1449
1450 is_pending_ack_empty(State) ->
1451 count_pending_acks(State) =:= 0.
1452
1453 count_pending_acks(#vqstate { ram_pending_ack = RPA,
1454 disk_pending_ack = DPA,
1455 qi_pending_ack = QPA }) ->
1456 gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
1457
1458 purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { q3 = Q3 }) ->
13141459 case ?QUEUE:is_empty(Q3) of
13151460 true -> State;
1316 false -> State1 = remove_queue_entries(Q3, State),
1317 purge_betas_and_deltas(maybe_deltas_to_betas(
1461 false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State),
1462 purge_betas_and_deltas(DelsAndAcksFun,
1463 maybe_deltas_to_betas(
1464 DelsAndAcksFun,
13181465 State1#vqstate{q3 = ?QUEUE:new()}))
13191466 end.
13201467
1321 remove_queue_entries(Q, State = #vqstate{index_state = IndexState,
1322 msg_store_clients = MSCState}) ->
1468 remove_queue_entries(Q, DelsAndAcksFun,
1469 State = #vqstate{msg_store_clients = MSCState}) ->
13231470 {MsgIdsByStore, Delivers, Acks, State1} =
13241471 ?QUEUE:foldl(fun remove_queue_entries1/2,
13251472 {orddict:new(), [], [], State}, Q),
1326 ok = orddict:fold(fun (IsPersistent, MsgIds, ok) ->
1327 msg_store_remove(MSCState, IsPersistent, MsgIds)
1328 end, ok, MsgIdsByStore),
1329 IndexState1 = rabbit_queue_index:ack(
1330 Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
1331 State1#vqstate{index_state = IndexState1}.
1473 remove_msgs_by_id(MsgIdsByStore, MSCState),
1474 DelsAndAcksFun(Delivers, Acks, State1).
13321475
13331476 remove_queue_entries1(
13341477 #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
13421485 cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
13431486 cons_if(IndexOnDisk, SeqId, Acks),
13441487 stats({-1, 0}, {MsgStatus, none}, State)}.
1488
1489 process_delivers_and_acks_fun(deliver_and_ack) ->
1490 fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) ->
1491 IndexState1 =
1492 rabbit_queue_index:ack(
1493 Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
1494 State #vqstate { index_state = IndexState1 }
1495 end;
1496 process_delivers_and_acks_fun(_) ->
1497 fun (_, _, State) ->
1498 State
1499 end.
13451500
13461501 %%----------------------------------------------------------------------------
13471502 %% Internal gubbins for publishing
13641519 queue_index -> {MsgStatus, State}
13651520 end;
13661521 maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
1522 {MsgStatus, State}.
1523
1524 %% Due to certain optimizations made inside
1525 %% rabbit_queue_index:pre_publish/7 we need to have two separate
1526 %% functions for index persistence. This one is only used when paging
1527 %% during memory pressure. We didn't want to modify
1528 %% maybe_write_index_to_disk/3 because that function is used in other
1529 %% places.
1530 maybe_batch_write_index_to_disk(_Force,
1531 MsgStatus = #msg_status {
1532 index_on_disk = true }, State) ->
1533 {MsgStatus, State};
1534 maybe_batch_write_index_to_disk(Force,
1535 MsgStatus = #msg_status {
1536 msg = Msg,
1537 msg_id = MsgId,
1538 seq_id = SeqId,
1539 is_persistent = IsPersistent,
1540 is_delivered = IsDelivered,
1541 msg_props = MsgProps},
1542 State = #vqstate {
1543 target_ram_count = TargetRamCount,
1544 disk_write_count = DiskWriteCount,
1545 index_state = IndexState})
1546 when Force orelse IsPersistent ->
1547 {MsgOrId, DiskWriteCount1} =
1548 case persist_to(MsgStatus) of
1549 msg_store -> {MsgId, DiskWriteCount};
1550 queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
1551 end,
1552 IndexState1 = rabbit_queue_index:pre_publish(
1553 MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered,
1554 TargetRamCount, IndexState),
1555 {MsgStatus#msg_status{index_on_disk = true},
1556 State#vqstate{index_state = IndexState1,
1557 disk_write_count = DiskWriteCount1}};
1558 maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
13671559 {MsgStatus, State}.
13681560
13691561 maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
14001592 {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
14011593 maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
14021594
1595 maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
1596 {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
1597 maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1).
1598
14031599 determine_persist_to(#basic_message{
14041600 content = #content{properties = Props,
14051601 properties_bin = PropsBin}},
1406 #message_properties{size = BodySize}) ->
1407 {ok, IndexMaxSize} = application:get_env(
1408 rabbit, queue_index_embed_msgs_below),
1602 #message_properties{size = BodySize},
1603 IndexMaxSize) ->
14091604 %% The >= is so that you can set the env to 0 and never persist
14101605 %% to the index.
14111606 %%
14971692 end.
14981693
14991694 purge_pending_ack(KeepPersistent,
1500 State = #vqstate { ram_pending_ack = RPA,
1501 disk_pending_ack = DPA,
1502 qi_pending_ack = QPA,
1503 index_state = IndexState,
1695 State = #vqstate { index_state = IndexState,
15041696 msg_store_clients = MSCState }) ->
1697 {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State),
1698 case KeepPersistent of
1699 true -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState),
1700 State1;
1701 false -> IndexState1 =
1702 rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
1703 remove_msgs_by_id(MsgIdsByStore, MSCState),
1704 State1 #vqstate { index_state = IndexState1 }
1705 end.
1706
1707 purge_pending_ack_delete_and_terminate(
1708 State = #vqstate { index_state = IndexState,
1709 msg_store_clients = MSCState }) ->
1710 {_, MsgIdsByStore, State1} = purge_pending_ack1(State),
1711 IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
1712 remove_msgs_by_id(MsgIdsByStore, MSCState),
1713 State1 #vqstate { index_state = IndexState1 }.
1714
1715 purge_pending_ack1(State = #vqstate { ram_pending_ack = RPA,
1716 disk_pending_ack = DPA,
1717 qi_pending_ack = QPA }) ->
15051718 F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
15061719 {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
15071720 rabbit_misc:gb_trees_fold(
15111724 State1 = State #vqstate { ram_pending_ack = gb_trees:empty(),
15121725 disk_pending_ack = gb_trees:empty(),
15131726 qi_pending_ack = gb_trees:empty()},
1514
1515 case KeepPersistent of
1516 true -> case orddict:find(false, MsgIdsByStore) of
1517 error -> State1;
1518 {ok, MsgIds} -> ok = msg_store_remove(MSCState, false,
1519 MsgIds),
1520 State1
1521 end;
1522 false -> IndexState1 =
1523 rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
1524 [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
1525 || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
1526 State1 #vqstate { index_state = IndexState1 }
1727 {IndexOnDiskSeqIds, MsgIdsByStore, State1}.
1728
1729 %% MsgIdsByStore is an orddict with two keys:
1730 %%
1731 %% true: holds a list of Persistent Message Ids.
1732 %% false: holds a list of Transient Message Ids.
1733 %%
1734 %% When we call orddict:to_list/1 we get two sets of msg ids, where
1735 %% IsPersistent is either true for persistent messages or false for
1736 %% transient ones. The msg_store_remove/3 function takes this boolean
1737 %% flag to determine which store the messages should be removed
1738 %% from.
1739 remove_msgs_by_id(MsgIdsByStore, MSCState) ->
1740 [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
1741 || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)].
1742
1743 remove_transient_msgs_by_id(MsgIdsByStore, MSCState) ->
1744 case orddict:find(false, MsgIdsByStore) of
1745 error -> ok;
1746 {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds)
15271747 end.
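For illustration, a small erl shell session showing the MsgIdsByStore shape described above (the message ids here are made-up binaries):

    1> D0 = orddict:new().
    []
    2> D1 = orddict:append(true, <<"m1">>, D0).
    [{true,[<<"m1">>]}]
    3> D2 = orddict:append(false, <<"m2">>, D1).
    [{false,[<<"m2">>]},{true,[<<"m1">>]}]
    4> orddict:to_list(D2).
    [{false,[<<"m2">>]},{true,[<<"m1">>]}]

Each {IsPersistent, MsgIds} pair returned by orddict:to_list/1 then maps onto one msg_store_remove/3 call.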
15281748
15291749 accumulate_ack_init() -> {[], orddict:new(), []}.
16981918 next({delta, Delta, [], State}, IndexState) ->
16991919 next({delta, Delta, State}, IndexState);
17001920 next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
1701 case (gb_trees:is_defined(SeqId, State#vqstate.ram_pending_ack) orelse
1702 gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack) orelse
1703 gb_trees:is_defined(SeqId, State#vqstate.qi_pending_ack)) of
1921 case is_msg_in_pending_acks(SeqId, State) of
17041922 false -> Next = {delta, Delta, Rest, State},
17051923 {value, beta_msg_status(M), false, Next, IndexState};
17061924 true -> next({delta, Delta, Rest, State}, IndexState)
17471965 ram_pending_ack = RPA,
17481966 ram_msg_count = RamMsgCount,
17491967 target_ram_count = TargetRamCount,
1968 io_batch_size = IoBatchSize,
17501969 rates = #rates { in = AvgIngress,
17511970 out = AvgEgress,
17521971 ack_in = AvgAckIngress,
17721991 State2
17731992 end,
17741993
1775 case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
1776 permitted_beta_count(State1)) of
1777 S2 when S2 >= ?IO_BATCH_SIZE ->
1778 %% There is an implicit, but subtle, upper bound here. We
1779 %% may shuffle a lot of messages from Q2/3 into delta, but
1780 %% the number of these that require any disk operation,
1781 %% namely index writing, i.e. messages that are genuine
1782 %% betas and not gammas, is bounded by the credit_flow
1783 %% limiting of the alpha->beta conversion above.
1784 push_betas_to_deltas(S2, State1);
1785 _ ->
1786 State1
1787 end.
1994 State3 =
1995 case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
1996 permitted_beta_count(State1)) of
1997 S2 when S2 >= IoBatchSize ->
1998 %% There is an implicit, but subtle, upper bound here. We
1999 %% may shuffle a lot of messages from Q2/3 into delta, but
2000 %% the number of these that require any disk operation,
2001 %% namely index writing, i.e. messages that are genuine
2002 %% betas and not gammas, is bounded by the credit_flow
2003 %% limiting of the alpha->beta conversion above.
2004 push_betas_to_deltas(S2, State1);
2005 _ ->
2006 State1
2007 end,
2008 %% See rabbitmq-server-290 for the reasons behind this GC call.
2009 garbage_collect(),
2010 State3.
17882011
17892012 limit_ram_acks(0, State) ->
1790 {0, State};
2013 {0, ui(State)};
17912014 limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
17922015 disk_pending_ack = DPA }) ->
17932016 case gb_trees:is_empty(RPA) of
17942017 true ->
1795 {Quota, State};
2018 {Quota, ui(State)};
17962019 false ->
17972020 {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
17982021 {MsgStatus1, State1} =
1799 maybe_write_to_disk(true, false, MsgStatus, State),
2022 maybe_prepare_write_to_disk(true, false, MsgStatus, State),
18002023 MsgStatus2 = m(trim_msg_status(MsgStatus1)),
18012024 DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
18022025 limit_ram_acks(Quota - 1,
18562079 {loaded, {MsgStatus, State2}}
18572080 end.
18582081
1859 maybe_deltas_to_betas(State = #vqstate { delta = ?BLANK_DELTA_PATTERN(X) }) ->
2082 maybe_deltas_to_betas(State) ->
2083 AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
2084 maybe_deltas_to_betas(AfterFun, State).
2085
2086 maybe_deltas_to_betas(_DelsAndAcksFun,
2087 State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) ->
18602088 State;
1861 maybe_deltas_to_betas(State = #vqstate {
2089 maybe_deltas_to_betas(DelsAndAcksFun,
2090 State = #vqstate {
18622091 q2 = Q2,
18632092 delta = Delta,
18642093 q3 = Q3,
18652094 index_state = IndexState,
18662095 ram_msg_count = RamMsgCount,
18672096 ram_bytes = RamBytes,
1868 ram_pending_ack = RPA,
1869 disk_pending_ack = DPA,
1870 qi_pending_ack = QPA,
18712097 disk_read_count = DiskReadCount,
18722098 transient_threshold = TransientThreshold }) ->
18732099 #delta { start_seq_id = DeltaSeqId,
18782104 DeltaSeqIdEnd]),
18792105 {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
18802106 IndexState),
1881 {Q3a, RamCountsInc, RamBytesInc, IndexState2} =
2107 {Q3a, RamCountsInc, RamBytesInc, State1} =
18822108 betas_from_index_entries(List, TransientThreshold,
1883 RPA, DPA, QPA, IndexState1),
1884 State1 = State #vqstate { index_state = IndexState2,
1885 ram_msg_count = RamMsgCount + RamCountsInc,
1886 ram_bytes = RamBytes + RamBytesInc,
1887 disk_read_count = DiskReadCount + RamCountsInc},
2109 DelsAndAcksFun,
2110 State #vqstate { index_state = IndexState1 }),
2111 State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc,
2112 ram_bytes = RamBytes + RamBytesInc,
2113 disk_read_count = DiskReadCount + RamCountsInc },
18882114 case ?QUEUE:len(Q3a) of
18892115 0 ->
18902116 %% we ignored every message in the segment due to it being
18912117 %% transient and below the threshold
18922118 maybe_deltas_to_betas(
1893 State1 #vqstate {
2119 DelsAndAcksFun,
2120 State2 #vqstate {
18942121 delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
18952122 Q3aLen ->
18962123 Q3b = ?QUEUE:join(Q3, Q3a),
18982125 0 ->
18992126 %% delta is now empty, but it wasn't before, so
19002127 %% can now join q2 onto q3
1901 State1 #vqstate { q2 = ?QUEUE:new(),
2128 State2 #vqstate { q2 = ?QUEUE:new(),
19022129 delta = ?BLANK_DELTA,
19032130 q3 = ?QUEUE:join(Q3b, Q2) };
19042131 N when N > 0 ->
19052132 Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
19062133 count = N,
19072134 end_seq_id = DeltaSeqIdEnd }),
1908 State1 #vqstate { delta = Delta1,
2135 State2 #vqstate { delta = Delta1,
19092136 q3 = Q3b }
19102137 end
19112138 end.
19342161 when Quota =:= 0 orelse
19352162 TargetRamCount =:= infinity orelse
19362163 TargetRamCount >= RamMsgCount ->
1937 {Quota, State};
2164 {Quota, ui(State)};
19382165 push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
2166 %% We consume credits from the message_store whenever we need to
2167 %% persist a message to disk. See:
2168 %% rabbit_variable_queue:msg_store_write/4. So perhaps the
2169 %% msg_store is trying to throttle down our queue.
19392170 case credit_flow:blocked() of
1940 true -> {Quota, State};
2171 true -> {Quota, ui(State)};
19412172 false -> case Generator(Q) of
19422173 {empty, _Q} ->
1943 {Quota, State};
2174 {Quota, ui(State)};
19442175 {{value, MsgStatus}, Qa} ->
19452176 {MsgStatus1, State1} =
1946 maybe_write_to_disk(true, false, MsgStatus, State),
2177 maybe_prepare_write_to_disk(true, false, MsgStatus,
2178 State),
19472179 MsgStatus2 = m(trim_msg_status(MsgStatus1)),
19482180 State2 = stats(
19492181 ready0, {MsgStatus, MsgStatus2}, State1),
19842216 end
19852217 end.
19862218
1987 push_betas_to_deltas1(_Generator, _Limit, Q, {0, _Delta, _State} = PushState) ->
1988 {Q, PushState};
1989 push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State} = PushState) ->
2219 push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
2220 {Q, {0, Delta, ui(State)}};
2221 push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
19902222 case Generator(Q) of
19912223 {empty, _Q} ->
1992 {Q, PushState};
2224 {Q, {Quota, Delta, ui(State)}};
19932225 {{value, #msg_status { seq_id = SeqId }}, _Qa}
19942226 when SeqId < Limit ->
1995 {Q, PushState};
2227 {Q, {Quota, Delta, ui(State)}};
19962228 {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
19972229 {#msg_status { index_on_disk = true }, State1} =
1998 maybe_write_index_to_disk(true, MsgStatus, State),
2230 maybe_batch_write_index_to_disk(true, MsgStatus, State),
19992231 State2 = stats(ready0, {MsgStatus, none}, State1),
20002232 Delta1 = expand_delta(SeqId, Delta),
20012233 push_betas_to_deltas1(Generator, Limit, Qa,
20022234 {Quota - 1, Delta1, State2})
20032235 end.
2236
2237 %% Flushes queue index batch caches and updates queue index state.
2238 ui(#vqstate{index_state = IndexState,
2239 target_ram_count = TargetRamCount} = State) ->
2240 IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
2241 TargetRamCount, IndexState),
2242 State#vqstate{index_state = IndexState1}.
20042243
20052244 %%----------------------------------------------------------------------------
20062245 %% Upgrading
0 %% The contents of this file are subject to the Mozilla Public License
1 %% Version 1.1 (the "License"); you may not use this file except in
2 %% compliance with the License. You may obtain a copy of the License
3 %% at http://www.mozilla.org/MPL/
4 %%
5 %% Software distributed under the License is distributed on an "AS IS"
6 %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
7 %% the License for the specific language governing rights and
8 %% limitations under the License.
9 %%
10 %% The Original Code is RabbitMQ.
11 %%
12 %% The Initial Developer of the Original Code is GoPivotal, Inc.
13 %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
14 %%
15
16 -module(ssl_compat).
17
18 %% We don't want warnings about the use of deprecated functions in
19 %% this module.
20 -compile(nowarn_deprecated_function).
21
22 -export([connection_information/1,
23 connection_information/2]).
24
25 connection_information(SslSocket) ->
26 try
27 ssl:connection_information(SslSocket)
28 catch
29 error:undef ->
30 case ssl:connection_info(SslSocket) of
31 {ok, {ProtocolVersion, CipherSuite}} ->
32 {ok, [{protocol, ProtocolVersion},
33 {cipher_suite, CipherSuite}]};
34 {error, Reason} ->
35 {error, Reason}
36 end
37 end.
38
39 connection_information(SslSocket, Items) ->
40 try
41 ssl:connection_information(SslSocket, Items)
42 catch
43 error:undef ->
44 WantProtocolVersion = lists:member(protocol, Items),
45 WantCipherSuite = lists:member(cipher_suite, Items),
46 if
47 WantProtocolVersion orelse WantCipherSuite ->
48 case ssl:connection_info(SslSocket) of
49 {ok, {ProtocolVersion, CipherSuite}} ->
50 filter_information_items(ProtocolVersion,
51 CipherSuite,
52 Items,
53 []);
54 {error, Reason} ->
55 {error, Reason}
56 end;
57 true ->
58 {ok, []}
59 end
60 end.
61
62 filter_information_items(ProtocolVersion, CipherSuite, [protocol | Rest],
63 Result) ->
64 filter_information_items(ProtocolVersion, CipherSuite, Rest,
65 [{protocol, ProtocolVersion} | Result]);
66 filter_information_items(ProtocolVersion, CipherSuite, [cipher_suite | Rest],
67 Result) ->
68 filter_information_items(ProtocolVersion, CipherSuite, Rest,
69 [{cipher_suite, CipherSuite} | Result]);
70 filter_information_items(ProtocolVersion, CipherSuite, [_ | Rest],
71 Result) ->
72 filter_information_items(ProtocolVersion, CipherSuite, Rest, Result);
73 filter_information_items(_ProtocolVersion, _CipherSuite, [], Result) ->
74 {ok, lists:reverse(Result)}.
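A usage sketch for the module above (SslSocket is assumed to be an already established ssl socket): the same call works on OTP releases with and without ssl:connection_information/1.

    {ok, Info} = ssl_compat:connection_information(SslSocket),
    Protocol   = proplists:get_value(protocol, Info),
    Cipher     = proplists:get_value(cipher_suite, Info)

On newer releases this is a straight passthrough to ssl:connection_information/1; on older ones the result is rebuilt from ssl:connection_info/1.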
0 %%
1 %% %CopyrightBegin%
2 %%
3 %% Copyright Ericsson AB 2014-2015. All Rights Reserved.
4 %%
5 %% Licensed under the Apache License, Version 2.0 (the "License");
6 %% you may not use this file except in compliance with the License.
7 %% You may obtain a copy of the License at
8 %%
9 %% http://www.apache.org/licenses/LICENSE-2.0
10 %%
11 %% Unless required by applicable law or agreed to in writing, software
12 %% distributed under the License is distributed on an "AS IS" BASIS,
13 %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 %% See the License for the specific language governing permissions and
15 %% limitations under the License.
16 %%
17 %% %CopyrightEnd%
18 %%
19
20 %%
21 %% If your code needs to be able to execute on ERTS versions both
22 %% earlier and later than 7.0, the best approach is to use the new
23 %% time API introduced in ERTS 7.0 and implement a fallback
24 %% solution using the old primitives to be used on old ERTS
25 %% versions. This way your code can automatically take advantage
26 %% of the improvements in the API when available. This is an
27 %% example of how to implement such an API, but it can be used
28 %% as is if you want to. Just add (a preferably renamed version of)
29 %% this module to your project, and call the API via this module
30 %% instead of calling the BIFs directly.
31 %%
32
33 -module(time_compat).
34
35 %% We don't want warnings about the use of erlang:now/0 in
36 %% this module.
37 -compile(nowarn_deprecated_function).
38 %%
39 %% We don't use
40 %% -compile({nowarn_deprecated_function, [{erlang, now, 0}]}).
41 %% since this will produce warnings when compiled on systems
42 %% where it has not yet been deprecated.
43 %%
44
45 -export([monotonic_time/0,
46 monotonic_time/1,
47 erlang_system_time/0,
48 erlang_system_time/1,
49 os_system_time/0,
50 os_system_time/1,
51 time_offset/0,
52 time_offset/1,
53 convert_time_unit/3,
54 timestamp/0,
55 unique_integer/0,
56 unique_integer/1,
57 monitor/2,
58 system_info/1,
59 system_flag/2]).
60
61 monotonic_time() ->
62 try
63 erlang:monotonic_time()
64 catch
65 error:undef ->
66 %% Use Erlang system time as monotonic time
67 erlang_system_time_fallback()
68 end.
69
70 monotonic_time(Unit) ->
71 try
72 erlang:monotonic_time(Unit)
73 catch
74 error:badarg ->
75 erlang:error(badarg, [Unit]);
76 error:undef ->
77 %% Use Erlang system time as monotonic time
78 STime = erlang_system_time_fallback(),
79 try
80 convert_time_unit_fallback(STime, native, Unit)
81 catch
82 error:bad_time_unit -> erlang:error(badarg, [Unit])
83 end
84 end.
85
86 erlang_system_time() ->
87 try
88 erlang:system_time()
89 catch
90 error:undef ->
91 erlang_system_time_fallback()
92 end.
93
94 erlang_system_time(Unit) ->
95 try
96 erlang:system_time(Unit)
97 catch
98 error:badarg ->
99 erlang:error(badarg, [Unit]);
100 error:undef ->
101 STime = erlang_system_time_fallback(),
102 try
103 convert_time_unit_fallback(STime, native, Unit)
104 catch
105 error:bad_time_unit -> erlang:error(badarg, [Unit])
106 end
107 end.
108
109 os_system_time() ->
110 try
111 os:system_time()
112 catch
113 error:undef ->
114 os_system_time_fallback()
115 end.
116
117 os_system_time(Unit) ->
118 try
119 os:system_time(Unit)
120 catch
121 error:badarg ->
122 erlang:error(badarg, [Unit]);
123 error:undef ->
124 STime = os_system_time_fallback(),
125 try
126 convert_time_unit_fallback(STime, native, Unit)
127 catch
128 error:bad_time_unit -> erlang:error(badarg, [Unit])
129 end
130 end.
131
132 time_offset() ->
133 try
134 erlang:time_offset()
135 catch
136 error:undef ->
137 %% Erlang system time and Erlang monotonic
138 %% time are always aligned
139 0
140 end.
141
142 time_offset(Unit) ->
143 try
144 erlang:time_offset(Unit)
145 catch
146 error:badarg ->
147 erlang:error(badarg, [Unit]);
148 error:undef ->
149 try
150 _ = integer_time_unit(Unit)
151 catch
152 error:bad_time_unit -> erlang:error(badarg, [Unit])
153 end,
154 %% Erlang system time and Erlang monotonic
155 %% time are always aligned
156 0
157 end.
158
159 convert_time_unit(Time, FromUnit, ToUnit) ->
160 try
161 erlang:convert_time_unit(Time, FromUnit, ToUnit)
162 catch
163 error:undef ->
164 try
165 convert_time_unit_fallback(Time, FromUnit, ToUnit)
166 catch
167 _:_ ->
168 erlang:error(badarg, [Time, FromUnit, ToUnit])
169 end;
170 error:Error ->
171 erlang:error(Error, [Time, FromUnit, ToUnit])
172 end.
173
174 timestamp() ->
175 try
176 erlang:timestamp()
177 catch
178 error:undef ->
179 erlang:now()
180 end.
181
182 unique_integer() ->
183 try
184 erlang:unique_integer()
185 catch
186 error:undef ->
187 {MS, S, US} = erlang:now(),
188 (MS*1000000+S)*1000000+US
189 end.
190
191 unique_integer(Modifiers) ->
192 try
193 erlang:unique_integer(Modifiers)
194 catch
195 error:badarg ->
196 erlang:error(badarg, [Modifiers]);
197 error:undef ->
198 case is_valid_modifier_list(Modifiers) of
199 true ->
200 %% now() converted to an integer
201 %% fulfill the requirements of
202 %% all modifiers: unique, positive,
203 %% and monotonic...
204 {MS, S, US} = erlang:now(),
205 (MS*1000000+S)*1000000+US;
206 false ->
207 erlang:error(badarg, [Modifiers])
208 end
209 end.
210
211 monitor(Type, Item) ->
212 try
213 erlang:monitor(Type, Item)
214 catch
215 error:Error ->
216 case {Error, Type, Item} of
217 {badarg, time_offset, clock_service} ->
218 %% Time offset is final and will never change.
219 %% Return a dummy reference, there will never
220 %% be any need for 'CHANGE' messages...
221 make_ref();
222 _ ->
223 erlang:error(Error, [Type, Item])
224 end
225 end.
226
227 system_info(Item) ->
228 try
229 erlang:system_info(Item)
230 catch
231 error:badarg ->
232 case Item of
233 time_correction ->
234 case erlang:system_info(tolerant_timeofday) of
235 enabled -> true;
236 disabled -> false
237 end;
238 time_warp_mode ->
239 no_time_warp;
240 time_offset ->
241 final;
242 NotSupArg when NotSupArg == os_monotonic_time_source;
243 NotSupArg == os_system_time_source;
244 NotSupArg == start_time;
245 NotSupArg == end_time ->
246 %% Cannot emulate this...
247 erlang:error(notsup, [NotSupArg]);
248 _ ->
249 erlang:error(badarg, [Item])
250 end;
251 error:Error ->
252 erlang:error(Error, [Item])
253 end.
254
255 system_flag(Flag, Value) ->
256 try
257 erlang:system_flag(Flag, Value)
258 catch
259 error:Error ->
260 case {Error, Flag, Value} of
261 {badarg, time_offset, finalize} ->
262 %% Time offset is final
263 final;
264 _ ->
265 erlang:error(Error, [Flag, Value])
266 end
267 end.
268
269 %%
270 %% Internal functions
271 %%
272
273 integer_time_unit(native) -> 1000*1000;
274 integer_time_unit(nano_seconds) -> 1000*1000*1000;
275 integer_time_unit(micro_seconds) -> 1000*1000;
276 integer_time_unit(milli_seconds) -> 1000;
277 integer_time_unit(seconds) -> 1;
278 integer_time_unit(I) when is_integer(I), I > 0 -> I;
279 integer_time_unit(BadRes) -> erlang:error(bad_time_unit, [BadRes]).
280
281 erlang_system_time_fallback() ->
282 {MS, S, US} = erlang:now(),
283 (MS*1000000+S)*1000000+US.
284
285 os_system_time_fallback() ->
286 {MS, S, US} = os:timestamp(),
287 (MS*1000000+S)*1000000+US.
288
289 convert_time_unit_fallback(Time, FromUnit, ToUnit) ->
290 FU = integer_time_unit(FromUnit),
291 TU = integer_time_unit(ToUnit),
292 case Time < 0 of
293 true -> TU*Time - (FU - 1);
294 false -> TU*Time
295 end div FU.
296
297 is_valid_modifier_list([positive|Ms]) ->
298 is_valid_modifier_list(Ms);
299 is_valid_modifier_list([monotonic|Ms]) ->
300 is_valid_modifier_list(Ms);
301 is_valid_modifier_list([]) ->
302 true;
303 is_valid_modifier_list(_) ->
304 false.
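A usage sketch for the module above: measuring elapsed time portably. On ERTS 7.0 and later this goes through the new time API; on older releases it falls back to the erlang:now/0 based emulation.

    T0 = time_compat:monotonic_time(),
    timer:sleep(100),
    T1 = time_compat:monotonic_time(),
    ElapsedMs = time_compat:convert_time_unit(T1 - T0, native, milli_seconds)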
5151
5252 -record(state, {total_memory,
5353 memory_limit,
54 memory_fraction,
54 memory_config_limit,
5555 timeout,
5656 timer,
5757 alarmed,
6262
6363 -ifdef(use_specs).
6464
65 -type(vm_memory_high_watermark() :: (float() | {'absolute', integer()})).
6566 -spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
6667 -spec(start_link/3 :: (float(), fun ((any()) -> 'ok'),
6768 fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()).
6970 -spec(get_vm_limit/0 :: () -> non_neg_integer()).
7071 -spec(get_check_interval/0 :: () -> non_neg_integer()).
7172 -spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok').
72 -spec(get_vm_memory_high_watermark/0 :: () -> float()).
73 -spec(set_vm_memory_high_watermark/1 :: (float()) -> 'ok').
73 -spec(get_vm_memory_high_watermark/0 :: () -> vm_memory_high_watermark()).
74 -spec(set_vm_memory_high_watermark/1 :: (vm_memory_high_watermark()) -> 'ok').
7475 -spec(get_memory_limit/0 :: () -> non_neg_integer()).
7576
7677 -endif.
127128 alarm_funs = AlarmFuns },
128129 {ok, set_mem_limits(State, MemFraction)}.
129130
130 handle_call(get_vm_memory_high_watermark, _From, State) ->
131 {reply, State#state.memory_fraction, State};
132
133 handle_call({set_vm_memory_high_watermark, MemFraction}, _From, State) ->
134 {reply, ok, set_mem_limits(State, MemFraction)};
131 handle_call(get_vm_memory_high_watermark, _From,
132 #state{memory_config_limit = MemLimit} = State) ->
133 {reply, MemLimit, State};
134
135 handle_call({set_vm_memory_high_watermark, MemLimit}, _From, State) ->
136 {reply, ok, set_mem_limits(State, MemLimit)};
135137
136138 handle_call(get_check_interval, _From, State) ->
137139 {reply, State#state.timeout, State};
165167 %% Server Internals
166168 %%----------------------------------------------------------------------------
167169
168 set_mem_limits(State, MemFraction) ->
170 set_mem_limits(State, MemLimit) ->
171 case erlang:system_info(wordsize) of
172 4 ->
173 error_logger:warning_msg(
174 "You are using a 32-bit version of Erlang: you may run into "
175 "memory address~n"
176 "space exhaustion or statistic counters overflow.~n");
177 _ ->
178 ok
179 end,
169180 TotalMemory =
170181 case get_total_memory() of
171182 unknown ->
196207 _ ->
197208 TotalMemory
198209 end,
199 MemLim = trunc(MemFraction * UsableMemory),
210 MemLim = interpret_limit(MemLimit, UsableMemory),
200211 error_logger:info_msg("Memory limit set to ~pMB of ~pMB total.~n",
201212 [trunc(MemLim/?ONE_MB), trunc(TotalMemory/?ONE_MB)]),
202213 internal_update(State #state { total_memory = TotalMemory,
203214 memory_limit = MemLim,
204 memory_fraction = MemFraction}).
215 memory_config_limit = MemLimit}).
216
217 interpret_limit({'absolute', MemLim}, UsableMemory) ->
218 erlang:min(MemLim, UsableMemory);
219 interpret_limit(MemFraction, UsableMemory) ->
220 trunc(MemFraction * UsableMemory).
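As a worked example (assuming 8 GiB, i.e. 8589934592 bytes, of usable memory):

    interpret_limit(0.4, 8589934592)                     %% => 3435973836 (40% of the total)
    interpret_limit({absolute, 1073741824}, 8589934592)  %% => 1073741824 (the cap, clamped to usable memory)

At runtime the limit can be changed without a restart, for instance (not shown in this diff) via rabbitmqctl eval 'vm_memory_monitor:set_vm_memory_high_watermark({absolute, 1073741824}).'.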
205221
206222 internal_update(State = #state { memory_limit = MemLimit,
207223 alarmed = Alarmed,
376392 read_proc_file(File) ->
377393 {ok, IoDevice} = file:open(File, [read, raw]),
378394 Res = read_proc_file(IoDevice, []),
379 file:close(IoDevice),
395 _ = file:close(IoDevice),
380396 lists:flatten(lists:reverse(Res)).
381397
382398 -define(BUFFER_SIZE, 1024).
0 VERSION?=3.5.4
0 VERSION?=3.5.7