Imported Upstream version 3.5.7
James Page
8 years ago
47 | 47 | BASIC_PLT=basic.plt |
48 | 48 | RABBIT_PLT=rabbit.plt |
49 | 49 | |
50 | ifndef USE_SPECS | |
51 | # our type specs rely on dict:dict/0 etc, which are only available in 17.0 | |
52 | # upwards. | |
53 | USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,11]), halt().') | |
54 | endif | |
55 | ||
56 | 50 | ifndef USE_PROPER_QC |
57 | 51 | # PropEr needs to be installed for property checking |
58 | 52 | # http://proper.softlab.ntua.gr/ |
59 | USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().') | |
53 | USE_PROPER_QC=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().') | |
60 | 54 | endif |
61 | 55 | |
62 | 56 | #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests |
63 | ERLC_OPTS=-I $(INCLUDE_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc) | |
57 | ERLC_OPTS=-I $(INCLUDE_DIR) -Wall +warn_export_vars -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc) | |
58 | ||
59 | # Our type specs rely on dict:dict/0 etc, which are only available in | |
60 | # 17.0 upwards. | |
61 | define compare_version | |
62 | $(shell awk 'BEGIN { | |
63 | split("$(1)", v1, "\."); | |
64 | version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4]; | |
65 | ||
66 | split("$(2)", v2, "\."); | |
67 | version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4]; | |
68 | ||
69 | if (version1 $(3) version2) { | |
70 | print "true"; | |
71 | } else { | |
72 | print "false"; | |
73 | } | |
74 | }') | |
75 | endef | |
76 | ||
77 | ERTS_VER = $(shell erl -version 2>&1 | sed -E 's/.* version //') | |
78 | USE_SPECS_MIN_ERTS_VER = 5.11 | |
79 | ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true) | |
80 | ERLC_OPTS += -Duse_specs | |
81 | endif | |
64 | 82 | |
65 | 83 | ifdef INSTRUMENT_FOR_QC |
66 | 84 | ERLC_OPTS += -DINSTR_MOD=gm_qc |
269 | 287 | $(ERL_CALL) |
270 | 288 | |
271 | 289 | stop-node: |
272 | -$(ERL_CALL) -q | |
290 | -( \ | |
291 | pid=$$(./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) eval 'os:getpid().') && \ | |
292 | $(ERL_CALL) -q && \ | |
293 | while ps -p $$pid >/dev/null 2>&1; do sleep 1; done \ | |
294 | ) | |
273 | 295 | |
274 | 296 | # code coverage will be created for subdirectory "ebin" of COVER_DIR |
275 | 297 | COVER_DIR=. |
0 | 0 | all: |
1 | echo "Please select a target from the Makefile." | |
1 | @echo "Please select a target from the Makefile." | |
2 | 2 | |
3 | 3 | clean: |
4 | 4 | rm -f *.pyc |
9 | 9 | ## |
10 | 10 | ## The Original Code is RabbitMQ. |
11 | 11 | ## |
12 | ## The Initial Developer of the Original Code is GoPivotal, Inc. | |
12 | ## The Initial Developer of the Original Code is Pivotal Software, Inc. | |
13 | 13 | ## Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
14 | 14 | ## |
15 | 15 | |
104 | 104 | %% |
105 | 105 | %% The Original Code is RabbitMQ. |
106 | 106 | %% |
107 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
107 | %% The Initial Developer of the Original Code is Pivotal Software, Inc. | |
108 | 108 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
109 | 109 | %%""" |
110 | 110 |
181 | 181 | %% |
182 | 182 | %% {vm_memory_high_watermark, 0.4}, |
183 | 183 | |
184 | %% Alternatively, we can set a limit (in bytes) of RAM used by the node. | |
185 | %% | |
186 | %% {vm_memory_high_watermark, {absolute, 1073741824}}, | |
187 | ||
184 | 188 | %% Fraction of the high watermark limit at which queues start to |
185 | 189 | %% page message out to disc in order to free up memory. |
186 | 190 | %% |
721 | 721 | </varlistentry> |
722 | 722 | |
723 | 723 | <varlistentry> |
724 | <term> | |
725 | <cmdsynopsis> | |
726 | <command>authenticate_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg> | |
727 | </cmdsynopsis> | |
728 | </term> | |
729 | <listitem> | |
730 | <variablelist> | |
731 | <varlistentry> | |
732 | <term>username</term> | |
733 | <listitem><para>The name of the user.</para></listitem> | |
734 | </varlistentry> | |
735 | <varlistentry> | |
736 | <term>password</term> | |
737 | <listitem><para>The password of the user.</para></listitem> | |
738 | </varlistentry> | |
739 | </variablelist> | |
740 | <para role="example-prefix">For example:</para> | |
741 | <screen role="example">rabbitmqctl authenticate_user tonyg verifyit</screen> | |
742 | <para role="example"> | |
743 | This command instructs the RabbitMQ broker to authenticate the | |
744 | user named <command>tonyg</command> with password | |
745 | <command>verifyit</command>. | |
746 | </para> | |
747 | </listitem> | |
748 | </varlistentry> | |
749 | ||
750 | <varlistentry> | |
724 | 751 | <term><cmdsynopsis><command>set_user_tags</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>tag</replaceable> ...</arg></cmdsynopsis></term> |
725 | 752 | <listitem> |
726 | 753 | <variablelist> |
1229 | 1256 | queue is non-exclusive.</para></listitem> |
1230 | 1257 | </varlistentry> |
1231 | 1258 | <varlistentry> |
1259 | <term>exclusive</term> | |
1260 | <listitem><para>True if queue is exclusive (i.e. has | |
1261 | owner_pid), false otherwise</para></listitem> | |
1262 | </varlistentry> | |
1263 | <varlistentry> | |
1232 | 1264 | <term>exclusive_consumer_pid</term> |
1233 | 1265 | <listitem><para>Id of the Erlang process representing the channel of the |
1234 | 1266 | exclusive consumer subscribed to this queue. Empty if |
1920 | 1952 | </variablelist> |
1921 | 1953 | </listitem> |
1922 | 1954 | </varlistentry> |
1955 | <varlistentry> | |
1956 | <term><cmdsynopsis><command>set_vm_memory_high_watermark absolute</command> <arg choice="req"><replaceable>memory_limit_in_bytes</replaceable></arg></cmdsynopsis></term> | |
1957 | <listitem> | |
1958 | <variablelist> | |
1959 | <varlistentry> | |
1960 | <term>memory_limit_in_bytes</term> | |
1961 | <listitem><para> | |
1962 | The new memory limit at which flow control is | |
1963 | triggered, expressed in bytes as an integer number | |
1964 | greater than or equal to 0. | |
1965 | </para></listitem> | |
1966 | </varlistentry> | |
1967 | </variablelist> | |
1968 | </listitem> | |
1969 | </varlistentry> | |
1923 | 1970 | </variablelist> |
1924 | 1971 | </refsect2> |
1925 | 1972 | </refsect1> |
0 | 0 | {application, rabbit, %% -*- erlang -*- |
1 | 1 | [{description, "RabbitMQ"}, |
2 | 2 | {id, "RabbitMQ"}, |
3 | {vsn, "3.5.4"}, | |
3 | {vsn, "3.5.7"}, | |
4 | 4 | {modules, []}, |
5 | 5 | {registered, [rabbit_amqqueue_sup, |
6 | 6 | rabbit_log, |
25 | 25 | %% breaks the QPid Java client |
26 | 26 | {frame_max, 131072}, |
27 | 27 | {channel_max, 0}, |
28 | {heartbeat, 580}, | |
28 | {heartbeat, 60}, | |
29 | 29 | {msg_store_file_size_limit, 16777216}, |
30 | {queue_index_max_journal_entries, 65536}, | |
30 | {fhc_write_buffering, true}, | |
31 | {fhc_read_buffering, true}, | |
32 | {queue_index_max_journal_entries, 32768}, | |
31 | 33 | {queue_index_embed_msgs_below, 4096}, |
32 | 34 | {default_user, <<"guest">>}, |
33 | 35 | {default_pass, <<"guest">>}, |
80 | 82 | gen_fsm, ssl]}, |
81 | 83 | {ssl_apps, [asn1, crypto, public_key, ssl]}, |
82 | 84 | %% see rabbitmq-server#114 |
83 | {mirroring_flow_control, true} | |
85 | {mirroring_flow_control, true}, | |
86 | %% see rabbitmq-server#227 and related tickets. | |
87 | %% msg_store_credit_disc_bound only takes effect when | |
88 | %% messages are persisted to the message store. If messages | |
89 | %% are embedded on the queue index, then modifying this | |
90 | %% setting has no effect because credit_flow is not used when | |
91 | %% writing to the queue index. See the setting | |
92 | %% queue_index_embed_msgs_below above. | |
93 | {msg_store_credit_disc_bound, {2000, 500}}, | |
94 | {msg_store_io_batch_size, 2048}, | |
95 | %% see rabbitmq-server#143 | |
96 | {credit_flow_default_credit, {200, 50}} | |
84 | 97 | ]}]}. |
9 | 9 | %% |
10 | 10 | %% The Original Code is RabbitMQ. |
11 | 11 | %% |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
12 | %% The Initial Developer of the Original Code is Pivotal Software, Inc. | |
13 | 13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
14 | 14 | %% |
15 | 15 |
9 | 9 | %% |
10 | 10 | %% The Original Code is RabbitMQ. |
11 | 11 | %% |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
12 | %% The Initial Developer of the Original Code is Pivotal Software, Inc. | |
13 | 13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
14 | 14 | %% |
15 | 15 | |
121 | 121 | -define(HIBERNATE_AFTER_MIN, 1000). |
122 | 122 | -define(DESIRED_HIBERNATE, 10000). |
123 | 123 | -define(CREDIT_DISC_BOUND, {2000, 500}). |
124 | %% When we discover that we should write some indices to disk for some | |
125 | %% betas, the IO_BATCH_SIZE sets the number of betas that we must be | |
126 | %% due to write indices for before we do any work at all. | |
127 | -define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND | |
124 | 128 | |
125 | 129 | -define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>). |
126 | 130 | -define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]). |
9 | 9 | %% |
10 | 10 | %% The Original Code is RabbitMQ. |
11 | 11 | %% |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
12 | %% The Initial Developer of the Original Code is Pivotal Software, Inc. | |
13 | 13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
14 | 14 | %% |
15 | 15 |
9 | 9 | %% |
10 | 10 | %% The Original Code is RabbitMQ. |
11 | 11 | %% |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
12 | %% The Initial Developer of the Original Code is Pivotal Software, Inc. | |
13 | 13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. |
14 | 14 | %% |
15 | 15 |
87 | 87 | Count = length(List), |
88 | 88 | Compound = lists:map(fun generate/1, List), |
89 | 89 | S = iolist_size(Compound), |
90 | %% S < 256 -> Count < 256 | |
91 | if S > 255 -> [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound]; | |
92 | true -> [<<16#c0, (S + 1):8/unsigned, Count:8/unsigned>>, Compound] | |
90 | %% If the list contains less than (256 - 1) elements and if the | |
91 | %% encoded size (including the encoding of "Count", thus S + 1 | |
92 | %% in the test) is less than 256 bytes, we use the short form. | |
93 | %% Otherwise, we use the large form. | |
94 | if Count >= (256 - 1) orelse (S + 1) >= 256 -> | |
95 | [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound]; | |
96 | true -> | |
97 | [<<16#c0, (S + 1):8/unsigned, Count:8/unsigned>>, Compound] | |
93 | 98 | end; |
94 | 99 | |
95 | 100 | generate({map, ListOfPairs}) -> |
99 | 104 | (generate(Val))] |
100 | 105 | end, ListOfPairs), |
101 | 106 | S = iolist_size(Compound), |
102 | if S > 255 -> [<<16#d1,(S + 4):32,Count:32>>, Compound]; | |
103 | true -> [<<16#c1,(S + 1):8,Count:8>>, Compound] | |
107 | %% See generate({list, ...}) for an explanation of this test. | |
108 | if Count >= (256 - 1) orelse (S + 1) >= 256 -> | |
109 | [<<16#d1, (S + 4):32, Count:32>>, Compound]; | |
110 | true -> | |
111 | [<<16#c1, (S + 1):8, Count:8>>, Compound] | |
104 | 112 | end; |
105 | 113 | |
106 | 114 | generate({array, Type, List}) -> |
108 | 116 | Body = iolist_to_binary( |
109 | 117 | [constructor(Type), [generate(Type, I) || I <- List]]), |
110 | 118 | S = size(Body), |
111 | %% S < 256 -> Count < 256 | |
112 | if S > 255 -> [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body]; | |
113 | true -> [<<16#e0, (S + 1):8/unsigned, Count:8/unsigned>>, Body] | |
119 | %% See generate({list, ...}) for an explanation of this test. | |
120 | if Count >= (256 - 1) orelse (S + 1) >= 256 -> | |
121 | [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body]; | |
122 | true -> | |
123 | [<<16#e0, (S + 1):8/unsigned, Count:8/unsigned>>, Body] | |
114 | 124 | end; |
115 | 125 | |
116 | 126 | generate({as_is, TypeCode, Bin}) -> |
347 | 347 | handle_1_0_connection_frame(#'v1_0.open'{ max_frame_size = ClientFrameMax, |
348 | 348 | channel_max = ClientChannelMax, |
349 | 349 | idle_time_out = IdleTimeout, |
350 | hostname = Hostname, | |
351 | properties = Props }, | |
350 | hostname = Hostname }, | |
352 | 351 | State = #v1{ |
353 | 352 | connection_state = starting, |
354 | 353 | connection = Connection, |
355 | 354 | throttle = Throttle, |
356 | 355 | helper_sup = HelperSupPid, |
357 | 356 | sock = Sock}) -> |
358 | ClientProps = case Props of | |
359 | undefined -> []; | |
360 | {map, Ps} -> Ps | |
361 | end, | |
362 | 357 | ClientHeartbeatSec = case IdleTimeout of |
363 | 358 | undefined -> 0; |
364 | 359 | {uint, Interval} -> Interval div 1000 |
366 | 361 | FrameMax = case ClientFrameMax of |
367 | 362 | undefined -> unlimited; |
368 | 363 | {_, FM} -> FM |
369 | end, | |
370 | ChannelMax = case ClientChannelMax of | |
371 | undefined -> unlimited; | |
372 | {_, CM} -> CM | |
373 | 364 | end, |
374 | 365 | {ok, HeartbeatSec} = application:get_env(rabbit, heartbeat), |
375 | 366 | State1 = |
221 | 221 | %% content records. However, that's already been handled for us, we're |
222 | 222 | %% just sending a chunk, so from this perspective it's just a binary. |
223 | 223 | |
224 | assemble_frames(Channel, Performative, Content, FrameMax, | |
224 | assemble_frames(Channel, Performative, Content, _FrameMax, | |
225 | 225 | rabbit_amqp1_0_framing) -> |
226 | 226 | ?DEBUG("Channel ~p <-~n~p~n followed by ~p bytes of content~n~n", |
227 | 227 | [Channel, rabbit_amqp1_0_framing:pprint(Performative), |
0 | sudo: true | |
1 | language: erlang | |
2 | notifications: | |
3 | email: | |
4 | - alerts@rabbitmq.com | |
5 | addons: | |
6 | apt: | |
7 | packages: | |
8 | - slapd | |
9 | - ldap-utils | |
10 | - xsltproc | |
11 | otp_release: | |
12 | - "R16B03-1" | |
13 | - "17.5" | |
14 | - "18.0" | |
15 | install: | |
16 | - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi | |
17 | - cd $HOME/rabbitmq-public-umbrella | |
18 | - make co | |
19 | - make up | |
20 | services: | |
21 | - slapd | |
22 | before_script: | |
23 | - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG" | |
24 | - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]} | |
25 | - rm -rf ${TEST_DIR} | |
26 | - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR} | |
27 | - cd ${TEST_DIR} | |
28 | - ./example/setup.sh | |
29 | script: make test | |
30 | before_cache: | |
31 | - rm -rf ${TEST_DIR} | |
32 | - cd $HOME | |
33 | cache: | |
34 | apt: true | |
35 | directories: | |
36 | - $HOME/rabbitmq-public-umbrella |
0 | 0 | %% -*- erlang -*- |
1 | [{rabbit, [{auth_backends, [rabbit_auth_backend_ldap]}, | |
2 | {default_vhost, <<"test">>}]}, | |
1 | [{rabbit, [{default_vhost, <<"test">>}]}, | |
3 | 2 | {rabbitmq_auth_backend_ldap, |
4 | 3 | [ {servers, ["localhost"]}, |
5 | 4 | {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"}, |
36 | 35 | {'not', {equals, "${username}", "Mike Bridgen"}}]} |
37 | 36 | ]}} |
38 | 37 | ]}} |
39 | ]}} | |
38 | ]}}, | |
39 | {tag_queries, [{administrator, {constant, false}}, | |
40 | {management, {constant, false}}]} | |
40 | 41 | ]} |
41 | 42 | ]. |
69 | 69 | |
70 | 70 | user_login_authorization(Username) -> |
71 | 71 | case user_login_authentication(Username, []) of |
72 | {ok, #auth_user{impl = Impl}} -> {ok, Impl}; | |
73 | Else -> Else | |
72 | {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags}; | |
73 | Else -> Else | |
74 | 74 | end. |
75 | 75 | |
76 | 76 | check_vhost_access(User = #auth_user{username = Username, |
+126
-14
18 | 18 | -include_lib("eunit/include/eunit.hrl"). |
19 | 19 | -include_lib("amqp_client/include/amqp_client.hrl"). |
20 | 20 | |
21 | -define(SIMON, #amqp_params_network{username = <<"Simon MacMullen">>, | |
21 | -define(SIMON_NAME, "Simon MacMullen"). | |
22 | -define(MIKEB_NAME, "Mike Bridgen"). | |
23 | -define(VHOST, "test"). | |
24 | ||
25 | -define(SIMON, #amqp_params_network{username = << ?SIMON_NAME >>, | |
22 | 26 | password = <<"password">>, |
23 | virtual_host = <<"test">>}). | |
24 | ||
25 | -define(MIKEB, #amqp_params_network{username = <<"Mike Bridgen">>, | |
27 | virtual_host = << ?VHOST >>}). | |
28 | ||
29 | -define(MIKEB, #amqp_params_network{username = << ?MIKEB_NAME >>, | |
26 | 30 | password = <<"password">>, |
27 | virtual_host = <<"test">>}). | |
28 | ||
29 | %%-------------------------------------------------------------------- | |
30 | ||
31 | login_test_() -> | |
31 | virtual_host = << ?VHOST >>}). | |
32 | ||
33 | %%-------------------------------------------------------------------- | |
34 | ||
35 | ldap_only_test_() -> | |
36 | { setup, | |
37 | fun () -> ok = application:set_env(rabbit, auth_backends, | |
38 | [rabbit_auth_backend_ldap]) end, | |
39 | fun (_) -> ok = application:unset_env(rabbit, auth_backends) end, | |
40 | [ {"LDAP Login", login()}, | |
41 | {"LDAP In group", in_group()}, | |
42 | {"LDAP Constant", const()}, | |
43 | {"LDAP String match", string_match()}, | |
44 | {"LDAP Boolean check", boolean_logic()}, | |
45 | {"LDAP Tags", tag_check([])} | |
46 | ]}. | |
47 | ||
48 | ldap_and_internal_test_() -> | |
49 | { setup, | |
50 | fun () -> | |
51 | ok = application:set_env(rabbit, auth_backends, | |
52 | [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]), | |
53 | ok = control_action(add_user, [ ?SIMON_NAME, ""]), | |
54 | ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]), | |
55 | ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]), | |
56 | ok = control_action(add_user, [ ?MIKEB_NAME, ""]), | |
57 | ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""]) | |
58 | end, | |
59 | fun (_) -> | |
60 | ok = application:unset_env(rabbit, auth_backends), | |
61 | ok = control_action(delete_user, [ ?SIMON_NAME ]), | |
62 | ok = control_action(delete_user, [ ?MIKEB_NAME ]) | |
63 | end, | |
64 | [ {"LDAP&Internal Login", login()}, | |
65 | {"LDAP&Internal Permissions", permission_match()}, | |
66 | {"LDAP&Internal Tags", tag_check([management, foo])} | |
67 | ]}. | |
68 | ||
69 | internal_followed_ldap_and_internal_test_() -> | |
70 | { setup, | |
71 | fun () -> | |
72 | ok = application:set_env(rabbit, auth_backends, | |
73 | [rabbit_auth_backend_internal, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]), | |
74 | ok = control_action(add_user, [ ?SIMON_NAME, ""]), | |
75 | ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]), | |
76 | ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]), | |
77 | ok = control_action(add_user, [ ?MIKEB_NAME, ""]), | |
78 | ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""]) | |
79 | end, | |
80 | fun (_) -> | |
81 | ok = application:unset_env(rabbit, auth_backends), | |
82 | ok = control_action(delete_user, [ ?SIMON_NAME ]), | |
83 | ok = control_action(delete_user, [ ?MIKEB_NAME ]) | |
84 | end, | |
85 | [ {"Internal, LDAP&Internal Login", login()}, | |
86 | {"Internal, LDAP&Internal Permissions", permission_match()}, | |
87 | {"Internal, LDAP&Internal Tags", tag_check([management, foo])} | |
88 | ]}. | |
89 | ||
90 | ||
91 | %%-------------------------------------------------------------------- | |
92 | ||
93 | login() -> | |
32 | 94 | [test_login(Env, L, case {LGood, EnvGood} of |
33 | 95 | {good, good} -> fun succ/1; |
34 | 96 | _ -> fun fail/1 |
89 | 151 | |
90 | 152 | %%-------------------------------------------------------------------- |
91 | 153 | |
92 | in_group_test_() -> | |
154 | in_group() -> | |
93 | 155 | X = [#'exchange.declare'{exchange = <<"test">>}], |
94 | 156 | test_resource_funs([{?SIMON, X, ok}, |
95 | 157 | {?MIKEB, X, fail}]). |
96 | 158 | |
97 | const_test_() -> | |
159 | const() -> | |
98 | 160 | Q = [#'queue.declare'{queue = <<"test">>}], |
99 | 161 | test_resource_funs([{?SIMON, Q, ok}, |
100 | 162 | {?MIKEB, Q, fail}]). |
101 | 163 | |
102 | string_match_test_() -> | |
164 | string_match() -> | |
103 | 165 | B = fun(N) -> |
104 | 166 | [#'exchange.declare'{exchange = N}, |
105 | 167 | #'queue.declare'{queue = <<"test">>}, |
109 | 171 | {?SIMON, B(<<"abc123">>), fail}, |
110 | 172 | {?SIMON, B(<<"xch-Someone Else-abc123">>), fail}]). |
111 | 173 | |
112 | boolean_logic_test_() -> | |
174 | boolean_logic() -> | |
113 | 175 | Q1 = [#'queue.declare'{queue = <<"test1">>}, |
114 | 176 | #'basic.consume'{queue = <<"test1">>}], |
115 | 177 | Q2 = [#'queue.declare'{queue = <<"test2">>}, |
118 | 180 | {?SIMON, Q2, ok}, |
119 | 181 | {?MIKEB, Q1, fail}, |
120 | 182 | {?MIKEB, Q2, fail}]]. |
183 | ||
184 | permission_match() -> | |
185 | B = fun(N) -> | |
186 | [#'exchange.declare'{exchange = N}, | |
187 | #'queue.declare'{queue = <<"prefix-test">>}, | |
188 | #'queue.bind'{exchange = N, queue = <<"prefix-test">>}] | |
189 | end, | |
190 | test_resource_funs([{?SIMON, B(<<"prefix-abc123">>), ok}, | |
191 | {?SIMON, B(<<"abc123">>), fail}, | |
192 | {?SIMON, B(<<"xch-Simon MacMullen-abc123">>), fail}]). | |
193 | ||
194 | tag_check(Tags) -> | |
195 | fun() -> | |
196 | {ok, User} = rabbit_access_control:check_user_pass_login( | |
197 | << ?SIMON_NAME >>, <<"password">>), | |
198 | ?assertEqual(Tags, User#user.tags) | |
199 | end. | |
200 | ||
201 | ||
202 | %%-------------------------------------------------------------------- | |
121 | 203 | |
122 | 204 | test_resource_funs(PTRs) -> [test_resource_fun(PTR) || PTR <- PTRs]. |
123 | 205 | |
134 | 216 | end) |
135 | 217 | end. |
136 | 218 | |
137 | %%-------------------------------------------------------------------- | |
219 | control_action(Command, Args) -> | |
220 | control_action(Command, node(), Args, default_options()). | |
221 | ||
222 | control_action(Command, Args, NewOpts) -> | |
223 | control_action(Command, node(), Args, | |
224 | expand_options(default_options(), NewOpts)). | |
225 | ||
226 | control_action(Command, Node, Args, Opts) -> | |
227 | case catch rabbit_control_main:action( | |
228 | Command, Node, Args, Opts, | |
229 | fun (Format, Args1) -> | |
230 | io:format(Format ++ " ...~n", Args1) | |
231 | end) of | |
232 | ok -> | |
233 | io:format("done.~n"), | |
234 | ok; | |
235 | Other -> | |
236 | io:format("failed.~n"), | |
237 | Other | |
238 | end. | |
239 | ||
240 | default_options() -> [{"-p", ?VHOST}, {"-q", "false"}]. | |
241 | ||
242 | expand_options(As, Bs) -> | |
243 | lists:foldl(fun({K, _}=A, R) -> | |
244 | case proplists:is_defined(K, R) of | |
245 | true -> R; | |
246 | false -> [A | R] | |
247 | end | |
248 | end, Bs, As). | |
249 |
0 | sudo: false | |
1 | language: erlang | |
2 | addons: | |
3 | apt: | |
4 | packages: | |
5 | - xsltproc | |
6 | otp_release: | |
7 | - R16B03-1 | |
8 | - 17.5 | |
9 | - 18.0 | |
10 | install: | |
11 | - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi | |
12 | - cd $HOME/rabbitmq-public-umbrella | |
13 | - make co | |
14 | - make up | |
15 | before_script: | |
16 | - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG" | |
17 | - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]} | |
18 | - rm -rf ${TEST_DIR} | |
19 | - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR} | |
20 | - cd ${TEST_DIR} | |
21 | script: make test | |
22 | before_cache: | |
23 | - rm -rf ${TEST_DIR} | |
24 | - cd $HOME | |
25 | cache: | |
26 | apt: true | |
27 | directories: | |
28 | - $HOME/rabbitmq-public-umbrella | |
29 | notifications: | |
30 | email: | |
31 | - alerts@rabbitmq.com |
+10
-1
85 | 85 | |
86 | 86 | validate(_X) -> ok. |
87 | 87 | |
88 | validate_binding(_X, _B) -> ok. | |
88 | validate_binding(_X, #binding { key = K }) -> | |
89 | try | |
90 | V = list_to_integer(binary_to_list(K)), | |
91 | case V < 1 of | |
92 | true -> {error, {binding_invalid, "The binding key must be greater than 0", []}}; | |
93 | false -> ok | |
94 | end | |
95 | catch error:badarg -> | |
96 | {error, {binding_invalid, "The binding key must be an integer: ~p", [K]}} | |
97 | end. | |
89 | 98 | |
90 | 99 | create(_Tx, _X) -> ok. |
91 | 100 |
+48
-9
16 | 16 | -module(rabbit_exchange_type_consistent_hash_test). |
17 | 17 | -export([test/0]). |
18 | 18 | -include_lib("amqp_client/include/amqp_client.hrl"). |
19 | -include_lib("eunit/include/eunit.hrl"). | |
19 | 20 | |
20 | 21 | %% Because the routing is probabilistic, we can't really test a great |
21 | 22 | %% deal here. |
28 | 29 | t(Qs) -> |
29 | 30 | ok = test_with_rk(Qs), |
30 | 31 | ok = test_with_header(Qs), |
32 | ok = test_binding_with_negative_routing_key(), | |
33 | ok = test_binding_with_non_numeric_routing_key(), | |
31 | 34 | ok. |
32 | 35 | |
33 | 36 | test_with_rk(Qs) -> |
62 | 65 | type = <<"x-consistent-hash">>, |
63 | 66 | auto_delete = true, |
64 | 67 | arguments = DeclareArgs |
65 | }), | |
68 | }), | |
66 | 69 | [#'queue.declare_ok'{} = |
67 | 70 | amqp_channel:call(Chan, #'queue.declare' { |
68 | queue = Q, exclusive = true }) || Q <- Queues], | |
71 | queue = Q, exclusive = true}) || Q <- Queues], | |
69 | 72 | [#'queue.bind_ok'{} = |
70 | amqp_channel:call(Chan, #'queue.bind' { queue = Q, | |
73 | amqp_channel:call(Chan, #'queue.bind' {queue = Q, | |
71 | 74 | exchange = <<"e">>, |
72 | routing_key = <<"10">> }) | |
75 | routing_key = <<"10">>}) | |
73 | 76 | || Q <- [Q1, Q2]], |
74 | 77 | [#'queue.bind_ok'{} = |
75 | amqp_channel:call(Chan, #'queue.bind' { queue = Q, | |
78 | amqp_channel:call(Chan, #'queue.bind' {queue = Q, | |
76 | 79 | exchange = <<"e">>, |
77 | routing_key = <<"20">> }) | |
80 | routing_key = <<"20">>}) | |
78 | 81 | || Q <- [Q3, Q4]], |
79 | 82 | #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}), |
80 | 83 | [amqp_channel:call(Chan, |
85 | 88 | [begin |
86 | 89 | #'queue.declare_ok'{message_count = M} = |
87 | 90 | amqp_channel:call(Chan, #'queue.declare' {queue = Q, |
88 | exclusive = true }), | |
91 | exclusive = true}), | |
89 | 92 | M |
90 | 93 | end || Q <- Queues], |
91 | 94 | Count = lists:sum(Counts), %% All messages got routed |
92 | 95 | [true = C > 0.01 * Count || C <- Counts], %% We are not *grossly* unfair |
93 | amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e">> }), | |
94 | [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues], | |
96 | amqp_channel:call(Chan, #'exchange.delete' {exchange = <<"e">>}), | |
97 | [amqp_channel:call(Chan, #'queue.delete' {queue = Q}) || Q <- Queues], | |
95 | 98 | amqp_channel:close(Chan), |
96 | 99 | amqp_connection:close(Conn), |
97 | 100 | ok. |
101 | ||
102 | test_binding_with_negative_routing_key() -> | |
103 | {ok, Conn} = amqp_connection:start(#amqp_params_network{}), | |
104 | {ok, Chan} = amqp_connection:open_channel(Conn), | |
105 | Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>, | |
106 | type = <<"x-consistent-hash">>}, | |
107 | #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1), | |
108 | Q = <<"test-queue">>, | |
109 | Declare2 = #'queue.declare'{queue = Q}, | |
110 | #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2), | |
111 | process_flag(trap_exit, true), | |
112 | Cmd = #'queue.bind'{exchange = <<"bind-fail">>, | |
113 | routing_key = <<"-1">>}, | |
114 | ?assertExit(_, amqp_channel:call(Chan, Cmd)), | |
115 | {ok, Ch2} = amqp_connection:open_channel(Conn), | |
116 | amqp_channel:call(Ch2, #'queue.delete'{queue = Q}), | |
117 | amqp_connection:close(Conn), | |
118 | ok. | |
119 | ||
120 | test_binding_with_non_numeric_routing_key() -> | |
121 | {ok, Conn} = amqp_connection:start(#amqp_params_network{}), | |
122 | {ok, Chan} = amqp_connection:open_channel(Conn), | |
123 | Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>, | |
124 | type = <<"x-consistent-hash">>}, | |
125 | #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1), | |
126 | Q = <<"test-queue">>, | |
127 | Declare2 = #'queue.declare'{queue = Q}, | |
128 | #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2), | |
129 | process_flag(trap_exit, true), | |
130 | Cmd = #'queue.bind'{exchange = <<"bind-fail">>, | |
131 | routing_key = <<"not-a-number">>}, | |
132 | ?assertExit(_, amqp_channel:call(Chan, Cmd)), | |
133 | {ok, Ch2} = amqp_connection:open_channel(Conn), | |
134 | amqp_channel:call(Ch2, #'queue.delete'{queue = Q}), | |
135 | amqp_connection:close(Conn), | |
136 | ok. |
28 | 28 | port = undefined, |
29 | 29 | channel_max = 0, |
30 | 30 | frame_max = 0, |
31 | heartbeat = 0, | |
31 | heartbeat = 10, | |
32 | 32 | connection_timeout = infinity, |
33 | 33 | ssl_options = none, |
34 | 34 | auth_mechanisms = |
36 | 36 | rabbit_queue_collector, |
37 | 37 | rabbit_queue_decorator, |
38 | 38 | rabbit_amqqueue, |
39 | supervisor2 | |
39 | ssl_compat, | |
40 | supervisor2, | |
41 | time_compat | |
40 | 42 | ]}, |
41 | 43 | {registered, []}, |
42 | 44 | {env, []}, |
0 | Generic build instructions are at: | |
1 | http://www.rabbitmq.com/plugin-development.html | |
2 | ||
3 | See http://www.rabbitmq.com/federation.html |
0 | ## RabbitMQ Federation | |
1 | ||
2 | RabbitMQ federation offers a group of features for loosely | |
3 | coupled and WAN-friendly distributed RabbitMQ setups. Note that | |
4 | this is not an alternative to queue mirroring. | |
5 | ||
6 | ||
7 | ## Supported RabbitMQ Versions | |
8 | ||
9 | This plugin ships with RabbitMQ, there is no need to | |
10 | install it separately. | |
11 | ||
12 | ||
13 | ## Documentation | |
14 | ||
15 | See [RabbitMQ federation plugin](http://www.rabbitmq.com/federation.html) on rabbitmq.com. | |
16 | ||
17 | ||
18 | ## License and Copyright | |
19 | ||
20 | Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html). | |
21 | ||
22 | 2007-2015 (c) Pivotal Software Inc. |
252 | 252 | %% routing key the first time a message gets |
253 | 253 | %% forwarded; after that it's known that they were |
254 | 254 | %% <<>> and QueueName respectively. |
255 | {rabbit_misc:set_table_value( | |
256 | rabbit_misc:set_table_value( | |
257 | Headers, <<"x-original-exchange">>, longstr, X), | |
258 | <<"x-original-routing-key">>, longstr, K), 0}; | |
255 | {init_x_original_source_headers(Headers, X, K), 0}; | |
259 | 256 | {array, Been} -> |
260 | {Found, Been1} = lists:partition( | |
261 | fun (I) -> visit_match(I, Table) end, | |
262 | Been), | |
263 | C = case Found of | |
264 | [] -> 0; | |
265 | [{table, T}] -> case rabbit_misc:table_lookup( | |
266 | T, <<"visit-count">>) of | |
267 | {_, I} when is_number(I) -> I; | |
268 | _ -> 0 | |
269 | end | |
270 | end, | |
271 | {rabbit_misc:set_table_value( | |
272 | Headers, ?ROUTING_HEADER, array, Been1), C} | |
257 | update_visit_count(Table, Been, Headers); | |
258 | %% this means the header comes from the client | |
259 | %% which re-published the message, most likely unintentionally. | |
260 | %% We can't assume much about the value, so we simply ignore it. | |
261 | _Other -> | |
262 | {init_x_original_source_headers(Headers, X, K), 0} | |
273 | 263 | end, |
274 | 264 | rabbit_basic:prepend_table_header( |
275 | 265 | ?ROUTING_HEADER, Table ++ [{<<"redelivered">>, bool, Redelivered}, |
276 | 266 | {<<"visit-count">>, long, Count + 1}], |
277 | 267 | swap_cc_header(Headers1)). |
268 | ||
269 | init_x_original_source_headers(Headers, X, K) -> | |
270 | rabbit_misc:set_table_value( | |
271 | rabbit_misc:set_table_value( | |
272 | Headers, <<"x-original-exchange">>, longstr, X), | |
273 | <<"x-original-routing-key">>, longstr, K). | |
274 | ||
275 | update_visit_count(Table, Been, Headers) -> | |
276 | {Found, Been1} = lists:partition( | |
277 | fun(I) -> visit_match(I, Table) end, | |
278 | Been), | |
279 | C = case Found of | |
280 | [] -> 0; | |
281 | [{table, T}] -> case rabbit_misc:table_lookup( | |
282 | T, <<"visit-count">>) of | |
283 | {_, I} when is_number(I) -> I; | |
284 | _ -> 0 | |
285 | end | |
286 | end, | |
287 | {rabbit_misc:set_table_value( | |
288 | Headers, ?ROUTING_HEADER, array, Been1), C}. | |
278 | 289 | |
279 | 290 | swap_cc_header(Table) -> |
280 | 291 | [{case K of |
121 | 121 | exchange_name = bget(exchange, US, U, name(XorQ)), |
122 | 122 | queue_name = bget(queue, US, U, name(XorQ)), |
123 | 123 | prefetch_count = bget('prefetch-count', US, U, ?DEF_PREFETCH), |
124 | reconnect_delay = bget('reconnect-delay', US, U, 1), | |
124 | reconnect_delay = bget('reconnect-delay', US, U, 5), | |
125 | 125 | max_hops = bget('max-hops', US, U, 1), |
126 | 126 | expires = bget(expires, US, U, none), |
127 | 127 | message_ttl = bget('message-ttl', US, U, none), |
49 | 49 | 'Maximum number of unacknowledged messages that may be in flight over a federation link at one time. Defaults to 1000 if not set.'; |
50 | 50 | |
51 | 51 | HELP['federation-reconnect'] = |
52 | 'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 1 if not set.'; | |
52 | 'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 5 if not set.'; | |
53 | 53 | |
54 | 54 | HELP['federation-ack-mode'] = |
55 | 55 | '<dl>\ |
0 | sudo: false | |
1 | language: erlang | |
2 | notifications: | |
3 | email: | |
4 | - alerts@rabbitmq.com | |
5 | addons: | |
6 | apt: | |
7 | packages: | |
8 | - xsltproc | |
9 | - python3 | |
10 | otp_release: | |
11 | - "R16B03-1" | |
12 | - "17.5" | |
13 | - "18.0" | |
14 | install: | |
15 | - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi | |
16 | - cd $HOME/rabbitmq-public-umbrella | |
17 | - make co | |
18 | - make up | |
19 | before_script: | |
20 | - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG" | |
21 | - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]} | |
22 | - rm -rf ${TEST_DIR} | |
23 | - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR} | |
24 | - cd ${TEST_DIR} | |
25 | script: make test | |
26 | before_cache: | |
27 | - rm -rf ${TEST_DIR} | |
28 | - cd $HOME | |
29 | cache: | |
30 | apt: true | |
31 | directories: | |
32 | - $HOME/rabbitmq-public-umbrella |
366 | 366 | (options.node, options.config, error)) |
367 | 367 | else: |
368 | 368 | for key, val in new_conf.items(): |
369 | setattr(options, key, val) | |
369 | if key == 'ssl': | |
370 | setattr(options, key, val == "True") | |
371 | else: | |
372 | setattr(options, key, val) | |
370 | 373 | |
371 | 374 | return (options, args) |
372 | 375 |
204 | 204 | if (num == undefined) return UNKNOWN_REPR; |
205 | 205 | else if (num < 1) return num.toFixed(2); |
206 | 206 | else if (num < 10) return num.toFixed(1); |
207 | else return fmt_num_thousands(num.toFixed(0)); | |
207 | else return fmt_num_thousands(num); | |
208 | 208 | } |
209 | 209 | |
210 | 210 | function fmt_num_thousands(num) { |
211 | if (num == undefined) return UNKNOWN_REPR; | |
212 | num = '' + num; | |
213 | if (num.length < 4) return num; | |
214 | return fmt_num_thousands(num.slice(0, -3)) + ',' + num.slice(-3); | |
211 | var conv_num = parseFloat(num); // to avoid errors, if someone calls fmt_num_thousands(someNumber.toFixed(0)) | |
212 | return fmt_num_thousands_unfixed(conv_num.toFixed(0)); | |
213 | } | |
214 | ||
215 | function fmt_num_thousands_unfixed(num) { | |
216 | if (num == undefined) return UNKNOWN_REPR; | |
217 | num = '' + num; | |
218 | if (num.length < 4) return num; | |
219 | res= fmt_num_thousands_unfixed(num.slice(0, -3)) + ',' + num.slice(-3); | |
220 | return res; | |
215 | 221 | } |
216 | 222 | |
217 | 223 | function fmt_percent(num) { |
727 | 733 | var prefix = ''; |
728 | 734 | if (current_sort == sort) { |
729 | 735 | prefix = '<span class="arrow">' + |
730 | (current_sort_reverse ? '▲ ' : '▼ ') + | |
736 | (current_sort_reverse ? '▼ ' : '▲ ') + | |
731 | 737 | '</span>'; |
732 | 738 | } |
733 | 739 | return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>'; |
23 | 23 | -export([to_amqp_table/1, listener/1, properties/1, basic_properties/1]). |
24 | 24 | -export([record/2, to_basic_properties/1]). |
25 | 25 | -export([addr/1, port/1]). |
26 | -export([format_nulls/1]). | |
26 | 27 | |
27 | 28 | -import(rabbit_misc, [pget/2, pset/3]). |
28 | 29 | |
99 | 100 | xmerl_ucs:from_utf8(V), |
100 | 101 | V |
101 | 102 | catch exit:{ucs, _} -> |
102 | Enc = base64:encode(V), | |
103 | <<"Invalid UTF-8, base64 is: ", Enc/binary>> | |
103 | Enc = split_lines(base64:encode(V)), | |
104 | <<"Not UTF-8, base64 is: ", Enc/binary>> | |
104 | 105 | end. |
106 | ||
107 | % MIME enforces a limit on line length of base 64-encoded data to 76 characters. | |
108 | split_lines(<<Text:76/binary, Rest/binary>>) -> | |
109 | <<Text/binary, $\n, (split_lines(Rest))/binary>>; | |
110 | split_lines(Text) -> | |
111 | Text. | |
105 | 112 | |
106 | 113 | parameter(P) -> pset(value, rabbit_misc:term_to_json(pget(value, P)), P). |
107 | 114 | |
318 | 325 | ]); |
319 | 326 | |
320 | 327 | strip_pids(Items) -> [strip_pids(I) || I <- Items]. |
328 | ||
329 | %% Format for JSON replies. Transforms '' into null | |
330 | format_nulls(Items) when is_list(Items) -> | |
331 | lists:foldr(fun (Pair, Acc) -> | |
332 | [format_null_item(Pair) | Acc] | |
333 | end, [], Items); | |
334 | format_nulls(Item) -> | |
335 | format_null_item(Item). | |
336 | ||
337 | format_null_item({Key, ''}) -> | |
338 | {Key, null}; | |
339 | format_null_item({Key, Value}) when is_list(Value) -> | |
340 | {Key, format_nulls(Value)}; | |
341 | format_null_item({Key, {struct, Struct}}) -> | |
342 | {Key, {struct, format_nulls(Struct)}}; | |
343 | format_null_item({Key, {array, Struct}}) -> | |
344 | {Key, {array, format_nulls(Struct)}}; | |
345 | format_null_item({Key, Value}) -> | |
346 | {Key, Value}; | |
347 | format_null_item([{_K, _V} | _T] = L) -> | |
348 | format_nulls(L); | |
349 | format_null_item(Value) -> | |
350 | Value. |
56 | 56 | %%---------------------------------------------------------------------------- |
57 | 57 | |
58 | 58 | init([]) -> |
59 | {ok, {{one_for_one, 0, 1}, [sup()]}}. | |
59 | %% see above as well as https://github.com/rabbitmq/rabbitmq-management/pull/84. | |
60 | %% we sent a message to ourselves so that if there's a conflict | |
61 | %% with the mirrored supervisor already being started on another node, | |
62 | %% we fail and let the other node win in a way that doesn't | |
63 | %% prevent rabbitmq_management and, in turn, the entire | |
64 | %% node fail to start. | |
65 | timer:apply_after(0, ?MODULE, start_child, []), | |
66 | {ok, {{one_for_one, 0, 1}, []}}. | |
60 | 67 | |
61 | 68 | sup() -> |
62 | 69 | {rabbit_mgmt_sup, {rabbit_mgmt_sup, start_link, []}, |
177 | 177 | reply0(Facts, ReqData, Context) -> |
178 | 178 | ReqData1 = set_resp_header("Cache-Control", "no-cache", ReqData), |
179 | 179 | try |
180 | {mochijson2:encode(Facts), ReqData1, Context} | |
180 | {mochijson2:encode(rabbit_mgmt_format:format_nulls(Facts)), ReqData1, | |
181 | Context} | |
181 | 182 | catch exit:{json_encode, E} -> |
182 | 183 | Error = iolist_to_binary( |
183 | 184 | io_lib:format("JSON encode error: ~p", [E])), |
288 | 289 | Json = {struct, [{error, Type}, |
289 | 290 | {reason, rabbit_mgmt_format:tuple(Reason)}]}, |
290 | 291 | ReqData1 = wrq:append_to_response_body(mochijson2:encode(Json), ReqData), |
291 | {{halt, Code}, ReqData1, Context}. | |
292 | {{halt, Code}, set_resp_header( | |
293 | "Content-Type", "application/json", ReqData1), Context}. | |
292 | 294 | |
293 | 295 | id(Key, ReqData) when Key =:= exchange; |
294 | 296 | Key =:= source; |
869 | 869 | http_delete("/vhosts/vh1", ?NO_CONTENT), |
870 | 870 | ok. |
871 | 871 | |
872 | format_output_test() -> | |
873 | QArgs = [], | |
874 | PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}], | |
875 | http_put("/vhosts/vh1", none, ?NO_CONTENT), | |
876 | http_put("/permissions/vh1/guest", PermArgs, ?NO_CONTENT), | |
877 | http_put("/queues/%2f/test0", QArgs, ?NO_CONTENT), | |
878 | assert_list([[{name, <<"test0">>}, | |
879 | {consumer_utilisation, null}, | |
880 | {exclusive_consumer_tag, null}, | |
881 | {recoverable_slaves, null}]], http_get("/queues", ?OK)), | |
882 | http_delete("/queues/%2f/test0", ?NO_CONTENT), | |
883 | http_delete("/vhosts/vh1", ?NO_CONTENT), | |
884 | ok. | |
885 | ||
872 | 886 | columns_test() -> |
873 | 887 | http_put("/queues/%2f/test", [{arguments, [{<<"foo">>, <<"bar">>}]}], |
874 | 888 | ?NO_CONTENT), |
1178 | 1192 | rabbit_runtime_parameters_test:unregister(), |
1179 | 1193 | ok. |
1180 | 1194 | |
1195 | issue67_test()-> | |
1196 | {ok, {{_, 401, _}, Headers, _}} = req(get, "/queues", | |
1197 | [auth_header("user_no_access", "password_no_access")]), | |
1198 | ?assertEqual("application/json", | |
1199 | proplists:get_value("content-type",Headers)), | |
1200 | ok. | |
1181 | 1201 | |
1182 | 1202 | extensions_test() -> |
1183 | 1203 | [[{javascript,<<"dispatcher.js">>}]] = http_get("/extensions", ?OK), |
162 | 162 | self.assert_table([exp_msg('test', 0, False, 'test_3')], ['get', 'queue=test', 'requeue=false']) |
163 | 163 | self.run_success(['publish', 'routing_key=test'], stdin=b'test_4') |
164 | 164 | filename = '/tmp/rabbitmq-test/get.txt' |
165 | ensure_dir(filename) | |
165 | 166 | self.run_success(['get', 'queue=test', 'requeue=false', 'payload_file=' + filename]) |
166 | 167 | with open(filename) as f: |
167 | 168 | self.assertEqual('test_4', f.read()) |
241 | 242 | # routing_key, exchange, message_count, payload, payload_bytes, payload_encoding, properties, redelivered |
242 | 243 | return [key, '', str(count), payload, str(len(payload)), 'string', '', str(redelivered)] |
243 | 244 | |
245 | def ensure_dir(f): | |
246 | d = os.path.dirname(f) | |
247 | if not os.path.exists(d): | |
248 | os.makedirs(d) | |
249 | ||
244 | 250 | if __name__ == '__main__': |
245 | 251 | print("\nrabbitmqadmin tests\n===================\n") |
246 | 252 | suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQAdmin) |
18 | 18 | -behaviour(rabbit_mgmt_extension). |
19 | 19 | |
20 | 20 | -export([dispatcher/0, web_ui/0]). |
21 | ||
21 | 22 | dispatcher() -> [{["all"], rabbit_mgmt_wm_all, []}, |
22 | 23 | {["all", vhost], rabbit_mgmt_wm_all, []}]. |
23 | 24 | web_ui() -> [{javascript, <<"visualiser.js">>}]. |
0 | 0 | RELEASABLE:=true |
1 | 1 | DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test |
2 | STANDALONE_TEST_COMMANDS:=eunit:test(rabbit_mqtt_util) | |
2 | 3 | WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/test.sh |
3 | 4 | WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test |
4 | 5 | WITH_BROKER_SETUP_SCRIPTS:=$(PACKAGE_DIR)/test/setup-rabbit-test.sh |
11 | 12 | mkdir -p $(PACKAGE_DIR)/test/ebin |
12 | 13 | sed -E -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config |
13 | 14 | $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=bunnychow DIR=$(abspath $(PACKAGE_DIR))/test/certs |
15 | cp $(PACKAGE_DIR)/test/src/rabbitmq_mqtt_standalone.app.src $(PACKAGE_DIR)/test/ebin/rabbitmq_mqtt.app | |
14 | 16 | |
15 | 17 | $(PACKAGE_DIR)+clean:: |
16 | 18 | rm -rf $(PACKAGE_DIR)/test/certs |
372 | 372 | DefaultPass = rabbit_mqtt_util:env(default_pass), |
373 | 373 | {ok, Anon} = application:get_env(?APP, allow_anonymous), |
374 | 374 | {ok, TLSAuth} = application:get_env(?APP, ssl_cert_login), |
375 | U = case {User =/= undefined, is_binary(DefaultUser), | |
376 | Anon =:= true, (TLSAuth andalso SSLLoginName =/= none)} of | |
375 | U = case {User =/= undefined, | |
376 | is_binary(DefaultUser), | |
377 | Anon =:= true, | |
378 | (TLSAuth andalso SSLLoginName =/= none)} of | |
379 | %% username provided | |
377 | 380 | {true, _, _, _} -> list_to_binary(User); |
381 | %% anonymous, default user is configured, no TLS | |
382 | {false, true, true, false} -> DefaultUser; | |
383 | %% no username provided, TLS certificate is present, | |
384 | %% rabbitmq_mqtt.ssl_cert_login is true | |
378 | 385 | {false, _, _, true} -> SSLLoginName; |
379 | {false, true, true, false} -> DefaultUser; | |
380 | 386 | _ -> nocreds |
381 | 387 | end, |
382 | 388 | case U of |
383 | 389 | nocreds -> |
384 | 390 | nocreds; |
385 | 391 | _ -> |
386 | case {Pass =/= undefined, is_binary(DefaultPass), Anon =:= true, SSLLoginName == U} of | |
392 | case {Pass =/= undefined, | |
393 | is_binary(DefaultPass), | |
394 | Anon =:= true, | |
395 | TLSAuth} of | |
396 | %% password provided | |
387 | 397 | {true, _, _, _} -> {U, list_to_binary(Pass)}; |
388 | {false, _, _, _} -> {U, none}; | |
398 | %% password not provided, TLS certificate is present, | |
399 | %% rabbitmq_mqtt.ssl_cert_login is true | |
400 | {false, _, _, true} -> {U, none}; | |
401 | %% anonymous, default password is configured | |
389 | 402 | {false, true, true, _} -> {U, DefaultPass}; |
390 | 403 | _ -> {U, none} |
391 | 404 | end |
428 | 441 | {QueueQ1, |
429 | 442 | #'queue.declare'{ queue = QueueQ1, |
430 | 443 | durable = true, |
444 | %% Clean session means a transient connection, | |
445 | %% translating into auto-delete. | |
446 | %% | |
447 | %% see rabbitmq/rabbitmq-mqtt#37 | |
431 | 448 | auto_delete = CleanSess, |
432 | 449 | arguments = Qos1Args }, |
433 | 450 | #'basic.consume'{ queue = QueueQ1, |
42 | 42 | |
43 | 43 | env(Key) -> |
44 | 44 | case application:get_env(rabbitmq_mqtt, Key) of |
45 | {ok, Val} -> Val; | |
45 | {ok, Val} -> coerce_env_value(Key, Val); | |
46 | 46 | undefined -> undefined |
47 | 47 | end. |
48 | ||
49 | coerce_env_value(default_pass, Val) -> to_binary(Val); | |
50 | coerce_env_value(default_user, Val) -> to_binary(Val); | |
51 | coerce_env_value(exchange, Val) -> to_binary(Val); | |
52 | coerce_env_value(vhost, Val) -> to_binary(Val); | |
53 | coerce_env_value(_, Val) -> Val. | |
54 | ||
55 | to_binary(Val) when is_list(Val) -> list_to_binary(Val); | |
56 | to_binary(Val) -> Val. | |
48 | 57 | |
49 | 58 | table_lookup(undefined, _Key) -> |
50 | 59 | undefined; |
Binary diff not shown
16 | 16 | package com.rabbitmq.mqtt.test; |
17 | 17 | |
18 | 18 | import com.rabbitmq.client.*; |
19 | import junit.framework.Assert; | |
20 | 19 | import junit.framework.TestCase; |
20 | import org.junit.Assert; | |
21 | 21 | import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken; |
22 | 22 | import org.eclipse.paho.client.mqttv3.MqttCallback; |
23 | 23 | import org.eclipse.paho.client.mqttv3.MqttClient; |
38 | 38 | import java.net.Socket; |
39 | 39 | import java.util.ArrayList; |
40 | 40 | import java.util.Arrays; |
41 | import java.util.HashMap; | |
41 | 42 | import java.util.List; |
43 | import java.util.Map; | |
42 | 44 | import java.util.concurrent.TimeoutException; |
43 | 45 | |
44 | 46 | /*** |
64 | 66 | private long lastReceipt; |
65 | 67 | private boolean expectConnectionFailure; |
66 | 68 | |
67 | private ConnectionFactory connectionFactory; | |
68 | 69 | private Connection conn; |
69 | 70 | private Channel ch; |
70 | 71 | |
89 | 90 | client2 = new MqttClient(brokerUrl, clientId2, null); |
90 | 91 | conOpt = new MyConnOpts(); |
91 | 92 | setConOpts(conOpt); |
92 | receivedMessages = new ArrayList(); | |
93 | receivedMessages = new ArrayList<MqttMessage>(); | |
93 | 94 | expectConnectionFailure = false; |
94 | 95 | } |
95 | 96 | |
100 | 101 | client = new MqttClient(brokerUrl, clientId, null); |
101 | 102 | try { |
102 | 103 | client.connect(conOpt); |
103 | client.disconnect(); | |
104 | } catch (Exception _) {} | |
104 | client.disconnect(3000); | |
105 | } catch (Exception ignored) {} | |
105 | 106 | |
106 | 107 | client2 = new MqttClient(brokerUrl, clientId2, null); |
107 | 108 | try { |
108 | 109 | client2.connect(conOpt); |
109 | client2.disconnect(); | |
110 | } catch (Exception _) {} | |
110 | client2.disconnect(3000); | |
111 | } catch (Exception ignored) {} | |
111 | 112 | } |
112 | 113 | |
113 | 114 | private void setUpAmqp() throws IOException, TimeoutException { |
114 | connectionFactory = new ConnectionFactory(); | |
115 | connectionFactory.setHost(host); | |
116 | conn = connectionFactory.newConnection(); | |
115 | ConnectionFactory cf = new ConnectionFactory(); | |
116 | cf.setHost(host); | |
117 | conn = cf.newConnection(); | |
117 | 118 | ch = conn.createChannel(); |
118 | 119 | } |
119 | 120 | |
120 | 121 | private void tearDownAmqp() throws IOException { |
121 | conn.close(); | |
122 | if(conn.isOpen()) { | |
123 | conn.close(); | |
124 | } | |
122 | 125 | } |
123 | 126 | |
124 | 127 | private void setConOpts(MqttConnectOptions conOpts) { |
139 | 142 | mqttOut.flush(); |
140 | 143 | mqttIn.readMqttWireMessage(); |
141 | 144 | fail("Error expected if CONNECT is not first packet"); |
142 | } catch (IOException _) {} | |
145 | } catch (IOException ignored) {} | |
143 | 146 | } |
144 | 147 | |
145 | 148 | public void testInvalidUser() throws MqttException { |
149 | 152 | fail("Authentication failure expected"); |
150 | 153 | } catch (MqttException ex) { |
151 | 154 | Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode()); |
155 | } | |
156 | } | |
157 | ||
158 | // rabbitmq/rabbitmq-mqtt#37: QoS 1, clean session = false | |
159 | public void testQos1AndCleanSessionUnset() | |
160 | throws MqttException, IOException, TimeoutException, InterruptedException { | |
161 | testQueuePropertiesWithCleanSessionUnset("qos1-no-clean-session", 1, true, false); | |
162 | } | |
163 | ||
164 | protected void testQueuePropertiesWithCleanSessionSet(String cid, int qos, boolean durable, boolean autoDelete) | |
165 | throws IOException, MqttException, TimeoutException, InterruptedException { | |
166 | testQueuePropertiesWithCleanSession(true, cid, qos, durable, autoDelete); | |
167 | } | |
168 | ||
169 | protected void testQueuePropertiesWithCleanSessionUnset(String cid, int qos, boolean durable, boolean autoDelete) | |
170 | throws IOException, MqttException, TimeoutException, InterruptedException { | |
171 | testQueuePropertiesWithCleanSession(false, cid, qos, durable, autoDelete); | |
172 | } | |
173 | ||
174 | protected void testQueuePropertiesWithCleanSession(boolean cleanSession, String cid, int qos, | |
175 | boolean durable, boolean autoDelete) | |
176 | throws MqttException, IOException, TimeoutException, InterruptedException { | |
177 | MqttClient c = new MqttClient(brokerUrl, cid, null); | |
178 | MqttConnectOptions opts = new MyConnOpts(); | |
179 | opts.setCleanSession(cleanSession); | |
180 | c.connect(opts); | |
181 | ||
182 | setUpAmqp(); | |
183 | Channel tmpCh = conn.createChannel(); | |
184 | ||
185 | String q = "mqtt-subscription-" + cid + "qos" + String.valueOf(qos); | |
186 | ||
187 | c.subscribe(topic, qos); | |
188 | // there is no server-sent notification about subscription | |
189 | // success so we inject a delay | |
190 | Thread.sleep(testDelay); | |
191 | ||
192 | // ensure the queue is declared with the arguments we expect | |
193 | // e.g. mqtt-subscription-client-3aqos0 | |
194 | try { | |
195 | // first ensure the queue exists | |
196 | tmpCh.queueDeclarePassive(q); | |
197 | // then assert on properties | |
198 | Map<String, Object> args = new HashMap<String, Object>(); | |
199 | args.put("x-expires", 1800000); | |
200 | tmpCh.queueDeclare(q, durable, autoDelete, false, args); | |
201 | } finally { | |
202 | if(c.isConnected()) { | |
203 | c.disconnect(3000); | |
204 | } | |
205 | ||
206 | Channel tmpCh2 = conn.createChannel(); | |
207 | tmpCh2.queueDelete(q); | |
208 | tmpCh2.close(); | |
209 | tearDownAmqp(); | |
152 | 210 | } |
153 | 211 | } |
154 | 212 |
91 | 91 | try { |
92 | 92 | client.connect(conOpt); |
93 | 93 | client.disconnect(); |
94 | } catch (Exception _) { | |
94 | } catch (Exception ignored) { | |
95 | 95 | } |
96 | 96 | |
97 | 97 | client2 = new MqttClient(brokerUrl, clientId2, null); |
98 | 98 | try { |
99 | 99 | client2.connect(conOpt); |
100 | 100 | client2.disconnect(); |
101 | } catch (Exception _) { | |
101 | } catch (Exception ignored) { | |
102 | 102 | } |
103 | 103 | } |
104 | 104 |
0 | %% The contents of this file are subject to the Mozilla Public License | |
1 | %% Version 1.1 (the "License"); you may not use this file except in | |
2 | %% compliance with the License. You may obtain a copy of the License | |
3 | %% at http://www.mozilla.org/MPL/ | |
4 | %% | |
5 | %% Software distributed under the License is distributed on an "AS IS" | |
6 | %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See | |
7 | %% the License for the specific language governing rights and | |
8 | %% limitations under the License. | |
9 | %% | |
10 | %% The Original Code is RabbitMQ. | |
11 | %% | |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. | |
14 | %% | |
15 | ||
16 | -module(rabbit_mqtt_util_tests). | |
17 | ||
18 | -include_lib("eunit/include/eunit.hrl"). | |
19 | ||
20 | all_test_() -> | |
21 | {setup, | |
22 | fun setup/0, | |
23 | [fun coerce_exchange/0, | |
24 | fun coerce_vhost/0, | |
25 | fun coerce_default_user/0, | |
26 | fun coerce_default_pass/0]}. | |
27 | ||
28 | setup() -> | |
29 | application:load(rabbitmq_mqtt). | |
30 | ||
31 | coerce_exchange() -> | |
32 | ?assertEqual(<<"amq.topic">>, rabbit_mqtt_util:env(exchange)). | |
33 | ||
34 | coerce_vhost() -> | |
35 | ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)). | |
36 | ||
37 | coerce_default_user() -> | |
38 | ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)). | |
39 | ||
40 | coerce_default_pass() -> | |
41 | ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)). |
0 | {application, rabbitmq_mqtt, | |
1 | [{description, "RabbitMQ MQTT Adapter"}, | |
2 | {vsn, "%%VSN%%"}, | |
3 | {modules, []}, | |
4 | {registered, []}, | |
5 | {mod, {rabbit_mqtt, []}}, | |
6 | {env, [{default_user, "guest_user"}, | |
7 | {default_pass, "guest_pass"}, | |
8 | {ssl_cert_login,false}, | |
9 | {allow_anonymous, true}, | |
10 | {vhost, "/"}, | |
11 | {exchange, "amq.topic"}, | |
12 | {subscription_ttl, 1800000}, % 30 min | |
13 | {prefetch, 10}, | |
14 | {ssl_listeners, []}, | |
15 | {tcp_listeners, [1883]}, | |
16 | {tcp_listen_options, [binary, | |
17 | {packet, raw}, | |
18 | {reuseaddr, true}, | |
19 | {backlog, 128}, | |
20 | {nodelay, true}]}]}, | |
21 | {applications, [kernel, stdlib, rabbit, amqp_client]}]}. |
0 | sudo: false | |
1 | language: erlang | |
2 | notifications: | |
3 | email: | |
4 | - alerts@rabbitmq.com | |
5 | addons: | |
6 | apt: | |
7 | packages: | |
8 | - xsltproc | |
9 | otp_release: | |
10 | - R16B03-1 | |
11 | - "17.5" | |
12 | - "18.0" | |
13 | install: | |
14 | - if [ ! -d "$HOME/rabbitmq-public-umbrella/.git" ]; then git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git $HOME/rabbitmq-public-umbrella; fi | |
15 | - cd $HOME/rabbitmq-public-umbrella | |
16 | - make co | |
17 | - make up | |
18 | before_script: | |
19 | - IFS="/" read -a PARTS <<< "$TRAVIS_REPO_SLUG" | |
20 | - export TEST_DIR=$HOME/rabbitmq-public-umbrella/${PARTS[1]} | |
21 | - rm -rf ${TEST_DIR} | |
22 | - cp -r ${TRAVIS_BUILD_DIR} ${TEST_DIR} | |
23 | - cd ${TEST_DIR} | |
24 | script: make test | |
25 | before_cache: | |
26 | - rm -rf ${TEST_DIR} | |
27 | - cd $HOME | |
28 | cache: | |
29 | apt: true | |
30 | directories: | |
31 | - $HOME/rabbitmq-public-umbrella |
0 | UPSTREAM_GIT=https://github.com/pika/pika.git | |
1 | REVISION=0.9.14 | |
2 | ||
3 | LIB_DIR=pika | |
4 | CHECKOUT_DIR=pika-git | |
5 | ||
6 | TARGETS=$(LIB_DIR) | |
7 | ||
8 | all: $(TARGETS) | |
9 | ||
10 | clean: | |
11 | rm -rf $(LIB_DIR) | |
12 | ||
13 | distclean: clean | |
14 | rm -rf $(CHECKOUT_DIR) | |
15 | ||
16 | $(LIB_DIR) : $(CHECKOUT_DIR) | |
17 | rm -rf $@ | |
18 | cp -R $< $@ | |
19 | ||
20 | $(CHECKOUT_DIR): | |
21 | git clone $(UPSTREAM_GIT) $@ | |
22 | (cd $@ && git checkout $(REVISION)) || rm -rf $@ | |
23 | ||
24 | echo-revision: | |
25 | @echo $(REVISION) | |
26 |
33 | 33 | -define(HEADER_PREFETCH_COUNT, "prefetch-count"). |
34 | 34 | -define(HEADER_PRIORITY, "priority"). |
35 | 35 | -define(HEADER_RECEIPT, "receipt"). |
36 | -define(HEADER_REDELIVERED, "redelivered"). | |
36 | 37 | -define(HEADER_REPLY_TO, "reply-to"). |
37 | 38 | -define(HEADER_SERVER, "server"). |
38 | 39 | -define(HEADER_SESSION, "session"). |
42 | 43 | -define(HEADER_TYPE, "type"). |
43 | 44 | -define(HEADER_USER_ID, "user-id"). |
44 | 45 | -define(HEADER_VERSION, "version"). |
46 | -define(HEADER_X_DEAD_LETTER_EXCHANGE, "x-dead-letter-exchange"). | |
47 | -define(HEADER_X_DEAD_LETTER_ROUTING_KEY, "x-dead-letter-routing-key"). | |
48 | -define(HEADER_X_EXPIRES, "x-expires"). | |
49 | -define(HEADER_X_MAX_LENGTH, "x-max-length"). | |
50 | -define(HEADER_X_MAX_LENGTH_BYTES, "x-max-length-bytes"). | |
51 | -define(HEADER_X_MAX_PRIORITY, "x-max-priority"). | |
52 | -define(HEADER_X_MESSAGE_TTL, "x-message-ttl"). | |
53 | -define(HEADER_X_QUEUE_NAME, "x-queue-name"). | |
45 | 54 | |
46 | 55 | -define(MESSAGE_ID_SEPARATOR, "@@"). |
47 | 56 | |
48 | 57 | -define(HEADERS_NOT_ON_SEND, [?HEADER_MESSAGE_ID]). |
49 | 58 | |
50 | 59 | -define(TEMP_QUEUE_ID_PREFIX, "/temp-queue/"). |
60 | ||
61 | -define(HEADER_ARGUMENTS, [ | |
62 | ?HEADER_X_DEAD_LETTER_EXCHANGE, | |
63 | ?HEADER_X_DEAD_LETTER_ROUTING_KEY, | |
64 | ?HEADER_X_EXPIRES, | |
65 | ?HEADER_X_MAX_LENGTH, | |
66 | ?HEADER_X_MAX_LENGTH_BYTES, | |
67 | ?HEADER_X_MAX_PRIORITY, | |
68 | ?HEADER_X_MESSAGE_TTL | |
69 | ]). |
0 | 0 | RELEASABLE:=true |
1 | 1 | DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test |
2 | #STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose]) | |
2 | STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose]) | |
3 | 3 | WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/test.py $(PACKAGE_DIR)/test/src/test_connect_options.py $(PACKAGE_DIR)/test/src/test_ssl.py |
4 | #WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests() | |
4 | WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests() | |
5 | 5 | WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test |
6 | 6 | |
7 | 7 | define package_rules |
13 | 13 | sed -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config |
14 | 14 | $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=test DIR=$(abspath $(PACKAGE_DIR))/test/certs |
15 | 15 | $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy |
16 | $(MAKE) -C $(PACKAGE_DIR)/deps/pika | |
16 | 17 | |
17 | 18 | $(PACKAGE_DIR)+clean:: |
18 | 19 | rm -rf $(PACKAGE_DIR)/test/certs |
19 | 20 | |
20 | 21 | $(PACKAGE_DIR)+clean-with-deps:: |
21 | 22 | $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy distclean |
23 | $(MAKE) -C $(PACKAGE_DIR)/deps/pika distclean | |
22 | 24 | |
23 | 25 | endef |
26 | 26 | boolean_header/2, boolean_header/3, |
27 | 27 | integer_header/2, integer_header/3, |
28 | 28 | binary_header/2, binary_header/3]). |
29 | -export([serialize/1]). | |
29 | -export([serialize/1, serialize/2]). | |
30 | 30 | |
31 | 31 | initial_state() -> none. |
32 | 32 | |
221 | 221 | |
222 | 222 | binary_header(F, K, D) -> default_value(binary_header(F, K), D). |
223 | 223 | |
224 | serialize(Frame) -> | |
225 | serialize(Frame, true). | |
226 | ||
227 | %% second argument controls whether a trailing linefeed | |
228 | %% character should be added, see rabbitmq/rabbitmq-stomp#39. | |
229 | serialize(Frame, true) -> | |
230 | serialize(Frame, false) ++ [?LF]; | |
224 | 231 | serialize(#stomp_frame{command = Command, |
225 | 232 | headers = Headers, |
226 | body_iolist = BodyFragments}) -> | |
233 | body_iolist = BodyFragments}, false) -> | |
227 | 234 | Len = iolist_size(BodyFragments), |
228 | 235 | [Command, ?LF, |
229 | 236 | lists:map(fun serialize_header/1, |
232 | 239 | Len > 0 -> [?HEADER_CONTENT_LENGTH ++ ":", integer_to_list(Len), ?LF]; |
233 | 240 | true -> [] |
234 | 241 | end, |
235 | ?LF, BodyFragments, 0, ?LF]. | |
242 | ?LF, BodyFragments, 0]. | |
236 | 243 | |
237 | 244 | serialize_header({K, V}) when is_integer(V) -> hdr(escape(K), integer_to_list(V)); |
245 | serialize_header({K, V}) when is_boolean(V) -> hdr(escape(K), boolean_to_list(V)); | |
238 | 246 | serialize_header({K, V}) when is_list(V) -> hdr(escape(K), escape(V)). |
247 | ||
248 | boolean_to_list(true) -> "true"; | |
249 | boolean_to_list(_) -> "false". | |
239 | 250 | |
240 | 251 | hdr(K, V) -> [K, ?COLON, V, ?LF]. |
241 | 252 |
29 | 29 | -record(state, {session_id, channel, connection, subscriptions, |
30 | 30 | version, start_heartbeat_fun, pending_receipts, |
31 | 31 | config, route_state, reply_queues, frame_transformer, |
32 | adapter_info, send_fun, ssl_login_name, peer_addr}). | |
32 | adapter_info, send_fun, ssl_login_name, peer_addr, | |
33 | %% see rabbitmq/rabbitmq-stomp#39 | |
34 | trailing_lf}). | |
33 | 35 | |
34 | 36 | -record(subscription, {dest_hdr, ack_mode, multi_ack, description}). |
35 | 37 | |
70 | 72 | config = Configuration, |
71 | 73 | route_state = rabbit_routing_util:init_state(), |
72 | 74 | reply_queues = dict:new(), |
73 | frame_transformer = undefined}, | |
75 | frame_transformer = undefined, | |
76 | trailing_lf = rabbit_misc:get_env(rabbitmq_stomp, trailing_lf, true)}, | |
74 | 77 | hibernate, |
75 | 78 | {backoff, 1000, 1000, 10000} |
76 | 79 | }. |
448 | 451 | ?HEADER_PERSISTENT, false) of |
449 | 452 | true -> |
450 | 453 | {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID), |
451 | QName = rabbit_stomp_util:subscription_queue_name(Name, Id), | |
454 | QName = rabbit_stomp_util:subscription_queue_name(Name, Id, Frame), | |
452 | 455 | amqp_channel:call(Channel, |
453 | 456 | #'queue.delete'{queue = list_to_binary(QName), |
454 | 457 | nowait = false}), |
591 | 594 | _ -> amqp_channel:call( |
592 | 595 | Channel, #'basic.qos'{prefetch_count = Prefetch}) |
593 | 596 | end, |
594 | ExchangeAndKey = rabbit_routing_util:parse_routing(Destination), | |
595 | try | |
596 | amqp_channel:subscribe(Channel, | |
597 | #'basic.consume'{ | |
598 | queue = Queue, | |
599 | consumer_tag = ConsumerTag, | |
600 | no_local = false, | |
601 | no_ack = (AckMode == auto), | |
602 | exclusive = false, | |
603 | arguments = []}, | |
604 | self()), | |
605 | ok = rabbit_routing_util:ensure_binding( | |
606 | Queue, ExchangeAndKey, Channel) | |
607 | catch exit:Err -> | |
608 | %% it's safe to delete this queue, it was server-named | |
609 | %% and declared by us | |
610 | case Destination of | |
611 | {exchange, _} -> | |
612 | ok = maybe_clean_up_queue(Queue, State); | |
613 | {topic, _} -> | |
614 | ok = maybe_clean_up_queue(Queue, State); | |
615 | _ -> | |
616 | ok | |
597 | case dict:find(ConsumerTag, Subs) of | |
598 | {ok, _} -> | |
599 | Message = "Duplicated subscription identifier", | |
600 | Detail = "A subscription identified by '~s' alredy exists.", | |
601 | error(Message, Detail, [ConsumerTag], State), | |
602 | send_error(Message, Detail, [ConsumerTag], State), | |
603 | {stop, normal, close_connection(State)}; | |
604 | error -> | |
605 | ExchangeAndKey = | |
606 | rabbit_routing_util:parse_routing(Destination), | |
607 | try | |
608 | amqp_channel:subscribe(Channel, | |
609 | #'basic.consume'{ | |
610 | queue = Queue, | |
611 | consumer_tag = ConsumerTag, | |
612 | no_local = false, | |
613 | no_ack = (AckMode == auto), | |
614 | exclusive = false, | |
615 | arguments = []}, | |
616 | self()), | |
617 | ok = rabbit_routing_util:ensure_binding( | |
618 | Queue, ExchangeAndKey, Channel) | |
619 | catch exit:Err -> | |
620 | %% it's safe to delete this queue, it | |
621 | %% was server-named and declared by us | |
622 | case Destination of | |
623 | {exchange, _} -> | |
624 | ok = maybe_clean_up_queue(Queue, State); | |
625 | {topic, _} -> | |
626 | ok = maybe_clean_up_queue(Queue, State); | |
627 | _ -> | |
628 | ok | |
629 | end, | |
630 | exit(Err) | |
617 | 631 | end, |
618 | exit(Err) | |
619 | end, | |
620 | ok(State#state{subscriptions = | |
621 | dict:store( | |
622 | ConsumerTag, | |
623 | #subscription{dest_hdr = DestHdr, | |
624 | ack_mode = AckMode, | |
625 | multi_ack = IsMulti, | |
626 | description = Description}, | |
627 | Subs), | |
628 | route_state = RouteState1}); | |
632 | ok(State#state{subscriptions = | |
633 | dict:store( | |
634 | ConsumerTag, | |
635 | #subscription{dest_hdr = DestHdr, | |
636 | ack_mode = AckMode, | |
637 | multi_ack = IsMulti, | |
638 | description = Description}, | |
639 | Subs), | |
640 | route_state = RouteState1}) | |
641 | end; | |
629 | 642 | {error, _} = Err -> |
630 | 643 | Err |
631 | 644 | end. |
972 | 985 | ensure_endpoint(_Direction, {queue, []}, _Frame, _Channel, _State) -> |
973 | 986 | {error, {invalid_destination, "Destination cannot be blank"}}; |
974 | 987 | |
975 | ensure_endpoint(source, EndPoint, Frame, Channel, State) -> | |
988 | ensure_endpoint(source, EndPoint, {_, _, Headers, _} = Frame, Channel, State) -> | |
976 | 989 | Params = |
977 | case rabbit_stomp_frame:boolean_header( | |
978 | Frame, ?HEADER_PERSISTENT, false) of | |
979 | true -> | |
980 | [{subscription_queue_name_gen, | |
981 | fun () -> | |
982 | {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID), | |
983 | {_, Name} = rabbit_routing_util:parse_routing(EndPoint), | |
984 | list_to_binary( | |
985 | rabbit_stomp_util:subscription_queue_name(Name, | |
986 | Id)) | |
987 | end}, | |
988 | {durable, true}]; | |
989 | false -> | |
990 | [{subscription_queue_name_gen, | |
991 | fun () -> | |
992 | Id = rabbit_guid:gen_secure(), | |
993 | {_, Name} = rabbit_routing_util:parse_routing(EndPoint), | |
994 | list_to_binary( | |
995 | rabbit_stomp_util:subscription_queue_name(Name, | |
996 | Id)) | |
997 | end}, | |
998 | {durable, false}] | |
999 | end, | |
1000 | rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint, Params, State); | |
1001 | ||
1002 | ensure_endpoint(Direction, Endpoint, _Frame, Channel, State) -> | |
1003 | rabbit_routing_util:ensure_endpoint(Direction, Channel, Endpoint, State). | |
990 | [{subscription_queue_name_gen, | |
991 | fun () -> | |
992 | Id = build_subscription_id(Frame), | |
993 | {_, Name} = rabbit_routing_util:parse_routing(EndPoint), | |
994 | list_to_binary(rabbit_stomp_util:subscription_queue_name(Name, Id, Frame)) | |
995 | end}, | |
996 | {durable, rabbit_stomp_frame:boolean_header(Frame, ?HEADER_PERSISTENT, false)}], | |
997 | Arguments = rabbit_stomp_util:build_arguments(Headers), | |
998 | rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint, | |
999 | [Arguments | Params], State); | |
1000 | ||
1001 | ensure_endpoint(Direction, Endpoint, {_, _, Headers, _}, Channel, State) -> | |
1002 | Arguments = rabbit_stomp_util:build_arguments(Headers), | |
1003 | rabbit_routing_util:ensure_endpoint(Direction, Channel, Endpoint, | |
1004 | [Arguments], State). | |
1005 | ||
1006 | build_subscription_id(Frame) -> | |
1007 | case rabbit_stomp_frame:boolean_header(Frame, ?HEADER_PERSISTENT, false) of | |
1008 | true -> | |
1009 | {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID), | |
1010 | Id; | |
1011 | false -> | |
1012 | rabbit_guid:gen_secure() | |
1013 | end. | |
1004 | 1014 | |
1005 | 1015 | %%---------------------------------------------------------------------------- |
1006 | 1016 | %% Success/error handling |
1050 | 1060 | body_iolist = BodyFragments}, |
1051 | 1061 | State). |
1052 | 1062 | |
1053 | send_frame(Frame, State = #state{send_fun = SendFun}) -> | |
1054 | SendFun(async, rabbit_stomp_frame:serialize(Frame)), | |
1063 | send_frame(Frame, State = #state{send_fun = SendFun, | |
1064 | trailing_lf = TrailingLF}) -> | |
1065 | SendFun(async, rabbit_stomp_frame:serialize(Frame, TrailingLF)), | |
1055 | 1066 | State. |
1056 | 1067 | |
1057 | 1068 | send_error_frame(Message, ExtraHeaders, Format, Args, State) -> |
15 | 15 | |
16 | 16 | -module(rabbit_stomp_util). |
17 | 17 | |
18 | -export([parse_message_id/1, subscription_queue_name/2]). | |
18 | -export([parse_message_id/1, subscription_queue_name/3]). | |
19 | 19 | -export([longstr_field/2]). |
20 | 20 | -export([ack_mode/1, consumer_tag_reply_to/1, consumer_tag/1, message_headers/1, |
21 | 21 | headers_post_process/1, headers/5, message_properties/1, tag_to_id/1, |
22 | msg_header_name/1, ack_header_name/1]). | |
22 | msg_header_name/1, ack_header_name/1, build_arguments/1]). | |
23 | 23 | -export([negotiate_version/2]). |
24 | 24 | -export([trim_headers/1]). |
25 | 25 | |
121 | 121 | #'basic.deliver'{consumer_tag = ConsumerTag, |
122 | 122 | delivery_tag = DeliveryTag, |
123 | 123 | exchange = ExchangeBin, |
124 | routing_key = RoutingKeyBin}) -> | |
124 | routing_key = RoutingKeyBin, | |
125 | redelivered = Redelivered}) -> | |
125 | 126 | case tag_to_id(ConsumerTag) of |
126 | 127 | {ok, {internal, Id}} -> [{?HEADER_SUBSCRIPTION, Id}]; |
127 | 128 | _ -> [] |
130 | 131 | format_destination(binary_to_list(ExchangeBin), |
131 | 132 | binary_to_list(RoutingKeyBin))}, |
132 | 133 | {?HEADER_MESSAGE_ID, |
133 | create_message_id(ConsumerTag, SessionId, DeliveryTag)}] ++ | |
134 | create_message_id(ConsumerTag, SessionId, DeliveryTag)}, | |
135 | {?HEADER_REDELIVERED, Redelivered}] ++ | |
134 | 136 | case AckMode == client andalso Version == "1.2" of |
135 | 137 | true -> [{?HEADER_ACK, |
136 | 138 | create_message_id(ConsumerTag, SessionId, DeliveryTag)}]; |
259 | 261 | msg_header_name("1.1") -> ?HEADER_MESSAGE_ID; |
260 | 262 | msg_header_name("1.0") -> ?HEADER_MESSAGE_ID. |
261 | 263 | |
264 | build_arguments(Headers) -> | |
265 | Arguments = | |
266 | lists:foldl(fun({K, V}, Acc) -> | |
267 | case lists:member(K, ?HEADER_ARGUMENTS) of | |
268 | true -> [build_argument(K, V) | Acc]; | |
269 | false -> Acc | |
270 | end | |
271 | end, | |
272 | [], | |
273 | Headers), | |
274 | {arguments, Arguments}. | |
275 | ||
276 | %% build the actual value thru pattern matching | |
277 | build_argument(?HEADER_X_DEAD_LETTER_EXCHANGE, Val) -> | |
278 | {list_to_binary(?HEADER_X_DEAD_LETTER_EXCHANGE), longstr, | |
279 | list_to_binary(string:strip(Val))}; | |
280 | build_argument(?HEADER_X_DEAD_LETTER_ROUTING_KEY, Val) -> | |
281 | {list_to_binary(?HEADER_X_DEAD_LETTER_ROUTING_KEY), longstr, | |
282 | list_to_binary(string:strip(Val))}; | |
283 | build_argument(?HEADER_X_EXPIRES, Val) -> | |
284 | {list_to_binary(?HEADER_X_EXPIRES), long, | |
285 | list_to_integer(string:strip(Val))}; | |
286 | build_argument(?HEADER_X_MAX_LENGTH, Val) -> | |
287 | {list_to_binary(?HEADER_X_MAX_LENGTH), long, | |
288 | list_to_integer(string:strip(Val))}; | |
289 | build_argument(?HEADER_X_MAX_LENGTH_BYTES, Val) -> | |
290 | {list_to_binary(?HEADER_X_MAX_LENGTH_BYTES), long, | |
291 | list_to_integer(string:strip(Val))}; | |
292 | build_argument(?HEADER_X_MAX_PRIORITY, Val) -> | |
293 | {list_to_binary(?HEADER_X_MAX_PRIORITY), long, | |
294 | list_to_integer(string:strip(Val))}; | |
295 | build_argument(?HEADER_X_MESSAGE_TTL, Val) -> | |
296 | {list_to_binary(?HEADER_X_MESSAGE_TTL), long, | |
297 | list_to_integer(string:strip(Val))}. | |
298 | ||
262 | 299 | %%-------------------------------------------------------------------- |
263 | 300 | %% Destination Formatting |
264 | 301 | %%-------------------------------------------------------------------- |
276 | 313 | %% Destination Parsing |
277 | 314 | %%-------------------------------------------------------------------- |
278 | 315 | |
279 | subscription_queue_name(Destination, SubscriptionId) -> | |
280 | %% We need a queue name that a) can be derived from the | |
281 | %% Destination and SubscriptionId, and b) meets the constraints on | |
282 | %% AMQP queue names. It doesn't need to be secure; we use md5 here | |
283 | %% simply as a convenient means to bound the length. | |
284 | rabbit_guid:string( | |
285 | erlang:md5(term_to_binary({Destination, SubscriptionId})), | |
286 | "stomp-subscription"). | |
316 | subscription_queue_name(Destination, SubscriptionId, Frame) -> | |
317 | case rabbit_stomp_frame:header(Frame, ?HEADER_X_QUEUE_NAME, undefined) of | |
318 | undefined -> | |
319 | %% We need a queue name that a) can be derived from the | |
320 | %% Destination and SubscriptionId, and b) meets the constraints on | |
321 | %% AMQP queue names. It doesn't need to be secure; we use md5 here | |
322 | %% simply as a convenient means to bound the length. | |
323 | rabbit_guid:string( | |
324 | erlang:md5(term_to_binary({Destination, SubscriptionId})), | |
325 | "stomp-subscription"); | |
326 | Name -> | |
327 | Name | |
328 | end. | |
287 | 329 | |
288 | 330 | %% ---- Helpers ---- |
289 | 331 |
15 | 15 | {packet, raw}, |
16 | 16 | {reuseaddr, true}, |
17 | 17 | {backlog, 128}, |
18 | {nodelay, true}]}]}, | |
18 | {nodelay, true}]}, | |
19 | %% see rabbitmq/rabbitmq-stomp#39 | |
20 | {trailing_lf, true}]}, | |
19 | 21 | {applications, [kernel, stdlib, rabbit, amqp_client]}]}. |
215 | 215 | |
216 | 216 | self.conn.send_frame("NACK", {self.ack_id_header: message_id, "requeue": False}) |
217 | 217 | self.assertFalse(self.listener.await(4), "Received message after NACK with requeue = False") |
218 | ||
219 | class TestAck11(TestAck): | |
220 | ||
221 | def create_connection_obj(self, version='1.1', vhost='/', heartbeats=(0, 0)): | |
222 | conn = stomp.StompConnection11(vhost=vhost, | |
223 | heartbeats=heartbeats) | |
224 | self.ack_id_source_header = 'message-id' | |
225 | self.ack_id_header = 'message-id' | |
226 | return conn | |
227 | ||
228 | def test_version(self): | |
229 | self.assertEquals('1.1', self.conn.version) | |
230 | ||
231 | class TestAck12(TestAck): | |
232 | ||
233 | def create_connection_obj(self, version='1.2', vhost='/', heartbeats=(0, 0)): | |
234 | conn = stomp.StompConnection12(vhost=vhost, | |
235 | heartbeats=heartbeats) | |
236 | self.ack_id_source_header = 'ack' | |
237 | self.ack_id_header = 'id' | |
238 | return conn | |
239 | ||
240 | def test_version(self): | |
241 | self.assertEquals('1.2', self.conn.version) |
498 | 498 | self.__subscribe(destination, conn2, "other.id") |
499 | 499 | |
500 | 500 | for l in [self.listener, listener2]: |
501 | self.assertTrue(l.await(20)) | |
502 | self.assertEquals(100, len(l.messages)) | |
501 | self.assertTrue(l.await(15)) | |
502 | self.assertTrue(len(l.messages) >= 90) | |
503 | self.assertTrue(len(l.messages) <= 100) | |
503 | 504 | |
504 | 505 | finally: |
505 | 506 | conn2.disconnect() |
1 | 1 | import stomp |
2 | 2 | import base |
3 | 3 | import time |
4 | ||
5 | class TestErrorsAndCloseConnection(base.BaseTest): | |
6 | def __test_duplicate_consumer_tag_with_headers(self, destination, headers): | |
7 | self.subscribe_dest(self.conn, destination, None, | |
8 | headers = headers) | |
9 | ||
10 | self.subscribe_dest(self.conn, destination, None, | |
11 | headers = headers) | |
12 | ||
13 | self.assertTrue(self.listener.await()) | |
14 | ||
15 | self.assertEquals(1, len(self.listener.errors)) | |
16 | errorReceived = self.listener.errors[0] | |
17 | self.assertEquals("Duplicated subscription identifier", errorReceived['headers']['message']) | |
18 | self.assertEquals("A subscription identified by 'T_1' alredy exists.", errorReceived['message']) | |
19 | time.sleep(2) | |
20 | self.assertFalse(self.conn.is_connected()) | |
21 | ||
22 | ||
23 | def test_duplicate_consumer_tag_with_transient_destination(self): | |
24 | destination = "/exchange/amq.direct/duplicate-consumer-tag-test1" | |
25 | self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1}) | |
26 | ||
27 | def test_duplicate_consumer_tag_with_durable_destination(self): | |
28 | destination = "/queue/duplicate-consumer-tag-test2" | |
29 | self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1, | |
30 | 'persistent': True}) | |
31 | ||
4 | 32 | |
5 | 33 | class TestErrors(base.BaseTest): |
6 | 34 | |
63 | 91 | self.assertEquals("'" + content + "' is not a valid " + |
64 | 92 | dtype + " destination\n", |
65 | 93 | err['message']) |
66 |
83 | 83 | resp = ('MESSAGE\n' |
84 | 84 | 'destination:/exchange/amq.fanout\n' |
85 | 85 | 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n' |
86 | 'redelivered:false\n' | |
86 | 87 | 'content-type:text/plain\n' |
87 | 88 | 'content-length:6\n' |
88 | 89 | '\n' |
101 | 102 | resp = ('MESSAGE\n' |
102 | 103 | 'destination:/exchange/amq.fanout\n' |
103 | 104 | 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n' |
105 | 'redelivered:false\n' | |
104 | 106 | 'content-length:6\n' |
105 | 107 | '\n' |
106 | 108 | 'hello\n\0') |
120 | 122 | resp = ('MESSAGE\n' |
121 | 123 | 'destination:/exchange/amq.fanout\n' |
122 | 124 | 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n' |
125 | 'redelivered:false\n' | |
123 | 126 | 'content-length:'+str(len(msg))+'\n' |
124 | 127 | '\n' |
125 | 128 | + msg + '\0') |
138 | 141 | resp = ('MESSAGE\n' |
139 | 142 | 'destination:/exchange/amq.fanout\n' |
140 | 143 | 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n' |
144 | 'redelivered:false\n' | |
141 | 145 | 'content-type:text/plain\n' |
142 | 146 | 'content-length:6\n' |
143 | 147 | '\n' |
187 | 191 | 'subscription:(.*)\n' |
188 | 192 | 'destination:/topic/da9d4779\n' |
189 | 193 | 'message-id:(.*)\n' |
194 | 'redelivered:false\n' | |
190 | 195 | 'content-type:text/plain\n' |
191 | 196 | 'content-length:8\n' |
192 | 197 | '\n' |
226 | 231 | 'subscription:(.*)\n' # 14 + subscription |
227 | 232 | +resp_dest+ # 44 |
228 | 233 | 'message-id:(.*)\n' # 12 + message-id |
234 | 'redelivered:false\n' # 18 | |
229 | 235 | 'content-type:text/plain\n' # 24 |
230 | 236 | 'content-length:%i\n' # 16 + 4==len('1024') |
231 | 237 | '\n' # 1 |
232 | 238 | '(.*)$' # prefix of body+null (potentially) |
233 | 239 | % len(message) ) |
234 | headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1) | |
240 | headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1) | |
235 | 241 | |
236 | 242 | headbuf = self.recv_atleast(headlen) |
237 | 243 | self.assertFalse(len(headbuf) == 0) |
285 | 291 | 'subscription:(.*)\n' # 14 + subscription |
286 | 292 | +resp_dest+ # 44 |
287 | 293 | 'message-id:(.*)\n' # 12 + message-id |
294 | 'redelivered:false\n' # 18 | |
288 | 295 | 'content-type:text/plain\n' # 24 |
289 | 296 | 'content-length:%i\n' # 16 + 4==len('1024') |
290 | 297 | '\n' # 1 |
291 | 298 | '(.*)$' # prefix of body+null (potentially) |
292 | 299 | % len(message) ) |
293 | headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1) | |
300 | headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1) | |
294 | 301 | |
295 | 302 | headbuf = self.recv_atleast(headlen) |
296 | 303 | self.assertFalse(len(headbuf) == 0) |
0 | import unittest | |
1 | import stomp | |
2 | import pika | |
3 | import base | |
4 | import time | |
5 | ||
6 | class TestQueueProperties(base.BaseTest): | |
7 | ||
8 | def test_subscribe(self): | |
9 | destination = "/queue/queue-properties-subscribe-test" | |
10 | ||
11 | # subscribe | |
12 | self.subscribe_dest(self.conn, destination, None, | |
13 | headers={ | |
14 | 'x-message-ttl': 60000, | |
15 | 'x-expires': 70000, | |
16 | 'x-max-length': 10, | |
17 | 'x-max-length-bytes': 20000, | |
18 | 'x-dead-letter-exchange': 'dead-letter-exchange', | |
19 | 'x-dead-letter-routing-key': 'dead-letter-routing-key', | |
20 | 'x-max-priority': 6, | |
21 | }) | |
22 | ||
23 | # now try to declare the queue using pika | |
24 | # if the properties are the same we should | |
25 | # not get any error | |
26 | connection = pika.BlockingConnection(pika.ConnectionParameters( | |
27 | host='localhost')) | |
28 | channel = connection.channel() | |
29 | channel.queue_declare(queue='queue-properties-subscribe-test', | |
30 | durable=True, | |
31 | arguments={ | |
32 | 'x-message-ttl': 60000, | |
33 | 'x-expires': 70000, | |
34 | 'x-max-length': 10, | |
35 | 'x-max-length-bytes': 20000, | |
36 | 'x-dead-letter-exchange': 'dead-letter-exchange', | |
37 | 'x-dead-letter-routing-key': 'dead-letter-routing-key', | |
38 | 'x-max-priority': 6, | |
39 | }) | |
40 | ||
41 | self.conn.disconnect() | |
42 | connection.close() | |
43 | ||
44 | def test_send(self): | |
45 | destination = "/queue/queue-properties-send-test" | |
46 | ||
47 | # send | |
48 | self.conn.send(destination, "test1", | |
49 | headers={ | |
50 | 'x-message-ttl': 60000, | |
51 | 'x-expires': 70000, | |
52 | 'x-max-length': 10, | |
53 | 'x-max-length-bytes': 20000, | |
54 | 'x-dead-letter-exchange': 'dead-letter-exchange', | |
55 | 'x-dead-letter-routing-key': 'dead-letter-routing-key', | |
56 | 'x-max-priority': 6, | |
57 | }) | |
58 | ||
59 | # now try to declare the queue using pika | |
60 | # if the properties are the same we should | |
61 | # not get any error | |
62 | connection = pika.BlockingConnection(pika.ConnectionParameters( | |
63 | host='localhost')) | |
64 | channel = connection.channel() | |
65 | channel.queue_declare(queue='queue-properties-send-test', | |
66 | durable=True, | |
67 | arguments={ | |
68 | 'x-message-ttl': 60000, | |
69 | 'x-expires': 70000, | |
70 | 'x-max-length': 10, | |
71 | 'x-max-length-bytes': 20000, | |
72 | 'x-dead-letter-exchange': 'dead-letter-exchange', | |
73 | 'x-dead-letter-routing-key': 'dead-letter-routing-key', | |
74 | 'x-max-priority': 6, | |
75 | }) | |
76 | ||
77 | self.conn.disconnect() | |
78 | connection.close() |
72 | 72 | case rabbit_stomp_frame:parse(Payload, FrameState) of |
73 | 73 | {ok, Frame, <<>>} -> |
74 | 74 | recv({Sock, lists:reverse([Frame | FramesRev])}); |
75 | {ok, Frame, <<"\n">>} -> | |
76 | recv({Sock, lists:reverse([Frame | FramesRev])}); | |
75 | 77 | {ok, Frame, Rest} -> |
76 | 78 | parse(Rest, {Sock, [Frame | FramesRev]}, |
77 | 79 | rabbit_stomp_frame:initial_state(), Length); |
40 | 40 | rabbit_stomp_client:send( |
41 | 41 | Client, "LOL", [{"", ""}]) |
42 | 42 | end, |
43 | lists:seq(1, 1000)), | |
43 | lists:seq(1, 100)), | |
44 | 44 | timer:sleep(5000), |
45 | 45 | N = count_connections(), |
46 | 46 | ok. |
38 | 38 | {Key, Value} <- Headers], |
39 | 39 | #stomp_frame{body_iolist = Body} = Frame, |
40 | 40 | ?assertEqual(<<"Body Content">>, iolist_to_binary(Body)). |
41 | ||
42 | parse_simple_frame_with_null_test() -> | |
43 | Headers = [{"header1", "value1"}, {"header2", "value2"}, | |
44 | {?HEADER_CONTENT_LENGTH, "12"}], | |
45 | Content = frame_string("COMMAND", | |
46 | Headers, | |
47 | "Body\0Content"), | |
48 | {"COMMAND", Frame, _State} = parse_complete(Content), | |
49 | [?assertEqual({ok, Value}, | |
50 | rabbit_stomp_frame:header(Frame, Key)) || | |
51 | {Key, Value} <- Headers], | |
52 | #stomp_frame{body_iolist = Body} = Frame, | |
53 | ?assertEqual(<<"Body\0Content">>, iolist_to_binary(Body)). | |
54 | ||
55 | parse_large_content_frame_with_nulls_test() -> | |
56 | BodyContent = string:copies("012345678\0", 1024), | |
57 | Headers = [{"header1", "value1"}, {"header2", "value2"}, | |
58 | {?HEADER_CONTENT_LENGTH, integer_to_list(string:len(BodyContent))}], | |
59 | Content = frame_string("COMMAND", | |
60 | Headers, | |
61 | BodyContent), | |
62 | {"COMMAND", Frame, _State} = parse_complete(Content), | |
63 | [?assertEqual({ok, Value}, | |
64 | rabbit_stomp_frame:header(Frame, Key)) || | |
65 | {Key, Value} <- Headers], | |
66 | #stomp_frame{body_iolist = Body} = Frame, | |
67 | ?assertEqual(list_to_binary(BodyContent), iolist_to_binary(Body)). | |
68 | 41 | |
69 | 42 | parse_command_only_test() -> |
70 | 43 | {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("COMMAND\n\n\0"). |
165 | 138 | headers = [{"header", "val:ue"}], |
166 | 139 | body_iolist = []}). |
167 | 140 | |
168 | headers_escaping_roundtrip_test() -> | |
169 | Content = "COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0", | |
170 | {ok, Frame, _} = parse(Content), | |
141 | test_frame_serialization(Expected, TrailingLF) -> | |
142 | {ok, Frame, _} = parse(Expected), | |
171 | 143 | {ok, Val} = rabbit_stomp_frame:header(Frame, "head\r:\ner"), |
172 | 144 | ?assertEqual(":\n\r\\", Val), |
173 | Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame)), | |
174 | ?assertEqual(Content, rabbit_misc:format("~s", [Serialized])). | |
145 | Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame, TrailingLF)), | |
146 | ?assertEqual(Expected, rabbit_misc:format("~s", [Serialized])). | |
147 | ||
148 | headers_escaping_roundtrip_test() -> | |
149 | test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0\n", true). | |
150 | ||
151 | headers_escaping_roundtrip_without_trailing_lf_test() -> | |
152 | test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0", false). | |
175 | 153 | |
176 | 154 | parse(Content) -> |
177 | 155 | parse(Content, rabbit_stomp_frame:initial_state()). |
188 | 166 | frame_string(Command, Headers, BodyContent, Term) -> |
189 | 167 | HeaderString = |
190 | 168 | lists:flatten([Key ++ ":" ++ Value ++ Term || {Key, Value} <- Headers]), |
191 | Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0". | |
169 | Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0" ++ "\n". | |
192 | 170 |
0 | import unittest | |
1 | import stomp | |
2 | import base | |
3 | import time | |
4 | ||
5 | class TestRedelivered(base.BaseTest): | |
6 | ||
7 | def test_redelivered(self): | |
8 | destination = "/queue/redelivered-test" | |
9 | ||
10 | # subscribe and send message | |
11 | self.subscribe_dest(self.conn, destination, None, ack='client') | |
12 | self.conn.send(destination, "test1") | |
13 | self.assertTrue(self.listener.await(4), "initial message not received") | |
14 | self.assertEquals(1, len(self.listener.messages)) | |
15 | self.assertEquals('false', self.listener.messages[0]['headers']['redelivered']) | |
16 | ||
17 | # disconnect with no ack | |
18 | self.conn.disconnect() | |
19 | ||
20 | # now reconnect | |
21 | conn2 = self.create_connection() | |
22 | try: | |
23 | listener2 = base.WaitableListener() | |
24 | listener2.reset(1) | |
25 | conn2.set_listener('', listener2) | |
26 | self.subscribe_dest(conn2, destination, None, ack='client') | |
27 | self.assertTrue(listener2.await(), "message not received again") | |
28 | self.assertEquals(1, len(listener2.messages)) | |
29 | self.assertEquals('true', listener2.messages[0]['headers']['redelivered']) | |
30 | finally: | |
31 | conn2.disconnect() |
3 | 3 | |
4 | 4 | if __name__ == '__main__': |
5 | 5 | modules = [ |
6 | 'ack', | |
7 | 'destinations', | |
8 | 'errors', | |
9 | 'lifecycle', | |
6 | 10 | 'parsing', |
7 | 'destinations', | |
8 | 'lifecycle', | |
11 | 'queue_properties', | |
12 | 'redelivered', | |
13 | 'reliability', | |
9 | 14 | 'transactions', |
10 | 'ack', | |
11 | 'errors', | |
12 | 'reliability', | |
15 | 'x_queue_name', | |
13 | 16 | ] |
14 | 17 | test_runner.run_unittests(modules) |
15 | 18 |
6 | 6 | def add_deps_to_path(): |
7 | 7 | deps_dir = os.path.realpath(os.path.join(__file__, "..", "..", "..", "deps")) |
8 | 8 | sys.path.append(os.path.join(deps_dir, "stomppy", "stomppy")) |
9 | sys.path.append(os.path.join(deps_dir, "pika", "pika")) | |
9 | 10 | |
10 | 11 | def run_unittests(modules): |
11 | 12 | add_deps_to_path() |
0 | import unittest | |
1 | import stomp | |
2 | import pika | |
3 | import base | |
4 | import time | |
5 | ||
6 | class TestUserGeneratedQueueName(base.BaseTest): | |
7 | ||
8 | def test_exchange_dest(self): | |
9 | queueName='my-user-generated-queue-name-exchange' | |
10 | ||
11 | # subscribe | |
12 | self.subscribe_dest( | |
13 | self.conn, | |
14 | '/exchange/amq.direct/test', | |
15 | None, | |
16 | headers={ 'x-queue-name': queueName } | |
17 | ) | |
18 | ||
19 | connection = pika.BlockingConnection( | |
20 | pika.ConnectionParameters( host='localhost')) | |
21 | channel = connection.channel() | |
22 | ||
23 | # publish a message to the named queue | |
24 | channel.basic_publish( | |
25 | exchange='', | |
26 | routing_key=queueName, | |
27 | body='Hello World!') | |
28 | ||
29 | # check if we receive the message from the STOMP subscription | |
30 | self.assertTrue(self.listener.await(2), "initial message not received") | |
31 | self.assertEquals(1, len(self.listener.messages)) | |
32 | ||
33 | self.conn.disconnect() | |
34 | connection.close() | |
35 | ||
36 | def test_topic_dest(self): | |
37 | queueName='my-user-generated-queue-name-topic' | |
38 | ||
39 | # subscribe | |
40 | self.subscribe_dest( | |
41 | self.conn, | |
42 | '/topic/test', | |
43 | None, | |
44 | headers={ 'x-queue-name': queueName } | |
45 | ) | |
46 | ||
47 | connection = pika.BlockingConnection( | |
48 | pika.ConnectionParameters( host='localhost')) | |
49 | channel = connection.channel() | |
50 | ||
51 | # publish a message to the named queue | |
52 | channel.basic_publish( | |
53 | exchange='', | |
54 | routing_key=queueName, | |
55 | body='Hello World!') | |
56 | ||
57 | # check if we receive the message from the STOMP subscription | |
58 | self.assertTrue(self.listener.await(2), "initial message not received") | |
59 | self.assertEquals(1, len(self.listener.messages)) | |
60 | ||
61 | self.conn.disconnect() | |
62 | connection.close() |
173 | 173 | RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \ |
174 | 174 | stop-rabbit-on-node ${COVER_STOP} stop-node |
175 | 175 | |
176 | define compare_version | |
177 | $(shell awk 'BEGIN { | |
178 | split("$(1)", v1, "\."); | |
179 | version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4]; | |
180 | ||
181 | split("$(2)", v2, "\."); | |
182 | version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4]; | |
183 | ||
184 | if (version1 $(3) version2) { | |
185 | print "true"; | |
186 | } else { | |
187 | print "false"; | |
188 | } | |
189 | }') | |
190 | endef | |
191 | ||
192 | ERLANG_SSL_VER = $(shell erl -noshell -eval '\ | |
193 | ok = application:load(ssl), \ | |
194 | {ok, VSN} = application:get_key(ssl, vsn), \ | |
195 | io:format("~s~n", [VSN]), \ | |
196 | halt(0).') | |
197 | MINIMUM_ERLANG_SSL_VER = 5.3 | |
198 | ||
199 | ifeq ($(call compare_version,$(ERLANG_SSL_VER),$(MINIMUM_ERLANG_SSL_VER),>=),true) | |
176 | 200 | create_ssl_certs: |
177 | 201 | $(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean all |
178 | ||
202 | else | |
203 | create_ssl_certs: | |
204 | @# Skip SSL certs if Erlang is older than R16B01 (ssl 5.3). | |
205 | $(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean | |
206 | @echo "WARNING: Skip SSL certs creation; Erlang's SSL application is too" \ | |
207 | "old ($(ERLANG_SSL_VER) < $(MINIMUM_ERLANG_SSL_VER)) and SSL support" \ | |
208 | "is disabled in RabbitMQ" | |
209 | endif |
1 | 1 | FILTER:=all |
2 | 2 | COVER:=false |
3 | 3 | WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\") |
4 | STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none) | |
5 | 4 | |
6 | 5 | ## Require R15B to compile inet_proxy_dist since it requires includes |
7 | 6 | ## introduced there. |
8 | 7 | ifeq ($(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().'),true) |
8 | STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none) | |
9 | 9 | PACKAGE_ERLC_OPTS+=-Derlang_r15b_or_later |
10 | 10 | endif |
504 | 504 | wait_for_cluster_status(0, Max, Status, AllNodes, Nodes). |
505 | 505 | |
506 | 506 | wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max -> |
507 | error({cluster_status_max_tries_failed, | |
508 | [{nodes, Nodes}, | |
509 | {expected_status, Status}, | |
510 | {max_tried, Max}]}); | |
507 | erlang:error({cluster_status_max_tries_failed, | |
508 | [{nodes, Nodes}, | |
509 | {expected_status, Status}, | |
510 | {max_tried, Max}]}); | |
511 | 511 | wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) -> |
512 | 512 | case lists:all(fun (Node) -> |
513 | 513 | verify_status_equal(Node, Status, AllNodes) |
153 | 153 | wait_for_sync_status(0, Max, Status, pget(node, Cfg), Queue). |
154 | 154 | |
155 | 155 | wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max -> |
156 | error({sync_status_max_tries_failed, | |
157 | [{queue, Queue}, | |
158 | {node, Node}, | |
159 | {expected_status, Status}, | |
160 | {max_tried, Max}]}); | |
156 | erlang:error({sync_status_max_tries_failed, | |
157 | [{queue, Queue}, | |
158 | {node, Node}, | |
159 | {expected_status, Status}, | |
160 | {max_tried, Max}]}); | |
161 | 161 | wait_for_sync_status(N, Max, Status, Node, Queue) -> |
162 | 162 | Synced = length(slave_pids(Node, Queue)) =:= 1, |
163 | 163 | case Synced =:= Status of |
32 | 32 | |
33 | 33 | REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf |
34 | 34 | if "!RABBITMQ_CONF_ENV_FILE!"=="" ( |
35 | set CONF_ENV_FILE=!APPDATA!\RabbitMQ\rabbitmq-env-conf.bat | |
35 | set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat | |
36 | 36 | ) |
34 | 34 | REM ## Get configuration variables from the configure environment file |
35 | 35 | REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true |
36 | 36 | if exist "!RABBITMQ_CONF_ENV_FILE!" ( |
37 | call !RABBITMQ_CONF_ENV_FILE! | |
37 | call "!RABBITMQ_CONF_ENV_FILE!" | |
38 | 38 | ) |
39 | 39 | |
40 | 40 | REM Check for the short names here too |
83 | 83 | REM ) |
84 | 84 | REM ) |
85 | 85 | |
86 | REM DOUBLE CHECK THIS LOGIC | |
87 | 86 | if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( |
88 | if "!NODE_IP_ADDRESS!"=="" ( | |
89 | set RABBITMQ_NODE_IP_ADDRESS=auto | |
90 | ) else ( | |
87 | if not "!NODE_IP_ADDRESS!"=="" ( | |
91 | 88 | set RABBITMQ_NODE_IP_ADDRESS=!NODE_IP_ADDRESS! |
92 | 89 | ) |
93 | 90 | ) |
94 | 91 | |
95 | 92 | if "!RABBITMQ_NODE_PORT!"=="" ( |
96 | if "!NODE_PORT!"=="" ( | |
97 | set RABBITMQ_NODE_PORT=5672 | |
98 | ) else ( | |
93 | if not "!NODE_PORT!"=="" ( | |
99 | 94 | set RABBITMQ_NODE_PORT=!NODE_PORT! |
100 | 95 | ) |
96 | ) | |
97 | ||
98 | if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( | |
99 | if not "!RABBITMQ_NODE_PORT!"=="" ( | |
100 | set RABBITMQ_NODE_IP_ADDRESS=auto | |
101 | ) | |
102 | ) else ( | |
103 | if "!RABBITMQ_NODE_PORT!"=="" ( | |
104 | set RABBITMQ_NODE_PORT=5672 | |
105 | ) | |
101 | 106 | ) |
102 | 107 | |
103 | 108 | REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT} |
197 | 202 | REM [ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR} |
198 | 203 | if "!RABBITMQ_PLUGINS_DIR!"=="" ( |
199 | 204 | if "!PLUGINS_DIR!"=="" ( |
200 | set RABBITMQ_PLUGINS_DIR=!RABBITMQ_BASE!\plugins | |
205 | set RABBITMQ_PLUGINS_DIR=!RABBITMQ_HOME!\plugins | |
201 | 206 | ) else ( |
202 | 207 | set RABBITMQ_PLUGINS_DIR=!PLUGINS_DIR! |
203 | 208 | ) |
208 | 213 | REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log" |
209 | 214 | if "!RABBITMQ_LOGS!"=="" ( |
210 | 215 | if "!LOGS!"=="" ( |
211 | set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log | |
212 | ) else ( | |
213 | set LOGS=!LOGS! | |
216 | set RABBITMQ_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log | |
217 | ) else ( | |
218 | set RABBITMQ_LOGS=!LOGS! | |
214 | 219 | ) |
215 | 220 | ) |
216 | 221 | |
218 | 223 | REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log" |
219 | 224 | if "!RABBITMQ_SASL_LOGS!"=="" ( |
220 | 225 | if "!SASL_LOGS!"=="" ( |
221 | set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log | |
222 | ) else ( | |
223 | set SASL_LOGS=!SASL_LOGS! | |
226 | set RABBITMQ_SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log | |
227 | ) else ( | |
228 | set RABBITMQ_SASL_LOGS=!SASL_LOGS! | |
224 | 229 | ) |
225 | 230 | ) |
226 | 231 |
23 | 23 | -pa "${RABBITMQ_HOME}/ebin" \ |
24 | 24 | -noinput \ |
25 | 25 | -hidden \ |
26 | ${RABBITMQ_PLUGINS_ERL_ARGS} \ | |
26 | ${RABBITMQ_CTL_ERL_ARGS} \ | |
27 | 27 | -boot "${CLEAN_BOOT_FILE}" \ |
28 | 28 | -s rabbit_plugins_main \ |
29 | 29 | -enabled_plugins_file "$RABBITMQ_ENABLED_PLUGINS_FILE" \ |
98 | 98 | # there is no other way of preventing their expansion. |
99 | 99 | set -f |
100 | 100 | |
101 | RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \ | |
102 | exec ${ERL_DIR}erl \ | |
103 | -pa ${RABBITMQ_EBIN_ROOT} \ | |
104 | ${RABBITMQ_START_RABBIT} \ | |
105 | ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \ | |
106 | -boot "${SASL_BOOT_FILE}" \ | |
107 | ${RABBITMQ_CONFIG_ARG} \ | |
108 | +W w \ | |
109 | +A ${RABBITMQ_IO_THREAD_POOL_SIZE} \ | |
110 | ${RABBITMQ_SERVER_ERL_ARGS} \ | |
111 | +K true \ | |
112 | -kernel inet_default_connect_options "[{nodelay,true}]" \ | |
113 | ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \ | |
114 | ${RABBITMQ_LISTEN_ARG} \ | |
115 | -sasl errlog_type error \ | |
116 | -sasl sasl_error_logger "$SASL_ERROR_LOGGER" \ | |
117 | -rabbit error_logger "$RABBIT_ERROR_LOGGER" \ | |
118 | -rabbit sasl_error_logger "$RABBIT_SASL_ERROR_LOGGER" \ | |
119 | -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \ | |
120 | -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \ | |
121 | -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \ | |
122 | -os_mon start_cpu_sup false \ | |
123 | -os_mon start_disksup false \ | |
124 | -os_mon start_memsup false \ | |
125 | -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \ | |
126 | ${RABBITMQ_SERVER_START_ARGS} \ | |
127 | ${RABBITMQ_DIST_ARG} \ | |
128 | "$@" | |
101 | start_rabbitmq_server() { | |
102 | RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \ | |
103 | exec ${ERL_DIR}erl \ | |
104 | -pa ${RABBITMQ_EBIN_ROOT} \ | |
105 | ${RABBITMQ_START_RABBIT} \ | |
106 | ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \ | |
107 | -boot "${SASL_BOOT_FILE}" \ | |
108 | ${RABBITMQ_CONFIG_ARG} \ | |
109 | +W w \ | |
110 | +A ${RABBITMQ_IO_THREAD_POOL_SIZE} \ | |
111 | ${RABBITMQ_SERVER_ERL_ARGS} \ | |
112 | +K true \ | |
113 | -kernel inet_default_connect_options "[{nodelay,true}]" \ | |
114 | ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \ | |
115 | ${RABBITMQ_LISTEN_ARG} \ | |
116 | -sasl errlog_type error \ | |
117 | -sasl sasl_error_logger "$SASL_ERROR_LOGGER" \ | |
118 | -rabbit error_logger "$RABBIT_ERROR_LOGGER" \ | |
119 | -rabbit sasl_error_logger "$RABBIT_SASL_ERROR_LOGGER" \ | |
120 | -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \ | |
121 | -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \ | |
122 | -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \ | |
123 | -os_mon start_cpu_sup false \ | |
124 | -os_mon start_disksup false \ | |
125 | -os_mon start_memsup false \ | |
126 | -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \ | |
127 | ${RABBITMQ_SERVER_START_ARGS} \ | |
128 | ${RABBITMQ_DIST_ARG} \ | |
129 | "$@" | |
130 | } | |
131 | ||
132 | stop_rabbitmq_server() { | |
133 | RABBITMQCTL="$(dirname "$0")/rabbitmqctl" | |
134 | ||
135 | if ${RABBITMQCTL} -n ${RABBITMQ_NODENAME} status >/dev/null 2>&1; then | |
136 | ${RABBITMQCTL} -n ${RABBITMQ_NODENAME} stop | |
137 | fi | |
138 | } | |
139 | ||
140 | if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then | |
141 | # When RabbitMQ runs in the foreground but the Erlang shell is | |
142 | # disabled, we setup signal handlers to stop RabbitMQ properly. This | |
143 | # is at least useful in the case of Docker. | |
144 | ||
145 | # The Erlang VM should ignore SIGINT. | |
146 | RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} +B i" | |
147 | ||
148 | # Signal handlers. They all stop RabbitMQ properly (using | |
149 | # rabbitmqctl stop). Depending on the signal, this script will exit | |
150 | # with a non-zero error code: | |
151 | # SIGHUP SIGTERM SIGTSTP | |
152 | # They are considered a normal process termination, so the script | |
153 | # exits with 0. | |
154 | # SIGINT | |
155 | # They are considered an abnormal process termination, the script | |
156 | # exits with the job exit code. | |
157 | trap "stop_rabbitmq_server; exit 0" HUP TERM TSTP | |
158 | trap "stop_rabbitmq_server" INT | |
159 | ||
160 | start_rabbitmq_server "$@" & | |
161 | ||
162 | # Block until RabbitMQ exits or a signal is caught. | |
163 | # Waits for last command (which is start_rabbitmq_server) | |
164 | wait $! | |
165 | else | |
166 | start_rabbitmq_server "$@" | |
167 | fi |
95 | 95 | !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ |
96 | 96 | -sasl errlog_type error ^ |
97 | 97 | -sasl sasl_error_logger false ^ |
98 | -rabbit error_logger {file,\""!LOGS:\=/!"\"} ^ | |
99 | -rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ | |
98 | -rabbit error_logger {file,\""!RABBITMQ_LOGS:\=/!"\"} ^ | |
99 | -rabbit sasl_error_logger {file,\""!RABBITMQ_SASL_LOGS:\=/!"\"} ^ | |
100 | 100 | -rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^ |
101 | 101 | -rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^ |
102 | 102 | -rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^ |
145 | 145 | |
146 | 146 | set RABBITMQ_START_RABBIT= |
147 | 147 | if "!RABBITMQ_NODE_ONLY!"=="" ( |
148 | set RABBITMQ_START_RABBIT=-s rabbit boot | |
148 | set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot | |
149 | 149 | ) |
150 | 150 | |
151 | 151 | if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" ( |
160 | 160 | +W w ^ |
161 | 161 | +A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^ |
162 | 162 | +P 1048576 ^ |
163 | -kernel inet_default_connect_options "[{nodelay,true}]" ^ | |
164 | 163 | !RABBITMQ_LISTEN_ARG! ^ |
165 | 164 | !RABBITMQ_SERVER_ERL_ARGS! ^ |
165 | -kernel inet_default_connect_options "[{nodelay,true}]" ^ | |
166 | !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | |
166 | 167 | -sasl errlog_type error ^ |
167 | 168 | -sasl sasl_error_logger false ^ |
168 | -rabbit error_logger {file,\""!LOGS:\=/!"\"} ^ | |
169 | -rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ | |
169 | -rabbit error_logger {file,\""!RABBITMQ_LOGS:\=/!"\"} ^ | |
170 | -rabbit sasl_error_logger {file,\""!RABBITMQ_SASL_LOGS:\=/!"\"} ^ | |
170 | 171 | -rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^ |
171 | 172 | -rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^ |
172 | 173 | -rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^ |
176 | 177 | -os_mon start_memsup false ^ |
177 | 178 | -mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^ |
178 | 179 | !RABBITMQ_SERVER_START_ARGS! ^ |
179 | !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^ | |
180 | 180 | !RABBITMQ_DIST_ARG! ^ |
181 | 181 | !STARVAR! |
182 | 182 | |
190 | 190 | -stopaction "rabbit:stop_and_halt()." ^ |
191 | 191 | !RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^ |
192 | 192 | !CONSOLE_FLAG! ^ |
193 | -comment "A robust and scalable messaging broker" ^ | |
193 | -comment "Multi-protocol open source messaging broker" ^ | |
194 | 194 | -args "!ERLANG_SERVICE_ARGUMENTS!" > NUL |
195 | 195 | |
196 | 196 | goto END |
23 | 23 | set STAR=%* |
24 | 24 | setlocal enabledelayedexpansion |
25 | 25 | |
26 | REM Get default settings with user overrides for (RABBITMQ_)<var_name> | |
27 | REM Non-empty defaults should be set in rabbitmq-env | |
28 | call "%TDP0%\rabbitmq-env.bat" | |
29 | ||
26 | 30 | if not exist "!ERLANG_HOME!\bin\erl.exe" ( |
27 | 31 | echo. |
28 | 32 | echo ****************************** |
34 | 38 | echo. |
35 | 39 | exit /B 1 |
36 | 40 | ) |
37 | ||
38 | REM Get default settings with user overrides for (RABBITMQ_)<var_name> | |
39 | REM Non-empty defaults should be set in rabbitmq-env | |
40 | call "%TDP0%\rabbitmq-env.bat" | |
41 | 41 | |
42 | 42 | "!ERLANG_HOME!\bin\erl.exe" ^ |
43 | 43 | -pa "!TDP0!..\ebin" ^ |
26 | 26 | %% receiver it will not grant any more credit to its senders when it |
27 | 27 | %% is itself blocked - thus the only processes that need to check |
28 | 28 | %% blocked/0 are ones that read from network sockets. |
29 | %% | |
30 | %% Credit flows left to right when processes send messages down the | |
31 | %% chain, starting at the rabbit_reader, ending at the msg_store: | |
32 | %% reader -> channel -> queue_process -> msg_store. | |
33 | %% | |
34 | %% If the message store has a back log, then it will block the | |
35 | %% queue_process, which will block the channel, and finally the reader | |
36 | %% will be blocked, throttling down publishers. | |
37 | %% | |
38 | %% Once a process is unblocked, it will grant credits up the chain, | |
39 | %% possibly unblocking other processes: | |
40 | %% reader <--grant channel <--grant queue_process <--grant msg_store. | |
41 | %% | |
42 | %% Grepping the project files for `credit_flow` will reveal the places | |
43 | %% where this module is currently used, with extra comments on what's | |
44 | %% going on at each instance. Note that credit flow between mirrors | |
45 | %% synchronization has not been documented, since this doesn't affect | |
46 | %% client publishes. | |
29 | 47 | |
30 | -define(DEFAULT_CREDIT, {200, 50}). | |
48 | -define(DEFAULT_INITIAL_CREDIT, 200). | |
49 | -define(DEFAULT_MORE_CREDIT_AFTER, 50). | |
50 | ||
51 | -define(DEFAULT_CREDIT, | |
52 | case get(credit_flow_default_credit) of | |
53 | undefined -> | |
54 | Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit, | |
55 | {?DEFAULT_INITIAL_CREDIT, | |
56 | ?DEFAULT_MORE_CREDIT_AFTER}), | |
57 | put(credit_flow_default_credit, Val), | |
58 | Val; | |
59 | Val -> Val | |
60 | end). | |
31 | 61 | |
32 | 62 | -export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]). |
33 | 63 | -export([peer_down/1]). |
60 | 90 | %% We deliberately allow Var to escape from the case here |
61 | 91 | %% to be used in Expr. Any temporary var we introduced |
62 | 92 | %% would also escape, and might conflict. |
63 | case get(Key) of | |
64 | undefined -> Var = Default; | |
65 | Var -> ok | |
93 | Var = case get(Key) of | |
94 | undefined -> Default; | |
95 | V -> V | |
66 | 96 | end, |
67 | 97 | put(Key, Expr) |
68 | 98 | end). |
158 | 188 | case blocked() of |
159 | 189 | false -> case erase(credit_deferred) of |
160 | 190 | undefined -> ok; |
161 | Credits -> [To ! Msg || {To, Msg} <- Credits] | |
191 | Credits -> _ = [To ! Msg || {To, Msg} <- Credits], | |
192 | ok | |
162 | 193 | end; |
163 | 194 | true -> ok |
164 | 195 | end. |
130 | 130 | end, {[], BadPids}, ResultsNoNode). |
131 | 131 | |
132 | 132 | invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() -> |
133 | safe_invoke(Pid, FunOrMFA), %% we don't care about any error | |
133 | _ = safe_invoke(Pid, FunOrMFA), %% we don't care about any error | |
134 | 134 | ok; |
135 | 135 | invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) -> |
136 | 136 | invoke_no_result([Pid], FunOrMFA); |
138 | 138 | invoke_no_result([], _FunOrMFA) -> %% optimisation |
139 | 139 | ok; |
140 | 140 | invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation |
141 | safe_invoke(Pid, FunOrMFA), %% must not die | |
141 | _ = safe_invoke(Pid, FunOrMFA), %% must not die | |
142 | 142 | ok; |
143 | 143 | invoke_no_result(Pids, FunOrMFA) when is_list(Pids) -> |
144 | 144 | {LocalPids, Grouped} = group_pids_by_node(Pids), |
148 | 148 | RemoteNodes, delegate(self(), RemoteNodes), |
149 | 149 | {invoke, FunOrMFA, Grouped}) |
150 | 150 | end, |
151 | safe_invoke(LocalPids, FunOrMFA), %% must not die | |
151 | _ = safe_invoke(LocalPids, FunOrMFA), %% must not die | |
152 | 152 | ok. |
153 | 153 | |
154 | 154 | monitor(process, Pid) when node(Pid) =:= node() -> |
246 | 246 | {noreply, State#state{monitors = Monitors1}, hibernate}; |
247 | 247 | |
248 | 248 | handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) -> |
249 | safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA), | |
249 | _ = safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA), | |
250 | 250 | {noreply, State, hibernate}. |
251 | 251 | |
252 | 252 | handle_info({'DOWN', Ref, process, Pid, Info}, |
68 | 68 | %% primary key. |
69 | 69 | insert(PK, [], V, {P, S}) -> |
70 | 70 | %% dummy insert to force error if PK exists |
71 | gb_trees:insert(PK, {gb_sets:empty(), V}, P), | |
71 | _ = gb_trees:insert(PK, {gb_sets:empty(), V}, P), | |
72 | 72 | {P, S}; |
73 | 73 | insert(PK, SKs, V, {P, S}) -> |
74 | 74 | {gb_trees:insert(PK, {gb_sets:from_list(SKs), V}, P), |
342 | 342 | [Ref], keep, |
343 | 343 | fun ([#handle { is_read = false }]) -> |
344 | 344 | {error, not_open_for_reading}; |
345 | ([#handle{read_buffer_size_limit = 0, | |
346 | hdl = Hdl, offset = Offset} = Handle]) -> | |
347 | %% The read buffer is disabled. This is just an | |
348 | %% optimization: the clauses below can handle this case. | |
349 | case prim_file_read(Hdl, Count) of | |
350 | {ok, Data} -> {{ok, Data}, | |
351 | [Handle#handle{offset = Offset+size(Data)}]}; | |
352 | eof -> {eof, [Handle #handle { at_eof = true }]}; | |
353 | Error -> {Error, Handle} | |
354 | end; | |
345 | 355 | ([Handle = #handle{read_buffer = Buf, |
346 | 356 | read_buffer_pos = BufPos, |
347 | 357 | read_buffer_rem = BufRem, |
583 | 593 | info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity). |
584 | 594 | |
585 | 595 | clear_read_cache() -> |
586 | gen_server2:cast(?SERVER, clear_read_cache), | |
587 | clear_vhost_read_cache(rabbit_vhost:list()). | |
596 | case application:get_env(rabbit, fhc_read_buffering) of | |
597 | {ok, true} -> | |
598 | gen_server2:cast(?SERVER, clear_read_cache), | |
599 | clear_vhost_read_cache(rabbit_vhost:list()); | |
600 | _ -> %% undefined or {ok, false} | |
601 | ok | |
602 | end. | |
588 | 603 | |
589 | 604 | clear_vhost_read_cache([]) -> |
590 | 605 | ok; |
601 | 616 | %% process because the read buffer is stored in the process |
602 | 617 | %% dictionary. |
603 | 618 | Fun = fun(_, State) -> |
604 | clear_process_read_cache(), | |
619 | _ = clear_process_read_cache(), | |
605 | 620 | State |
606 | 621 | end, |
607 | 622 | [rabbit_amqqueue:run_backing_queue(Pid, rabbit_variable_queue, Fun) |
659 | 674 | end, |
660 | 675 | case Fun(Handles) of |
661 | 676 | {Result, Handles1} when is_list(Handles1) -> |
662 | lists:zipwith(fun put_handle/2, Refs, Handles1), | |
677 | _ = lists:zipwith(fun put_handle/2, Refs, Handles1), | |
663 | 678 | Result; |
664 | 679 | Result -> |
665 | 680 | Result |
801 | 816 | case gb_trees:is_empty(Tree) of |
802 | 817 | true -> Tree; |
803 | 818 | false -> {Oldest, _Ref} = gb_trees:smallest(Tree), |
804 | gen_server2:cast(?SERVER, {update, self(), Oldest}) | |
805 | end, | |
806 | Tree | |
819 | gen_server2:cast(?SERVER, {update, self(), Oldest}), | |
820 | Tree | |
821 | end | |
807 | 822 | end). |
808 | 823 | |
809 | 824 | oldest(Tree, DefaultFun) -> |
815 | 830 | |
816 | 831 | new_closed_handle(Path, Mode, Options) -> |
817 | 832 | WriteBufferSize = |
818 | case proplists:get_value(write_buffer, Options, unbuffered) of | |
819 | unbuffered -> 0; | |
820 | infinity -> infinity; | |
821 | N when is_integer(N) -> N | |
833 | case application:get_env(rabbit, fhc_write_buffering) of | |
834 | {ok, false} -> 0; | |
835 | {ok, true} -> | |
836 | case proplists:get_value(write_buffer, Options, unbuffered) of | |
837 | unbuffered -> 0; | |
838 | infinity -> infinity; | |
839 | N when is_integer(N) -> N | |
840 | end | |
822 | 841 | end, |
823 | 842 | ReadBufferSize = |
824 | case proplists:get_value(read_buffer, Options, unbuffered) of | |
825 | unbuffered -> 0; | |
826 | N2 when is_integer(N2) -> N2 | |
843 | case application:get_env(rabbit, fhc_read_buffering) of | |
844 | {ok, false} -> 0; | |
845 | {ok, true} -> | |
846 | case proplists:get_value(read_buffer, Options, unbuffered) of | |
847 | unbuffered -> 0; | |
848 | N2 when is_integer(N2) -> N2 | |
849 | end | |
827 | 850 | end, |
828 | 851 | Ref = make_ref(), |
829 | 852 | put({Ref, fhc_handle}, #handle { hdl = closed, |
1048 | 1071 | %%---------------------------------------------------------------------------- |
1049 | 1072 | |
1050 | 1073 | init([AlarmSet, AlarmClear]) -> |
1051 | file_handle_cache_stats:init(), | |
1074 | _ = file_handle_cache_stats:init(), | |
1052 | 1075 | Limit = case application:get_env(file_handles_high_watermark) of |
1053 | 1076 | {ok, Watermark} when (is_integer(Watermark) andalso |
1054 | 1077 | Watermark > 0) -> |
1187 | 1210 | State)))}; |
1188 | 1211 | |
1189 | 1212 | handle_cast(clear_read_cache, State) -> |
1190 | clear_process_read_cache(), | |
1213 | _ = clear_process_read_cache(), | |
1191 | 1214 | {noreply, State}. |
1192 | 1215 | |
1193 | 1216 | handle_info(check_counts, State) -> |
119 | 119 | handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) -> |
120 | 120 | NewForks = Forks - 1, |
121 | 121 | NewBlocked = case NewForks of |
122 | 0 -> [gen_server2:reply(From, empty) || | |
123 | From <- queue:to_list(Blocked)], | |
122 | 0 -> _ = [gen_server2:reply(From, empty) || | |
123 | From <- queue:to_list(Blocked)], | |
124 | 124 | queue:new(); |
125 | 125 | _ -> Blocked |
126 | 126 | end, |
632 | 632 | %%% The MAIN loop. |
633 | 633 | %%% --------------------------------------------------- |
634 | 634 | loop(GS2State = #gs2_state { time = hibernate, |
635 | timeout_state = undefined }) -> | |
636 | pre_hibernate(GS2State); | |
635 | timeout_state = undefined, | |
636 | queue = Queue }) -> | |
637 | case priority_queue:is_empty(Queue) of | |
638 | true -> | |
639 | pre_hibernate(GS2State); | |
640 | false -> | |
641 | process_next_msg(GS2State) | |
642 | end; | |
643 | ||
637 | 644 | loop(GS2State) -> |
638 | 645 | process_next_msg(drain(GS2State)). |
639 | 646 |
419 | 419 | broadcast_buffer, |
420 | 420 | broadcast_buffer_sz, |
421 | 421 | broadcast_timer, |
422 | txn_executor | |
422 | txn_executor, | |
423 | shutting_down | |
423 | 424 | }). |
424 | 425 | |
425 | 426 | -record(gm_group, { name, version, members }). |
550 | 551 | init([GroupName, Module, Args, TxnFun]) -> |
551 | 552 | put(process_name, {?MODULE, GroupName}), |
552 | 553 | {MegaSecs, Secs, MicroSecs} = now(), |
553 | random:seed(MegaSecs, Secs, MicroSecs), | |
554 | _ = random:seed(MegaSecs, Secs, MicroSecs), | |
554 | 555 | Self = make_member(GroupName), |
555 | 556 | gen_server2:cast(self(), join), |
556 | 557 | {ok, #state { self = Self, |
566 | 567 | broadcast_buffer = [], |
567 | 568 | broadcast_buffer_sz = 0, |
568 | 569 | broadcast_timer = undefined, |
569 | txn_executor = TxnFun }, hibernate, | |
570 | txn_executor = TxnFun, | |
571 | shutting_down = false }, hibernate, | |
570 | 572 | {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. |
571 | 573 | |
574 | ||
575 | handle_call({confirmed_broadcast, _Msg}, _From, | |
576 | State = #state { shutting_down = {true, _} }) -> | |
577 | reply(shutting_down, State); | |
572 | 578 | |
573 | 579 | handle_call({confirmed_broadcast, _Msg}, _From, |
574 | 580 | State = #state { members_state = undefined }) -> |
643 | 649 | handle_callback_result( |
644 | 650 | if_callback_success( |
645 | 651 | Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)); |
652 | ||
653 | handle_cast({broadcast, _Msg, _SizeHint}, | |
654 | State = #state { shutting_down = {true, _} }) -> | |
655 | noreply(State); | |
646 | 656 | |
647 | 657 | handle_cast({broadcast, _Msg, _SizeHint}, |
648 | 658 | State = #state { members_state = undefined }) -> |
741 | 751 | end. |
742 | 752 | |
743 | 753 | |
744 | terminate(Reason, State = #state { module = Module, | |
745 | callback_args = Args }) -> | |
746 | flush_broadcast_buffer(State), | |
754 | terminate(Reason, #state { module = Module, callback_args = Args }) -> | |
747 | 755 | Module:handle_terminate(Args, Reason). |
748 | 756 | |
749 | 757 | |
892 | 900 | State; |
893 | 901 | ensure_broadcast_timer(State = #state { broadcast_buffer = [], |
894 | 902 | broadcast_timer = TRef }) -> |
895 | erlang:cancel_timer(TRef), | |
903 | _ = erlang:cancel_timer(TRef), | |
896 | 904 | State #state { broadcast_timer = undefined }; |
897 | 905 | ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> |
898 | 906 | TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush), |
1426 | 1434 | activity_false(Result, _Activity, State) -> |
1427 | 1435 | {Result, State}. |
1428 | 1436 | |
1429 | if_callback_success(ok, True, _False, Arg, State) -> | |
1437 | if_callback_success(Result, True, False, Arg, State) -> | |
1438 | {NewResult, NewState} = maybe_stop(Result, State), | |
1439 | if_callback_success1(NewResult, True, False, Arg, NewState). | |
1440 | ||
1441 | if_callback_success1(ok, True, _False, Arg, State) -> | |
1430 | 1442 | True(ok, Arg, State); |
1431 | if_callback_success( | |
1443 | if_callback_success1( | |
1432 | 1444 | {become, Module, Args} = Result, True, _False, Arg, State) -> |
1433 | 1445 | True(Result, Arg, State #state { module = Module, |
1434 | 1446 | callback_args = Args }); |
1435 | if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) -> | |
1447 | if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) -> | |
1436 | 1448 | False(Result, Arg, State). |
1449 | ||
1450 | maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) -> | |
1451 | ShuttingDown = {true, Reason}, | |
1452 | case has_pending_messages(State) of | |
1453 | true -> {ok, State #state{ shutting_down = ShuttingDown }}; | |
1454 | false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }} | |
1455 | end; | |
1456 | maybe_stop(Result, #state{ shutting_down = false } = State) -> | |
1457 | {Result, State}; | |
1458 | maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) -> | |
1459 | case has_pending_messages(State) of | |
1460 | true -> {Result, State}; | |
1461 | false -> {{stop, Reason}, State} | |
1462 | end. | |
1463 | ||
1464 | has_pending_messages(#state{ broadcast_buffer = Buffer }) | |
1465 | when Buffer =/= [] -> | |
1466 | true; | |
1467 | has_pending_messages(#state{ members_state = MembersState }) -> | |
1468 | [] =/= [M || {_, #member{last_pub = LP, last_ack = LA} = M} | |
1469 | <- MembersState, | |
1470 | LP =/= LA]. | |
1437 | 1471 | |
1438 | 1472 | maybe_confirm(_Self, _Id, Confirms, []) -> |
1439 | 1473 | Confirms; |
1451 | 1485 | Confirms. |
1452 | 1486 | |
1453 | 1487 | purge_confirms(Confirms) -> |
1454 | [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], | |
1488 | _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], | |
1455 | 1489 | queue:new(). |
1456 | 1490 | |
1457 | 1491 |
346 | 346 | {noreply, State}; |
347 | 347 | |
348 | 348 | handle_cast({die, Reason}, State = #state{group = Group}) -> |
349 | tell_all_peers_to_die(Group, Reason), | |
349 | _ = tell_all_peers_to_die(Group, Reason), | |
350 | 350 | {stop, Reason, State}; |
351 | 351 | |
352 | 352 | handle_cast(Msg, State) -> |
363 | 363 | %% |
364 | 364 | %% Therefore if we get here we know we need to cause the entire |
365 | 365 | %% mirrored sup to shut down, not just fail over. |
366 | tell_all_peers_to_die(Group, Reason), | |
366 | _ = tell_all_peers_to_die(Group, Reason), | |
367 | 367 | {stop, Reason, State}; |
368 | 368 | |
369 | 369 | handle_info({'DOWN', _Ref, process, Pid, _Reason}, |
410 | 410 | |
411 | 411 | check_start(Group, Overall, Delegate, ChildSpec) -> |
412 | 412 | case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of |
413 | [] -> write(Group, Overall, ChildSpec), | |
413 | [] -> _ = write(Group, Overall, ChildSpec), | |
414 | 414 | start; |
415 | 415 | [S] -> #mirrored_sup_childspec{key = {Group, Id}, |
416 | 416 | mirroring_pid = Pid} = S, |
417 | 417 | case Overall of |
418 | 418 | Pid -> child(Delegate, Id); |
419 | 419 | _ -> case supervisor(Pid) of |
420 | dead -> write(Group, Overall, ChildSpec), | |
420 | dead -> _ = write(Group, Overall, ChildSpec), | |
421 | 421 | start; |
422 | 422 | Delegate0 -> child(Delegate0, Id) |
423 | 423 | end |
64 | 64 | |
65 | 65 | handle_info(timeout, #state{waiting = Waiting} = State) -> |
66 | 66 | ok = disk_log:sync(latest_log), |
67 | [gen_server:reply(From, ok) || From <- Waiting], | |
67 | _ = [gen_server:reply(From, ok) || From <- Waiting], | |
68 | 68 | {noreply, State#state{waiting = []}}; |
69 | 69 | handle_info(Message, State) -> |
70 | 70 | {stop, {unhandled_info, Message}, State}. |
50 | 50 | -spec create(term()) -> 'ok'. |
51 | 51 | |
52 | 52 | create(Name) -> |
53 | ensure_started(), | |
53 | _ = ensure_started(), | |
54 | 54 | case ets:member(pg2_fixed_table, {group, Name}) of |
55 | 55 | false -> |
56 | 56 | global:trans({{?MODULE, Name}, self()}, |
67 | 67 | -spec delete(name()) -> 'ok'. |
68 | 68 | |
69 | 69 | delete(Name) -> |
70 | ensure_started(), | |
70 | _ = ensure_started(), | |
71 | 71 | global:trans({{?MODULE, Name}, self()}, |
72 | 72 | fun() -> |
73 | 73 | gen_server:multi_call(?MODULE, {delete, Name}) |
77 | 77 | -spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}. |
78 | 78 | |
79 | 79 | join(Name, Pid) when is_pid(Pid) -> |
80 | ensure_started(), | |
80 | _ = ensure_started(), | |
81 | 81 | case ets:member(pg2_fixed_table, {group, Name}) of |
82 | 82 | false -> |
83 | 83 | {error, {no_such_group, Name}}; |
93 | 93 | -spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}. |
94 | 94 | |
95 | 95 | leave(Name, Pid) when is_pid(Pid) -> |
96 | ensure_started(), | |
96 | _ = ensure_started(), | |
97 | 97 | case ets:member(pg2_fixed_table, {group, Name}) of |
98 | 98 | false -> |
99 | 99 | {error, {no_such_group, Name}}; |
109 | 109 | -type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}. |
110 | 110 | |
111 | 111 | -spec get_members(name()) -> get_members_ret(). |
112 | ||
112 | ||
113 | 113 | get_members(Name) -> |
114 | ensure_started(), | |
114 | _ = ensure_started(), | |
115 | 115 | case ets:member(pg2_fixed_table, {group, Name}) of |
116 | 116 | true -> |
117 | 117 | group_members(Name); |
122 | 122 | -spec get_local_members(name()) -> get_members_ret(). |
123 | 123 | |
124 | 124 | get_local_members(Name) -> |
125 | ensure_started(), | |
125 | _ = ensure_started(), | |
126 | 126 | case ets:member(pg2_fixed_table, {group, Name}) of |
127 | 127 | true -> |
128 | 128 | local_group_members(Name); |
133 | 133 | -spec which_groups() -> [name()]. |
134 | 134 | |
135 | 135 | which_groups() -> |
136 | ensure_started(), | |
136 | _ = ensure_started(), | |
137 | 137 | all_groups(). |
138 | 138 | |
139 | 139 | -type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}. |
168 | 168 | |
169 | 169 | init([]) -> |
170 | 170 | Ns = nodes(), |
171 | net_kernel:monitor_nodes(true), | |
171 | _ = net_kernel:monitor_nodes(true), | |
172 | 172 | lists:foreach(fun(N) -> |
173 | 173 | {?MODULE, N} ! {new_pg2_fixed, node()}, |
174 | 174 | self() ! {nodeup, N} |
181 | 181 | | {'join', name(), pid()} |
182 | 182 | | {'leave', name(), pid()}. |
183 | 183 | |
184 | -spec handle_call(call(), _, #state{}) -> | |
184 | -spec handle_call(call(), _, #state{}) -> | |
185 | 185 | {'reply', 'ok', #state{}}. |
186 | 186 | |
187 | 187 | handle_call({create, Name}, _From, S) -> |
189 | 189 | {reply, ok, S}; |
190 | 190 | handle_call({join, Name, Pid}, _From, S) -> |
191 | 191 | case ets:member(pg2_fixed_table, {group, Name}) of |
192 | true -> join_group(Name, Pid); | |
192 | true -> _ = join_group(Name, Pid), | |
193 | ok; | |
193 | 194 | _ -> ok |
194 | 195 | end, |
195 | 196 | {reply, ok, S}; |
204 | 205 | {reply, ok, S}; |
205 | 206 | handle_call(Request, From, S) -> |
206 | 207 | error_logger:warning_msg("The pg2_fixed server received an unexpected message:\n" |
207 | "handle_call(~p, ~p, _)\n", | |
208 | "handle_call(~p, ~p, _)\n", | |
208 | 209 | [Request, From]), |
209 | 210 | {noreply, S}. |
210 | 211 | |
264 | 265 | %%% Pid is a member of group Name. |
265 | 266 | |
266 | 267 | store(List) -> |
267 | _ = [case assure_group(Name) of | |
268 | true -> | |
269 | [join_group(Name, P) || P <- Members -- group_members(Name)]; | |
270 | _ -> | |
271 | ok | |
272 | end || [Name, Members] <- List], | |
268 | _ = [assure_group(Name) | |
269 | andalso | |
270 | [join_group(Name, P) || P <- Members -- group_members(Name)] || | |
271 | [Name, Members] <- List], | |
273 | 272 | ok. |
274 | 273 | |
275 | 274 | assure_group(Name) -> |
284 | 283 | member_died(Ref) -> |
285 | 284 | [{{ref, Ref}, Pid}] = ets:lookup(pg2_fixed_table, {ref, Ref}), |
286 | 285 | Names = member_groups(Pid), |
287 | _ = [leave_group(Name, P) || | |
286 | _ = [leave_group(Name, P) || | |
288 | 287 | Name <- Names, |
289 | 288 | P <- member_in_group(Pid, Name)], |
290 | 289 | %% Kept for backward compatibility with links. Can be removed, eventually. |
293 | 292 | ok. |
294 | 293 | |
295 | 294 | join_group(Name, Pid) -> |
296 | Ref_Pid = {ref, Pid}, | |
295 | Ref_Pid = {ref, Pid}, | |
297 | 296 | try _ = ets:update_counter(pg2_fixed_table, Ref_Pid, {4, +1}) |
298 | 297 | catch _:_ -> |
299 | 298 | {RPid, Ref} = do_monitor(Pid), |
313 | 312 | Member_Name_Pid = {member, Name, Pid}, |
314 | 313 | try ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, -1, 0, 0}) of |
315 | 314 | N -> |
316 | if | |
315 | if | |
317 | 316 | N =:= 0 -> |
318 | 317 | true = ets:delete(pg2_fixed_table, {pid, Pid, Name}), |
319 | 318 | _ = [ets:delete(pg2_fixed_table, {local_member, Name, Pid}) || |
322 | 321 | true -> |
323 | 322 | ok |
324 | 323 | end, |
325 | Ref_Pid = {ref, Pid}, | |
324 | Ref_Pid = {ref, Pid}, | |
326 | 325 | case ets:update_counter(pg2_fixed_table, Ref_Pid, {4, -1}) of |
327 | 326 | 0 -> |
328 | 327 | [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_fixed_table, Ref_Pid), |
341 | 340 | [[G, group_members(G)] || G <- all_groups()]. |
342 | 341 | |
343 | 342 | group_members(Name) -> |
344 | [P || | |
343 | [P || | |
345 | 344 | [P, N] <- ets:match(pg2_fixed_table, {{member, Name, '$1'},'$2'}), |
346 | 345 | _ <- lists:seq(1, N)]. |
347 | 346 | |
348 | 347 | local_group_members(Name) -> |
349 | [P || | |
348 | [P || | |
350 | 349 | [Pid] <- ets:match(pg2_fixed_table, {{local_member, Name, '$1'}}), |
351 | 350 | P <- member_in_group(Pid, Name)]. |
352 | 351 | |
388 | 387 | %% Assume the node is still up |
389 | 388 | {Pid, erlang:monitor(process, Pid)}; |
390 | 389 | false -> |
391 | F = fun() -> | |
390 | F = fun() -> | |
392 | 391 | Ref = erlang:monitor(process, Pid), |
393 | receive | |
392 | receive | |
394 | 393 | {'DOWN', Ref, process, Pid, _Info} -> |
395 | 394 | exit(normal) |
396 | 395 | end |
15 | 15 | %% All modifications are (C) 2010-2013 GoPivotal, Inc. |
16 | 16 | |
17 | 17 | %% %CopyrightBegin% |
18 | %% | |
18 | %% | |
19 | 19 | %% Copyright Ericsson AB 1997-2009. All Rights Reserved. |
20 | %% | |
20 | %% | |
21 | 21 | %% The contents of this file are subject to the Erlang Public License, |
22 | 22 | %% Version 1.1, (the "License"); you may not use this file except in |
23 | 23 | %% compliance with the License. You should have received a copy of the |
24 | 24 | %% Erlang Public License along with this software. If not, it can be |
25 | 25 | %% retrieved online at http://www.erlang.org/. |
26 | %% | |
26 | %% | |
27 | 27 | %% Software distributed under the License is distributed on an "AS IS" |
28 | 28 | %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See |
29 | 29 | %% the License for the specific language governing rights and limitations |
30 | 30 | %% under the License. |
31 | %% | |
31 | %% | |
32 | 32 | %% %CopyrightEnd% |
33 | 33 | %% |
34 | 34 | -module(pg_local). |
70 | 70 | ensure_started(). |
71 | 71 | |
72 | 72 | join(Name, Pid) when is_pid(Pid) -> |
73 | ensure_started(), | |
73 | _ = ensure_started(), | |
74 | 74 | gen_server:cast(?MODULE, {join, Name, Pid}). |
75 | 75 | |
76 | 76 | leave(Name, Pid) when is_pid(Pid) -> |
77 | ensure_started(), | |
77 | _ = ensure_started(), | |
78 | 78 | gen_server:cast(?MODULE, {leave, Name, Pid}). |
79 | 79 | |
80 | 80 | get_members(Name) -> |
81 | ensure_started(), | |
81 | _ = ensure_started(), | |
82 | 82 | group_members(Name). |
83 | 83 | |
84 | 84 | in_group(Name, Pid) -> |
85 | ensure_started(), | |
85 | _ = ensure_started(), | |
86 | 86 | %% The join message is a cast and thus can race, but we want to |
87 | 87 | %% keep it that way to be fast in the common case. |
88 | 88 | case member_present(Name, Pid) of |
92 | 92 | end. |
93 | 93 | |
94 | 94 | sync() -> |
95 | ensure_started(), | |
95 | _ = ensure_started(), | |
96 | 96 | gen_server:call(?MODULE, sync, infinity). |
97 | 97 | |
98 | 98 | %%% |
110 | 110 | |
111 | 111 | handle_call(Request, From, S) -> |
112 | 112 | error_logger:warning_msg("The pg_local server received an unexpected message:\n" |
113 | "handle_call(~p, ~p, _)\n", | |
113 | "handle_call(~p, ~p, _)\n", | |
114 | 114 | [Request, From]), |
115 | 115 | {noreply, S}. |
116 | 116 | |
117 | 117 | handle_cast({join, Name, Pid}, S) -> |
118 | join_group(Name, Pid), | |
118 | _ = join_group(Name, Pid), | |
119 | 119 | {noreply, S}; |
120 | 120 | handle_cast({leave, Name, Pid}, S) -> |
121 | 121 | leave_group(Name, Pid), |
154 | 154 | member_died(Ref) -> |
155 | 155 | [{{ref, Ref}, Pid}] = ets:lookup(pg_local_table, {ref, Ref}), |
156 | 156 | Names = member_groups(Pid), |
157 | _ = [leave_group(Name, P) || | |
157 | _ = [leave_group(Name, P) || | |
158 | 158 | Name <- Names, |
159 | 159 | P <- member_in_group(Pid, Name)], |
160 | 160 | ok. |
161 | 161 | |
162 | 162 | join_group(Name, Pid) -> |
163 | Ref_Pid = {ref, Pid}, | |
163 | Ref_Pid = {ref, Pid}, | |
164 | 164 | try _ = ets:update_counter(pg_local_table, Ref_Pid, {3, +1}) |
165 | 165 | catch _:_ -> |
166 | 166 | Ref = erlang:monitor(process, Pid), |
178 | 178 | Member_Name_Pid = {member, Name, Pid}, |
179 | 179 | try ets:update_counter(pg_local_table, Member_Name_Pid, {2, -1}) of |
180 | 180 | N -> |
181 | if | |
181 | if | |
182 | 182 | N =:= 0 -> |
183 | 183 | true = ets:delete(pg_local_table, {pid, Pid, Name}), |
184 | 184 | true = ets:delete(pg_local_table, Member_Name_Pid); |
185 | 185 | true -> |
186 | 186 | ok |
187 | 187 | end, |
188 | Ref_Pid = {ref, Pid}, | |
188 | Ref_Pid = {ref, Pid}, | |
189 | 189 | case ets:update_counter(pg_local_table, Ref_Pid, {3, -1}) of |
190 | 190 | 0 -> |
191 | 191 | [{Ref_Pid,Ref,0}] = ets:lookup(pg_local_table, Ref_Pid), |
201 | 201 | end. |
202 | 202 | |
203 | 203 | group_members(Name) -> |
204 | [P || | |
204 | [P || | |
205 | 205 | [P, N] <- ets:match(pg_local_table, {{member, Name, '$1'},'$2'}), |
206 | 206 | _ <- lists:seq(1, N)]. |
207 | 207 |
83 | 83 | case dict:find(Item, M) of |
84 | 84 | {ok, MRef} -> Module:demonitor(MRef), |
85 | 85 | S#state{dict = dict:erase(Item, M)}; |
86 | error -> M | |
86 | error -> S | |
87 | 87 | end. |
88 | 88 | |
89 | 89 | is_monitored(Item, #state{dict = M}) -> dict:is_key(Item, M). |
28 | 28 | %%--------------------------------------------------------------------------- |
29 | 29 | %% Boot steps. |
30 | 30 | -export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). |
31 | ||
32 | %% for tests | |
33 | -export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]). | |
31 | 34 | |
32 | 35 | -rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}). |
33 | 36 | |
519 | 522 | print_banner(), |
520 | 523 | log_banner(), |
521 | 524 | warn_if_kernel_config_dubious(), |
525 | warn_if_disc_io_options_dubious(), | |
522 | 526 | run_boot_steps(), |
523 | 527 | {ok, SupPid}; |
524 | 528 | Error -> |
847 | 851 | true -> ok |
848 | 852 | end. |
849 | 853 | |
854 | warn_if_disc_io_options_dubious() -> | |
855 | %% if these values are not set, it doesn't matter since | |
856 | %% rabbit_variable_queue will pick up the values defined in the | |
857 | %% IO_BATCH_SIZE and CREDIT_DISC_BOUND constants. | |
858 | CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound, | |
859 | undefined), | |
860 | IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size, | |
861 | undefined), | |
862 | case catch validate_msg_store_io_batch_size_and_credit_disc_bound( | |
863 | CreditDiscBound, IoBatchSize) of | |
864 | ok -> ok; | |
865 | {error, {Reason, Vars}} -> | |
866 | rabbit_log:warning(Reason, Vars) | |
867 | end. | |
868 | ||
869 | validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound, | |
870 | IoBatchSize) -> | |
871 | case IoBatchSize of | |
872 | undefined -> | |
873 | ok; | |
874 | IoBatchSize when is_integer(IoBatchSize) -> | |
875 | if IoBatchSize < ?IO_BATCH_SIZE -> | |
876 | throw({error, | |
877 | {"io_batch_size of ~b lower than recommended value ~b, " | |
878 | "paging performance may worsen~n", | |
879 | [IoBatchSize, ?IO_BATCH_SIZE]}}); | |
880 | true -> | |
881 | ok | |
882 | end; | |
883 | IoBatchSize -> | |
884 | throw({error, | |
885 | {"io_batch_size should be an integer, but ~b given", | |
886 | [IoBatchSize]}}) | |
887 | end, | |
888 | ||
889 | %% CreditDiscBound = {InitialCredit, MoreCreditAfter} | |
890 | {RIC, RMCA} = ?CREDIT_DISC_BOUND, | |
891 | case CreditDiscBound of | |
892 | undefined -> | |
893 | ok; | |
894 | {IC, MCA} when is_integer(IC), is_integer(MCA) -> | |
895 | if IC < RIC; MCA < RMCA -> | |
896 | throw({error, | |
897 | {"msg_store_credit_disc_bound {~b, ~b} lower than" | |
898 | "recommended value {~b, ~b}," | |
899 | " paging performance may worsen~n", | |
900 | [IC, MCA, RIC, RMCA]}}); | |
901 | true -> | |
902 | ok | |
903 | end; | |
904 | {IC, MCA} -> | |
905 | throw({error, | |
906 | {"both msg_store_credit_disc_bound values should be integers, but ~p given", | |
907 | [{IC, MCA}]}}); | |
908 | CreditDiscBound -> | |
909 | throw({error, | |
910 | {"invalid msg_store_credit_disc_bound value given: ~p", | |
911 | [CreditDiscBound]}}) | |
912 | end, | |
913 | ||
914 | case {CreditDiscBound, IoBatchSize} of | |
915 | {undefined, undefined} -> | |
916 | ok; | |
917 | {_CDB, undefined} -> | |
918 | ok; | |
919 | {undefined, _IBS} -> | |
920 | ok; | |
921 | {{InitialCredit, _MCA}, IoBatchSize} -> | |
922 | if IoBatchSize < InitialCredit -> | |
923 | throw( | |
924 | {error, | |
925 | {"msg_store_io_batch_size ~b should be bigger than the initial " | |
926 | "credit value from msg_store_credit_disc_bound ~b," | |
927 | " paging performance may worsen~n", | |
928 | [IoBatchSize, InitialCredit]}}); | |
929 | true -> | |
930 | ok | |
931 | end | |
932 | end. | |
933 | ||
850 | 934 | home_dir() -> |
851 | 935 | case init:get_argument(home) of |
852 | 936 | {ok, [[Home]]} -> Home; |
897 | 981 | %% file_handle_cache, we spawn a separate process. |
898 | 982 | Parent = self(), |
899 | 983 | TestFun = fun() -> |
984 | ReadBuf = case application:get_env(rabbit, fhc_read_buffering) of | |
985 | {ok, true} -> "ON"; | |
986 | {ok, false} -> "OFF" | |
987 | end, | |
988 | WriteBuf = case application:get_env(rabbit, fhc_write_buffering) of | |
989 | {ok, true} -> "ON"; | |
990 | {ok, false} -> "OFF" | |
991 | end, | |
992 | rabbit_log:info( | |
993 | "FHC read buffering: ~s~n" | |
994 | "FHC write buffering: ~s~n", [ReadBuf, WriteBuf]), | |
900 | 995 | Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"), |
901 | 996 | {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []), |
902 | 997 | {ok, _} = file_handle_cache:read(Fd, 1), |
75 | 75 | %% it gives us |
76 | 76 | case try_authenticate(Mod, Username, AuthProps) of |
77 | 77 | {ok, ModNUser = #auth_user{impl = Impl}} -> |
78 | user(ModNUser, {ok, [{Mod, Impl}]}); | |
78 | user(ModNUser, {ok, [{Mod, Impl}], []}); | |
79 | 79 | Else -> |
80 | 80 | Else |
81 | 81 | end; |
97 | 97 | |
98 | 98 | try_authorize(Modules, Username) -> |
99 | 99 | lists:foldr( |
100 | fun (Module, {ok, ModsImpls}) -> | |
100 | fun (Module, {ok, ModsImpls, ModsTags}) -> | |
101 | 101 | case Module:user_login_authorization(Username) of |
102 | {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls]}; | |
102 | {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags}; | |
103 | {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls], ModsTags}; | |
103 | 104 | {error, E} -> {refused, Username, |
104 | 105 | "~s failed authorizing ~s: ~p~n", |
105 | 106 | [Module, Username, E]}; |
107 | 108 | end; |
108 | 109 | (_, {refused, F, A}) -> |
109 | 110 | {refused, Username, F, A} |
110 | end, {ok, []}, Modules). | |
111 | end, {ok, [], []}, Modules). | |
111 | 112 | |
112 | user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls}) -> | |
113 | user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls, ModZTags}) -> | |
113 | 114 | {ok, #user{username = Username, |
114 | tags = Tags, | |
115 | tags = Tags ++ ModZTags, | |
115 | 116 | authz_backends = ModZImpls}}; |
116 | 117 | user(_AuthUser, Error) -> |
117 | 118 | Error. |
872 | 872 | %% the slave receives the message direct from the channel, and the |
873 | 873 | %% other when it receives it via GM. |
874 | 874 | case Flow of |
875 | %% Here we are tracking messages sent by the rabbit_channel | |
876 | %% process. We are accessing the rabbit_channel process | |
877 | %% dictionary. | |
875 | 878 | flow -> [credit_flow:send(QPid) || QPid <- QPids], |
876 | 879 | [credit_flow:send(QPid) || QPid <- SPids]; |
877 | 880 | noflow -> ok |
91 | 91 | durable, |
92 | 92 | auto_delete, |
93 | 93 | arguments, |
94 | owner_pid | |
94 | owner_pid, | |
95 | exclusive | |
95 | 96 | ]). |
96 | 97 | |
97 | 98 | -define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]). |
661 | 662 | exclusive_consumer = Holder, |
662 | 663 | senders = Senders}) -> |
663 | 664 | State1 = State#q{senders = case pmon:is_monitored(DownPid, Senders) of |
664 | false -> Senders; | |
665 | true -> credit_flow:peer_down(DownPid), | |
666 | pmon:demonitor(DownPid, Senders) | |
665 | false -> | |
666 | Senders; | |
667 | true -> | |
668 | %% A rabbit_channel process died. Here credit_flow will take care | |
669 | %% of cleaning up the rabbit_amqqueue_process process dictionary | |
670 | %% with regards to the credit we were tracking for the channel | |
671 | %% process. See handle_cast({deliver, Deliver}, State) in this | |
672 | %% module. In that cast function we process deliveries from the | |
673 | %% channel, which means we credit_flow:ack/1 said | |
674 | %% messages. credit_flow:ack'ing messages means we are increasing | |
675 | %% a counter to know when we need to send MoreCreditAfter. Since | |
676 | %% the process died, the credit_flow flow module will clean up | |
677 | %% that for us. | |
678 | credit_flow:peer_down(DownPid), | |
679 | pmon:demonitor(DownPid, Senders) | |
667 | 680 | end}, |
668 | 681 | case rabbit_queue_consumers:erase_ch(DownPid, Consumers) of |
669 | 682 | not_found -> |
816 | 829 | ''; |
817 | 830 | i(owner_pid, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) -> |
818 | 831 | ExclusiveOwner; |
832 | i(exclusive, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) -> | |
833 | is_pid(ExclusiveOwner); | |
819 | 834 | i(policy, #q{q = Q}) -> |
820 | 835 | case rabbit_policy:name(Q) of |
821 | 836 | none -> ''; |
1109 | 1124 | flow = Flow}, SlaveWhenPublished}, |
1110 | 1125 | State = #q{senders = Senders}) -> |
1111 | 1126 | Senders1 = case Flow of |
1127 | %% In both credit_flow:ack/1 we are acking messages to the channel | |
1128 | %% process that sent us the message delivery. See handle_ch_down | |
1129 | %% for more info. | |
1112 | 1130 | flow -> credit_flow:ack(Sender), |
1113 | 1131 | case SlaveWhenPublished of |
1114 | 1132 | true -> credit_flow:ack(Sender); %% [0] |
1288 | 1306 | |
1289 | 1307 | handle_info({bump_credit, Msg}, State = #q{backing_queue = BQ, |
1290 | 1308 | backing_queue_state = BQS}) -> |
1309 | %% The message_store is granting us more credit. This means the | |
1310 | %% backing queue (for the rabbit_variable_queue case) might | |
1311 | %% continue paging messages to disk if it still needs to. We | |
1312 | %% consume credits from the message_store whenever we need to | |
1313 | %% persist a message to disk. See: | |
1314 | %% rabbit_variable_queue:msg_store_write/4. | |
1291 | 1315 | credit_flow:handle_bump_msg(Msg), |
1292 | 1316 | noreply(State#q{backing_queue_state = BQ:resume(BQS)}); |
1293 | 1317 |
91 | 91 | |
92 | 92 | user_login_authorization(Username) -> |
93 | 93 | case user_login_authentication(Username, []) of |
94 | {ok, #auth_user{impl = Impl}} -> {ok, Impl}; | |
95 | Else -> Else | |
94 | {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags}; | |
95 | Else -> Else | |
96 | 96 | end. |
97 | 97 | |
98 | 98 | internal_check_user_login(Username, Fun) -> |
28 | 28 | %% |
29 | 29 | %% Possible responses: |
30 | 30 | %% {ok, Impl} |
31 | %% User authorisation succeeded, and here's the impl field. | |
31 | %% {ok, Impl, Tags} | |
32 | %% User authorisation succeeded, and here's the impl and potential extra tags fields. | |
32 | 33 | %% {error, Error} |
33 | 34 | %% Something went wrong. Log and die. |
34 | 35 | %% {refused, Msg, Args} |
35 | 36 | %% User authorisation failed. Log and die. |
36 | 37 | -callback user_login_authorization(rabbit_types:username()) -> |
37 | 38 | {'ok', any()} | |
39 | {'ok', any(), any()} | | |
38 | 40 | {'refused', string(), [any()]} | |
39 | 41 | {'error', any()}. |
40 | 42 |
132 | 132 | gen_server2:cast(Pid, {method, Method, Content, noflow}). |
133 | 133 | |
134 | 134 | do_flow(Pid, Method, Content) -> |
135 | %% Here we are tracking messages sent by the rabbit_reader | |
136 | %% process. We are accessing the rabbit_reader process dictionary. | |
135 | 137 | credit_flow:send(Pid), |
136 | 138 | gen_server2:cast(Pid, {method, Method, Content, flow}). |
137 | 139 | |
326 | 328 | State = #ch{reader_pid = Reader, |
327 | 329 | virtual_host = VHost}) -> |
328 | 330 | case Flow of |
331 | %% We are going to process a message from the rabbit_reader | |
332 | %% process, so here we ack it. In this case we are accessing | |
333 | %% the rabbit_channel process dictionary. | |
329 | 334 | flow -> credit_flow:ack(Reader); |
330 | 335 | noflow -> ok |
331 | 336 | end, |
434 | 439 | noreply_coalesce(record_confirms(MXs, State#ch{unconfirmed = UC1})). |
435 | 440 | |
436 | 441 | handle_info({bump_credit, Msg}, State) -> |
442 | %% A rabbit_amqqueue_process is granting credit to our channel. If | |
443 | %% our channel was being blocked by this process, and no other | |
444 | %% process is blocking our channel, then this channel will be | |
445 | %% unblocked. This means that any credit that was deferred will be | |
446 | %% sent to rabbit_reader processs that might be blocked by this | |
447 | %% particular channel. | |
437 | 448 | credit_flow:handle_bump_msg(Msg), |
438 | 449 | noreply(State); |
439 | 450 | |
451 | 462 | State1 = handle_publishing_queue_down(QPid, Reason, State), |
452 | 463 | State3 = handle_consuming_queue_down(QPid, State1), |
453 | 464 | State4 = handle_delivering_queue_down(QPid, State3), |
465 | %% A rabbit_amqqueue_process has died. If our channel was being | |
466 | %% blocked by this process, and no other process is blocking our | |
467 | %% channel, then this channel will be unblocked. This means that | |
468 | %% any credit that was deferred will be sent to the rabbit_reader | |
469 | %% processs that might be blocked by this particular channel. | |
454 | 470 | credit_flow:peer_down(QPid), |
455 | 471 | #ch{queue_names = QNames, queue_monitors = QMons} = State4, |
456 | 472 | case dict:find(QPid, QNames) of |
65 | 65 | case catch DoFun(Command, Node, Args, Opts) of |
66 | 66 | ok -> |
67 | 67 | rabbit_misc:quit(0); |
68 | {ok, Result} -> | |
69 | rabbit_ctl_misc:print_cmd_result(Command, Result), | |
70 | rabbit_misc:quit(0); | |
68 | 71 | {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> %% < R15 |
69 | 72 | PrintInvalidCommandError(), |
70 | 73 | usage(UsageMod); |
104 | 107 | {badrpc_multi, Reason, Nodes} -> |
105 | 108 | print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]), |
106 | 109 | print_badrpc_diagnostics(Nodes), |
110 | rabbit_misc:quit(2); | |
111 | {refused, Username, _, _} -> | |
112 | print_error("failed to authenticate user \"~s\"", [Username]), | |
107 | 113 | rabbit_misc:quit(2); |
108 | 114 | Other -> |
109 | 115 | print_error("~p", [Other]), |
51 | 51 | delete_user, |
52 | 52 | change_password, |
53 | 53 | clear_password, |
54 | authenticate_user, | |
54 | 55 | set_user_tags, |
55 | 56 | list_users, |
56 | 57 | |
85 | 86 | close_connection, |
86 | 87 | {trace_on, [?VHOST_DEF]}, |
87 | 88 | {trace_off, [?VHOST_DEF]}, |
88 | set_vm_memory_high_watermark | |
89 | set_vm_memory_high_watermark, | |
90 | help | |
89 | 91 | ]). |
90 | 92 | |
91 | 93 | -define(GLOBAL_QUERIES, |
107 | 109 | [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs, |
108 | 110 | join_cluster, change_cluster_node_type, update_cluster_nodes, |
109 | 111 | forget_cluster_node, rename_cluster_node, cluster_status, status, |
110 | environment, eval, force_boot]). | |
112 | environment, eval, force_boot, help]). | |
111 | 113 | |
112 | 114 | -define(COMMANDS_WITH_TIMEOUT, |
113 | 115 | [list_user_permissions, list_policies, list_queues, list_exchanges, |
377 | 379 | Inform("Clearing password for user \"~s\"", [Username]), |
378 | 380 | call(Node, {rabbit_auth_backend_internal, clear_password, Args}); |
379 | 381 | |
382 | action(authenticate_user, Node, Args = [Username, _Password], _Opts, Inform) -> | |
383 | Inform("Authenticating user \"~s\"", [Username]), | |
384 | call(Node, {rabbit_access_control, check_user_pass_login, Args}); | |
385 | ||
380 | 386 | action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) -> |
381 | 387 | Tags = [list_to_atom(T) || T <- TagsStr], |
382 | 388 | Inform("Setting tags for user \"~s\" to ~p", [Username, Tags]), |
408 | 414 | end), |
409 | 415 | Inform("Setting memory threshold on ~p to ~p", [Node, Frac]), |
410 | 416 | rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]); |
417 | ||
418 | action(set_vm_memory_high_watermark, Node, ["absolute", Arg], _Opts, Inform) -> | |
419 | Limit = list_to_integer(Arg), | |
420 | Inform("Setting memory threshold on ~p to ~p bytes", [Node, Limit]), | |
421 | rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, | |
422 | [{absolute, Limit}]); | |
411 | 423 | |
412 | 424 | action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> |
413 | 425 | VHost = proplists:get_value(?VHOST_OPT, Opts), |
481 | 493 | {error, E, _} -> |
482 | 494 | {error_string, format_parse_error(E)} |
483 | 495 | end; |
496 | ||
497 | action(help, _Node, _Args, _Opts, _Inform) -> | |
498 | io:format("~s", [rabbit_ctl_usage:usage()]); | |
484 | 499 | |
485 | 500 | action(Command, Node, Args, Opts, Inform) -> |
486 | 501 | %% For backward compatibility, run commands accepting a timeout with |
661 | 676 | |
662 | 677 | become(BecomeNode) -> |
663 | 678 | error_logger:tty(false), |
664 | ok = net_kernel:stop(), | |
665 | 679 | case net_adm:ping(BecomeNode) of |
666 | 680 | pong -> exit({node_running, BecomeNode}); |
667 | pang -> io:format(" * Impersonating node: ~s...", [BecomeNode]), | |
681 | pang -> ok = net_kernel:stop(), | |
682 | io:format(" * Impersonating node: ~s...", [BecomeNode]), | |
668 | 683 | {ok, _} = rabbit_cli:start_distribution(BecomeNode), |
669 | 684 | io:format(" done~n", []), |
670 | 685 | Dir = mnesia:system_info(directory), |
0 | %% The contents of this file are subject to the Mozilla Public License | |
1 | %% Version 1.1 (the "License"); you may not use this file except in | |
2 | %% compliance with the License. You may obtain a copy of the License | |
3 | %% at http://www.mozilla.org/MPL/ | |
4 | %% | |
5 | %% Software distributed under the License is distributed on an "AS IS" | |
6 | %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See | |
7 | %% the License for the specific language governing rights and | |
8 | %% limitations under the License. | |
9 | %% | |
10 | %% The Original Code is RabbitMQ. | |
11 | %% | |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. | |
14 | %% | |
15 | ||
16 | -module(rabbit_ctl_misc). | |
17 | ||
18 | -export([print_cmd_result/2]). | |
19 | ||
20 | %%---------------------------------------------------------------------------- | |
21 | ||
22 | -ifdef(use_specs). | |
23 | ||
24 | -spec(print_cmd_result/2 :: (atom(), term()) -> string()). | |
25 | ||
26 | -endif. | |
27 | ||
28 | %%---------------------------------------------------------------------------- | |
29 | ||
30 | print_cmd_result(authenticate_user, _Result) -> io:format("Success~n"). |
39 | 39 | min_interval, |
40 | 40 | max_interval, |
41 | 41 | timer, |
42 | alarmed | |
42 | alarmed, | |
43 | enabled | |
43 | 44 | }). |
44 | 45 | |
45 | 46 | %%---------------------------------------------------------------------------- |
95 | 96 | State = #state{dir = Dir, |
96 | 97 | min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL, |
97 | 98 | max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL, |
98 | alarmed = false}, | |
99 | alarmed = false, | |
100 | enabled = true}, | |
99 | 101 | case {catch get_disk_free(Dir), |
100 | 102 | vm_memory_monitor:get_total_memory()} of |
101 | 103 | {N1, N2} when is_integer(N1), is_integer(N2) -> |
103 | 105 | Err -> |
104 | 106 | rabbit_log:info("Disabling disk free space monitoring " |
105 | 107 | "on unsupported platform:~n~p~n", [Err]), |
106 | {stop, unsupported_platform} | |
108 | {ok, State#state{enabled = false}} | |
107 | 109 | end. |
108 | 110 | |
109 | 111 | handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) -> |
110 | 112 | {reply, Limit, State}; |
113 | ||
114 | handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) -> | |
115 | rabbit_log:info("Cannot set disk free limit: " | |
116 | "disabled disk free space monitoring", []), | |
117 | {reply, ok, State}; | |
111 | 118 | |
112 | 119 | handle_call({set_disk_free_limit, Limit}, _From, State) -> |
113 | 120 | {reply, ok, set_disk_limits(State, Limit)}; |
23 | 23 | |
24 | 24 | -export([safe_handle_event/3]). |
25 | 25 | |
26 | %% extracted from error_logger_file_h. Since 18.1 the state of the | |
27 | %% error logger module changed. See: | |
28 | %% https://github.com/erlang/otp/commit/003091a1fcc749a182505ef5675c763f71eacbb0#diff-d9a19ba08f5d2b60fadfc3aa1566b324R108 | |
29 | %% github issue: | |
30 | %% https://github.com/rabbitmq/rabbitmq-server/issues/324 | |
31 | -record(st, {fd, | |
32 | filename, | |
33 | prev_handler, | |
34 | depth = unlimited}). | |
35 | ||
36 | %% extracted from error_logger_file_h. See comment above. | |
37 | get_depth() -> | |
38 | case application:get_env(kernel, error_logger_format_depth) of | |
39 | {ok, Depth} when is_integer(Depth) -> | |
40 | erlang:max(10, Depth); | |
41 | undefined -> | |
42 | unlimited | |
43 | end. | |
44 | ||
45 | -define(ERTS_NEW_LOGGER_STATE, "7.1"). | |
46 | ||
26 | 47 | %% rabbit_error_logger_file_h is a wrapper around the error_logger_file_h |
27 | 48 | %% module because the original's init/1 does not match properly |
28 | 49 | %% with the result of closing the old handler when swapping handlers. |
33 | 54 | %% lib/stdlib/src/error_logger_file_h.erl from R14B3 was copied as |
34 | 55 | %% init_file/2 and changed so that it opens the file in 'append' mode. |
35 | 56 | |
36 | %% Used only when swapping handlers in log rotation | |
57 | %% Used only when swapping handlers in log rotation, pre OTP 18.1 | |
37 | 58 | init({{File, Suffix}, []}) -> |
38 | case rabbit_file:append_file(File, Suffix) of | |
39 | ok -> file:delete(File), | |
40 | ok; | |
41 | {error, Error} -> | |
42 | rabbit_log:error("Failed to append contents of " | |
43 | "log file '~s' to '~s':~n~p~n", | |
44 | [File, [File, Suffix], Error]) | |
45 | end, | |
59 | rotate_logs(File, Suffix), | |
60 | init(File); | |
61 | %% Used only when swapping handlers in log rotation, since OTP 18.1 | |
62 | init({{File, Suffix}, ok}) -> | |
63 | rotate_logs(File, Suffix), | |
46 | 64 | init(File); |
47 | 65 | %% Used only when swapping handlers and the original handler |
48 | 66 | %% failed to terminate or was never installed |
64 | 82 | |
65 | 83 | init_file(File, {error_logger, Buf}) -> |
66 | 84 | case init_file(File, error_logger) of |
67 | {ok, {Fd, File, PrevHandler}} -> | |
68 | [handle_event(Event, {Fd, File, PrevHandler}) || | |
85 | {ok, State} -> | |
86 | [handle_event(Event, State) || | |
69 | 87 | {_, Event} <- lists:reverse(Buf)], |
70 | {ok, {Fd, File, PrevHandler}}; | |
88 | {ok, State}; | |
71 | 89 | Error -> |
72 | 90 | Error |
73 | 91 | end; |
74 | 92 | init_file(File, PrevHandler) -> |
75 | 93 | process_flag(trap_exit, true), |
76 | 94 | case file:open(File, [append]) of |
77 | {ok,Fd} -> {ok, {Fd, File, PrevHandler}}; | |
78 | Error -> Error | |
95 | {ok, Fd} -> | |
96 | FoundVer = erlang:system_info(version), | |
97 | State = | |
98 | case rabbit_misc:version_compare( | |
99 | ?ERTS_NEW_LOGGER_STATE, FoundVer, lte) of | |
100 | true -> | |
101 | #st{fd = Fd, | |
102 | filename = File, | |
103 | prev_handler = PrevHandler, | |
104 | depth = get_depth()}; | |
105 | _ -> | |
106 | {Fd, File, PrevHandler} | |
107 | end, | |
108 | {ok, State}; | |
109 | Error -> Error | |
79 | 110 | end. |
80 | 111 | |
81 | 112 | handle_event(Event, State) -> |
133 | 164 | %%---------------------------------------------------------------------- |
134 | 165 | |
135 | 166 | t(Term) -> truncate:log_event(Term, ?LOG_TRUNC). |
167 | ||
168 | rotate_logs(File, Suffix) -> | |
169 | case rabbit_file:append_file(File, Suffix) of | |
170 | ok -> file:delete(File), | |
171 | ok; | |
172 | {error, Error} -> | |
173 | rabbit_log:error("Failed to append contents of " | |
174 | "log file '~s' to '~s':~n~p~n", | |
175 | [File, [File, Suffix], Error]) | |
176 | end. |
75 | 75 | rabbit_topic_trie_edge, |
76 | 76 | rabbit_topic_trie_binding]] |
77 | 77 | end, |
78 | [begin | |
79 | Path = [{FinalNode, _} | _] = | |
80 | follow_down_get_path(X, split_topic_key(K)), | |
81 | trie_remove_binding(X, FinalNode, D, Args), | |
82 | remove_path_if_empty(X, Path) | |
78 | [case follow_down_get_path(X, split_topic_key(K)) of | |
79 | {ok, Path = [{FinalNode, _} | _]} -> | |
80 | trie_remove_binding(X, FinalNode, D, Args), | |
81 | remove_path_if_empty(X, Path); | |
82 | {error, _Node, _RestW} -> | |
83 | %% We're trying to remove a binding that no longer exists. | |
84 | %% That's unexpected, but shouldn't be a problem. | |
85 | ok | |
83 | 86 | end || #binding{source = X, key = K, destination = D, args = Args} <- Bs], |
84 | 87 | ok; |
85 | 88 | remove_bindings(none, _X, _Bs) -> |
136 | 139 | follow_down(X, fun (_, Node, _) -> Node end, root, Words). |
137 | 140 | |
138 | 141 | follow_down_get_path(X, Words) -> |
139 | {ok, Path} = | |
140 | follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end, | |
141 | [{root, none}], Words), | |
142 | Path. | |
142 | follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end, | |
143 | [{root, none}], Words). | |
143 | 144 | |
144 | 145 | follow_down(X, AccFun, Acc0, Words) -> |
145 | 146 | follow_down(X, root, AccFun, Acc0, Words). |
368 | 368 | handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) -> |
369 | 369 | noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) }); |
370 | 370 | |
371 | handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) -> | |
372 | {stop, normal, State}; | |
371 | 373 | handle_cast({delete_and_terminate, Reason}, State) -> |
372 | 374 | {stop, Reason, State}. |
373 | 375 | |
415 | 417 | ok = gen_server2:cast(CPid, Msg); |
416 | 418 | handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) -> |
417 | 419 | ok = gen_server2:cast(CPid, Msg); |
418 | handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) -> | |
419 | ok = gen_server2:cast(CPid, Msg), | |
420 | handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) -> | |
421 | %% We tell GM to stop, but we don't instruct the coordinator to | |
422 | %% stop yet. The GM will first make sure all pending messages were | |
423 | %% actually delivered. Then it calls handle_terminate/2 below so the | |
424 | %% coordinator is stopped. | |
425 | %% | |
426 | %% If we stop the coordinator right now, remote slaves could see the | |
427 | %% coordinator DOWN before delete_and_terminate was delivered to all | |
428 | %% GMs. One of those GM would be promoted as the master, and this GM | |
429 | %% would hang forever, waiting for other GMs to stop. | |
420 | 430 | {stop, {shutdown, ring_shutdown}}; |
421 | 431 | handle_msg([_CPid], _From, _Msg) -> |
422 | 432 | ok. |
423 | 433 | |
424 | handle_terminate([_CPid], _Reason) -> | |
434 | handle_terminate([CPid], Reason) -> | |
435 | ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}), | |
425 | 436 | ok. |
426 | 437 | |
427 | 438 | %% --------------------------------------------------------------------------- |
283 | 283 | {SPid, SPids}. |
284 | 284 | |
285 | 285 | initial_queue_node(Q, DefNode) -> |
286 | {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, all_nodes()), | |
286 | {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, rabbit_nodes:all_running()), | |
287 | 287 | MNode. |
288 | 288 | |
289 | suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, all_nodes()). | |
289 | suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, rabbit_nodes:all_running()). | |
290 | 290 | suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All). |
291 | 291 | |
292 | 292 | %% The third argument exists so we can pull a call to |
307 | 307 | end; |
308 | 308 | _ -> {MNode, []} |
309 | 309 | end. |
310 | ||
311 | all_nodes() -> rabbit_mnesia:cluster_nodes(running). | |
312 | 310 | |
313 | 311 | policy(Policy, Q) -> |
314 | 312 | case rabbit_policy:get(Policy, Q) of |
256 | 256 | State) -> |
257 | 257 | %% Asynchronous, non-"mandatory", deliver mode. |
258 | 258 | case Flow of |
259 | %% We are acking messages to the channel process that sent us | |
260 | %% the message delivery. See | |
261 | %% rabbit_amqqueue_process:handle_ch_down for more info. | |
259 | 262 | flow -> credit_flow:ack(Sender); |
260 | 263 | noflow -> ok |
261 | 264 | end, |
100 | 100 | ensure_mnesia_running(), |
101 | 101 | ensure_mnesia_dir(), |
102 | 102 | case is_virgin_node() of |
103 | true -> init_from_config(); | |
104 | false -> NodeType = node_type(), | |
105 | init_db_and_upgrade(cluster_nodes(all), NodeType, | |
106 | NodeType =:= ram) | |
103 | true -> | |
104 | rabbit_log:info("Database directory at ~s is empty. Initialising from scratch... ~n", | |
105 | [dir()]), | |
106 | init_from_config(); | |
107 | false -> | |
108 | NodeType = node_type(), | |
109 | init_db_and_upgrade(cluster_nodes(all), NodeType, | |
110 | NodeType =:= ram) | |
107 | 111 | end, |
108 | 112 | %% We intuitively expect the global name server to be synced when |
109 | 113 | %% Mnesia is up. In fact that's not guaranteed to be the case - |
76 | 76 | %% to callbacks |
77 | 77 | successfully_recovered, %% boolean: did we recover state? |
78 | 78 | file_size_limit, %% how big are our files allowed to get? |
79 | cref_to_msg_ids %% client ref to synced messages mapping | |
79 | cref_to_msg_ids, %% client ref to synced messages mapping | |
80 | credit_disc_bound %% See rabbit.hrl CREDIT_DISC_BOUND | |
80 | 81 | }). |
81 | 82 | |
82 | 83 | -record(client_msstate, |
90 | 91 | file_handles_ets, |
91 | 92 | file_summary_ets, |
92 | 93 | cur_file_cache_ets, |
93 | flying_ets | |
94 | flying_ets, | |
95 | credit_disc_bound | |
94 | 96 | }). |
95 | 97 | |
96 | 98 | -record(file_summary, |
133 | 135 | file_handles_ets :: ets:tid(), |
134 | 136 | file_summary_ets :: ets:tid(), |
135 | 137 | cur_file_cache_ets :: ets:tid(), |
136 | flying_ets :: ets:tid()}). | |
138 | flying_ets :: ets:tid(), | |
139 | credit_disc_bound :: {pos_integer(), pos_integer()}}). | |
137 | 140 | -type(msg_ref_delta_gen(A) :: |
138 | 141 | fun ((A) -> 'finished' | |
139 | 142 | {rabbit_types:msg_id(), non_neg_integer(), A})). |
441 | 444 | gen_server2:call( |
442 | 445 | Server, {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun}, |
443 | 446 | infinity), |
447 | CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound, | |
448 | ?CREDIT_DISC_BOUND), | |
444 | 449 | #client_msstate { server = Server, |
445 | 450 | client_ref = Ref, |
446 | 451 | file_handle_cache = dict:new(), |
451 | 456 | file_handles_ets = FileHandlesEts, |
452 | 457 | file_summary_ets = FileSummaryEts, |
453 | 458 | cur_file_cache_ets = CurFileCacheEts, |
454 | flying_ets = FlyingEts }. | |
459 | flying_ets = FlyingEts, | |
460 | credit_disc_bound = CreditDiscBound }. | |
455 | 461 | |
456 | 462 | client_terminate(CState = #client_msstate { client_ref = Ref }) -> |
457 | 463 | close_all_handles(CState), |
464 | 470 | |
465 | 471 | client_ref(#client_msstate { client_ref = Ref }) -> Ref. |
466 | 472 | |
467 | write_flow(MsgId, Msg, CState = #client_msstate { server = Server }) -> | |
468 | credit_flow:send(whereis(Server), ?CREDIT_DISC_BOUND), | |
473 | write_flow(MsgId, Msg, | |
474 | CState = #client_msstate { | |
475 | server = Server, | |
476 | credit_disc_bound = CreditDiscBound }) -> | |
477 | %% Here we are tracking messages sent by the | |
478 | %% rabbit_amqqueue_process process via the | |
479 | %% rabbit_variable_queue. We are accessing the | |
480 | %% rabbit_amqqueue_process process dictionary. | |
481 | credit_flow:send(whereis(Server), CreditDiscBound), | |
469 | 482 | client_write(MsgId, Msg, flow, CState). |
470 | 483 | |
471 | 484 | write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState). |
708 | 721 | msg_store = self() |
709 | 722 | }), |
710 | 723 | |
724 | CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound, | |
725 | ?CREDIT_DISC_BOUND), | |
726 | ||
711 | 727 | State = #msstate { dir = Dir, |
712 | 728 | index_module = IndexModule, |
713 | 729 | index_state = IndexState, |
727 | 743 | clients = Clients, |
728 | 744 | successfully_recovered = CleanShutdown, |
729 | 745 | file_size_limit = FileSizeLimit, |
730 | cref_to_msg_ids = dict:new() | |
746 | cref_to_msg_ids = dict:new(), | |
747 | credit_disc_bound = CreditDiscBound | |
731 | 748 | }, |
732 | 749 | |
733 | 750 | %% If we didn't recover the msg location index then we need to |
811 | 828 | |
812 | 829 | handle_cast({write, CRef, MsgId, Flow}, |
813 | 830 | State = #msstate { cur_file_cache_ets = CurFileCacheEts, |
814 | clients = Clients }) -> | |
831 | clients = Clients, | |
832 | credit_disc_bound = CreditDiscBound }) -> | |
815 | 833 | case Flow of |
816 | 834 | flow -> {CPid, _, _} = dict:fetch(CRef, Clients), |
817 | credit_flow:ack(CPid, ?CREDIT_DISC_BOUND); | |
835 | %% We are going to process a message sent by the | |
836 | %% rabbit_amqqueue_process. Now we are accessing the | |
837 | %% msg_store process dictionary. | |
838 | credit_flow:ack(CPid, CreditDiscBound); | |
818 | 839 | noflow -> ok |
819 | 840 | end, |
820 | 841 | true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}), |
889 | 910 | noreply(internal_sync(State)); |
890 | 911 | |
891 | 912 | handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) -> |
913 | %% similar to what happens in | |
914 | %% rabbit_amqqueue_process:handle_ch_down but with a relation of | |
915 | %% msg_store -> rabbit_amqqueue_process instead of | |
916 | %% rabbit_amqqueue_process -> rabbit_channel. | |
892 | 917 | credit_flow:peer_down(Pid), |
893 | 918 | noreply(State); |
894 | 919 |
474 | 474 | cmap(F) -> rabbit_misc:filter_exit_map(F, connections()). |
475 | 475 | |
476 | 476 | tcp_opts() -> |
477 | {ok, Opts} = application:get_env(rabbit, tcp_listen_options), | |
478 | Opts. | |
477 | {ok, ConfigOpts} = application:get_env(rabbit, tcp_listen_options), | |
478 | merge_essential_tcp_listen_options(ConfigOpts). | |
479 | ||
480 | -define(ESSENTIAL_LISTEN_OPTIONS, | |
481 | [binary, | |
482 | {active, false}, | |
483 | {packet, raw}, | |
484 | {reuseaddr, true}, | |
485 | {nodelay, true}]). | |
486 | ||
487 | merge_essential_tcp_listen_options(Opts) -> | |
488 | lists:foldl(fun ({K, _} = Opt, Acc) -> | |
489 | lists:keystore(K, 1, Acc, Opt); | |
490 | (Opt, Acc) -> | |
491 | [Opt | Acc] | |
492 | end , Opts, ?ESSENTIAL_LISTEN_OPTIONS). | |
479 | 493 | |
480 | 494 | %% inet_parse:address takes care of ip string, like "0.0.0.0" |
481 | 495 | %% inet:getaddr returns immediately for ip tuple {0,0,0,0}, |
17 | 17 | |
18 | 18 | -export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0, |
19 | 19 | is_running/2, is_process_running/2, |
20 | cluster_name/0, set_cluster_name/1, ensure_epmd/0]). | |
20 | cluster_name/0, set_cluster_name/1, ensure_epmd/0, | |
21 | all_running/0]). | |
21 | 22 | |
22 | 23 | -include_lib("kernel/include/inet.hrl"). |
23 | 24 | |
41 | 42 | -spec(cluster_name/0 :: () -> binary()). |
42 | 43 | -spec(set_cluster_name/1 :: (binary()) -> 'ok'). |
43 | 44 | -spec(ensure_epmd/0 :: () -> 'ok'). |
45 | -spec(all_running/0 :: () -> [node()]). | |
44 | 46 | |
45 | 47 | -endif. |
46 | 48 | |
214 | 216 | {Port, {exit_status, _Rc}} -> ok; |
215 | 217 | {Port, _} -> port_shutdown_loop(Port) |
216 | 218 | end. |
219 | ||
220 | all_running() -> rabbit_mnesia:cluster_nodes(running). |
26 | 26 | {enable, [?OFFLINE_DEF, ?ONLINE_DEF]}, |
27 | 27 | {disable, [?OFFLINE_DEF, ?ONLINE_DEF]}, |
28 | 28 | {set, [?OFFLINE_DEF, ?ONLINE_DEF]}, |
29 | {sync, []}]). | |
29 | {sync, []}, | |
30 | {help, []}]). | |
30 | 31 | |
31 | 32 | %%---------------------------------------------------------------------------- |
32 | 33 | |
146 | 147 | action_change(Opts, Node, Implicit, NewImplicit, State); |
147 | 148 | |
148 | 149 | action(sync, Node, [], _Opts, State) -> |
149 | sync(Node, true, State). | |
150 | sync(Node, true, State); | |
151 | ||
152 | action(help, _Node, _Args, _Opts, _State) -> | |
153 | io:format("~s", [rabbit_plugins_usage:usage()]). | |
150 | 154 | |
151 | 155 | %%---------------------------------------------------------------------------- |
152 | 156 |
112 | 112 | case os:getenv("RABBITMQ_DIST_PORT") of |
113 | 113 | false -> ok; |
114 | 114 | PortStr -> Port = list_to_integer(PortStr), |
115 | case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of | |
116 | {ok, Sock} -> gen_tcp:close(Sock); | |
117 | {error, _} -> dist_port_use_check_fail(Port, NodeHost) | |
118 | end | |
115 | dist_port_use_check_ipv4(NodeHost, Port) | |
116 | end. | |
117 | ||
118 | dist_port_use_check_ipv4(NodeHost, Port) -> | |
119 | case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of | |
120 | {ok, Sock} -> gen_tcp:close(Sock); | |
121 | {error, einval} -> dist_port_use_check_ipv6(NodeHost, Port); | |
122 | {error, _} -> dist_port_use_check_fail(Port, NodeHost) | |
123 | end. | |
124 | ||
125 | dist_port_use_check_ipv6(NodeHost, Port) -> | |
126 | case gen_tcp:listen(Port, [inet6, {reuseaddr, true}]) of | |
127 | {ok, Sock} -> gen_tcp:close(Sock); | |
128 | {error, _} -> dist_port_use_check_fail(Port, NodeHost) | |
119 | 129 | end. |
120 | 130 | |
121 | 131 | -ifdef(use_specs). |
278 | 278 | subtract_acks(TL, Prefix, |
279 | 279 | orddict:update_counter(CTag, 1, CTagCounts), QTail); |
280 | 280 | {{value, V}, QTail} -> |
281 | subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail) | |
281 | subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail); | |
282 | {empty, _} -> | |
283 | subtract_acks([], Prefix, CTagCounts, AckQ) | |
282 | 284 | end. |
283 | 285 | |
284 | 286 | possibly_unblock(Update, ChPid, State) -> |
0 | %% The contents of this file are subject to the Mozilla Public License | |
1 | %% Version 1.1 (the "License"); you may not use this file except in | |
2 | %% compliance with the License. You may obtain a copy of the License | |
3 | %% at http://www.mozilla.org/MPL/ | |
4 | %% | |
5 | %% Software distributed under the License is distributed on an "AS IS" | |
6 | %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See | |
7 | %% the License for the specific language governing rights and | |
8 | %% limitations under the License. | |
9 | %% | |
10 | %% The Original Code is RabbitMQ. | |
11 | %% | |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. | |
14 | %% | |
15 | ||
0 | 16 | -module(rabbit_queue_decorator). |
1 | 17 | |
2 | 18 | -include("rabbit.hrl"). |
25 | 41 | -export([behaviour_info/1]). |
26 | 42 | |
27 | 43 | behaviour_info(callbacks) -> |
28 | [{description, 0}, {startup, 1}, {shutdown, 1}, {policy_changed, 2}, | |
44 | [{startup, 1}, {shutdown, 1}, {policy_changed, 2}, | |
29 | 45 | {active_for, 1}, {consumer_state_changed, 3}]; |
30 | 46 | behaviour_info(_Other) -> |
31 | 47 | undefined. |
15 | 15 | |
16 | 16 | -module(rabbit_queue_index). |
17 | 17 | |
18 | -export([erase/1, init/3, recover/6, | |
18 | -export([erase/1, init/3, reset_state/1, recover/6, | |
19 | 19 | terminate/2, delete_and_terminate/1, |
20 | pre_publish/7, flush_pre_publish_cache/2, | |
20 | 21 | publish/6, deliver/2, ack/2, sync/1, needs_sync/1, flush/1, |
21 | 22 | read/3, next_segment_boundary/1, bounds/1, start/1, stop/0]). |
22 | 23 | |
126 | 127 | %% binary generation/matching with constant vs variable lengths. |
127 | 128 | |
128 | 129 | -define(REL_SEQ_BITS, 14). |
129 | -define(SEGMENT_ENTRY_COUNT, 16384). %% trunc(math:pow(2,?REL_SEQ_BITS))). | |
130 | %% calculated as trunc(math:pow(2,?REL_SEQ_BITS))). | |
131 | -define(SEGMENT_ENTRY_COUNT, 16384). | |
130 | 132 | |
131 | 133 | %% seq only is binary 01 followed by 14 bits of rel seq id |
132 | 134 | %% (range: 0 - 16383) |
175 | 177 | |
176 | 178 | -record(qistate, {dir, segments, journal_handle, dirty_count, |
177 | 179 | max_journal_entries, on_sync, on_sync_msg, |
178 | unconfirmed, unconfirmed_msg}). | |
179 | ||
180 | -record(segment, {num, path, journal_entries, unacked}). | |
180 | unconfirmed, unconfirmed_msg, | |
181 | pre_publish_cache, delivered_cache}). | |
182 | ||
183 | -record(segment, {num, path, journal_entries, | |
184 | entries_to_segment, unacked}). | |
181 | 185 | |
182 | 186 | -include("rabbit.hrl"). |
183 | 187 | |
192 | 196 | |
193 | 197 | -type(hdl() :: ('undefined' | any())). |
194 | 198 | -type(segment() :: ('undefined' | |
195 | #segment { num :: non_neg_integer(), | |
196 | path :: file:filename(), | |
197 | journal_entries :: array:array(), | |
198 | unacked :: non_neg_integer() | |
199 | #segment { num :: non_neg_integer(), | |
200 | path :: file:filename(), | |
201 | journal_entries :: array:array(), | |
202 | entries_to_segment :: array:array(), | |
203 | unacked :: non_neg_integer() | |
199 | 204 | })). |
200 | 205 | -type(seq_id() :: integer()). |
201 | 206 | -type(seg_dict() :: {dict:dict(), [segment()]}). |
208 | 213 | on_sync :: on_sync_fun(), |
209 | 214 | on_sync_msg :: on_sync_fun(), |
210 | 215 | unconfirmed :: gb_sets:set(), |
211 | unconfirmed_msg :: gb_sets:set() | |
216 | unconfirmed_msg :: gb_sets:set(), | |
217 | pre_publish_cache :: list(), | |
218 | delivered_cache :: list() | |
212 | 219 | }). |
213 | 220 | -type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). |
214 | 221 | -type(walker(A) :: fun ((A) -> 'finished' | |
216 | 223 | -type(shutdown_terms() :: [term()] | 'non_clean_shutdown'). |
217 | 224 | |
218 | 225 | -spec(erase/1 :: (rabbit_amqqueue:name()) -> 'ok'). |
226 | -spec(reset_state/1 :: (qistate()) -> qistate()). | |
219 | 227 | -spec(init/3 :: (rabbit_amqqueue:name(), |
220 | 228 | on_sync_fun(), on_sync_fun()) -> qistate()). |
221 | 229 | -spec(recover/6 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(), |
253 | 261 | |
254 | 262 | erase(Name) -> |
255 | 263 | #qistate { dir = Dir } = blank_state(Name), |
256 | case rabbit_file:is_dir(Dir) of | |
257 | true -> rabbit_file:recursive_delete([Dir]); | |
258 | false -> ok | |
259 | end. | |
264 | erase_index_dir(Dir). | |
265 | ||
266 | %% used during variable queue purge when there are no pending acks | |
267 | reset_state(#qistate{ dir = Dir, | |
268 | on_sync = OnSyncFun, | |
269 | on_sync_msg = OnSyncMsgFun, | |
270 | journal_handle = JournalHdl }) -> | |
271 | ok = case JournalHdl of | |
272 | undefined -> ok; | |
273 | _ -> file_handle_cache:close(JournalHdl) | |
274 | end, | |
275 | ok = erase_index_dir(Dir), | |
276 | blank_state_dir_funs(Dir, OnSyncFun, OnSyncMsgFun). | |
260 | 277 | |
261 | 278 | init(Name, OnSyncFun, OnSyncMsgFun) -> |
262 | 279 | State = #qistate { dir = Dir } = blank_state(Name), |
286 | 303 | {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State), |
287 | 304 | ok = rabbit_file:recursive_delete([Dir]), |
288 | 305 | State1. |
306 | ||
307 | pre_publish(MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, JournalSizeHint, | |
308 | State = #qistate{unconfirmed = UC, | |
309 | unconfirmed_msg = UCM, | |
310 | pre_publish_cache = PPC, | |
311 | delivered_cache = DC}) -> | |
312 | MsgId = case MsgOrId of | |
313 | #basic_message{id = Id} -> Id; | |
314 | Id when is_binary(Id) -> Id | |
315 | end, | |
316 | ?MSG_ID_BYTES = size(MsgId), | |
317 | ||
318 | State1 = | |
319 | case {MsgProps#message_properties.needs_confirming, MsgOrId} of | |
320 | {true, MsgId} -> UC1 = gb_sets:add_element(MsgId, UC), | |
321 | State#qistate{unconfirmed = UC1}; | |
322 | {true, _} -> UCM1 = gb_sets:add_element(MsgId, UCM), | |
323 | State#qistate{unconfirmed_msg = UCM1}; | |
324 | {false, _} -> State | |
325 | end, | |
326 | ||
327 | {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps), | |
328 | ||
329 | PPC1 = | |
330 | [[<<(case IsPersistent of | |
331 | true -> ?PUB_PERSIST_JPREFIX; | |
332 | false -> ?PUB_TRANS_JPREFIX | |
333 | end):?JPREFIX_BITS, | |
334 | SeqId:?SEQ_BITS, Bin/binary, | |
335 | (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin], PPC], | |
336 | ||
337 | DC1 = | |
338 | case IsDelivered of | |
339 | true -> | |
340 | [SeqId | DC]; | |
341 | false -> | |
342 | DC | |
343 | end, | |
344 | ||
345 | add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, | |
346 | maybe_flush_pre_publish_cache( | |
347 | JournalSizeHint, | |
348 | State1#qistate{pre_publish_cache = PPC1, | |
349 | delivered_cache = DC1})). | |
350 | ||
351 | %% pre_publish_cache is the entry with most elements when compared to | 
352 | %% delivered_cache so we only check the former in the guard. | |
353 | maybe_flush_pre_publish_cache(JournalSizeHint, | |
354 | #qistate{pre_publish_cache = PPC} = State) | |
355 | when length(PPC) >= ?SEGMENT_ENTRY_COUNT -> | |
356 | flush_pre_publish_cache(JournalSizeHint, State); | |
357 | maybe_flush_pre_publish_cache(_JournalSizeHint, State) -> | |
358 | State. | |
359 | ||
360 | flush_pre_publish_cache(JournalSizeHint, State) -> | |
361 | State1 = flush_pre_publish_cache(State), | |
362 | State2 = flush_delivered_cache(State1), | |
363 | maybe_flush_journal(JournalSizeHint, State2). | |
364 | ||
365 | flush_pre_publish_cache(#qistate{pre_publish_cache = []} = State) -> | |
366 | State; | |
367 | flush_pre_publish_cache(State = #qistate{pre_publish_cache = PPC}) -> | |
368 | {JournalHdl, State1} = get_journal_handle(State), | |
369 | file_handle_cache_stats:update(queue_index_journal_write), | |
370 | ok = file_handle_cache:append(JournalHdl, lists:reverse(PPC)), | |
371 | State1#qistate{pre_publish_cache = []}. | |
372 | ||
373 | flush_delivered_cache(#qistate{delivered_cache = []} = State) -> | |
374 | State; | |
375 | flush_delivered_cache(State = #qistate{delivered_cache = DC}) -> | |
376 | State1 = deliver(lists:reverse(DC), State), | |
377 | State1#qistate{delivered_cache = []}. | |
289 | 378 | |
290 | 379 | publish(MsgOrId, SeqId, MsgProps, IsPersistent, JournalSizeHint, |
291 | 380 | State = #qistate{unconfirmed = UC, |
427 | 516 | %% startup and shutdown |
428 | 517 | %%---------------------------------------------------------------------------- |
429 | 518 | |
519 | erase_index_dir(Dir) -> | |
520 | case rabbit_file:is_dir(Dir) of | |
521 | true -> rabbit_file:recursive_delete([Dir]); | |
522 | false -> ok | |
523 | end. | |
524 | ||
430 | 525 | blank_state(QueueName) -> |
431 | 526 | blank_state_dir( |
432 | 527 | filename:join(queues_dir(), queue_name_to_dir_name(QueueName))). |
433 | 528 | |
434 | 529 | blank_state_dir(Dir) -> |
530 | blank_state_dir_funs(Dir, | |
531 | fun (_) -> ok end, | |
532 | fun (_) -> ok end). | |
533 | ||
534 | blank_state_dir_funs(Dir, OnSyncFun, OnSyncMsgFun) -> | |
435 | 535 | {ok, MaxJournal} = |
436 | 536 | application:get_env(rabbit, queue_index_max_journal_entries), |
437 | 537 | #qistate { dir = Dir, |
439 | 539 | journal_handle = undefined, |
440 | 540 | dirty_count = 0, |
441 | 541 | max_journal_entries = MaxJournal, |
442 | on_sync = fun (_) -> ok end, | |
443 | on_sync_msg = fun (_) -> ok end, | |
542 | on_sync = OnSyncFun, | |
543 | on_sync_msg = OnSyncMsgFun, | |
444 | 544 | unconfirmed = gb_sets:new(), |
445 | unconfirmed_msg = gb_sets:new() }. | |
545 | unconfirmed_msg = gb_sets:new(), | |
546 | pre_publish_cache = [], | |
547 | delivered_cache = [] }. | |
446 | 548 | |
447 | 549 | init_clean(RecoveredCounts, State) -> |
448 | 550 | %% Load the journal. Since this is a clean recovery this (almost) |
648 | 750 | |
649 | 751 | add_to_journal(RelSeq, Action, |
650 | 752 | Segment = #segment { journal_entries = JEntries, |
753 | entries_to_segment = EToSeg, | |
651 | 754 | unacked = UnackedCount }) -> |
755 | ||
756 | {Fun, Entry} = action_to_entry(RelSeq, Action, JEntries), | |
757 | ||
758 | {JEntries1, EToSeg1} = | |
759 | case Fun of | |
760 | set -> | |
761 | {array:set(RelSeq, Entry, JEntries), | |
762 | array:set(RelSeq, entry_to_segment(RelSeq, Entry, []), | |
763 | EToSeg)}; | |
764 | reset -> | |
765 | {array:reset(RelSeq, JEntries), | |
766 | array:reset(RelSeq, EToSeg)} | |
767 | end, | |
768 | ||
652 | 769 | Segment #segment { |
653 | journal_entries = add_to_journal(RelSeq, Action, JEntries), | |
770 | journal_entries = JEntries1, | |
771 | entries_to_segment = EToSeg1, | |
654 | 772 | unacked = UnackedCount + case Action of |
655 | 773 | ?PUB -> +1; |
656 | 774 | del -> 0; |
657 | 775 | ack -> -1 |
658 | end}; | |
659 | ||
660 | add_to_journal(RelSeq, Action, JEntries) -> | |
776 | end}. | |
777 | ||
778 | action_to_entry(RelSeq, Action, JEntries) -> | |
661 | 779 | case array:get(RelSeq, JEntries) of |
662 | 780 | undefined -> |
663 | array:set(RelSeq, | |
664 | case Action of | |
665 | ?PUB -> {Action, no_del, no_ack}; | |
666 | del -> {no_pub, del, no_ack}; | |
667 | ack -> {no_pub, no_del, ack} | |
668 | end, JEntries); | |
781 | {set, | |
782 | case Action of | |
783 | ?PUB -> {Action, no_del, no_ack}; | |
784 | del -> {no_pub, del, no_ack}; | |
785 | ack -> {no_pub, no_del, ack} | |
786 | end}; | |
669 | 787 | ({Pub, no_del, no_ack}) when Action == del -> |
670 | array:set(RelSeq, {Pub, del, no_ack}, JEntries); | |
788 | {set, {Pub, del, no_ack}}; | |
671 | 789 | ({no_pub, del, no_ack}) when Action == ack -> |
672 | array:set(RelSeq, {no_pub, del, ack}, JEntries); | |
790 | {set, {no_pub, del, ack}}; | |
673 | 791 | ({?PUB, del, no_ack}) when Action == ack -> |
674 | array:reset(RelSeq, JEntries) | |
792 | {reset, none} | |
675 | 793 | end. |
676 | 794 | |
677 | 795 | maybe_flush_journal(State) -> |
702 | 820 | notify_sync(State1 #qistate { dirty_count = 0 }). |
703 | 821 | |
704 | 822 | append_journal_to_segment(#segment { journal_entries = JEntries, |
823 | entries_to_segment = EToSeg, | |
705 | 824 | path = Path } = Segment) -> |
706 | 825 | case array:sparse_size(JEntries) of |
707 | 826 | 0 -> Segment; |
708 | _ -> Seg = array:sparse_foldr( | |
709 | fun entry_to_segment/3, [], JEntries), | |
710 | file_handle_cache_stats:update(queue_index_write), | |
711 | ||
712 | {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE, | |
713 | [{write_buffer, infinity}]), | |
714 | file_handle_cache:append(Hdl, Seg), | |
715 | ok = file_handle_cache:close(Hdl), | |
716 | Segment #segment { journal_entries = array_new() } | |
827 | _ -> | |
828 | file_handle_cache_stats:update(queue_index_write), | |
829 | ||
830 | {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE, | |
831 | [{write_buffer, infinity}]), | |
832 | %% the file_handle_cache also does a list reverse, so this | |
833 | %% might not be required here, but before we were doing a | |
834 | %% sparse_foldr, a lists:reverse/1 seems to be the correct | |
835 | %% thing to do for now. | |
836 | file_handle_cache:append(Hdl, lists:reverse(array:to_list(EToSeg))), | |
837 | ok = file_handle_cache:close(Hdl), | |
838 | Segment #segment { journal_entries = array_new(), | |
839 | entries_to_segment = array_new([]) } | |
717 | 840 | end. |
718 | 841 | |
719 | 842 | get_journal_handle(State = #qistate { journal_handle = undefined, |
746 | 869 | Segments1 = |
747 | 870 | segment_map( |
748 | 871 | fun (Segment = #segment { journal_entries = JEntries, |
872 | entries_to_segment = EToSeg, | |
749 | 873 | unacked = UnackedCountInJournal }) -> |
750 | 874 | %% We want to keep ack'd entries in so that we can |
751 | 875 | %% remove them if duplicates are in the journal. The |
752 | 876 | %% counts here are purely from the segment itself. |
753 | 877 | {SegEntries, UnackedCountInSeg} = load_segment(true, Segment), |
754 | {JEntries1, UnackedCountDuplicates} = | |
755 | journal_minus_segment(JEntries, SegEntries), | |
878 | {JEntries1, EToSeg1, UnackedCountDuplicates} = | |
879 | journal_minus_segment(JEntries, EToSeg, SegEntries), | |
756 | 880 | Segment #segment { journal_entries = JEntries1, |
881 | entries_to_segment = EToSeg1, | |
757 | 882 | unacked = (UnackedCountInJournal + |
758 | 883 | UnackedCountInSeg - |
759 | 884 | UnackedCountDuplicates) } |
840 | 965 | {ok, Segment} -> Segment; |
841 | 966 | error -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION, |
842 | 967 | Path = filename:join(Dir, SegName), |
843 | #segment { num = Seg, | |
844 | path = Path, | |
845 | journal_entries = array_new(), | |
846 | unacked = 0 } | |
968 | #segment { num = Seg, | |
969 | path = Path, | |
970 | journal_entries = array_new(), | |
971 | entries_to_segment = array_new([]), | |
972 | unacked = 0 } | |
847 | 973 | end. |
848 | 974 | |
849 | 975 | segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) -> |
883 | 1009 | segments_new() -> |
884 | 1010 | {dict:new(), []}. |
885 | 1011 | |
886 | entry_to_segment(_RelSeq, {?PUB, del, ack}, Buf) -> | |
887 | Buf; | |
888 | entry_to_segment(RelSeq, {Pub, Del, Ack}, Buf) -> | |
1012 | entry_to_segment(_RelSeq, {?PUB, del, ack}, Initial) -> | |
1013 | Initial; | |
1014 | entry_to_segment(RelSeq, {Pub, Del, Ack}, Initial) -> | |
889 | 1015 | %% NB: we are assembling the segment in reverse order here, so |
890 | 1016 | %% del/ack comes first. |
891 | 1017 | Buf1 = case {Del, Ack} of |
892 | 1018 | {no_del, no_ack} -> |
893 | Buf; | |
1019 | Initial; | |
894 | 1020 | _ -> |
895 | 1021 | Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, |
896 | 1022 | RelSeq:?REL_SEQ_BITS>>, |
897 | 1023 | case {Del, Ack} of |
898 | {del, ack} -> [[Binary, Binary] | Buf]; | |
899 | _ -> [Binary | Buf] | |
1024 | {del, ack} -> [[Binary, Binary] | Initial]; | |
1025 | _ -> [Binary | Initial] | |
900 | 1026 | end |
901 | 1027 | end, |
902 | 1028 | case Pub of |
985 | 1111 | end. |
986 | 1112 | |
987 | 1113 | array_new() -> |
988 | array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]). | |
1114 | array_new(undefined). | |
1115 | ||
1116 | array_new(Default) -> | |
1117 | array:new([{default, Default}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]). | |
989 | 1118 | |
990 | 1119 | bool_to_int(true ) -> 1; |
991 | 1120 | bool_to_int(false) -> 0. |
1031 | 1160 | %% Remove from the journal entries for a segment, items that are |
1032 | 1161 | %% duplicates of entries found in the segment itself. Used on start up |
1033 | 1162 | %% to clean up the journal. |
1034 | journal_minus_segment(JEntries, SegEntries) -> | |
1163 | %% | |
1164 | %% We need to update the entries_to_segment since they are just a | |
1165 | %% cache of what's on the journal. | |
1166 | journal_minus_segment(JEntries, EToSeg, SegEntries) -> | |
1035 | 1167 | array:sparse_foldl( |
1036 | fun (RelSeq, JObj, {JEntriesOut, UnackedRemoved}) -> | |
1168 | fun (RelSeq, JObj, {JEntriesOut, EToSegOut, UnackedRemoved}) -> | |
1037 | 1169 | SegEntry = array:get(RelSeq, SegEntries), |
1038 | 1170 | {Obj, UnackedRemovedDelta} = |
1039 | 1171 | journal_minus_segment1(JObj, SegEntry), |
1040 | {case Obj of | |
1041 | keep -> JEntriesOut; | |
1042 | undefined -> array:reset(RelSeq, JEntriesOut); | |
1043 | _ -> array:set(RelSeq, Obj, JEntriesOut) | |
1044 | end, | |
1045 | UnackedRemoved + UnackedRemovedDelta} | |
1046 | end, {JEntries, 0}, JEntries). | |
1172 | {JEntriesOut1, EToSegOut1} = | |
1173 | case Obj of | |
1174 | keep -> | |
1175 | {JEntriesOut, EToSegOut}; | |
1176 | undefined -> | |
1177 | {array:reset(RelSeq, JEntriesOut), | |
1178 | array:reset(RelSeq, EToSegOut)}; | |
1179 | _ -> | |
1180 | {array:set(RelSeq, Obj, JEntriesOut), | |
1181 | array:set(RelSeq, entry_to_segment(RelSeq, Obj, []), | |
1182 | EToSegOut)} | |
1183 | end, | |
1184 | {JEntriesOut1, EToSegOut1, UnackedRemoved + UnackedRemovedDelta} | |
1185 | end, {JEntries, EToSeg, 0}, JEntries). | |
1047 | 1186 | |
1048 | 1187 | %% Here, the result is a tuple with the first element containing the |
1049 | 1188 | %% item we are adding to or modifying in the (initially fresh) journal |
477 | 477 | handle_other(emit_stats, State) -> |
478 | 478 | emit_stats(State); |
479 | 479 | handle_other({bump_credit, Msg}, State) -> |
480 | %% Here we are receiving credit by some channel process. | |
480 | 481 | credit_flow:handle_bump_msg(Msg), |
481 | 482 | control_throttle(State); |
482 | 483 | handle_other(Other, State) -> |
1042 | 1043 | validate_negotiated_integer_value(Field, Min, ClientValue) -> |
1043 | 1044 | ServerValue = get_env(Field), |
1044 | 1045 | if ClientValue /= 0 andalso ClientValue < Min -> |
1045 | fail_negotiation(Field, min, ServerValue, ClientValue); | |
1046 | fail_negotiation(Field, min, Min, ClientValue); | |
1046 | 1047 | ServerValue /= 0 andalso (ClientValue =:= 0 orelse |
1047 | 1048 | ClientValue > ServerValue) -> |
1048 | 1049 | fail_negotiation(Field, max, ServerValue, ClientValue); |
188 | 188 | |
189 | 189 | check(Fun) -> |
190 | 190 | case [Error || {Tab, TabDef} <- definitions(), |
191 | case Fun(Tab, TabDef) of | |
192 | ok -> Error = none, false; | |
193 | {error, Error} -> true | |
191 | begin | |
192 | {Ret, Error} = case Fun(Tab, TabDef) of | |
193 | ok -> {false, none}; | |
194 | {error, E} -> {true, E} | |
195 | end, | |
196 | Ret | |
194 | 197 | end] of |
195 | 198 | [] -> ok; |
196 | 199 | Errors -> {error, Errors} |
272 | 272 | msg_store_clients, |
273 | 273 | durable, |
274 | 274 | transient_threshold, |
275 | qi_embed_msgs_below, | |
275 | 276 | |
276 | 277 | len, %% w/o unacked |
277 | 278 | bytes, %% w/o unacked |
296 | 297 | %% Unlike the other counters these two do not feed into |
297 | 298 | %% #rates{} and get reset |
298 | 299 | disk_read_count, |
299 | disk_write_count | |
300 | disk_write_count, | |
301 | ||
302 | io_batch_size | |
300 | 303 | }). |
301 | 304 | |
302 | 305 | -record(rates, { in, out, ack_in, ack_out, timestamp }). |
319 | 322 | end_seq_id %% end_seq_id is exclusive |
320 | 323 | }). |
321 | 324 | |
322 | %% When we discover that we should write some indices to disk for some | |
323 | %% betas, the IO_BATCH_SIZE sets the number of betas that we must be | |
324 | %% due to write indices for before we do any work at all. | |
325 | -define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND | |
326 | 325 | -define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2 |
327 | 326 | -define(PERSISTENT_MSG_STORE, msg_store_persistent). |
328 | 327 | -define(TRANSIENT_MSG_STORE, msg_store_transient). |
372 | 371 | {any(), binary()}}, |
373 | 372 | durable :: boolean(), |
374 | 373 | transient_threshold :: non_neg_integer(), |
374 | qi_embed_msgs_below :: non_neg_integer(), | |
375 | 375 | |
376 | 376 | len :: non_neg_integer(), |
377 | 377 | bytes :: non_neg_integer(), |
395 | 395 | ack_out_counter :: non_neg_integer(), |
396 | 396 | ack_in_counter :: non_neg_integer(), |
397 | 397 | disk_read_count :: non_neg_integer(), |
398 | disk_write_count :: non_neg_integer() }). | |
398 | disk_write_count :: non_neg_integer(), | |
399 | ||
400 | io_batch_size :: pos_integer()}). | |
399 | 401 | %% Duplicated from rabbit_backing_queue |
400 | 402 | -spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). |
401 | 403 | |
530 | 532 | %% the only difference between purge and delete is that delete also |
531 | 533 | %% needs to delete everything that's been delivered and not ack'd. |
532 | 534 | delete_and_terminate(_Reason, State) -> |
533 | %% TODO: there is no need to interact with qi at all - which we do | |
534 | %% as part of 'purge' and 'purge_pending_ack', other than | |
535 | %% deleting it. | |
536 | {_PurgeCount, State1} = purge(State), | |
537 | State2 = #vqstate { index_state = IndexState, | |
538 | msg_store_clients = {MSCStateP, MSCStateT} } = | |
539 | purge_pending_ack(false, State1), | |
540 | IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState), | |
535 | %% Normally when we purge messages we interact with the qi by | 
536 | %% issuing delivers and acks for every purged message. In this case | 
537 | %% we don't need to do that, so we just delete the qi. | |
538 | State1 = purge_and_index_reset(State), | |
539 | State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } = | |
540 | purge_pending_ack_delete_and_terminate(State1), | |
541 | 541 | case MSCStateP of |
542 | 542 | undefined -> ok; |
543 | 543 | _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP) |
544 | 544 | end, |
545 | 545 | rabbit_msg_store:client_delete_and_terminate(MSCStateT), |
546 | a(State2 #vqstate { index_state = IndexState1, | |
547 | msg_store_clients = undefined }). | |
546 | a(State2 #vqstate { msg_store_clients = undefined }). | |
548 | 547 | |
549 | 548 | delete_crashed(#amqqueue{name = QName}) -> |
550 | 549 | ok = rabbit_queue_index:erase(QName). |
551 | 550 | |
552 | purge(State = #vqstate { q4 = Q4, | |
553 | len = Len }) -> | |
554 | %% TODO: when there are no pending acks, which is a common case, | |
555 | %% we could simply wipe the qi instead of issuing delivers and | |
556 | %% acks for all the messages. | |
557 | State1 = remove_queue_entries(Q4, State), | |
558 | ||
559 | State2 = #vqstate { q1 = Q1 } = | |
560 | purge_betas_and_deltas(State1 #vqstate { q4 = ?QUEUE:new() }), | |
561 | ||
562 | State3 = remove_queue_entries(Q1, State2), | |
563 | ||
564 | {Len, a(State3 #vqstate { q1 = ?QUEUE:new() })}. | |
551 | purge(State = #vqstate { len = Len }) -> | |
552 | case is_pending_ack_empty(State) of | |
553 | true -> | |
554 | {Len, purge_and_index_reset(State)}; | |
555 | false -> | |
556 | {Len, purge_when_pending_acks(State)} | |
557 | end. | |
565 | 558 | |
566 | 559 | purge_acks(State) -> a(purge_pending_ack(false, State)). |
567 | 560 | |
569 | 562 | MsgProps = #message_properties { needs_confirming = NeedsConfirming }, |
570 | 563 | IsDelivered, _ChPid, _Flow, |
571 | 564 | State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, |
572 | next_seq_id = SeqId, | |
573 | in_counter = InCount, | |
574 | durable = IsDurable, | |
575 | unconfirmed = UC }) -> | |
565 | qi_embed_msgs_below = IndexMaxSize, | |
566 | next_seq_id = SeqId, | |
567 | in_counter = InCount, | |
568 | durable = IsDurable, | |
569 | unconfirmed = UC }) -> | |
576 | 570 | IsPersistent1 = IsDurable andalso IsPersistent, |
577 | MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps), | |
571 | MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize), | |
578 | 572 | {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), |
579 | 573 | State2 = case ?QUEUE:is_empty(Q3) of |
580 | 574 | false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) }; |
593 | 587 | MsgProps = #message_properties { |
594 | 588 | needs_confirming = NeedsConfirming }, |
595 | 589 | _ChPid, _Flow, |
596 | State = #vqstate { next_seq_id = SeqId, | |
597 | out_counter = OutCount, | |
598 | in_counter = InCount, | |
599 | durable = IsDurable, | |
600 | unconfirmed = UC }) -> | |
590 | State = #vqstate { qi_embed_msgs_below = IndexMaxSize, | |
591 | next_seq_id = SeqId, | |
592 | out_counter = OutCount, | |
593 | in_counter = InCount, | |
594 | durable = IsDurable, | |
595 | unconfirmed = UC }) -> | |
601 | 596 | IsPersistent1 = IsDurable andalso IsPersistent, |
602 | MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps), | |
597 | MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize), | |
603 | 598 | {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), |
604 | 599 | State2 = record_pending_ack(m(MsgStatus1), State1), |
605 | 600 | UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), |
620 | 615 | end. |
621 | 616 | |
622 | 617 | dropwhile(Pred, State) -> |
623 | case queue_out(State) of | |
624 | {empty, State1} -> | |
625 | {undefined, a(State1)}; | |
626 | {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} -> | |
627 | case Pred(MsgProps) of | |
628 | true -> {_, State2} = remove(false, MsgStatus, State1), | |
629 | dropwhile(Pred, State2); | |
630 | false -> {MsgProps, a(in_r(MsgStatus, State1))} | |
631 | end | |
632 | end. | |
618 | {MsgProps, State1} = | |
619 | remove_by_predicate(Pred, State), | |
620 | {MsgProps, a(State1)}. | |
633 | 621 | |
634 | 622 | fetchwhile(Pred, Fun, Acc, State) -> |
635 | case queue_out(State) of | |
636 | {empty, State1} -> | |
637 | {undefined, Acc, a(State1)}; | |
638 | {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} -> | |
639 | case Pred(MsgProps) of | |
640 | true -> {Msg, State2} = read_msg(MsgStatus, State1), | |
641 | {AckTag, State3} = remove(true, MsgStatus, State2), | |
642 | fetchwhile(Pred, Fun, Fun(Msg, AckTag, Acc), State3); | |
643 | false -> {MsgProps, Acc, a(in_r(MsgStatus, State1))} | |
644 | end | |
645 | end. | |
623 | {MsgProps, Acc1, State1} = | |
624 | fetch_by_predicate(Pred, Fun, Acc, State), | |
625 | {MsgProps, Acc1, a(State1)}. | |
646 | 626 | |
647 | 627 | fetch(AckRequired, State) -> |
648 | 628 | case queue_out(State) of |
700 | 680 | {accumulate_ack(MsgStatus, Acc), State3} |
701 | 681 | end, {accumulate_ack_init(), State}, AckTags), |
702 | 682 | IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState), |
703 | [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) | |
704 | || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], | |
683 | remove_msgs_by_id(MsgIdsByStore, MSCState), | |
705 | 684 | {lists:reverse(AllMsgIds), |
706 | 685 | a(State1 #vqstate { index_state = IndexState1, |
707 | 686 | ack_out_counter = AckOutCount + length(AckTags) })}. |
749 | 728 | |
750 | 729 | is_empty(State) -> 0 == len(State). |
751 | 730 | |
752 | depth(State = #vqstate { ram_pending_ack = RPA, | |
753 | disk_pending_ack = DPA, | |
754 | qi_pending_ack = QPA }) -> | |
755 | len(State) + gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA). | |
731 | depth(State) -> | |
732 | len(State) + count_pending_acks(State). | |
756 | 733 | |
757 | 734 | set_ram_duration_target( |
758 | 735 | DurationTarget, State = #vqstate { |
967 | 944 | gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). |
968 | 945 | |
969 | 946 | msg_status(IsPersistent, IsDelivered, SeqId, |
970 | Msg = #basic_message {id = MsgId}, MsgProps) -> | |
947 | Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) -> | |
971 | 948 | #msg_status{seq_id = SeqId, |
972 | 949 | msg_id = MsgId, |
973 | 950 | msg = Msg, |
975 | 952 | is_delivered = IsDelivered, |
976 | 953 | msg_in_store = false, |
977 | 954 | index_on_disk = false, |
978 | persist_to = determine_persist_to(Msg, MsgProps), | |
955 | persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize), | |
979 | 956 | msg_props = MsgProps}. |
980 | 957 | |
981 | 958 | beta_msg_status({Msg = #basic_message{id = MsgId}, |
1067 | 1044 | maybe_write_delivered(true, SeqId, IndexState) -> |
1068 | 1045 | rabbit_queue_index:deliver([SeqId], IndexState). |
1069 | 1046 | |
1070 | betas_from_index_entries(List, TransientThreshold, RPA, DPA, QPA, IndexState) -> | |
1047 | betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) -> | |
1071 | 1048 | {Filtered, Delivers, Acks, RamReadyCount, RamBytes} = |
1072 | 1049 | lists:foldr( |
1073 | 1050 | fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M, |
1079 | 1056 | false -> MsgStatus = m(beta_msg_status(M)), |
1080 | 1057 | HaveMsg = msg_in_ram(MsgStatus), |
1081 | 1058 | Size = msg_size(MsgStatus), |
1082 | case (gb_trees:is_defined(SeqId, RPA) orelse | |
1083 | gb_trees:is_defined(SeqId, DPA) orelse | |
1084 | gb_trees:is_defined(SeqId, QPA)) of | |
1059 | case is_msg_in_pending_acks(SeqId, State) of | |
1085 | 1060 | false -> {?QUEUE:in_r(MsgStatus, Filtered1), |
1086 | 1061 | Delivers1, Acks1, |
1087 | 1062 | RRC + one_if(HaveMsg), |
1090 | 1065 | end |
1091 | 1066 | end |
1092 | 1067 | end, {?QUEUE:new(), [], [], 0, 0}, List), |
1093 | {Filtered, RamReadyCount, RamBytes, | |
1094 | rabbit_queue_index:ack( | |
1095 | Acks, rabbit_queue_index:deliver(Delivers, IndexState))}. | |
1068 | {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State)}. | |
1096 | 1069 | %% [0] We don't increase RamBytes here, even though it pertains to |
1097 | 1070 | %% unacked messages too, since if HaveMsg then the message must have |
1098 | 1071 | %% been stored in the QI, thus the message must have been in |
1099 | 1072 | %% qi_pending_ack, thus it must already have been in RAM. |
1073 | ||
1074 | is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA, | |
1075 | disk_pending_ack = DPA, | |
1076 | qi_pending_ack = QPA }) -> | |
1077 | (gb_trees:is_defined(SeqId, RPA) orelse | |
1078 | gb_trees:is_defined(SeqId, DPA) orelse | |
1079 | gb_trees:is_defined(SeqId, QPA)). | |
1100 | 1080 | |
1101 | 1081 | expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) -> |
1102 | 1082 | d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 }); |
1134 | 1114 | end_seq_id = NextSeqId }) |
1135 | 1115 | end, |
1136 | 1116 | Now = now(), |
1117 | IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size, | |
1118 | ?IO_BATCH_SIZE), | |
1119 | ||
1120 | {ok, IndexMaxSize} = application:get_env( | |
1121 | rabbit, queue_index_embed_msgs_below), | |
1137 | 1122 | State = #vqstate { |
1138 | 1123 | q1 = ?QUEUE:new(), |
1139 | 1124 | q2 = ?QUEUE:new(), |
1148 | 1133 | msg_store_clients = {PersistentClient, TransientClient}, |
1149 | 1134 | durable = IsDurable, |
1150 | 1135 | transient_threshold = NextSeqId, |
1136 | qi_embed_msgs_below = IndexMaxSize, | |
1151 | 1137 | |
1152 | 1138 | len = DeltaCount1, |
1153 | 1139 | persistent_count = DeltaCount1, |
1170 | 1156 | ack_out_counter = 0, |
1171 | 1157 | ack_in_counter = 0, |
1172 | 1158 | disk_read_count = 0, |
1173 | disk_write_count = 0 }, | |
1159 | disk_write_count = 0, | |
1160 | ||
1161 | io_batch_size = IoBatchSize }, | |
1174 | 1162 | a(maybe_deltas_to_betas(State)). |
1175 | 1163 | |
1176 | 1164 | blank_rates(Now) -> |
1267 | 1255 | |
1268 | 1256 | msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined. |
1269 | 1257 | |
1270 | remove(AckRequired, MsgStatus = #msg_status { | |
1271 | seq_id = SeqId, | |
1272 | msg_id = MsgId, | |
1273 | is_persistent = IsPersistent, | |
1274 | is_delivered = IsDelivered, | |
1275 | msg_in_store = MsgInStore, | |
1276 | index_on_disk = IndexOnDisk }, | |
1258 | %% first param: AckRequired | |
1259 | remove(true, MsgStatus = #msg_status { | |
1260 | seq_id = SeqId, | |
1261 | is_delivered = IsDelivered, | |
1262 | index_on_disk = IndexOnDisk }, | |
1263 | State = #vqstate {out_counter = OutCount, | |
1264 | index_state = IndexState}) -> | |
1265 | %% Mark it delivered if necessary | |
1266 | IndexState1 = maybe_write_delivered( | |
1267 | IndexOnDisk andalso not IsDelivered, | |
1268 | SeqId, IndexState), | |
1269 | ||
1270 | State1 = record_pending_ack( | |
1271 | MsgStatus #msg_status { | |
1272 | is_delivered = true }, State), | |
1273 | ||
1274 | State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, State1), | |
1275 | ||
1276 | {SeqId, maybe_update_rates( | |
1277 | State2 #vqstate {out_counter = OutCount + 1, | |
1278 | index_state = IndexState1})}; | |
1279 | ||
1280 | %% This function body has the same behaviour as remove_queue_entries/3 | |
1281 | %% but instead of removing messages based on a ?QUEUE, this removes | |
1282 | %% just one message, the one referenced by the MsgStatus provided. | |
1283 | remove(false, MsgStatus = #msg_status { | |
1284 | seq_id = SeqId, | |
1285 | msg_id = MsgId, | |
1286 | is_persistent = IsPersistent, | |
1287 | is_delivered = IsDelivered, | |
1288 | msg_in_store = MsgInStore, | |
1289 | index_on_disk = IndexOnDisk }, | |
1277 | 1290 | State = #vqstate {out_counter = OutCount, |
1278 | 1291 | index_state = IndexState, |
1279 | 1292 | msg_store_clients = MSCState}) -> |
1280 | %% 1. Mark it delivered if necessary | |
1293 | %% Mark it delivered if necessary | |
1281 | 1294 | IndexState1 = maybe_write_delivered( |
1282 | 1295 | IndexOnDisk andalso not IsDelivered, |
1283 | 1296 | SeqId, IndexState), |
1284 | 1297 | |
1285 | %% 2. Remove from msg_store and queue index, if necessary | |
1286 | Rem = fun () -> | |
1287 | ok = msg_store_remove(MSCState, IsPersistent, [MsgId]) | |
1288 | end, | |
1289 | Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, | |
1290 | IndexState2 = case {AckRequired, MsgInStore, IndexOnDisk} of | |
1291 | {false, true, false} -> Rem(), IndexState1; | |
1292 | {false, true, true} -> Rem(), Ack(); | |
1293 | {false, false, true} -> Ack(); | |
1294 | _ -> IndexState1 | |
1295 | end, | |
1296 | ||
1297 | %% 3. If an ack is required, add something sensible to PA | |
1298 | {AckTag, State1} = case AckRequired of | |
1299 | true -> StateN = record_pending_ack( | |
1300 | MsgStatus #msg_status { | |
1301 | is_delivered = true }, State), | |
1302 | {SeqId, StateN}; | |
1303 | false -> {undefined, State} | |
1304 | end, | |
1305 | State2 = case AckRequired of | |
1306 | false -> stats({-1, 0}, {MsgStatus, none}, State1); | |
1307 | true -> stats({-1, 1}, {MsgStatus, MsgStatus}, State1) | |
1308 | end, | |
1309 | {AckTag, maybe_update_rates( | |
1310 | State2 #vqstate {out_counter = OutCount + 1, | |
1311 | index_state = IndexState2})}. | |
1312 | ||
1313 | purge_betas_and_deltas(State = #vqstate { q3 = Q3 }) -> | |
1298 | %% Remove from msg_store and queue index, if necessary | |
1299 | case MsgInStore of | |
1300 | true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]); | |
1301 | false -> ok | |
1302 | end, | |
1303 | ||
1304 | IndexState2 = | |
1305 | case IndexOnDisk of | |
1306 | true -> rabbit_queue_index:ack([SeqId], IndexState1); | |
1307 | false -> IndexState1 | |
1308 | end, | |
1309 | ||
1310 | State1 = stats({-1, 0}, {MsgStatus, none}, State), | |
1311 | ||
1312 | {undefined, maybe_update_rates( | |
1313 | State1 #vqstate {out_counter = OutCount + 1, | |
1314 | index_state = IndexState2})}. | |
1315 | ||
1316 | %% This function exists as a way to improve dropwhile/2 | |
1317 | %% performance. The idea of having this function is to optimise calls | |
1318 | %% to rabbit_queue_index by batching delivers and acks, instead of | |
1319 | %% sending them one by one. | |
1320 | %% | |
1321 | %% Instead of removing every message as they are popped from the | |
1322 | %% queue, it first accumulates them and then removes them by calling | |
1323 | %% remove_queue_entries/3, since the behaviour of | |
1324 | %% remove_queue_entries/3 when used with | |
1325 | %% process_delivers_and_acks_fun(deliver_and_ack) is the same as | |
1326 | %% calling remove(false, MsgStatus, State). | |
1327 | %% | |
1328 | %% remove/3 also updates the out_counter in every call, but here we do | |
1329 | %% it just once at the end. | |
1330 | remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) -> | |
1331 | {MsgProps, QAcc, State1} = | |
1332 | collect_by_predicate(Pred, ?QUEUE:new(), State), | |
1333 | State2 = | |
1334 | remove_queue_entries( | |
1335 | QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1), | |
1336 | %% maybe_update_rates/1 is called in remove/2 for every | |
1337 | %% message. Since we update out_counter only once, we call it just | |
1338 | %% there. | |
1339 | {MsgProps, maybe_update_rates( | |
1340 | State2 #vqstate { | |
1341 | out_counter = OutCount + ?QUEUE:len(QAcc)})}. | |
1342 | ||
1343 | %% This function exists as a way to improve fetchwhile/4 | |
1344 | %% performance. The idea of having this function is to optimise calls | |
1345 | %% to rabbit_queue_index by batching delivers, instead of sending them | |
1346 | %% one by one. | |
1347 | %% | |
1348 | %% Fun is the function passed to fetchwhile/4 that's | |
1349 | %% applied to every fetched message and used to build the fetchwhile/4 | |
1350 | %% result accumulator FetchAcc. | |
1351 | fetch_by_predicate(Pred, Fun, FetchAcc, | |
1352 | State = #vqstate { | |
1353 | index_state = IndexState, | |
1354 | out_counter = OutCount}) -> | |
1355 | {MsgProps, QAcc, State1} = | |
1356 | collect_by_predicate(Pred, ?QUEUE:new(), State), | |
1357 | ||
1358 | {Delivers, FetchAcc1, State2} = | |
1359 | process_queue_entries(QAcc, Fun, FetchAcc, State1), | |
1360 | ||
1361 | IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState), | |
1362 | ||
1363 | {MsgProps, FetchAcc1, maybe_update_rates( | |
1364 | State2 #vqstate { | |
1365 | index_state = IndexState1, | |
1366 | out_counter = OutCount + ?QUEUE:len(QAcc)})}. | |
1367 | ||
1368 | %% We try to do here the same as what remove(true, State) does but | |
1369 | %% processing several messages at the same time. The idea is to | |
1370 | %% optimize rabbit_queue_index:deliver/2 calls by sending a list of | |
1371 | %% SeqIds instead of one by one, thus process_queue_entries1 will | |
1372 | %% accumulate the required deliveries, will record_pending_ack for | |
1373 | %% each message, and will update stats, like remove/2 does. | |
1374 | %% | |
1375 | %% For the meaning of Fun and FetchAcc arguments see | |
1376 | %% fetch_by_predicate/4 above. | |
1377 | process_queue_entries(Q, Fun, FetchAcc, State) -> | |
1378 | ?QUEUE:foldl(fun (MsgStatus, Acc) -> | |
1379 | process_queue_entries1(MsgStatus, Fun, Acc) | |
1380 | end, | |
1381 | {[], FetchAcc, State}, Q). | |
1382 | ||
1383 | process_queue_entries1( | |
1384 | #msg_status { seq_id = SeqId, is_delivered = IsDelivered, | |
1385 | index_on_disk = IndexOnDisk} = MsgStatus, | |
1386 | Fun, | |
1387 | {Delivers, FetchAcc, State}) -> | |
1388 | {Msg, State1} = read_msg(MsgStatus, State), | |
1389 | State2 = record_pending_ack( | |
1390 | MsgStatus #msg_status { | |
1391 | is_delivered = true }, State1), | |
1392 | {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), | |
1393 | Fun(Msg, SeqId, FetchAcc), | |
1394 | stats({-1, 1}, {MsgStatus, MsgStatus}, State2)}. | |
1395 | ||
1396 | collect_by_predicate(Pred, QAcc, State) -> | |
1397 | case queue_out(State) of | |
1398 | {empty, State1} -> | |
1399 | {undefined, QAcc, State1}; | |
1400 | {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} -> | |
1401 | case Pred(MsgProps) of | |
1402 | true -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc), | |
1403 | State1); | |
1404 | false -> {MsgProps, QAcc, in_r(MsgStatus, State1)} | |
1405 | end | |
1406 | end. | |
1407 | ||
1408 | %%---------------------------------------------------------------------------- | |
1409 | %% Helpers for Public API purge/1 function | |
1410 | %%---------------------------------------------------------------------------- | |
1411 | ||
1412 | %% The difference between purge_when_pending_acks/1 | |
1413 | %% vs. purge_and_index_reset/1 is that the first one issues a deliver | |
1414 | %% and an ack to the queue index for every message that's being | |
1415 | %% removed, while the latter just resets the queue index state. | |
1416 | purge_when_pending_acks(State) -> | |
1417 | State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State), | |
1418 | a(State1). | |
1419 | ||
1420 | purge_and_index_reset(State) -> | |
1421 | State1 = purge1(process_delivers_and_acks_fun(none), State), | |
1422 | a(reset_qi_state(State1)). | |
1423 | ||
1424 | %% This function removes messages from each of {q1, q2, q3, q4}. | |
1425 | %% | |
1426 | %% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3 | |
1427 | %% are specially handled by purge_betas_and_deltas/2. | |
1428 | %% | |
1429 | %% purge_betas_and_deltas/2 loads messages from the queue index, | |
1430 | %% filling up q3 and in some cases moving messages from q2 to q3 while | |
1431 | %% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The | |
1432 | %% messages loaded into q3 are removed by calling | |
1433 | %% remove_queue_entries/3 until there are no more messages to be read | |
1434 | %% from the queue index. Messages are read in batches from the queue | |
1435 | %% index. | |
1436 | purge1(AfterFun, State = #vqstate { q4 = Q4}) -> | |
1437 | State1 = remove_queue_entries(Q4, AfterFun, State), | |
1438 | ||
1439 | State2 = #vqstate {q1 = Q1} = | |
1440 | purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}), | |
1441 | ||
1442 | State3 = remove_queue_entries(Q1, AfterFun, State2), | |
1443 | ||
1444 | a(State3#vqstate{q1 = ?QUEUE:new()}). | |
1445 | ||
1446 | reset_qi_state(State = #vqstate{index_state = IndexState}) -> | |
1447 | State#vqstate{index_state = | |
1448 | rabbit_queue_index:reset_state(IndexState)}. | |
1449 | ||
1450 | is_pending_ack_empty(State) -> | |
1451 | count_pending_acks(State) =:= 0. | |
1452 | ||
1453 | count_pending_acks(#vqstate { ram_pending_ack = RPA, | |
1454 | disk_pending_ack = DPA, | |
1455 | qi_pending_ack = QPA }) -> | |
1456 | gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA). | |
1457 | ||
1458 | purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { q3 = Q3 }) -> | |
1314 | 1459 | case ?QUEUE:is_empty(Q3) of |
1315 | 1460 | true -> State; |
1316 | false -> State1 = remove_queue_entries(Q3, State), | |
1317 | purge_betas_and_deltas(maybe_deltas_to_betas( | |
1461 | false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State), | |
1462 | purge_betas_and_deltas(DelsAndAcksFun, | |
1463 | maybe_deltas_to_betas( | |
1464 | DelsAndAcksFun, | |
1318 | 1465 | State1#vqstate{q3 = ?QUEUE:new()})) |
1319 | 1466 | end. |
1320 | 1467 | |
1321 | remove_queue_entries(Q, State = #vqstate{index_state = IndexState, | |
1322 | msg_store_clients = MSCState}) -> | |
1468 | remove_queue_entries(Q, DelsAndAcksFun, | |
1469 | State = #vqstate{msg_store_clients = MSCState}) -> | |
1323 | 1470 | {MsgIdsByStore, Delivers, Acks, State1} = |
1324 | 1471 | ?QUEUE:foldl(fun remove_queue_entries1/2, |
1325 | 1472 | {orddict:new(), [], [], State}, Q), |
1326 | ok = orddict:fold(fun (IsPersistent, MsgIds, ok) -> | |
1327 | msg_store_remove(MSCState, IsPersistent, MsgIds) | |
1328 | end, ok, MsgIdsByStore), | |
1329 | IndexState1 = rabbit_queue_index:ack( | |
1330 | Acks, rabbit_queue_index:deliver(Delivers, IndexState)), | |
1331 | State1#vqstate{index_state = IndexState1}. | |
1473 | remove_msgs_by_id(MsgIdsByStore, MSCState), | |
1474 | DelsAndAcksFun(Delivers, Acks, State1). | |
1332 | 1475 | |
1333 | 1476 | remove_queue_entries1( |
1334 | 1477 | #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered, |
1342 | 1485 | cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), |
1343 | 1486 | cons_if(IndexOnDisk, SeqId, Acks), |
1344 | 1487 | stats({-1, 0}, {MsgStatus, none}, State)}. |
1488 | ||
1489 | process_delivers_and_acks_fun(deliver_and_ack) -> | |
1490 | fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) -> | |
1491 | IndexState1 = | |
1492 | rabbit_queue_index:ack( | |
1493 | Acks, rabbit_queue_index:deliver(Delivers, IndexState)), | |
1494 | State #vqstate { index_state = IndexState1 } | |
1495 | end; | |
1496 | process_delivers_and_acks_fun(_) -> | |
1497 | fun (_, _, State) -> | |
1498 | State | |
1499 | end. | |
1345 | 1500 | |
1346 | 1501 | %%---------------------------------------------------------------------------- |
1347 | 1502 | %% Internal gubbins for publishing |
1364 | 1519 | queue_index -> {MsgStatus, State} |
1365 | 1520 | end; |
1366 | 1521 | maybe_write_msg_to_disk(_Force, MsgStatus, State) -> |
1522 | {MsgStatus, State}. | |
1523 | ||
1524 | %% Due to certain optimizations made inside | |
1525 | %% rabbit_queue_index:pre_publish/7 we need to have two separate | |
1526 | %% functions for index persistence. This one is only used when paging | |
1527 | %% during memory pressure. We didn't want to modify | |
1528 | %% maybe_write_index_to_disk/3 because that function is used in other | |
1529 | %% places. | |
1530 | maybe_batch_write_index_to_disk(_Force, | |
1531 | MsgStatus = #msg_status { | |
1532 | index_on_disk = true }, State) -> | |
1533 | {MsgStatus, State}; | |
1534 | maybe_batch_write_index_to_disk(Force, | |
1535 | MsgStatus = #msg_status { | |
1536 | msg = Msg, | |
1537 | msg_id = MsgId, | |
1538 | seq_id = SeqId, | |
1539 | is_persistent = IsPersistent, | |
1540 | is_delivered = IsDelivered, | |
1541 | msg_props = MsgProps}, | |
1542 | State = #vqstate { | |
1543 | target_ram_count = TargetRamCount, | |
1544 | disk_write_count = DiskWriteCount, | |
1545 | index_state = IndexState}) | |
1546 | when Force orelse IsPersistent -> | |
1547 | {MsgOrId, DiskWriteCount1} = | |
1548 | case persist_to(MsgStatus) of | |
1549 | msg_store -> {MsgId, DiskWriteCount}; | |
1550 | queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1} | |
1551 | end, | |
1552 | IndexState1 = rabbit_queue_index:pre_publish( | |
1553 | MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, | |
1554 | TargetRamCount, IndexState), | |
1555 | {MsgStatus#msg_status{index_on_disk = true}, | |
1556 | State#vqstate{index_state = IndexState1, | |
1557 | disk_write_count = DiskWriteCount1}}; | |
1558 | maybe_batch_write_index_to_disk(_Force, MsgStatus, State) -> | |
1367 | 1559 | {MsgStatus, State}. |
1368 | 1560 | |
1369 | 1561 | maybe_write_index_to_disk(_Force, MsgStatus = #msg_status { |
1400 | 1592 | {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), |
1401 | 1593 | maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1). |
1402 | 1594 | |
1595 | maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) -> | |
1596 | {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), | |
1597 | maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1). | |
1598 | ||
1403 | 1599 | determine_persist_to(#basic_message{ |
1404 | 1600 | content = #content{properties = Props, |
1405 | 1601 | properties_bin = PropsBin}}, |
1406 | #message_properties{size = BodySize}) -> | |
1407 | {ok, IndexMaxSize} = application:get_env( | |
1408 | rabbit, queue_index_embed_msgs_below), | |
1602 | #message_properties{size = BodySize}, | |
1603 | IndexMaxSize) -> | |
1409 | 1604 | %% The >= is so that you can set the env to 0 and never persist |
1410 | 1605 | %% to the index. |
1411 | 1606 | %% |
1497 | 1692 | end. |
1498 | 1693 | |
1499 | 1694 | purge_pending_ack(KeepPersistent, |
1500 | State = #vqstate { ram_pending_ack = RPA, | |
1501 | disk_pending_ack = DPA, | |
1502 | qi_pending_ack = QPA, | |
1503 | index_state = IndexState, | |
1695 | State = #vqstate { index_state = IndexState, | |
1504 | 1696 | msg_store_clients = MSCState }) -> |
1697 | {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State), | |
1698 | case KeepPersistent of | |
1699 | true -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState), | |
1700 | State1; | |
1701 | false -> IndexState1 = | |
1702 | rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState), | |
1703 | remove_msgs_by_id(MsgIdsByStore, MSCState), | |
1704 | State1 #vqstate { index_state = IndexState1 } | |
1705 | end. | |
1706 | ||
1707 | purge_pending_ack_delete_and_terminate( | |
1708 | State = #vqstate { index_state = IndexState, | |
1709 | msg_store_clients = MSCState }) -> | |
1710 | {_, MsgIdsByStore, State1} = purge_pending_ack1(State), | |
1711 | IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState), | |
1712 | remove_msgs_by_id(MsgIdsByStore, MSCState), | |
1713 | State1 #vqstate { index_state = IndexState1 }. | |
1714 | ||
1715 | purge_pending_ack1(State = #vqstate { ram_pending_ack = RPA, | |
1716 | disk_pending_ack = DPA, | |
1717 | qi_pending_ack = QPA }) -> | |
1505 | 1718 | F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end, |
1506 | 1719 | {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} = |
1507 | 1720 | rabbit_misc:gb_trees_fold( |
1511 | 1724 | State1 = State #vqstate { ram_pending_ack = gb_trees:empty(), |
1512 | 1725 | disk_pending_ack = gb_trees:empty(), |
1513 | 1726 | qi_pending_ack = gb_trees:empty()}, |
1514 | ||
1515 | case KeepPersistent of | |
1516 | true -> case orddict:find(false, MsgIdsByStore) of | |
1517 | error -> State1; | |
1518 | {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, | |
1519 | MsgIds), | |
1520 | State1 | |
1521 | end; | |
1522 | false -> IndexState1 = | |
1523 | rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState), | |
1524 | [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) | |
1525 | || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], | |
1526 | State1 #vqstate { index_state = IndexState1 } | |
1727 | {IndexOnDiskSeqIds, MsgIdsByStore, State1}. | |
1728 | ||
1729 | %% MsgIdsByStore is an orddict with two keys: | |
1730 | %% | |
1731 | %% true: holds a list of Persistent Message Ids. | |
1732 | %% false: holds a list of Transient Message Ids. | |
1733 | %% | |
1734 | %% When we call orddict:to_list/1 we get two sets of msg ids, where | |
1735 | %% IsPersistent is either true for persistent messages or false for | |
1736 | %% transient ones. The msg_store_remove/3 function takes this boolean | |
1737 | %% flag to determine which store the messages should be removed | |
1738 | %% from. | |
1739 | remove_msgs_by_id(MsgIdsByStore, MSCState) -> | |
1740 | [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) | |
1741 | || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)]. | |
1742 | ||
1743 | remove_transient_msgs_by_id(MsgIdsByStore, MSCState) -> | |
1744 | case orddict:find(false, MsgIdsByStore) of | |
1745 | error -> ok; | |
1746 | {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds) | |
1527 | 1747 | end. |
1528 | 1748 | |
1529 | 1749 | accumulate_ack_init() -> {[], orddict:new(), []}. |
1698 | 1918 | next({delta, Delta, [], State}, IndexState) -> |
1699 | 1919 | next({delta, Delta, State}, IndexState); |
1700 | 1920 | next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) -> |
1701 | case (gb_trees:is_defined(SeqId, State#vqstate.ram_pending_ack) orelse | |
1702 | gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack) orelse | |
1703 | gb_trees:is_defined(SeqId, State#vqstate.qi_pending_ack)) of | |
1921 | case is_msg_in_pending_acks(SeqId, State) of | |
1704 | 1922 | false -> Next = {delta, Delta, Rest, State}, |
1705 | 1923 | {value, beta_msg_status(M), false, Next, IndexState}; |
1706 | 1924 | true -> next({delta, Delta, Rest, State}, IndexState) |
1747 | 1965 | ram_pending_ack = RPA, |
1748 | 1966 | ram_msg_count = RamMsgCount, |
1749 | 1967 | target_ram_count = TargetRamCount, |
1968 | io_batch_size = IoBatchSize, | |
1750 | 1969 | rates = #rates { in = AvgIngress, |
1751 | 1970 | out = AvgEgress, |
1752 | 1971 | ack_in = AvgAckIngress, |
1772 | 1991 | State2 |
1773 | 1992 | end, |
1774 | 1993 | |
1775 | case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3), | |
1776 | permitted_beta_count(State1)) of | |
1777 | S2 when S2 >= ?IO_BATCH_SIZE -> | |
1778 | %% There is an implicit, but subtle, upper bound here. We | |
1779 | %% may shuffle a lot of messages from Q2/3 into delta, but | |
1780 | %% the number of these that require any disk operation, | |
1781 | %% namely index writing, i.e. messages that are genuine | |
1782 | %% betas and not gammas, is bounded by the credit_flow | |
1783 | %% limiting of the alpha->beta conversion above. | |
1784 | push_betas_to_deltas(S2, State1); | |
1785 | _ -> | |
1786 | State1 | |
1787 | end. | |
1994 | State3 = | |
1995 | case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3), | |
1996 | permitted_beta_count(State1)) of | |
1997 | S2 when S2 >= IoBatchSize -> | |
1998 | %% There is an implicit, but subtle, upper bound here. We | |
1999 | %% may shuffle a lot of messages from Q2/3 into delta, but | |
2000 | %% the number of these that require any disk operation, | |
2001 | %% namely index writing, i.e. messages that are genuine | |
2002 | %% betas and not gammas, is bounded by the credit_flow | |
2003 | %% limiting of the alpha->beta conversion above. | |
2004 | push_betas_to_deltas(S2, State1); | |
2005 | _ -> | |
2006 | State1 | |
2007 | end, | |
2008 | %% See rabbitmq-server-290 for the reasons behind this GC call. | |
2009 | garbage_collect(), | |
2010 | State3. | |
1788 | 2011 | |
1789 | 2012 | limit_ram_acks(0, State) -> |
1790 | {0, State}; | |
2013 | {0, ui(State)}; | |
1791 | 2014 | limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA, |
1792 | 2015 | disk_pending_ack = DPA }) -> |
1793 | 2016 | case gb_trees:is_empty(RPA) of |
1794 | 2017 | true -> |
1795 | {Quota, State}; | |
2018 | {Quota, ui(State)}; | |
1796 | 2019 | false -> |
1797 | 2020 | {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA), |
1798 | 2021 | {MsgStatus1, State1} = |
1799 | maybe_write_to_disk(true, false, MsgStatus, State), | |
2022 | maybe_prepare_write_to_disk(true, false, MsgStatus, State), | |
1800 | 2023 | MsgStatus2 = m(trim_msg_status(MsgStatus1)), |
1801 | 2024 | DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA), |
1802 | 2025 | limit_ram_acks(Quota - 1, |
1856 | 2079 | {loaded, {MsgStatus, State2}} |
1857 | 2080 | end. |
1858 | 2081 | |
1859 | maybe_deltas_to_betas(State = #vqstate { delta = ?BLANK_DELTA_PATTERN(X) }) -> | |
2082 | maybe_deltas_to_betas(State) -> | |
2083 | AfterFun = process_delivers_and_acks_fun(deliver_and_ack), | |
2084 | maybe_deltas_to_betas(AfterFun, State). | |
2085 | ||
2086 | maybe_deltas_to_betas(_DelsAndAcksFun, | |
2087 | State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) -> | |
1860 | 2088 | State; |
1861 | maybe_deltas_to_betas(State = #vqstate { | |
2089 | maybe_deltas_to_betas(DelsAndAcksFun, | |
2090 | State = #vqstate { | |
1862 | 2091 | q2 = Q2, |
1863 | 2092 | delta = Delta, |
1864 | 2093 | q3 = Q3, |
1865 | 2094 | index_state = IndexState, |
1866 | 2095 | ram_msg_count = RamMsgCount, |
1867 | 2096 | ram_bytes = RamBytes, |
1868 | ram_pending_ack = RPA, | |
1869 | disk_pending_ack = DPA, | |
1870 | qi_pending_ack = QPA, | |
1871 | 2097 | disk_read_count = DiskReadCount, |
1872 | 2098 | transient_threshold = TransientThreshold }) -> |
1873 | 2099 | #delta { start_seq_id = DeltaSeqId, |
1878 | 2104 | DeltaSeqIdEnd]), |
1879 | 2105 | {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1, |
1880 | 2106 | IndexState), |
1881 | {Q3a, RamCountsInc, RamBytesInc, IndexState2} = | |
2107 | {Q3a, RamCountsInc, RamBytesInc, State1} = | |
1882 | 2108 | betas_from_index_entries(List, TransientThreshold, |
1883 | RPA, DPA, QPA, IndexState1), | |
1884 | State1 = State #vqstate { index_state = IndexState2, | |
1885 | ram_msg_count = RamMsgCount + RamCountsInc, | |
1886 | ram_bytes = RamBytes + RamBytesInc, | |
1887 | disk_read_count = DiskReadCount + RamCountsInc}, | |
2109 | DelsAndAcksFun, | |
2110 | State #vqstate { index_state = IndexState1 }), | |
2111 | State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc, | |
2112 | ram_bytes = RamBytes + RamBytesInc, | |
2113 | disk_read_count = DiskReadCount + RamCountsInc }, | |
1888 | 2114 | case ?QUEUE:len(Q3a) of |
1889 | 2115 | 0 -> |
1890 | 2116 | %% we ignored every message in the segment due to it being |
1891 | 2117 | %% transient and below the threshold |
1892 | 2118 | maybe_deltas_to_betas( |
1893 | State1 #vqstate { | |
2119 | DelsAndAcksFun, | |
2120 | State2 #vqstate { | |
1894 | 2121 | delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })}); |
1895 | 2122 | Q3aLen -> |
1896 | 2123 | Q3b = ?QUEUE:join(Q3, Q3a), |
1898 | 2125 | 0 -> |
1899 | 2126 | %% delta is now empty, but it wasn't before, so |
1900 | 2127 | %% can now join q2 onto q3 |
1901 | State1 #vqstate { q2 = ?QUEUE:new(), | |
2128 | State2 #vqstate { q2 = ?QUEUE:new(), | |
1902 | 2129 | delta = ?BLANK_DELTA, |
1903 | 2130 | q3 = ?QUEUE:join(Q3b, Q2) }; |
1904 | 2131 | N when N > 0 -> |
1905 | 2132 | Delta1 = d(#delta { start_seq_id = DeltaSeqId1, |
1906 | 2133 | count = N, |
1907 | 2134 | end_seq_id = DeltaSeqIdEnd }), |
1908 | State1 #vqstate { delta = Delta1, | |
2135 | State2 #vqstate { delta = Delta1, | |
1909 | 2136 | q3 = Q3b } |
1910 | 2137 | end |
1911 | 2138 | end. |
1934 | 2161 | when Quota =:= 0 orelse |
1935 | 2162 | TargetRamCount =:= infinity orelse |
1936 | 2163 | TargetRamCount >= RamMsgCount -> |
1937 | {Quota, State}; | |
2164 | {Quota, ui(State)}; | |
1938 | 2165 | push_alphas_to_betas(Generator, Consumer, Quota, Q, State) -> |
2166 | %% We consume credits from the message_store whenever we need to | |
2167 | %% persist a message to disk. See: | |
2168 | %% rabbit_variable_queue:msg_store_write/4. So perhaps the | |
2169 | %% msg_store is trying to throttle down our queue. | |
1939 | 2170 | case credit_flow:blocked() of |
1940 | true -> {Quota, State}; | |
2171 | true -> {Quota, ui(State)}; | |
1941 | 2172 | false -> case Generator(Q) of |
1942 | 2173 | {empty, _Q} -> |
1943 | {Quota, State}; | |
2174 | {Quota, ui(State)}; | |
1944 | 2175 | {{value, MsgStatus}, Qa} -> |
1945 | 2176 | {MsgStatus1, State1} = |
1946 | maybe_write_to_disk(true, false, MsgStatus, State), | |
2177 | maybe_prepare_write_to_disk(true, false, MsgStatus, | |
2178 | State), | |
1947 | 2179 | MsgStatus2 = m(trim_msg_status(MsgStatus1)), |
1948 | 2180 | State2 = stats( |
1949 | 2181 | ready0, {MsgStatus, MsgStatus2}, State1), |
1984 | 2216 | end |
1985 | 2217 | end. |
1986 | 2218 | |
%% Moves up to Quota messages from the beta queue Q down into delta,
%% batching their index entries to disk.  Recursion stops when the
%% quota is exhausted, the queue is drained, or the next message's
%% seq id falls below Limit.  Every exit path flushes the queue
%% index batch cache via ui/1 so no batched writes are lost.
push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
    {Q, {0, Delta, ui(State)}};
push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
    case Generator(Q) of
        {empty, _Q} ->
            {Q, {Quota, Delta, ui(State)}};
        {{value, #msg_status { seq_id = SeqId }}, _Qa}
          when SeqId < Limit ->
            {Q, {Quota, Delta, ui(State)}};
        {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
            %% The index entry must be (batch-)written before the
            %% message can leave RAM: assert index_on_disk = true.
            {#msg_status { index_on_disk = true }, State1} =
                maybe_batch_write_index_to_disk(true, MsgStatus, State),
            State2 = stats(ready0, {MsgStatus, none}, State1),
            Delta1 = expand_delta(SeqId, Delta),
            push_betas_to_deltas1(Generator, Limit, Qa,
                                  {Quota - 1, Delta1, State2})
    end.
2236 | ||
%% Flushes the queue index pre-publish batch cache and stores the
%% resulting index state back into the vqstate.  Callers invoke this
%% whenever a batch of index operations ends, otherwise cached
%% (unflushed) index writes could be lost.
ui(#vqstate{index_state      = IndexState,
            target_ram_count = TargetRamCount} = State) ->
    IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
                    TargetRamCount, IndexState),
    State#vqstate{index_state = IndexState1}.
2004 | 2243 | |
2005 | 2244 | %%---------------------------------------------------------------------------- |
2006 | 2245 | %% Upgrading |
0 | %% The contents of this file are subject to the Mozilla Public License | |
1 | %% Version 1.1 (the "License"); you may not use this file except in | |
2 | %% compliance with the License. You may obtain a copy of the License | |
3 | %% at http://www.mozilla.org/MPL/ | |
4 | %% | |
5 | %% Software distributed under the License is distributed on an "AS IS" | |
6 | %% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See | |
7 | %% the License for the specific language governing rights and | |
8 | %% limitations under the License. | |
9 | %% | |
10 | %% The Original Code is RabbitMQ. | |
11 | %% | |
12 | %% The Initial Developer of the Original Code is GoPivotal, Inc. | |
13 | %% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved. | |
14 | %% | |
15 | ||
16 | -module(ssl_compat). | |
17 | ||
%% Suppress deprecation warnings for the fallback call to the
%% deprecated ssl:connection_info/1 below.
20 | -compile(nowarn_deprecated_function). | |
21 | ||
22 | -export([connection_information/1, | |
23 | connection_information/2]). | |
24 | ||
%% Compatibility shim for ssl:connection_information/1 (OTP 18+).
%% On older releases it falls back to the deprecated
%% ssl:connection_info/1 and reshapes the result into the new
%% proplist format.
connection_information(SslSocket) ->
    try ssl:connection_information(SslSocket)
    catch
        error:undef ->
            %% Pre-18 fallback: connection_info/1 returns a pair,
            %% not a proplist.
            case ssl:connection_info(SslSocket) of
                {ok, {Protocol, Cipher}} ->
                    {ok, [{protocol, Protocol}, {cipher_suite, Cipher}]};
                {error, _} = Error ->
                    Error
            end
    end.
38 | ||
%% Compatibility shim for ssl:connection_information/2 (OTP 18+).
%% The pre-18 fallback can only answer 'protocol' and
%% 'cipher_suite'; any other requested item is silently omitted.
connection_information(SslSocket, Items) ->
    try ssl:connection_information(SslSocket, Items)
    catch
        error:undef ->
            case lists:member(protocol, Items)
                orelse lists:member(cipher_suite, Items) of
                true ->
                    case ssl:connection_info(SslSocket) of
                        {ok, {Protocol, Cipher}} ->
                            filter_information_items(Protocol, Cipher,
                                                     Items, []);
                        {error, _} = Error ->
                            Error
                    end;
                false ->
                    %% Nothing the old API can answer was requested.
                    {ok, []}
            end
    end.
61 | ||
%% Walks the requested Items, keeping only the two pieces of
%% information the old ssl API can provide, in request order.
%% Unknown items are dropped.
filter_information_items(Protocol, Cipher, [Item | Rest], Acc) ->
    case Item of
        protocol ->
            filter_information_items(Protocol, Cipher, Rest,
                                     [{protocol, Protocol} | Acc]);
        cipher_suite ->
            filter_information_items(Protocol, Cipher, Rest,
                                     [{cipher_suite, Cipher} | Acc]);
        _Other ->
            %% Unanswerable via the old API; skip it.
            filter_information_items(Protocol, Cipher, Rest, Acc)
    end;
filter_information_items(_Protocol, _Cipher, [], Acc) ->
    {ok, lists:reverse(Acc)}.
0 | %% | |
1 | %% %CopyrightBegin% | |
2 | %% | |
3 | %% Copyright Ericsson AB 2014-2015. All Rights Reserved. | |
4 | %% | |
5 | %% Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | %% you may not use this file except in compliance with the License. | |
7 | %% You may obtain a copy of the License at | |
8 | %% | |
9 | %% http://www.apache.org/licenses/LICENSE-2.0 | |
10 | %% | |
11 | %% Unless required by applicable law or agreed to in writing, software | |
12 | %% distributed under the License is distributed on an "AS IS" BASIS, | |
13 | %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | %% See the License for the specific language governing permissions and | |
15 | %% limitations under the License. | |
16 | %% | |
17 | %% %CopyrightEnd% | |
18 | %% | |
19 | ||
20 | %% | |
21 | %% If your code need to be able to execute on ERTS versions both | |
22 | %% earlier and later than 7.0, the best approach is to use the new | |
23 | %% time API introduced in ERTS 7.0 and implement a fallback | |
24 | %% solution using the old primitives to be used on old ERTS | |
25 | %% versions. This way your code can automatically take advantage | |
26 | %% of the improvements in the API when available. This is an | |
27 | %% example of how to implement such an API, but it can be used | |
%% as is if you want to. Just add (a preferably renamed version of)
29 | %% this module to your project, and call the API via this module | |
30 | %% instead of calling the BIFs directly. | |
31 | %% | |
32 | ||
33 | -module(time_compat). | |
34 | ||
35 | %% We don't want warnings about the use of erlang:now/0 in | |
36 | %% this module. | |
37 | -compile(nowarn_deprecated_function). | |
38 | %% | |
39 | %% We don't use | |
40 | %% -compile({nowarn_deprecated_function, [{erlang, now, 0}]}). | |
41 | %% since this will produce warnings when compiled on systems | |
42 | %% where it has not yet been deprecated. | |
43 | %% | |
44 | ||
45 | -export([monotonic_time/0, | |
46 | monotonic_time/1, | |
47 | erlang_system_time/0, | |
48 | erlang_system_time/1, | |
49 | os_system_time/0, | |
50 | os_system_time/1, | |
51 | time_offset/0, | |
52 | time_offset/1, | |
53 | convert_time_unit/3, | |
54 | timestamp/0, | |
55 | unique_integer/0, | |
56 | unique_integer/1, | |
57 | monitor/2, | |
58 | system_info/1, | |
59 | system_flag/2]). | |
60 | ||
%% Monotonic time in native units.  On pre-18 ERTS (no
%% erlang:monotonic_time/0) Erlang system time stands in: without
%% time warp, the two clocks coincide there.
monotonic_time() ->
    try erlang:monotonic_time()
    catch
        error:undef ->
            erlang_system_time_fallback()
    end.
69 | ||
%% Monotonic time converted to Unit.  Invalid units raise badarg
%% on both the native and the fallback path.
monotonic_time(Unit) ->
    try erlang:monotonic_time(Unit)
    catch
        error:badarg ->
            erlang:error(badarg, [Unit]);
        error:undef ->
            %% Pre-18: Erlang system time doubles as monotonic time.
            Now = erlang_system_time_fallback(),
            try convert_time_unit_fallback(Now, native, Unit)
            catch
                error:bad_time_unit -> erlang:error(badarg, [Unit])
            end
    end.
85 | ||
%% Erlang system time in native units, via erlang:system_time/0
%% when available, otherwise derived from erlang:now/0.
erlang_system_time() ->
    try erlang:system_time()
    catch
        error:undef -> erlang_system_time_fallback()
    end.
93 | ||
%% Erlang system time converted to Unit.  Invalid units raise
%% badarg on both the native and the fallback path.
erlang_system_time(Unit) ->
    try erlang:system_time(Unit)
    catch
        error:badarg ->
            erlang:error(badarg, [Unit]);
        error:undef ->
            Now = erlang_system_time_fallback(),
            try convert_time_unit_fallback(Now, native, Unit)
            catch
                error:bad_time_unit -> erlang:error(badarg, [Unit])
            end
    end.
108 | ||
%% OS system time in native units, via os:system_time/0 when
%% available, otherwise derived from os:timestamp/0.
os_system_time() ->
    try os:system_time()
    catch
        error:undef -> os_system_time_fallback()
    end.
116 | ||
%% OS system time converted to Unit.  Invalid units raise badarg
%% on both the native and the fallback path.
os_system_time(Unit) ->
    try os:system_time(Unit)
    catch
        error:badarg ->
            erlang:error(badarg, [Unit]);
        error:undef ->
            Now = os_system_time_fallback(),
            try convert_time_unit_fallback(Now, native, Unit)
            catch
                error:bad_time_unit -> erlang:error(badarg, [Unit])
            end
    end.
131 | ||
%% Offset between Erlang monotonic time and Erlang system time.
time_offset() ->
    try erlang:time_offset()
    catch
        error:undef ->
            %% Pre-18: the two clocks are the same, so the offset
            %% is constant zero.
            0
    end.
141 | ||
%% Time offset converted to Unit.  The fallback still validates
%% Unit (raising badarg on junk) before answering the constant 0.
time_offset(Unit) ->
    try erlang:time_offset(Unit)
    catch
        error:badarg ->
            erlang:error(badarg, [Unit]);
        error:undef ->
            %% Validate the unit even though the answer is fixed,
            %% to match the native BIF's error behaviour.
            try integer_time_unit(Unit) of
                _ -> 0
            catch
                error:bad_time_unit -> erlang:error(badarg, [Unit])
            end
    end.
158 | ||
%% Converts Time between units, preferring the native BIF.  Any
%% failure (bad unit, bad time) surfaces as badarg with the
%% original arguments.
convert_time_unit(Time, FromUnit, ToUnit) ->
    try erlang:convert_time_unit(Time, FromUnit, ToUnit)
    catch
        error:undef ->
            try convert_time_unit_fallback(Time, FromUnit, ToUnit)
            catch
                _:_ -> erlang:error(badarg, [Time, FromUnit, ToUnit])
            end;
        error:Reason ->
            erlang:error(Reason, [Time, FromUnit, ToUnit])
    end.
173 | ||
%% erlang:timestamp/0 with an erlang:now/0 fallback for pre-18
%% runtimes; both return a {MegaSecs, Secs, MicroSecs} triple.
timestamp() ->
    try erlang:timestamp()
    catch
        error:undef -> erlang:now()
    end.
181 | ||
%% A unique integer.  The pre-18 fallback flattens the strictly
%% increasing now/0 triple into a single integer.
unique_integer() ->
    try erlang:unique_integer()
    catch
        error:undef ->
            {MegaSecs, Secs, MicroSecs} = erlang:now(),
            (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs
    end.
190 | ||
%% A unique integer honouring the given modifiers.  Invalid
%% modifier lists raise badarg on both paths.
unique_integer(Modifiers) ->
    try erlang:unique_integer(Modifiers)
    catch
        error:badarg ->
            erlang:error(badarg, [Modifiers]);
        error:undef ->
            case is_valid_modifier_list(Modifiers) of
                true ->
                    %% An integer flattened from now/0 is unique,
                    %% positive and monotonic, so it satisfies
                    %% every supported modifier at once.
                    {MegaSecs, Secs, MicroSecs} = erlang:now(),
                    (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs;
                false ->
                    erlang:error(badarg, [Modifiers])
            end
    end.
210 | ||
%% erlang:monitor/2 that tolerates time_offset monitoring on
%% pre-18 runtimes: the offset is final there, so no 'CHANGE'
%% message can ever be due and a fresh ref is a safe stand-in.
monitor(Type, Item) ->
    try erlang:monitor(Type, Item)
    catch
        error:badarg when Type =:= time_offset,
                          Item =:= clock_service ->
            make_ref();
        error:Reason ->
            erlang:error(Reason, [Type, Item])
    end.
226 | ||
%% erlang:system_info/1 that emulates the new time-API items on
%% pre-18 runtimes where the BIF raises badarg for them.
system_info(Item) ->
    try erlang:system_info(Item)
    catch
        error:badarg ->
            case Item of
                time_warp_mode ->
                    %% Pre-18 semantics match no_time_warp.
                    no_time_warp;
                time_offset ->
                    %% The offset can never change on pre-18.
                    final;
                time_correction ->
                    case erlang:system_info(tolerant_timeofday) of
                        enabled  -> true;
                        disabled -> false
                    end;
                NotSup when NotSup =:= os_monotonic_time_source;
                            NotSup =:= os_system_time_source;
                            NotSup =:= start_time;
                            NotSup =:= end_time ->
                    %% No sensible emulation exists for these.
                    erlang:error(notsup, [NotSup]);
                _ ->
                    erlang:error(badarg, [Item])
            end;
        error:Reason ->
            erlang:error(Reason, [Item])
    end.
254 | ||
%% erlang:system_flag/2 that tolerates finalizing the time offset
%% on pre-18 runtimes, where the offset is already final.
system_flag(Flag, Value) ->
    try erlang:system_flag(Flag, Value)
    catch
        error:badarg when Flag =:= time_offset,
                          Value =:= finalize ->
            final;
        error:Reason ->
            erlang:error(Reason, [Flag, Value])
    end.
268 | ||
269 | %% | |
270 | %% Internal functions | |
271 | %% | |
272 | ||
%% Parts-per-second for each symbolic time unit; positive integers
%% pass through as explicit hertz.  Pre-18 'native' resolution is
%% microseconds (now/0 granularity).
integer_time_unit(seconds)       -> 1;
integer_time_unit(milli_seconds) -> 1000;
integer_time_unit(micro_seconds) -> 1000 * 1000;
integer_time_unit(nano_seconds)  -> 1000 * 1000 * 1000;
integer_time_unit(native)        -> 1000 * 1000;
integer_time_unit(Hertz) when is_integer(Hertz), Hertz > 0 -> Hertz;
integer_time_unit(Bad) -> erlang:error(bad_time_unit, [Bad]).
280 | ||
%% Erlang system time (microseconds) flattened from the classic
%% erlang:now/0 triple.
erlang_system_time_fallback() ->
    {MegaSecs, Secs, MicroSecs} = erlang:now(),
    (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
284 | ||
%% OS system time (microseconds) flattened from the os:timestamp/0
%% triple.
os_system_time_fallback() ->
    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
    (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
288 | ||
%% Unit conversion with floor (round toward minus infinity)
%% semantics, matching erlang:convert_time_unit/3.
convert_time_unit_fallback(Time, FromUnit, ToUnit) ->
    FU = integer_time_unit(FromUnit),
    TU = integer_time_unit(ToUnit),
    Scaled = case Time < 0 of
                 %% Bias negative values so the integer division
                 %% floors instead of truncating toward zero.
                 true  -> TU * Time - (FU - 1);
                 false -> TU * Time
             end,
    Scaled div FU.
296 | ||
%% True iff the argument is a proper list containing only the
%% modifiers 'positive' and 'monotonic'.  Improper lists and
%% non-lists yield false rather than crashing.
is_valid_modifier_list([Modifier | Rest]) when Modifier =:= positive;
                                               Modifier =:= monotonic ->
    is_valid_modifier_list(Rest);
is_valid_modifier_list([]) ->
    true;
is_valid_modifier_list(_Other) ->
    false.
51 | 51 | |
52 | 52 | -record(state, {total_memory, |
53 | 53 | memory_limit, |
54 | memory_fraction, | |
54 | memory_config_limit, | |
55 | 55 | timeout, |
56 | 56 | timer, |
57 | 57 | alarmed, |
62 | 62 | |
63 | 63 | -ifdef(use_specs). |
64 | 64 | |
65 | -type(vm_memory_high_watermark() :: (float() | {'absolute', integer()})). | |
65 | 66 | -spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()). |
66 | 67 | -spec(start_link/3 :: (float(), fun ((any()) -> 'ok'), |
67 | 68 | fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()). |
69 | 70 | -spec(get_vm_limit/0 :: () -> non_neg_integer()). |
70 | 71 | -spec(get_check_interval/0 :: () -> non_neg_integer()). |
71 | 72 | -spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok'). |
72 | -spec(get_vm_memory_high_watermark/0 :: () -> float()). | |
73 | -spec(set_vm_memory_high_watermark/1 :: (float()) -> 'ok'). | |
73 | -spec(get_vm_memory_high_watermark/0 :: () -> vm_memory_high_watermark()). | |
74 | -spec(set_vm_memory_high_watermark/1 :: (vm_memory_high_watermark()) -> 'ok'). | |
74 | 75 | -spec(get_memory_limit/0 :: () -> non_neg_integer()). |
75 | 76 | |
76 | 77 | -endif. |
127 | 128 | alarm_funs = AlarmFuns }, |
128 | 129 | {ok, set_mem_limits(State, MemFraction)}. |
129 | 130 | |
130 | handle_call(get_vm_memory_high_watermark, _From, State) -> | |
131 | {reply, State#state.memory_fraction, State}; | |
132 | ||
133 | handle_call({set_vm_memory_high_watermark, MemFraction}, _From, State) -> | |
134 | {reply, ok, set_mem_limits(State, MemFraction)}; | |
131 | handle_call(get_vm_memory_high_watermark, _From, | |
132 | #state{memory_config_limit = MemLimit} = State) -> | |
133 | {reply, MemLimit, State}; | |
134 | ||
135 | handle_call({set_vm_memory_high_watermark, MemLimit}, _From, State) -> | |
136 | {reply, ok, set_mem_limits(State, MemLimit)}; | |
135 | 137 | |
136 | 138 | handle_call(get_check_interval, _From, State) -> |
137 | 139 | {reply, State#state.timeout, State}; |
165 | 167 | %% Server Internals |
166 | 168 | %%---------------------------------------------------------------------------- |
167 | 169 | |
168 | set_mem_limits(State, MemFraction) -> | |
170 | set_mem_limits(State, MemLimit) -> | |
171 | case erlang:system_info(wordsize) of | |
172 | 4 -> | |
173 | error_logger:warning_msg( | |
174 | "You are using a 32-bit version of Erlang: you may run into " | |
175 | "memory address~n" | |
176 | "space exhaustion or statistic counters overflow.~n"); | |
177 | _ -> | |
178 | ok | |
179 | end, | |
169 | 180 | TotalMemory = |
170 | 181 | case get_total_memory() of |
171 | 182 | unknown -> |
196 | 207 | _ -> |
197 | 208 | TotalMemory |
198 | 209 | end, |
199 | MemLim = trunc(MemFraction * UsableMemory), | |
210 | MemLim = interpret_limit(MemLimit, UsableMemory), | |
200 | 211 | error_logger:info_msg("Memory limit set to ~pMB of ~pMB total.~n", |
201 | 212 | [trunc(MemLim/?ONE_MB), trunc(TotalMemory/?ONE_MB)]), |
202 | 213 | internal_update(State #state { total_memory = TotalMemory, |
203 | 214 | memory_limit = MemLim, |
204 | memory_fraction = MemFraction}). | |
215 | memory_config_limit = MemLimit}). | |
216 | ||
%% Turns the configured high watermark into a byte limit.
%% {absolute, Bytes} is capped at the usable memory; any other
%% value is treated as a fraction of usable memory.
interpret_limit({'absolute', MemLim}, UsableMemory) ->
    erlang:min(MemLim, UsableMemory);
interpret_limit(MemFraction, UsableMemory) ->
    trunc(MemFraction * UsableMemory).
205 | 221 | |
206 | 222 | internal_update(State = #state { memory_limit = MemLimit, |
207 | 223 | alarmed = Alarmed, |
376 | 392 | read_proc_file(File) -> |
377 | 393 | {ok, IoDevice} = file:open(File, [read, raw]), |
378 | 394 | Res = read_proc_file(IoDevice, []), |
379 | file:close(IoDevice), | |
395 | _ = file:close(IoDevice), | |
380 | 396 | lists:flatten(lists:reverse(Res)). |
381 | 397 | |
382 | 398 | -define(BUFFER_SIZE, 1024). |