diff --git a/.github/workflows/erlang.yml b/.github/workflows/erlang.yml
index d24950f..b2ed7c3 100644
--- a/.github/workflows/erlang.yml
+++ b/.github/workflows/erlang.yml
@@ -13,21 +13,30 @@ jobs:
         otp: [23.3, 24.3]

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: erlef/setup-beam@v1
+        id: setup-beam
         with:
           otp-version: ${{matrix.otp}}
           rebar3-version: '3.18.0'
       - name: Restore _build
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: _build
-          key: _build-cache-for-os-${{runner.os}}-otp-${{steps.setup-beam.outputs.otp-version}}-rebar3-${{steps.setup-beam.outputs.rebar3-version}}-hash-${{hashFiles('rebar.lock')}}
+          key: "_build-cache-for\
+                -os-${{runner.os}}\
+                -otp-${{steps.setup-beam.outputs.otp-version}}\
+                -rebar3-${{steps.setup-beam.outputs.rebar3-version}}\
+                -hash-${{hashFiles('rebar.lock')}}"
       - name: Restore rebar3's cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
           path: ~/.cache/rebar3
-          key: rebar3-cache-for-os-${{runner.os}}-otp-${{steps.setup-beam.outputs.otp-version}}-rebar3-${{steps.setup-beam.outputs.rebar3-version}}-hash-${{hashFiles('rebar.lock')}}
+          key: "rebar3-cache-for\
+                -os-${{runner.os}}\
+                -otp-${{steps.setup-beam.outputs.otp-version}}\
+                -rebar3-${{steps.setup-beam.outputs.rebar3-version}}\
+                -hash-${{hashFiles('rebar.lock')}}"
       - name: Compile
         run: rebar3 compile
       - name: Format check
diff --git a/rebar.config b/rebar.config
index 8f2d397..c3c4c3a 100644
--- a/rebar.config
+++ b/rebar.config
@@ -29,6 +29,6 @@

 {project_plugins,
  [{rebar3_hex, "~> 7.0.1"},
-  {rebar3_format, "~> 1.2.0"},
-  {rebar3_lint, "~> 1.0.2"},
+  {rebar3_format, "~> 1.2.1"},
+  {rebar3_lint, "~> 1.1.0"},
   {rebar3_hank, "~> 1.3.0"}]}.
diff --git a/src/mero_cluster.erl b/src/mero_cluster.erl
index d3fbdf2..effcce5 100644
--- a/src/mero_cluster.erl
+++ b/src/mero_cluster.erl
@@ -291,15 +291,18 @@ get_server_defs({ClusterName, ClusterConfig}) ->
     {Elements, _} =
         lists:foldl(fun({Host, Port}, {Acc, ShardSizeAcc}) ->
                        Elements =
-                           [begin
-                                WorkerName =
-                                    worker_name(ClusterName, Host, ReplicationNumber, ShardSizeAcc),
-                                {ClusterName,
-                                 ShardSizeAcc,
-                                 ReplicationNumber,
-                                 {ClusterName, Host, Port, WorkerName, WorkerModule}}
-                            end
-                            || ReplicationNumber <- lists:seq(0, Workers - 1)],
+                           lists:map(fun(ReplicationNumber) ->
+                                        WorkerName =
+                                            worker_name(ClusterName,
+                                                        Host,
+                                                        ReplicationNumber,
+                                                        ShardSizeAcc),
+                                        {ClusterName,
+                                         ShardSizeAcc,
+                                         ReplicationNumber,
+                                         {ClusterName, Host, Port, WorkerName, WorkerModule}}
+                                     end,
+                                     lists:seq(0, Workers - 1)),
                        {Acc ++ Elements, ShardSizeAcc + 1}
                     end,
                     {[], 0},
diff --git a/src/mero_conn.erl b/src/mero_conn.erl
index 13a13b6..1116244 100644
--- a/src/mero_conn.erl
+++ b/src/mero_conn.erl
@@ -217,24 +217,22 @@ async_by_shard(Name,
                          response_error = AsyncOpResponseError}) ->
     {Processed, Errors} =
         lists:foldl(fun({ShardIdentifier, Items}, {Processed, Errors}) ->
-                       begin
-                           PoolName = mero_cluster:random_pool_of_shard(Name, ShardIdentifier),
-                           case mero_pool:checkout(PoolName, TimeLimit) of
-                               {ok, Conn} ->
-                                   case mero_pool:transaction(Conn, AsyncOp, [Items]) of
-                                       {error, Reason} ->
-                                           mero_pool:close(Conn, AsyncOpError),
-                                           mero_pool:checkin_closed(Conn),
-                                           {Processed, [Reason | Errors]};
-                                       {NConn, {error, Reason}} ->
-                                           mero_pool:checkin(NConn),
-                                           {Processed, [Reason | Errors]};
-                                       {NConn, ok} ->
-                                           {[{NConn, Items} | Processed], Errors}
-                                   end;
-                               {error, Reason} ->
-                                   {Processed, [Reason | Errors]}
-                           end
+                       PoolName = mero_cluster:random_pool_of_shard(Name, ShardIdentifier),
+                       case mero_pool:checkout(PoolName, TimeLimit) of
+                           {ok, Conn} ->
+                               case mero_pool:transaction(Conn, AsyncOp, [Items]) of
+                                   {error, Reason} ->
+                                       mero_pool:close(Conn, AsyncOpError),
+                                       mero_pool:checkin_closed(Conn),
+                                       {Processed, [Reason | Errors]};
+                                   {NConn, {error, Reason}} ->
+                                       mero_pool:checkin(NConn),
+                                       {Processed, [Reason | Errors]};
+                                   {NConn, ok} ->
+                                       {[{NConn, Items} | Processed], Errors}
+                               end;
+                           {error, Reason} ->
+                               {Processed, [Reason | Errors]}
                        end
                     end,
                     {[], []},
diff --git a/src/mero_elasticache.erl b/src/mero_elasticache.erl
index 9780266..24e9fe7 100644
--- a/src/mero_elasticache.erl
+++ b/src/mero_elasticache.erl
@@ -125,13 +125,14 @@ parse_cluster_entries([H | T], Accum) ->
         [Host, IP, Port] ->
             case inet:parse_ipv4_address(IP) of
                 {ok, IPAddr} ->
-                    case catch erlang:list_to_integer(Port) of
-                        {'EXIT', _} ->
-                            {error, bad_port};
+                    try erlang:list_to_integer(Port) of
                         P when P < 1 orelse P > 65535 ->
                             {error, bad_port};
                         P ->
                             parse_cluster_entries(T, [{Host, IPAddr, P} | Accum])
+                    catch
+                        _:_ ->
+                            {error, bad_port}
                     end;
                 {error, _} ->
                     {error, bad_ip}
diff --git a/src/mero_pool.erl b/src/mero_pool.erl
index 6664477..3e116f5 100644
--- a/src/mero_pool.erl
+++ b/src/mero_pool.erl
@@ -30,6 +30,9 @@

 -author('Miriam Pena ').

+%% catch X is used as a way to fire and forget, ignoring errors if there are any
+-elvis([{elvis_style, no_catch_expressions, disable}]).
+
 %% many functions are "callbacks" for proc_lib
 -hank([{unnecessary_function_arguments, [system_terminate]}]).

diff --git a/src/mero_wrk_tcp_binary.erl b/src/mero_wrk_tcp_binary.erl
index e2dff3c..5db6644 100755
--- a/src/mero_wrk_tcp_binary.erl
+++ b/src/mero_wrk_tcp_binary.erl
@@ -30,6 +30,9 @@

 -author('Miriam Pena ').

+%% The usage of throw here is intentional and it's correctly used for non-local returns.
+-elvis([{elvis_style, no_throw, disable}]).
+
 -include_lib("mero/include/mero.hrl").

 -behaviour(mero_pool).
diff --git a/src/mero_wrk_tcp_txt.erl b/src/mero_wrk_tcp_txt.erl
index 0b0de7d..8622714 100755
--- a/src/mero_wrk_tcp_txt.erl
+++ b/src/mero_wrk_tcp_txt.erl
@@ -30,6 +30,9 @@

 -author('Miriam Pena ').

+%% The usage of throw here is intentional and it's correctly used for non-local returns.
+-elvis([{elvis_style, no_throw, disable}]).
+
 -include_lib("mero/include/mero.hrl").

 -behaviour(mero_pool).
diff --git a/test/mero_cluster_SUITE.erl b/test/mero_cluster_SUITE.erl
index a1512a2..2362137 100644
--- a/test/mero_cluster_SUITE.erl
+++ b/test/mero_cluster_SUITE.erl
@@ -128,21 +128,21 @@ load_cluster(_Conf) ->
     ok.

 shard_phash2(_Conf) ->
-    [[begin
-          Result = mero:shard_phash2(Key, Shards),
-          ?assertEqual(Result, mero:shard_phash2(Key, Shards)),
-          ?assert(Result =< Shards)
-      end
-      || Shards <- lists:seq(1, 10)]
+    [lists:map(fun(Shards) ->
+                  Result = mero:shard_phash2(Key, Shards),
+                  ?assertEqual(Result, mero:shard_phash2(Key, Shards)),
+                  ?assert(Result =< Shards)
+               end,
+               lists:seq(1, 10))
      || Key <- [<<"Adroll">>, <<"retargetting">>, <<"platform">>]].

 shard_crc32(_Conf) ->
-    [[begin
-          Result = mero:shard_crc32(Key, Shards),
-          ?assertEqual(Result, mero:shard_crc32(Key, Shards)),
-          ?assert(Result =< Shards)
-      end
-      || Shards <- lists:seq(1, 10)]
+    [lists:map(fun(Shards) ->
+                  Result = mero:shard_crc32(Key, Shards),
+                  ?assertEqual(Result, mero:shard_crc32(Key, Shards)),
+                  ?assert(Result =< Shards)
+               end,
+               lists:seq(1, 10))
      || Key <- [<<"Adroll">>, <<"retargetting">>, <<"platform">>]].

 select_pool(_Conf) ->
diff --git a/test/mero_test_with_local_memcached_SUITE.erl b/test/mero_test_with_local_memcached_SUITE.erl
index bc0bb1a..8ea8199 100644
--- a/test/mero_test_with_local_memcached_SUITE.erl
+++ b/test/mero_test_with_local_memcached_SUITE.erl
@@ -280,11 +280,11 @@ mget(Cluster, ClusterAlt, Keys) ->
     Results = mero:mget(Cluster, Keys, 10000),
     ResultsAlt = mero:mget(ClusterAlt, Keys, 10000),
     io:format("Checking mget ~p ~n", [Results]),
-    [begin
-         ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, Results)),
-         ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, ResultsAlt))
-     end
-     || Key <- Keys].
+    lists:foreach(fun(Key) ->
+                     ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, Results)),
+                     ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, ResultsAlt))
+                  end,
+                  Keys).

 mincrement(Cluster = cluster_txt, Keys) ->
     {error, not_supportable} = mero:mincrement_counter(Cluster, Keys);
@@ -337,43 +337,45 @@ add(Cluster, ClusterAlt, Keys) ->
     io:format("Attempt to add success to 5000 ~n"),
     Expected = <<"5000">>,
     Expected2 = <<"asdf">>,
-    [begin
-         ?assertEqual(ok, mero:add(Cluster, Key, Expected, 10000, 10000)),
-         ?assertEqual({error, not_stored}, mero:add(cluster_txt, Key, Expected2, 10000, 10000)),
-         ?assertEqual({error, already_exists},
-                      mero:add(cluster_binary, Key, Expected2, 10000, 10000)),
-         {Key, Value} = mero:get(Cluster, Key),
-         {Key, Value2} = mero:get(ClusterAlt, Key),
-         io:format("Checking get ~p ~p ~n", [Value, Value2]),
-         ?assertEqual(Expected, Value),
-         ?assertEqual(Expected, Value2)
-     end
-     || Key <- Keys].
+    lists:foreach(fun(Key) ->
+                     ?assertEqual(ok, mero:add(Cluster, Key, Expected, 10000, 10000)),
+                     ?assertEqual({error, not_stored},
+                                  mero:add(cluster_txt, Key, Expected2, 10000, 10000)),
+                     ?assertEqual({error, already_exists},
+                                  mero:add(cluster_binary, Key, Expected2, 10000, 10000)),
+                     {Key, Value} = mero:get(Cluster, Key),
+                     {Key, Value2} = mero:get(ClusterAlt, Key),
+                     io:format("Checking get ~p ~p ~n", [Value, Value2]),
+                     ?assertEqual(Expected, Value),
+                     ?assertEqual(Expected, Value2)
+                  end,
+                  Keys).

 cas(Cluster, ClusterAlt, Keys) ->
     Value1 = <<"asdf">>,
     Value2 = <<"foo">>,
     Value3 = <<"bar">>,
-    [begin
-         ?assertEqual({error, not_found}, mero:cas(Cluster, Key, Value1, 10000, 10000, 12345)),
-         await_connected(Cluster),
-         ?assertEqual(ok, mero:set(Cluster, Key, Value1, 10000, 10000)),
-         ?assertEqual({Key, Value1}, mero:get(ClusterAlt, Key)),
-         {Key, Value1, CAS} = mero:gets(Cluster, Key),
-         {Key, Value1, CAS} = mero:gets(ClusterAlt, Key),
-         ?assertEqual({error, already_exists},
-                      mero:cas(Cluster, Key, Value2, 10000, 10000, CAS + 1)),
-         await_connected(Cluster),
-         ?assertEqual(ok, mero:cas(Cluster, Key, Value2, 10000, 10000, CAS)),
-         ?assertEqual({error, already_exists},
-                      mero:cas(ClusterAlt, Key, Value2, 10000, 10000, CAS)),
-         await_connected(ClusterAlt),
-         ?assertEqual({Key, Value2}, mero:get(ClusterAlt, Key)),
-         ?assertEqual(ok, mero:set(Cluster, Key, Value3, 10000, 10000)),
-         {Key, Value3, NCAS} = mero:gets(Cluster, Key),
-         ?assertNotEqual(CAS, NCAS)
-     end
-     || Key <- Keys].
+    lists:foreach(fun(Key) ->
+                     ?assertEqual({error, not_found},
+                                  mero:cas(Cluster, Key, Value1, 10000, 10000, 12345)),
+                     await_connected(Cluster),
+                     ?assertEqual(ok, mero:set(Cluster, Key, Value1, 10000, 10000)),
+                     ?assertEqual({Key, Value1}, mero:get(ClusterAlt, Key)),
+                     {Key, Value1, CAS} = mero:gets(Cluster, Key),
+                     {Key, Value1, CAS} = mero:gets(ClusterAlt, Key),
+                     ?assertEqual({error, already_exists},
+                                  mero:cas(Cluster, Key, Value2, 10000, 10000, CAS + 1)),
+                     await_connected(Cluster),
+                     ?assertEqual(ok, mero:cas(Cluster, Key, Value2, 10000, 10000, CAS)),
+                     ?assertEqual({error, already_exists},
+                                  mero:cas(ClusterAlt, Key, Value2, 10000, 10000, CAS)),
+                     await_connected(ClusterAlt),
+                     ?assertEqual({Key, Value2}, mero:get(ClusterAlt, Key)),
+                     ?assertEqual(ok, mero:set(Cluster, Key, Value3, 10000, 10000)),
+                     {Key, Value3, NCAS} = mero:gets(Cluster, Key),
+                     ?assertNotEqual(CAS, NCAS)
+                  end,
+                  Keys).

 %% this is needed b/c our test server doesn't emulate a real memcached server with 100%
 %% accuracy.
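For reference, a minimal standalone sketch (not part of the patch above) of the `try ... of ... catch` shape that the mero_elasticache.erl hunk switches to in place of `case catch ...`. The module and function names here are illustrative only; the patterns mirror the patched parse_cluster_entries/2 clause:

```erlang
-module(port_parse_example).

-export([parse_port/1]).

%% Validate a TCP port given as a string. The success patterns, including the
%% range guard, go in the `of` section; any exception raised by
%% erlang:list_to_integer/1 (badarg on non-numeric input) is trapped in `catch`.
-spec parse_port(string()) -> {ok, 1..65535} | {error, bad_port}.
parse_port(Str) ->
    try erlang:list_to_integer(Str) of
        P when P < 1 orelse P > 65535 ->
            {error, bad_port};
        P ->
            {ok, P}
    catch
        _:_ ->
            {error, bad_port}
    end.
```

Compared with `catch Expr`, which folds errors, exits, and thrown terms into ordinary return values such as `{'EXIT', Reason}`, the `try` expression keeps exceptional and normal results separate, which is the conflation the elvis `no_catch_expressions` rule referenced in this patch discourages.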