diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ff030ac5..07b131fa 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -11,7 +11,7 @@ jobs: strategy: matrix: erlang: [22, 23] - mongodb: ["4.4.8", "5.0.2"] + mongodb: ["4.4.8"] container: image: erlang:${{ matrix.erlang }} steps: @@ -19,8 +19,8 @@ jobs: - run: ./scripts/install_mongo_debian.sh ${{ matrix.mongodb }} - run: ./scripts/start_mongo_single_node.sh - run: ./scripts/start_mongo_cluster.sh - - run: ./rebar3 eunit - - run: ./rebar3 ct + - run: ./rebar3 eunit --verbose + - run: ./rebar3 ct --verbose - name: Archive Replica Set Logs uses: actions/upload-artifact@v2 if: failure() diff --git a/.github/workflows/test_coverage.yml b/.github/workflows/test_coverage.yml index 76ee41d0..919a35c8 100644 --- a/.github/workflows/test_coverage.yml +++ b/.github/workflows/test_coverage.yml @@ -15,9 +15,9 @@ jobs: - run: ./scripts/install_mongo_debian.sh 5.0.2 - run: ./scripts/start_mongo_single_node.sh - run: ./scripts/start_mongo_cluster.sh - - run: ./rebar3 eunit --cover --cover_export_name eunit.coverdata - - run: ./rebar3 ct --cover --cover_export_name ct.coverdata - - run: rebar3 cover --verbose + - run: ./rebar3 eunit --verbose --cover --cover_export_name eunit + - run: ./rebar3 ct --verbose --cover --cover_export_name ct + - run: ./rebar3 cover --verbose - name: Archive Replica Set Logs uses: actions/upload-artifact@v2 if: failure() @@ -34,12 +34,14 @@ jobs: retention-days: 1 - name: Coverage Report uses: actions/upload-artifact@v2 + if: always() with: name: Coverage Report path: _build/test/cover/ retention-days: 5 - name: CT Logs uses: actions/upload-artifact@v2 + if: always() with: name: ct_logs path: _build/test/logs/ diff --git a/scripts/start_mongo_cluster.sh b/scripts/start_mongo_cluster.sh index c23b1d6e..2fb9164b 100755 --- a/scripts/start_mongo_cluster.sh +++ b/scripts/start_mongo_cluster.sh @@ -4,6 +4,19 @@ set -ex +mongo --port 27018 admin --eval 
'db.adminCommand({ + shutdown: 1, + force: true +})' || true +mongo --port 27019 admin --eval 'db.adminCommand({ + shutdown: 1, + force: true +})' || true +mongo --port 27020 admin --eval 'db.adminCommand({ + shutdown: 1, + force: true +})' || true + rm -rf rs0-0 rs0-1 rs0-2 rs0-logs rs0-key mkdir -p rs0-0 rs0-1 rs0-2 rs0-logs rs0-key @@ -44,13 +57,26 @@ mongo --host rs0/localhost:27018,localhost:27019,localhost:27020 \ pwd: "rs_test", roles: [{role: "clusterAdmin", db: "admin"}, {role: "userAdminAnyDatabase", db: "admin"}, + {role: "readWriteAnyDatabase", db: "admin"}, "readWrite"]})' # Shutdown nodes in replica set -mongod --shutdown --dbpath rs0-0 -mongod --shutdown --dbpath rs0-1 -mongod --shutdown --dbpath rs0-2 - +mongo --port 27018 admin --eval 'db.adminCommand({ + shutdown: 1, + force: true +})' || true +mongo --port 27019 admin --eval 'db.adminCommand({ + shutdown: 1, + force: true +})' || true +mongo --port 27020 admin --eval 'db.adminCommand({ + shutdown: 1, + force: true +})' || true +#mongod --shutdown --dbpath rs0-0 +#mongod --shutdown --dbpath rs0-1 +#mongod --shutdown --dbpath rs0-2 +sleep 10 # Restart replica set nodes with authentication enabled mongod --replSet rs0 --auth --keyFile rs0-key/key --port 27018 --bind_ip localhost --dbpath rs0-0 --oplogSize 128 >> rs0-logs/rs0-0-auth.log.txt & mongod --replSet rs0 --auth --keyFile rs0-key/key --port 27019 --bind_ip localhost --dbpath rs0-1 --oplogSize 128 >> rs0-logs/rs0-1-auth.log.txt & @@ -63,9 +89,10 @@ timeout 5m sh -c 'until nc -z localhost 27019; do sleep 1; done' echo "Waiting on MongoDB to restart on 27020" timeout 5m sh -c 'until nc -z localhost 27020; do sleep 1; done' +# mongodb://rs_user:rs_test@localhost:27018,localhost:27019,localhost:27020/?authSource=admin&replicaSet=rs0 # Verify that we can auth to the restarted replica set mongo --host rs0/localhost:27018,localhost:27019,localhost:27020 \ --username rs_user \ --password rs_test \ --authenticationDatabase admin \ - --eval 
'db.serverStatus()' > /dev/null + --eval 'db.serverStatus()' diff --git a/test/replica_set_SUITE.erl b/test/replica_set_SUITE.erl new file mode 100644 index 00000000..a622eaf5 --- /dev/null +++ b/test/replica_set_SUITE.erl @@ -0,0 +1,93 @@ +-module(replica_set_SUITE). + +%% API +-export([]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-include("mongo_protocol.hrl"). + +-compile(export_all). + +all() -> + [todo_test, rp_mode_test]. + +init_per_suite(Config) -> + application:ensure_all_started(mongodb), + [{seed, + {rs, <<"rs0">>, [<<"localhost:27018">>, <<"localhost:27019">>, <<"localhost:27020">>]}}, + {user, <<"rs_user">>}, + {password, <<"rs_test">>}, + {collection, <<"test">>}] + ++ Config. + +end_per_suite(_Config) -> + ok. + +init_per_testcase(_Case, Config) -> + case mongoc_connect(Config) of + {ok, Topology} -> + [{topology, Topology}] ++ Config; + {error, _} -> + {skip, cannot_connect_rs_cluster} + end. + +end_per_testcase(_Case, Config) -> + mongoc:disconnect(?config(topology, Config)), + ok. + +%% Tests +todo_test(Config) -> + Topology = ?config(topology, Config), + State = mc_topology:get_state(Topology), + ct:pal("State: ~p", [State]), + Status = mongoc:status(Topology), + ct:pal("Status: ~p", [Status]), + PrimaryPool = mc_topology:get_pool(Topology, #{rp_mode => primary}), + ct:pal("PrimaryPool: ~p", [PrimaryPool]), + SecondaryPool = mc_topology:get_pool(Topology, #{rp_mode => secondary}), + ct:pal("SecondaryPool: ~p", [SecondaryPool]), + Config. 
+ +rp_mode_test(Config) -> + {rs, _, Hosts} = ?config(seed, Config), + HostsAndPorts = [binary:split(Host, <<":">>) || Host <- Hosts], + BaseConnectArgs = [{login, ?config(user, Config)}, {password, ?config(password, Config)}], + TaggedPorts = + lists:map(fun([_, Port]) -> + ConnectArgs = [{port, binary_to_integer(Port)}] ++ BaseConnectArgs, + {ok, Connection} = mc_worker_api:connect(ConnectArgs), + {true, #{<<"ismaster">> := IsPrimary}} = + mc_worker_api:command(Connection, {isMaster, 1}), + mc_worker_api:disconnect(Connection), + {binary_to_integer(Port), IsPrimary} + end, + HostsAndPorts), + [{PrimaryPort, true}] = + lists:filter(fun({_Port, IsPrimary}) -> IsPrimary end, TaggedPorts), + ct:pal("PrimaryPort ~p", [PrimaryPort]), + SPL = lists:subtract(TaggedPorts, [{PrimaryPort, true}]), + [SecondaryPort | _] = [Port || {Port, _} <- SPL], + ct:pal("SecondaryPort ~p", [SecondaryPort]), + {ok, PrimaryConn} = mc_worker_api:connect([{port, PrimaryPort}] ++ BaseConnectArgs), + InsertResult = + mc_worker_api:insert(PrimaryConn, + ?config(collection, Config), + #{<<"foo">> => <<"bar">>, <<"a">> => <<"b">>}), + ct:pal("InsertResult ~p", [InsertResult]), + {ok, SecondaryConn} = mc_worker_api:connect([{port, SecondaryPort}, {r_mode, slave_ok}] ++ BaseConnectArgs), + FindOneResult = + mc_worker_api:find_one(SecondaryConn, + ?config(collection, Config), + #{<<"foo">> => <<"bar">>}), + ct:pal("FindOneResult ~p", [FindOneResult]), + ok. + +%% Private +mongoc_connect(Config) -> + mongoc:connect(?config(seed, Config), + [{rp_mode, secondaryPreferred}], + [{database, ?config(collection, Config)}, + {login, ?config(user, Config)}, + {password, ?config(password, Config)}]).