Exclude gRPC server tests from sanitizer builds #648

Merged: 5 commits, Apr 27, 2022
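This PR compiles the gRPC-backed test cases only when the build is not sanitizer-instrumented, by guarding them with the SILKWORM_SANITIZE preprocessor symbol (presumably supplied by the sanitizer build configuration, e.g. as -DSILKWORM_SANITIZE on the compiler command line). A minimal sketch of the pattern, with a placeholder test name not taken from the diffs below, looks like this:

#include <catch2/catch.hpp>

// Exclude gRPC tests from sanitizer builds due to data race warnings
#ifndef SILKWORM_SANITIZE
TEST_CASE("placeholder gRPC test", "[silkworm][node][rpc]") {
    // gRPC server/client interaction that currently triggers data race warnings under sanitizers
    CHECK(true);
}
#endif  // SILKWORM_SANITIZE

With the guard in place, a sanitizer build sees an empty translation unit for these tests, while regular builds compile and run them unchanged.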
6 changes: 4 additions & 2 deletions node/silkworm/rpc/backend_kv_server_test.cpp
@@ -325,6 +325,8 @@ struct BackEndKvE2eTest {

namespace silkworm::rpc {

// Exclude gRPC tests from sanitizer builds due to data race warnings
#ifndef SILKWORM_SANITIZE
TEST_CASE("BackEndKvServer", "[silkworm][node][rpc]") {
silkworm::log::set_verbosity(silkworm::log::Level::kNone);
Grpc2SilkwormLogGuard log_guard;
@@ -784,7 +786,7 @@ TEST_CASE("BackEndKvServer E2E: more than one Sentry all status KO", "[silkworm]
}
}

#ifndef SILKWORM_SANITIZE

TEST_CASE("BackEndKvServer E2E: trigger server-side write error", "[silkworm][node][rpc]") {
{
const uint32_t kNumTxs{1000};
@@ -808,7 +810,6 @@ TEST_CASE("BackEndKvServer E2E: trigger server-side write error", "[silkworm][no
}
// Server-side lifecycle of Tx calls must be OK.
}
#endif // SILKWORM_SANITIZE

TEST_CASE("BackEndKvServer E2E: Tx max simultaneous readers exceeded", "[silkworm][node][rpc]") {
NodeSettings node_settings;
@@ -2120,5 +2121,6 @@ TEST_CASE("BackEndKvServer E2E: bidirectional max TTL duration", "[silkworm][nod
CHECK(status.ok());
}
}
#endif // SILKWORM_SANITIZE

} // namespace silkworm::rpc
36 changes: 12 additions & 24 deletions node/silkworm/rpc/completion_end_point_test.cpp
@@ -17,7 +17,6 @@
#include "completion_end_point.hpp"

#include <chrono>
#include <future>
#include <thread>

#include <catch2/catch.hpp>
@@ -43,38 +42,29 @@ TEST_CASE("CompletionEndPoint", "[silkworm][rpc][completion_end_point]") {
std::this_thread::sleep_for(100us);
}
});
std::this_thread::yield();
completion_end_point.shutdown();
CHECK_NOTHROW(completion_end_point_thread.join());
}

SECTION("posting handler completion to I/O execution context") {
// Exclude gRPC test from sanitizer builds due to data race warnings
#ifndef SILKWORM_SANITIZE
SECTION("executing completion handler") {
grpc::CompletionQueue queue;
CompletionEndPoint completion_end_point{queue};
auto completion_end_point_thread = std::thread([&]() {
while (completion_end_point.poll_one() >= 0) {
std::this_thread::sleep_for(100us);
}
});
std::this_thread::yield();
std::promise<void> p;
std::future<void> f = p.get_future();
class AsyncCompletionHandler {
public:
explicit AsyncCompletionHandler(std::promise<void>& p) : p_(p) {}
void operator()(bool /*ok*/) { p_.set_value(); };
private:
std::promise<void>& p_;
bool executed{false};
TagProcessor tag_processor = [&completion_end_point, &executed](bool) {
executed = true;
completion_end_point.shutdown();
};
AsyncCompletionHandler handler{p};
TagProcessor tag_processor = handler;
auto alarm_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(50, GPR_TIMESPAN));
grpc::Alarm alarm;
alarm.Set(&queue, alarm_deadline, &tag_processor);
f.get();
completion_end_point.shutdown();
CHECK_NOTHROW(completion_end_point_thread.join());
while (completion_end_point.poll_one() >= 0) {
std::this_thread::sleep_for(100us);
}
CHECK(executed);
}
#endif // SILKWORM_SANITIZE

SECTION("exiting on completion queue already shutdown") {
grpc::CompletionQueue queue;
@@ -85,7 +75,6 @@ TEST_CASE("CompletionEndPoint", "[silkworm][rpc][completion_end_point]") {
std::this_thread::sleep_for(100us);
}
});
std::this_thread::yield();
CHECK_NOTHROW(completion_end_point_thread.join());
}

@@ -97,7 +86,6 @@ TEST_CASE("CompletionEndPoint", "[silkworm][rpc][completion_end_point]") {
std::this_thread::sleep_for(100us);
}
});
std::this_thread::yield();
completion_end_point.shutdown();
CHECK_NOTHROW(completion_end_point_thread.join());
CHECK_NOTHROW(completion_end_point.shutdown());
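The rewritten "executing completion handler" section above drops the std::promise/std::future handshake in favour of a plain executed flag: the alarm's tag processor sets the flag and shuts the end-point down, and the test thread itself polls the queue until shutdown. For reference, a self-contained illustration of the underlying grpc::Alarm / completion-queue mechanism, written against a raw grpc::CompletionQueue rather than silkworm's CompletionEndPoint wrapper so it compiles on its own (fire_alarm_and_wait is a hypothetical helper, not part of this PR), could look like:

#include <grpc/support/time.h>
#include <grpcpp/alarm.h>
#include <grpcpp/completion_queue.h>

// Hypothetical helper: post an alarm tag on a completion queue and wait for it to be delivered.
bool fire_alarm_and_wait() {
    grpc::CompletionQueue queue;
    grpc::Alarm alarm;
    int tag_value{0};  // any stable address can serve as the completion tag
    const auto deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(50, GPR_TIMESPAN));
    alarm.Set(&queue, deadline, &tag_value);  // the tag comes back through the queue when the deadline expires

    void* tag{nullptr};
    bool ok{false};
    queue.Next(&tag, &ok);  // blocks until the alarm fires (ok == true on expiry, false if cancelled)
    const bool fired = ok && tag == &tag_value;

    queue.Shutdown();
    while (queue.Next(&tag, &ok)) {  // drain remaining events so the queue can be destroyed safely
    }
    return fired;
}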
3 changes: 3 additions & 0 deletions node/silkworm/rpc/server_context_pool_test.cpp
@@ -39,6 +39,8 @@ inline std::ostream& null_stream() {

namespace silkworm::rpc {

// Exclude gRPC tests from sanitizer builds due to data race warnings
#ifndef SILKWORM_SANITIZE
TEST_CASE("ServerContext", "[silkworm][rpc][server_context]") {
grpc::ServerBuilder builder;
std::unique_ptr<grpc::ServerCompletionQueue> scq = builder.AddCompletionQueue();
@@ -95,5 +97,6 @@ TEST_CASE("ServerContextPool", "[silkworm][rpc][server_context]") {
CHECK(server_context_pool.num_contexts() == 2);
}
}
#endif // SILKWORM_SANITIZE

} // namespace silkworm::rpc
5 changes: 4 additions & 1 deletion node/silkworm/rpc/server_test.cpp
@@ -49,8 +49,10 @@ class EmptyServer : public Server {
};

// TODO(canepat): better copy grpc_pick_unused_port_or_die to generate unused port
constexpr const char* kTestAddressUri = "localhost:12345";
static const std::string kTestAddressUri{"localhost:12345"};

// Exclude gRPC tests from sanitizer builds due to data race warnings
#ifndef SILKWORM_SANITIZE
TEST_CASE("Barebone gRPC Server", "[silkworm][node][rpc]") {
grpc::ServerBuilder builder;
// Add *at least one non-empty* ServerCompletionQueue (otherwise: ASAN SEGV error in Shutdown)
@@ -191,5 +193,6 @@ TEST_CASE("Server::join", "[silkworm][node][rpc]") {
server_thread.join();
}
}
#endif // SILKWORM_SANITIZE

} // namespace silkworm::rpc
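The TODO in server_test.cpp above notes that the fixed kTestAddressUri should eventually be replaced by something along the lines of gRPC's grpc_pick_unused_port_or_die. One hypothetical way to pick a free port (not part of this PR; error handling omitted) is to bind a socket to port 0 and read back the kernel-assigned port, keeping in mind that another process can still grab the port between closing the probe socket and the test server binding it:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <string>

// Hypothetical helper: ask the kernel for an ephemeral port and build a localhost URI from it.
inline std::string pick_unused_address_uri() {
    const int fd = ::socket(AF_INET, SOCK_STREAM, 0);
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;  // port 0 means "pick any free port"
    ::bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
    socklen_t len = sizeof(addr);
    ::getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &len);
    const int port = ntohs(addr.sin_port);
    ::close(fd);  // the port could be reused by another process before the test server binds it
    return "localhost:" + std::to_string(port);
}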