From d11da7cf9c64bee82b9ef4dfa2c1ab348ccf1072 Mon Sep 17 00:00:00 2001 From: Huang-Ming Huang Date: Mon, 29 May 2023 10:11:04 -0500 Subject: [PATCH 001/191] simplify listener --- .../libfc/include/fc/network/listener.hpp | 311 ++++++++---------- plugins/http_plugin/http_plugin.cpp | 58 ++-- plugins/net_plugin/net_plugin.cpp | 108 +++--- .../state_history_plugin.cpp | 67 ++-- .../tests/session_test.cpp | 28 +- 5 files changed, 263 insertions(+), 309 deletions(-) diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp index 807323a8bd..4cf701780e 100644 --- a/libraries/libfc/include/fc/network/listener.hpp +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -32,94 +32,50 @@ inline std::pair split_host_port(std::string_view endp } } -///////////////////////////////////////////////////////////////////////////////////////////// +template +struct listener_base; + +template <> +struct listener_base { + listener_base(const std::string&) {} +}; + +template <> +struct listener_base { + std::filesystem::path path_; + listener_base(const std::string& local_address) : path_(std::filesystem::absolute(local_address)) {} + ~listener_base() { + std::error_code ec; + std::filesystem::remove(path_, ec); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// /// -/// fc::listener is template class to simplify the code for accepting new socket connections. +/// @brief fc::listener is template class to simplify the code for accepting new socket connections. /// It can be used for both tcp or Unix socket connection. /// -/// Example Usage: -/// \code{.cpp} -/// -/// class shared_state_type; -/// -/// template -/// struct example_session : std::enable_shared_from_this> { -/// using socket_type = Protocol::socket; -/// socket_type&& socket_; -/// shared_state_type& shared_state_; -/// example_session(socket_type&& socket, shared_state_type& shared_state) -/// : socket_(std::move(socket)), shared_state_(shared_state_) {} -/// -/// // ... 
-/// void start(); -/// }; -/// -/// template -/// struct example_listener : fc::listener, Protocol>{ -/// static constexpr uint32_t accept_timeout_ms = 200; -/// shared_state_type& shared_state_; -/// -/// example_listener(boost::asio::io_context& executor, -/// logger& logger, -/// const std::string& local_address, -/// const typename Protocol::endpoint& endpoint, -/// shared_state_type& shared_state) -/// : fc::listener, Protocol> -/// (executor, logger, boost::posix_time::milliseconds(accept_timeout_ms), local_address, endpoint) -/// , shared_state_(shared_state) {} -/// -/// std::string extra_listening_log_info() { -/// return shared_state_.info_to_be_printed_after_address_is_resolved_and_listening; -/// } -/// -/// void create_session(Protocol::socket&& sock) { -/// auto session = std::make_shared(std::move(sock), shared_state_); -/// session->start(); -/// } -/// }; -/// -/// int main() { -/// boost::asio::io_context ioc; -/// fc::logger logger = fc::logger::get(DEFAULT_LOGGER); -/// shared_state_type shared_state{...}; -/// -/// // usage for accepting tcp connection -/// // notice that it only throws std::system_error, not fc::exception -/// example_listener::create(executor, logger, "localhost:8080", std::ref(shared_state)); -/// -/// // usage for accepting unix socket connection -/// example_listener::create(executor, logger, "tmp.sock", -/// std::ref(shared_state)); -/// -/// ioc.run(); -/// return 0; -/// } -/// \endcode +/// @note Users should use fc::create_listener() instead, this class is the implementation +/// detail for fc::create_listener(). /// ///////////////////////////////////////////////////////////////////////////////////////////// - -template -struct listener : std::enable_shared_from_this { +template +struct listener : listener_base, std::enable_shared_from_this> { using endpoint_type = typename Protocol::endpoint; typename Protocol::acceptor acceptor_; boost::asio::deadline_timer accept_error_timer_; boost::posix_time::time_duration accept_timeout_; logger& logger_; - std::string local_address_; + std::string extra_listening_log_info_; + CreateSession create_session_; listener(boost::asio::io_context& executor, logger& logger, boost::posix_time::time_duration accept_timeout, - const std::string& local_address, const endpoint_type& endpoint) - : acceptor_(executor, endpoint), accept_error_timer_(executor), accept_timeout_(accept_timeout), logger_(logger), - local_address_(std::is_same_v - ? 
local_address - : std::filesystem::absolute(local_address).string()) {} - - ~listener() { - if constexpr (std::is_same_v) { - std::filesystem::remove(local_address_); - } - } + const std::string& local_address, const endpoint_type& endpoint, + const std::string& extra_listening_log_info, const CreateSession& create_session) + : listener_base(local_address), acceptor_(executor, endpoint), accept_error_timer_(executor), + accept_timeout_(accept_timeout), logger_(logger), extra_listening_log_info_(extra_listening_log_info), + create_session_(create_session) {} void do_accept() { acceptor_.async_accept([self = this->shared_from_this()](boost::system::error_code ec, auto&& peer_socket) { @@ -130,7 +86,7 @@ struct listener : std::enable_shared_from_this { template void on_accept(boost::system::error_code ec, Socket&& socket) { if (!ec) { - static_cast(this)->create_session(std::forward(socket)); + create_session_(std::forward(socket)); do_accept(); } else if (ec == boost::system::errc::too_many_files_open) { // retry accept() after timeout to avoid cpu loop on accept @@ -157,8 +113,6 @@ struct listener : std::enable_shared_from_this { } } - const char* extra_listening_log_info() { return ""; } - void log_listening(const endpoint_type& endpoint, const std::string& local_address) { std::string info; if constexpr (std::is_same_v) { @@ -166,111 +120,138 @@ struct listener : std::enable_shared_from_this { } else { info = "Unix socket " + local_address; } - info += static_cast(this)->extra_listening_log_info(); + info += extra_listening_log_info_; fc_ilog(logger_, "start listening on ${info}", ("info", info)); } +}; - /// @brief Create listeners to listen on endpoints resolved from address - /// @param ...args The arguments to forward to the listener constructor so that they can be accessed - /// from create_session() to construct the customized session objects. - /// @throws std::system_error - template - static void create(boost::asio::io_context& executor, logger& logger, const std::string& address, Args&&... args) { - using tcp = boost::asio::ip::tcp; - if constexpr (std::is_same_v) { - auto [host, port] = split_host_port(address); - if (port.empty()) { - fc_elog(logger, "port is not specified for address ${addr}", ("addr", address)); - throw std::system_error(std::make_error_code(std::errc::bad_address)); - } - - boost::system::error_code ec; - tcp::resolver resolver(executor); - auto endpoints = resolver.resolve(host, port, tcp::resolver::passive, ec); - if (ec) { - fc_elog(logger, "failed to resolve address: ${msg}", ("msg", ec.message())); - throw std::system_error(ec); - } +/// @brief create a stream-oriented socket listener which listens on the specified \c address and calls \c +/// create_session whenever a socket is accepted. +/// +/// @details +/// This function is used for listening on TCP or Unix socket address and creating corresponding session when the +/// socket is accepted. +/// +/// For TCP socket, the address format can be :port or :port where the `:port` part is mandatory. +/// If only the port is specified, all network interfaces are listened. The function can listen on multiple IP addresses +/// if the specified hostname is resolved to multiple IP addresses; in other words, it can create more than one +/// fc::listener objects. If port is not specified or none of the resolved address can be listened, an std::system_error +/// with std::errc::bad_address error code will be thrown. 
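+///
+/// A minimal TCP usage sketch (illustrative only: `my_session` is a hypothetical
+/// session type, and the address/timeout are example values):
+/// \code{.cpp}
+///   boost::asio::io_context ioc;
+///   fc::logger logger = fc::logger::get(DEFAULT_LOGGER);
+///   fc::create_listener<boost::asio::ip::tcp>(
+///      ioc, logger, boost::posix_time::milliseconds(100), "localhost:8080",
+///      "" /* extra_listening_log_info */,
+///      [](boost::asio::ip::tcp::socket&& sock) {
+///         // hand the accepted socket to a session object
+///         std::make_shared<my_session>(std::move(sock))->start();
+///      });
+///   ioc.run();
+/// \endcode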
+/// +/// For Unix socket, this function will temporary change current working directory to the parent of the specified \c +/// address (i.e. socket file path), listen on the filename component of the path, and then restore the working +/// directory before return. This is the workaround for the socket file paths limitation which is around 100 characters. +/// +/// The lifetime of the created listener objects is controlled by \c executor, the created objects will be destroyed +/// when \c executor.stop() is called. +/// +/// @note +/// This function is not thread safe for Unix socket because it will temporarily change working directory without any +/// lock. Any code which depends the current working directory (such as opening files with relative paths) in other +/// threads should be protected. +/// +/// @tparam Protocol either \c boost::asio::ip::tcp or \c boost::asio::local::stream_protocol +/// @throws std::system_error or boost::system::system_error +template +void create_listener(boost::asio::io_context& executor, logger& logger, + boost::posix_time::time_duration accept_timeout, const std::string& address, + const std::string& extra_listening_log_info, const CreateSession& create_session) { + using tcp = boost::asio::ip::tcp; + if constexpr (std::is_same_v) { + auto [host, port] = split_host_port(address); + if (port.empty()) { + fc_elog(logger, "port is not specified for address ${addr}", ("addr", address)); + throw std::system_error(std::make_error_code(std::errc::bad_address)); + } - int listened = 0; - std::optional unspecified_ipv4_addr; - bool has_unspecified_ipv6_only = false; + boost::system::error_code ec; + tcp::resolver resolver(executor); + auto endpoints = resolver.resolve(host, port, tcp::resolver::passive, ec); + if (ec) { + fc_elog(logger, "failed to resolve address: ${msg}", ("msg", ec.message())); + throw std::system_error(ec); + } - auto create_server = [&](const auto& endpoint) { - const auto& ip_addr = endpoint.address(); - try { - auto server = std::make_shared(executor, logger, address, endpoint, std::forward(args)...); - server->log_listening(endpoint, address); - server->do_accept(); - ++listened; - has_unspecified_ipv6_only = ip_addr.is_unspecified() && ip_addr.is_v6(); - if (has_unspecified_ipv6_only) { - boost::asio::ip::v6_only option; - server->acceptor_.get_option(option); - has_unspecified_ipv6_only &= option.value(); - } + int listened = 0; + std::optional unspecified_ipv4_addr; + bool has_unspecified_ipv6_only = false; - } catch (boost::system::system_error& ex) { - fc_wlog(logger, "unable to listen on ${ip_addr}:${port} resolved from ${address}: ${msg}", - ("ip_addr", ip_addr.to_string())("port", endpoint.port())("address", address)("msg", ex.what())); + auto create_listener = [&](const auto& endpoint) { + const auto& ip_addr = endpoint.address(); + try { + auto listener = std::make_shared>( + executor, logger, accept_timeout, address, endpoint, extra_listening_log_info, create_session); + listener->log_listening(endpoint, address); + listener->do_accept(); + ++listened; + has_unspecified_ipv6_only = ip_addr.is_unspecified() && ip_addr.is_v6(); + if (has_unspecified_ipv6_only) { + boost::asio::ip::v6_only option; + listener->acceptor_.get_option(option); + has_unspecified_ipv6_only &= option.value(); } - }; - for (const auto& ep : endpoints) { - const auto& endpoint = ep.endpoint(); - const auto& ip_addr = endpoint.address(); - if (ip_addr.is_unspecified() && ip_addr.is_v4() && endpoints.size() > 1) { - // it is an error to bind a socket to the 
same port for both ipv6 and ipv4 INADDR_ANY address when - // the system has ipv4-mapped ipv6 enabled by default, we just skip the ipv4 for now. - unspecified_ipv4_addr = endpoint; - continue; - } - create_server(endpoint); + } catch (boost::system::system_error& ex) { + fc_wlog(logger, "unable to listen on ${ip_addr}:${port} resolved from ${address}: ${msg}", + ("ip_addr", ip_addr.to_string())("port", endpoint.port())("address", address)("msg", ex.what())); } + }; - if (unspecified_ipv4_addr.has_value() && has_unspecified_ipv6_only) { - create_server(*unspecified_ipv4_addr); + for (const auto& ep : endpoints) { + const auto& endpoint = ep.endpoint(); + const auto& ip_addr = endpoint.address(); + if (ip_addr.is_unspecified() && ip_addr.is_v4() && endpoints.size() > 1) { + // it is an error to bind a socket to the same port for both ipv6 and ipv4 INADDR_ANY address when + // the system has ipv4-mapped ipv6 enabled by default, we just skip the ipv4 for now. + unspecified_ipv4_addr = endpoint; + continue; } + create_listener(endpoint); + } - if (listened == 0) { - fc_elog(logger, "none of the addresses resolved from ${addr} can be listened to", ("addr", address)); - throw std::system_error(std::make_error_code(std::errc::bad_address)); - } - } else { - using stream_protocol = boost::asio::local::stream_protocol; - static_assert(std::is_same_v); + if (unspecified_ipv4_addr.has_value() && has_unspecified_ipv6_only) { + create_listener(*unspecified_ipv4_addr); + } - namespace fs = std::filesystem; - auto cwd = fs::current_path(); - fs::path sock_path = address; + if (listened == 0) { + fc_elog(logger, "none of the addresses resolved from ${addr} can be listened to", ("addr", address)); + throw std::system_error(std::make_error_code(std::errc::bad_address)); + } + } else { + using stream_protocol = boost::asio::local::stream_protocol; + static_assert(std::is_same_v); - fs::create_directories(sock_path.parent_path()); - // The maximum length of the socket path is defined by sockaddr_un::sun_path. On Linux, - // according to unix(7), it is 108 bytes. On FreeBSD, according to unix(4), it is 104 bytes. - // Therefore, we create the unix socket with the relative path to its parent path to avoid the - // problem. - fs::current_path(sock_path.parent_path()); - auto restore = fc::make_scoped_exit([cwd] { fs::current_path(cwd); }); + namespace fs = std::filesystem; + auto cwd = fs::current_path(); + fs::path sock_path = address; - endpoint_type endpoint{ sock_path.filename().string() }; + fs::create_directories(sock_path.parent_path()); + // The maximum length of the socket path is defined by sockaddr_un::sun_path. On Linux, + // according to unix(7), it is 108 bytes. On FreeBSD, according to unix(4), it is 104 bytes. + // Therefore, we create the unix socket with the relative path to its parent path to avoid the + // problem. + fs::current_path(sock_path.parent_path()); + auto restore = fc::make_scoped_exit([cwd] { fs::current_path(cwd); }); - boost::system::error_code ec; - stream_protocol::socket test_socket(executor); - test_socket.connect(endpoint, ec); + stream_protocol::endpoint endpoint{ sock_path.filename().string() }; - // looks like a service is already running on that socket, don't touch it... 
fail out - if (ec == boost::system::errc::success) { - fc_elog(logger, "The unix socket path ${addr} is already in use", ("addr", address)); - throw std::system_error(std::make_error_code(std::errc::address_in_use)); - } - // socket exists but no one home, go ahead and remove it and continue on - else if (ec == boost::system::errc::connection_refused) - fs::remove(sock_path); + boost::system::error_code ec; + stream_protocol::socket test_socket(executor); + test_socket.connect(endpoint, ec); - auto server = std::make_shared(executor, logger, address, endpoint, std::forward(args)...); - server->log_listening(endpoint, address); - server->do_accept(); + // looks like a service is already running on that socket, don't touch it... fail out + if (ec == boost::system::errc::success) { + fc_elog(logger, "The unix socket path ${addr} is already in use", ("addr", address)); + throw std::system_error(std::make_error_code(std::errc::address_in_use)); } + // socket exists but no one home, go ahead and remove it and continue on + else if (ec == boost::system::errc::connection_refused) + fs::remove(sock_path); + + auto listener = std::make_shared>( + executor, logger, accept_timeout, address, endpoint, extra_listening_log_info, create_session); + listener->log_listening(endpoint, address); + listener->do_accept(); } -}; +} } // namespace fc diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index bc40bb7885..f141c98335 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -111,34 +111,6 @@ namespace eosio { return result; } - template - struct beast_http_listener - : fc::listener, Protocol> { - using socket_type = typename Protocol::socket; - - static constexpr uint32_t accept_timeout_ms = 500; - http_plugin_state& state_; - api_category_set categories_ = {}; - - beast_http_listener(boost::asio::io_context& executor, fc::logger& logger, const std::string& local_address, - const typename Protocol::endpoint& endpoint, http_plugin_state& plugin_state, - api_category_set categories) - : fc::listener, Protocol>( - executor, logger, boost::posix_time::milliseconds(accept_timeout_ms), local_address, endpoint), - state_(plugin_state), categories_(categories) {} - - std::string extra_listening_log_info() { return " for API categories: " + category_names(categories_); } - - void create_session(socket_type&& socket) { - boost::system::error_code re_ec; - auto re = socket.remote_endpoint(re_ec); - std::string remote_endpoint = re_ec ? "unknown" : boost::lexical_cast(re); - std::make_shared>(std::move(socket), this->state_, std::move(remote_endpoint), - categories_, this->local_address_) - ->run_session(); - } - }; - class http_plugin_impl : public std::enable_shared_from_this { public: http_plugin_impl() = default; @@ -241,6 +213,30 @@ namespace eosio { }); } + template + void create_listener(const std::string& address, api_category_set categories) { + const boost::posix_time::milliseconds accept_timeout(500); + auto extra_listening_log_info = " for API categories: " + category_names(categories); + using socket_type = typename Protocol::socket; + auto create_session = [this, categories, address](socket_type&& socket) { + std::string remote_endpoint; + if constexpr (std::is_same_v) { + boost::system::error_code re_ec; + auto re = socket.remote_endpoint(re_ec); + remote_endpoint = re_ec ? 
"unknown" : fc::to_string(re); + } else { + remote_endpoint = address; + } + std::make_shared>( + std::move(socket), plugin_state, std::move(remote_endpoint), categories, address) + ->run_session(); + }; + + fc::create_listener(plugin_state.thread_pool.get_executor(), logger(), accept_timeout, + address, extra_listening_log_info, create_session); + + } + void create_beast_server(const std::string& address, api_category_set categories) { try { if (is_unix_socket_address(address)) { @@ -248,11 +244,9 @@ namespace eosio { fs::path sock_path = address; if (sock_path.is_relative()) sock_path = fs::weakly_canonical(app().data_dir() / sock_path); - beast_http_listener::create(plugin_state.thread_pool.get_executor(), logger(), - sock_path.string(), std::ref(plugin_state), categories); + create_listener(sock_path.string(), categories); } else { - beast_http_listener::create(plugin_state.thread_pool.get_executor(), logger(), address, - std::ref(plugin_state), categories); + create_listener(address, categories); } } catch (const fc::exception& e) { fc_elog(logger(), "http service failed to start for ${addr}: ${e}", diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index dfa1e47d85..13ab44318b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -518,6 +518,8 @@ namespace eosio { void plugin_shutdown(); bool in_sync() const; fc::logger& get_logger() { return logger; } + + void create_session(tcp::socket&& socket); }; // peer_[x]log must be called from thread in connection strand @@ -2498,68 +2500,57 @@ namespace eosio { } ) ); } - struct p2p_listener : public fc::listener { - static constexpr uint32_t accept_timeout_ms = 100; - eosio::net_plugin_impl* state_; - - p2p_listener(boost::asio::io_context& executor, fc::logger& logger, const std::string& local_address, - const tcp::endpoint& endpoint, eosio::net_plugin_impl* impl) - : fc::listener(executor, logger, boost::posix_time::milliseconds(accept_timeout_ms), - local_address, endpoint), - state_(impl) {} + - std::string extra_listening_log_info() { - return ", max clients is " + std::to_string(state_->connections.get_max_client_count()); - } + - void create_session(tcp::socket&& socket) { - uint32_t visitors = 0; - uint32_t from_addr = 0; - boost::system::error_code rec; - const auto& paddr_add = socket.remote_endpoint(rec).address(); - string paddr_str; - if (rec) { - fc_elog(logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); - } else { - paddr_str = paddr_add.to_string(); - state_->connections.for_each_connection([&visitors, &from_addr, &paddr_str](auto& conn) { - if (conn->socket_is_open()) { - if (conn->peer_address().empty()) { - ++visitors; - std::lock_guard g_conn(conn->conn_mtx); - if (paddr_str == conn->remote_endpoint_ip) { - ++from_addr; - } + void net_plugin_impl::create_session(tcp::socket&& socket) { + uint32_t visitors = 0; + uint32_t from_addr = 0; + boost::system::error_code rec; + const auto& paddr_add = socket.remote_endpoint(rec).address(); + string paddr_str; + if (rec) { + fc_elog(logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); + } else { + paddr_str = paddr_add.to_string(); + connections.for_each_connection([&visitors, &from_addr, &paddr_str](auto& conn) { + if (conn->socket_is_open()) { + if (conn->peer_address().empty()) { + ++visitors; + std::lock_guard g_conn(conn->conn_mtx); + if (paddr_str == conn->remote_endpoint_ip) { + ++from_addr; } } + } + }); + if (from_addr < max_nodes_per_host && + 
(auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || + visitors < connections.get_max_client_count())) { + fc_ilog(logger, "Accepted new connection: " + paddr_str); + + connection_ptr new_connection = std::make_shared(std::move(socket)); + new_connection->strand.post([new_connection, this]() { + if (new_connection->start_session()) { + connections.add(new_connection); + } }); - if (from_addr < state_->max_nodes_per_host && - (state_->auto_bp_peering_enabled() || state_->connections.get_max_client_count() == 0 || - visitors < state_->connections.get_max_client_count())) { - fc_ilog(logger, "Accepted new connection: " + paddr_str); - - connection_ptr new_connection = std::make_shared(std::move(socket)); - new_connection->strand.post([new_connection, state = state_]() { - if (new_connection->start_session()) { - state->connections.add(new_connection); - } - }); + } else { + if (from_addr >= max_nodes_per_host) { + fc_dlog(logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", + ("n", from_addr + 1)("ra", paddr_str)("l", max_nodes_per_host)); } else { - if (from_addr >= state_->max_nodes_per_host) { - fc_dlog(logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", - ("n", from_addr + 1)("ra", paddr_str)("l", state_->max_nodes_per_host)); - } else { - fc_dlog(logger, "max_client_count ${m} exceeded", ("m", state_->connections.get_max_client_count())); - } - // new_connection never added to connections and start_session not called, lifetime will end - boost::system::error_code ec; - socket.shutdown(tcp::socket::shutdown_both, ec); - socket.close(ec); + fc_dlog(logger, "max_client_count ${m} exceeded", ("m", connections.get_max_client_count())); } + // new_connection never added to connections and start_session not called, lifetime will end + boost::system::error_code ec; + socket.shutdown(tcp::socket::shutdown_both, ec); + socket.close(ec); } } - }; + } // only called from strand thread void connection::start_read_message() { @@ -2949,8 +2940,8 @@ namespace eosio { my_impl->mark_bp_connection(this); if (my_impl->exceeding_connection_limit(this)) { - // When auto bp peering is enabled, the p2p_listener check doesn't have enough information to determine - // if a client is a BP peer. In p2p_listener, it only has the peer address which a node is connecting + // When auto bp peering is enabled, create_session() check doesn't have enough information to determine + // if a client is a BP peer. In create_session(), it only has the peer address which a node is connecting // from, but it would be different from the address it is listening. The only way to make sure is when the // first handshake message is received with the p2p_address information in the message. Thus the connection // limit checking has to be here when auto bp peering is enabled. 
@@ -3886,7 +3877,14 @@
       app().executor().post(priority::highest, [my=my, address = std::move(listen_address)](){
          if (address.size()) {
             try {
-               p2p_listener::create(my->thread_pool.get_executor(), logger, address, my.get());
+               const boost::posix_time::milliseconds accept_timeout(100);
+
+               std::string extra_listening_log_info =
+                  ", max clients is " + std::to_string(my->connections.get_max_client_count());
+
+               fc::create_listener<tcp>(
+                  my->thread_pool.get_executor(), logger, accept_timeout, address, extra_listening_log_info,
+                  [my = my](tcp::socket&& socket) { my->create_session(std::move(socket)); });
             } catch (const std::exception& e) {
                fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}",
                         ("addr", address)("what", e.what()) );
diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp
index f783693193..52f0ce5c98 100644
--- 
a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -145,23 +145,6 @@ struct mock_state_history_plugin { using session_type = eosio::session; -struct listener : fc::listener { - mock_state_history_plugin& server_; - - listener(boost::asio::io_context& executor, fc::logger& logger, const std::string& local_address, - tcp::endpoint& endpoint, mock_state_history_plugin& server) - : fc::listener(executor, logger, boost::posix_time::milliseconds(100), local_address, endpoint) - , server_(server) { - endpoint = acceptor_.local_endpoint(); - } - - void create_session(tcp::socket&& peer_socket) { - auto s = std::make_shared(server_, std::move(peer_socket), server_.session_mgr); - s->start(); - server_.add_session(s); - } -}; - struct test_server : mock_state_history_plugin { std::vector threads; tcp::endpoint local_address{net::ip::make_address("127.0.0.1"), 0}; @@ -174,8 +157,17 @@ struct test_server : mock_state_history_plugin { threads.emplace_back([this]{ main_ioc.run(); }); threads.emplace_back([this]{ ship_ioc.run(); }); + auto create_session = [this](tcp::socket&& peer_socket) { + auto s = std::make_shared(*this, std::move(peer_socket), session_mgr); + s->start(); + add_session(s); + }; + // Create and launch a listening port - std::make_shared(ship_ioc, logger, "", local_address, *this)->do_accept(); + auto server = std::make_shared>( + ship_ioc, logger, boost::posix_time::milliseconds(100), "", local_address, "", create_session); + server->do_accept(); + local_address = server->acceptor_.local_endpoint(); } ~test_server() { From 26d1869c251dd4cae56ad9ab6b6f647a7fa92a6b Mon Sep 17 00:00:00 2001 From: Huang-Ming Huang Date: Fri, 2 Jun 2023 08:56:50 -0500 Subject: [PATCH 002/191] address PR comments --- libraries/libfc/include/fc/network/listener.hpp | 9 +++++---- plugins/http_plugin/http_plugin.cpp | 5 ++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp index 4cf701780e..05ca2f3904 100644 --- a/libraries/libfc/include/fc/network/listener.hpp +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -153,9 +153,9 @@ struct listener : listener_base, std::enable_shared_from_this -void create_listener(boost::asio::io_context& executor, logger& logger, - boost::posix_time::time_duration accept_timeout, const std::string& address, - const std::string& extra_listening_log_info, const CreateSession& create_session) { +void create_listener(boost::asio::io_context& executor, logger& logger, boost::posix_time::time_duration accept_timeout, + const std::string& address, const std::string& extra_listening_log_info, + const CreateSession& create_session) { using tcp = boost::asio::ip::tcp; if constexpr (std::is_same_v) { auto [host, port] = split_host_port(address); @@ -245,8 +245,9 @@ void create_listener(boost::asio::io_context& executor, logger& logger, throw std::system_error(std::make_error_code(std::errc::address_in_use)); } // socket exists but no one home, go ahead and remove it and continue on - else if (ec == boost::system::errc::connection_refused) + else if (ec == boost::system::errc::connection_refused) { fs::remove(sock_path); + } auto listener = std::make_shared>( executor, logger, accept_timeout, address, endpoint, extra_listening_log_info, create_session); diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index f141c98335..09bbc2bd5f 100644 --- 
a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -232,9 +232,8 @@ namespace eosio { ->run_session(); }; - fc::create_listener(plugin_state.thread_pool.get_executor(), logger(), accept_timeout, - address, extra_listening_log_info, create_session); - + fc::create_listener(plugin_state.thread_pool.get_executor(), logger(), accept_timeout, address, + extra_listening_log_info, create_session); } void create_beast_server(const std::string& address, api_category_set categories) { From bb9c1c923dc63330d1137e05d83a61d52ab2ead8 Mon Sep 17 00:00:00 2001 From: Huang-Ming Huang Date: Mon, 5 Jun 2023 09:14:44 -0500 Subject: [PATCH 003/191] address PR comments --- libraries/libfc/include/fc/network/listener.hpp | 2 +- plugins/http_plugin/tests/unit_tests.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp index 05ca2f3904..4f51d449d9 100644 --- a/libraries/libfc/include/fc/network/listener.hpp +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -244,8 +244,8 @@ void create_listener(boost::asio::io_context& executor, logger& logger, boost::p fc_elog(logger, "The unix socket path ${addr} is already in use", ("addr", address)); throw std::system_error(std::make_error_code(std::errc::address_in_use)); } - // socket exists but no one home, go ahead and remove it and continue on else if (ec == boost::system::errc::connection_refused) { + // socket exists but no one home, go ahead and remove it and continue on fs::remove(sock_path); } diff --git a/plugins/http_plugin/tests/unit_tests.cpp b/plugins/http_plugin/tests/unit_tests.cpp index f76d3b01d7..15fc50bef9 100644 --- a/plugins/http_plugin/tests/unit_tests.cpp +++ b/plugins/http_plugin/tests/unit_tests.cpp @@ -354,7 +354,7 @@ class app_log { int fork_app_and_redirect_stderr(const char* redirect_filename, std::initializer_list args) { int pid = fork(); if (pid == 0) { - freopen(redirect_filename, "w", stderr); + (void) freopen(redirect_filename, "w", stderr); bool ret = 0; try { appbase::scoped_app app; From daaf76d7f976246465b52c2d4a8ab2c44ce6f93b Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 7 Jun 2023 18:31:12 -0500 Subject: [PATCH 004/191] Use retry-num-blocks where possible to avoid polling in tests. 
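The cleos --retry-num-blocks option makes cleos itself block until the
submitted transaction appears in a block, so the harness no longer has to
submit and then poll. A rough sketch of the pattern (hypothetical helper;
the real changes are in the diff below):

    def add_retry(cmd: str, waitForTransBlock: bool, retry_num_blocks: int = 1) -> str:
        # When the caller asks to wait for inclusion, let cleos do the waiting via
        # --retry-num-blocks instead of polling with waitForTransBlockIfNeeded() afterwards.
        retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else ""
        return f"{cmd} {retryStr}".rstrip()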
--- tests/TestHarness/transactions.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index 7a67c3584e..3f173ff310 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -11,6 +11,8 @@ from .testUtils import Utils class Transactions(NodeosQueries): + retry_num_blocks_default = 1 + def __init__(self, host, port, walletMgr=None): super().__init__(host, port, walletMgr) @@ -18,9 +20,9 @@ def __init__(self, host, port, walletMgr=None): def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs=''): signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="system newaccount" - cmd='%s -j %s %s %s \'%s\' \'%s\' --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s" %s' % ( - cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, - account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL, additionalArgs) + cmd=(f'{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} \'{account.ownerPublicKey}\' ' + f'\'{account.activePublicKey}\' --stake-net "{stakeNet} {CORE_SYMBOL}" --stake-cpu ' + f'"{stakeCPU} {CORE_SYMBOL}" --buy-ram "{buyRAM} {CORE_SYMBOL}" {additionalArgs}') msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) @@ -37,9 +39,10 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran """Create account and return creation transactions. Return transaction json object. 
waitForTransBlock: wait on creation transaction id to appear in a block.""" signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) + retryStr = f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" cmdDesc="create account" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) + cmd=(f"{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} {account.ownerPublicKey} " + f"{account.activePublicKey} {retryStr}") msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) @@ -51,7 +54,7 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign): assert isinstance(amountStr, str) @@ -282,24 +285,25 @@ def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, wa signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system undelegatebw" - cmd="%s -j %s %s %s \"%s %s\" \"%s %s\"" % ( - cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) + retryStr=f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' + f'"{cpuQuantity} {CORE_SYMBOL}" {retryStr}') msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False, sign=False): signStr = NodeosQueries.sign_str(sign, [ producer.activePublicKey ]) cmdDesc="system regproducer" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, producer.name, producer.activePublicKey, url, location) + retryStr=f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" + cmd=f'{cmdDesc} -j {signStr} {producer.name} {producer.activePublicKey} {url} {location} {retryStr}' msg="producer=%s" % (producer.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans def vote(self, account, producers, waitForTransBlock=False, exitOnError=False, sign=False): signStr = NodeosQueries.sign_str(sign, [ account.activePublicKey ]) From 850969e0d172a36739bf7551e59bf80224f94c2b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 8 Jun 2023 16:19:03 -0500 Subject: [PATCH 005/191] gh-1104 First attempt to run libtester CI test from dev installs. 
--- .github/workflows/build.yaml | 40 ++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e3f60238f7..2558758fd5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -201,6 +201,46 @@ jobs: name: ${{matrix.platform}}-lr-logs path: '*-logs.tar.gz' + libtester-deb-install-test: + name: libtester deb install test + needs: [d, Build, dev-package] + if: always() && needs.dev-package.result == 'success' + strategy: + fail-fast: false + matrx: + platform: [ubuntu20] # Assuming want to do both ubuntu20 and ubuntu22 at some point so setting up with matrix + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + container: ${{fromJSON(needs.d.outputs.p)['ubuntu20'].image}} + steps: + - name: Download cdt + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: cdt + file: 'cdt_.*amd64.deb' + target: main + artifact-name: cdt_ubuntu_package_amd64 + token: ${{github.token}} + - name: Download leap-dev + uses: actions/download-artifact@v3 + with: + name: leap-dev-ubuntu20-amd64 + - name: Install cdt and leap-dev Packages + run: | + sudo apt install ./*.deb + sudo apt-get install cmake + rm ./*.deb + - name: checkout reference-contracts + uses: actions/checkout@v3 + with: + repository: AntelopeIO/reference-contracts + path: src + - name: Build & Test reference-contracts + run: | + cmake -S src -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake --build build -- -j $(nproc) + ctest --test-dir build/tests --output-on-failure -j $(nproc) + all-passing: name: All Required Tests Passed needs: [dev-package, tests, np-tests] From c9185d623a6b59ec533fd7ff37984d5c46e6e9b6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 8 Jun 2023 16:21:36 -0500 Subject: [PATCH 006/191] Fix spelling. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2558758fd5..e3b1898b28 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -207,7 +207,7 @@ jobs: if: always() && needs.dev-package.result == 'success' strategy: fail-fast: false - matrx: + matrix: platform: [ubuntu20] # Assuming want to do both ubuntu20 and ubuntu22 at some point so setting up with matrix runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here container: ${{fromJSON(needs.d.outputs.p)['ubuntu20'].image}} From 52431b90ccc27020cbbc329d06b722fb35fe4f44 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 8 Jun 2023 16:36:09 -0500 Subject: [PATCH 007/191] Try without sudo. --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e3b1898b28..abfe322700 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -227,8 +227,8 @@ jobs: name: leap-dev-ubuntu20-amd64 - name: Install cdt and leap-dev Packages run: | - sudo apt install ./*.deb - sudo apt-get install cmake + apt install ./*.deb + apt-get install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 From 607fe061999196fdeb4f6ec4038fee486b4aaae2 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 8 Jun 2023 18:37:54 -0500 Subject: [PATCH 008/191] Revert attempt to add retry to transactions.py since all are read/write. 
Expose silentErrors parameter for some transactions.py calls. Improve tests to exercise waitForTransBlock on createAccount, createInitializeAccount, transferFunds, regproducer, and delegatebw. Add exercising undelegatebw to a regression test (previously only tested in unit tests). --- tests/TestHarness/transactions.py | 30 ++++++++++------------- tests/nodeos_chainbase_allocation_test.py | 2 +- tests/nodeos_voting_test.py | 24 +++++++++--------- 3 files changed, 27 insertions(+), 29 deletions(-) diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index 3f173ff310..566d3d0e8d 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -11,8 +11,6 @@ from .testUtils import Utils class Transactions(NodeosQueries): - retry_num_blocks_default = 1 - def __init__(self, host, port, walletMgr=None): super().__init__(host, port, walletMgr) @@ -39,10 +37,9 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran """Create account and return creation transactions. Return transaction json object. waitForTransBlock: wait on creation transaction id to appear in a block.""" signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) - retryStr = f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" cmdDesc="create account" - cmd=(f"{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} {account.ownerPublicKey} " - f"{account.activePublicKey} {retryStr}") + cmd="%s -j %s %s %s %s %s" % ( + cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) @@ -54,7 +51,7 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) - return trans + return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign): assert isinstance(amountStr, str) @@ -279,31 +276,30 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False, sign=False): + def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False): if toAccount is None: toAccount=fromAccount signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system undelegatebw" - retryStr=f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" - cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' - f'"{cpuQuantity} {CORE_SYMBOL}" {retryStr}') + cmd="%s -j %s %s %s \"%s %s\" \"%s %s\"" % ( + cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, 
exitMsg=msg) self.trackCmdTransaction(trans) - return trans + return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False, sign=False): + def regproducer(self, producer, url, location, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False): signStr = NodeosQueries.sign_str(sign, [ producer.activePublicKey ]) cmdDesc="system regproducer" - retryStr=f"--retry-num-blocks {self.retry_num_blocks_default}" if waitForTransBlock else "" - cmd=f'{cmdDesc} -j {signStr} {producer.name} {producer.activePublicKey} {url} {location} {retryStr}' + cmd="%s -j %s %s %s %s %s" % ( + cmdDesc, signStr, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return trans + return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) def vote(self, account, producers, waitForTransBlock=False, exitOnError=False, sign=False): signStr = NodeosQueries.sign_str(sign, [ account.activePublicKey ]) diff --git a/tests/nodeos_chainbase_allocation_test.py b/tests/nodeos_chainbase_allocation_test.py index 634c254cfe..8a78c793f3 100755 --- a/tests/nodeos_chainbase_allocation_test.py +++ b/tests/nodeos_chainbase_allocation_test.py @@ -63,7 +63,7 @@ newProducerAcc = Account("newprod") newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - producerNode.createAccount(newProducerAcc, cluster.eosioAccount) + producerNode.createAccount(newProducerAcc, cluster.eosioAccount, waitForTransBlock=True) setProdsStr = '{"schedule": [' setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}' diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index ea51b5fd5e..7e6ab0368e 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -186,7 +186,9 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): node=cluster.getNode(i) node.producers=Cluster.parseProducers(i) for prod in node.producers: - trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) + trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, + waitForTransBlock=True if prod == node.producers[-1] else False, + silentErrors=False if prod == node.producers[-1] else True, exitOnError=True) node0=cluster.getNode(0) node1=cluster.getNode(1) @@ -198,21 +200,19 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, + waitForTransBlock=True if account == accounts[-1] else False, + stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) for account in 
accounts: Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", + waitForTransBlock=True if account == accounts[-1] else False) for account in accounts: - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) - + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, + waitForTransBlock=True if account == accounts[-1] else False, exitOnError=True) + # containers for tracking producers prodsActive={} for i in range(0, 4): @@ -227,6 +227,8 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): trans=node.vote(account, node.producers, waitForTransBlock=True) node=node1 + node.undelegatebw(account, 1.0000, 1.0000, waitForTransBlock=True, silentErrors=False, exitOnError=True) + setActiveProducers(prodsActive, node1.producers) verifyProductionRounds(trans, node2, prodsActive, 2) From 542b4e778ea626c380e8718fee1f001a6375c810 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 09:13:57 -0500 Subject: [PATCH 009/191] Add -y flag --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index abfe322700..909c5136a7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -227,8 +227,8 @@ jobs: name: leap-dev-ubuntu20-amd64 - name: Install cdt and leap-dev Packages run: | - apt install ./*.deb - apt-get install cmake + apt install -y ./*.deb + apt-get -y install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 From 33c446c7087fc5f715dd03a30c6e1a590591e8e4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 13:43:36 -0500 Subject: [PATCH 010/191] Use cached base image's already installed cmake. --- .github/workflows/build.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 909c5136a7..20806283f3 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -228,7 +228,6 @@ jobs: - name: Install cdt and leap-dev Packages run: | apt install -y ./*.deb - apt-get -y install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 From 1b1909af1c2636f05de2ec1de1dcb2baf135de33 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 13:44:24 -0500 Subject: [PATCH 011/191] Installed cmake doesn't support --test-dir so cd and then run tests. 
--- .github/workflows/build.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 20806283f3..3f1360b34a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -233,12 +233,13 @@ jobs: uses: actions/checkout@v3 with: repository: AntelopeIO/reference-contracts - path: src + path: reference-contracts - name: Build & Test reference-contracts run: | - cmake -S src -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build build -- -j $(nproc) - ctest --test-dir build/tests --output-on-failure -j $(nproc) + cd build/tests + ctest --output-on-failure -j $(nproc) all-passing: name: All Required Tests Passed From 361d5a68b2a9f225218f1298240b96849c13cce9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 14:29:37 -0500 Subject: [PATCH 012/191] Add libtester-make-dev-install-test --- .github/workflows/build.yaml | 55 ++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3f1360b34a..c814aefd23 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -115,6 +115,7 @@ jobs: with: name: leap-dev-ubuntu20-amd64 path: build/leap-dev*.deb + tests: name: Tests needs: [d, Build] @@ -241,6 +242,60 @@ jobs: cd build/tests ctest --output-on-failure -j $(nproc) + libtester-make-dev-install-test: + name: libtester make dev-install test + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + steps: + - name: Download leap builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + path: leap + - name: checkout cdt + uses: actions/checkout@v3 + with: + repository: AntelopeIO/cdt + submodules: recursive + path: cdt + - name: checkout reference-contracts + uses: actions/checkout@v3 + with: + repository: AntelopeIO/reference-contracts + path: reference-contracts + - name: Check directory structure + run: | + pwd + ls -l + - name: Extract leap build and make dev-install + run: | + cd leap + zstdcat build.tar.zst | tar x + cd build + make dev-install + cd ../.. + - name: make install cdt + run: | + cd cdt + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j $(nproc) + make install + cd ../.. 
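+      # With leap dev-installed and cdt installed system-wide above, the
+      # reference-contracts build in the next step should locate both through
+      # their installed CMake package configs (no -Dleap_DIR/-Dcdt_DIR hints).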
+ - name: Build & Test reference-contracts + run: | + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake --build build -- -j $(nproc) + cd build/tests + ctest --output-on-failure -j $(nproc) + all-passing: name: All Required Tests Passed needs: [dev-package, tests, np-tests] From a7d472f3f225c63afff6048839b6654bc32303e9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 14:34:04 -0500 Subject: [PATCH 013/191] Add libtester-build-tree-test --- .github/workflows/build.yaml | 51 ++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c814aefd23..91c45395d8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -296,6 +296,57 @@ jobs: cd build/tests ctest --output-on-failure -j $(nproc) + libtester-build-tree-test: + name: libtester build tree test + needs: [d, Build] + if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + steps: + - name: Download leap builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + path: leap + - name: checkout cdt + uses: actions/checkout@v3 + with: + repository: AntelopeIO/cdt + submodules: recursive + path: cdt + - name: checkout reference-contracts + uses: actions/checkout@v3 + with: + repository: AntelopeIO/reference-contracts + path: reference-contracts + - name: Check directory structure + run: | + pwd + ls -l + - name: Extract leap build and make dev-install + run: | + cd leap + zstdcat build.tar.zst | tar x + cd .. + - name: make install cdt + run: | + cd cdt + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j $(nproc) + cd ../.. + - name: Build & Test reference-contracts + run: | + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR="./cdt/build/lib/cmake/cdt" -Dleap_DIR="./leap/build/lib/cmake/leap" + cmake --build build -- -j $(nproc) + cd build/tests + ctest --output-on-failure -j $(nproc) + all-passing: name: All Required Tests Passed needs: [dev-package, tests, np-tests] From 04d9410b9e6ab3628a3ee24009574c73f033caf3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 15:02:45 -0500 Subject: [PATCH 014/191] Update build tool for using leap builddir. Fix/update build tree test. --- .github/workflows/build.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 91c45395d8..3e831d25db 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -278,7 +278,7 @@ jobs: cd leap zstdcat build.tar.zst | tar x cd build - make dev-install + ninja dev-install cd ../.. - name: make install cdt run: | @@ -327,17 +327,17 @@ jobs: run: | pwd ls -l - - name: Extract leap build and make dev-install + - name: Extract leap build run: | cd leap zstdcat build.tar.zst | tar x cd .. - - name: make install cdt + - name: Build cdt run: | cd cdt mkdir build cd build - cmake -DCMAKE_BUILD_TYPE=Release .. + cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="../../leap/build/lib/cmake/leap" .. make -j $(nproc) cd ../.. 
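+      # Nothing is installed in this build-tree variant: the next step instead
+      # points reference-contracts at the in-tree packages via -Dcdt_DIR and
+      # -Dleap_DIR.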
- name: Build & Test reference-contracts From 82d03eeb742029eac483baaaca6d8e37f67cd02e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 15:14:23 -0500 Subject: [PATCH 015/191] Attempt to make dev-packages for ubuntu20 and ubuntu22 and update libtester-deb-install-test to use both images. --- .github/workflows/build.yaml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3e831d25db..d161e21ec7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -88,8 +88,12 @@ jobs: name: Build leap-dev package needs: [d, Build] if: always() && needs.Build.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] runs-on: ubuntu-latest - container: ${{fromJSON(needs.d.outputs.p)['ubuntu20'].image}} + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - uses: actions/checkout@v3 with: @@ -97,7 +101,7 @@ jobs: - name: Download builddir uses: actions/download-artifact@v3 with: - name: ubuntu20-build + name: ${{matrix.platform}}-build - name: Build dev package run: | zstdcat build.tar.zst | tar x @@ -113,7 +117,7 @@ jobs: - name: Upload dev package uses: actions/upload-artifact@v3 with: - name: leap-dev-ubuntu20-amd64 + name: leap-dev-${{matrix.platform}}-amd64 path: build/leap-dev*.deb tests: @@ -209,9 +213,9 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu20] # Assuming want to do both ubuntu20 and ubuntu22 at some point so setting up with matrix + platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here - container: ${{fromJSON(needs.d.outputs.p)['ubuntu20'].image}} + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 @@ -225,7 +229,7 @@ jobs: - name: Download leap-dev uses: actions/download-artifact@v3 with: - name: leap-dev-ubuntu20-amd64 + name: leap-dev-${{matrix.platform}}-amd64 - name: Install cdt and leap-dev Packages run: | apt install -y ./*.deb From 36eaa0aef9154b2fac939d65e8139bd966894474 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 15:27:16 -0500 Subject: [PATCH 016/191] Fix expected dir structure for builddir --- .github/workflows/build.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d161e21ec7..ce94958a49 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -261,7 +261,6 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{matrix.platform}}-build - path: leap - name: checkout cdt uses: actions/checkout@v3 with: @@ -279,11 +278,10 @@ jobs: ls -l - name: Extract leap build and make dev-install run: | - cd leap zstdcat build.tar.zst | tar x cd build ninja dev-install - cd ../.. + cd .. - name: make install cdt run: | cd cdt From 3954d1eec6fefa8934497162396faaae42fb9377 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 15:43:10 -0500 Subject: [PATCH 017/191] For make dev-install need to rebuild leap.
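
The extracted builddir artifact was configured at a different absolute path, so its generated
install rules cannot be reused from the runner's checkout; rebuilding leap in place is the
straightforward fix (this rationale is inferred from the change below, not stated in the
original). A rough local equivalent of the new job steps, for reference only:

    # sketch of the equivalent local flow; the workflow obtains the sources via actions/checkout
    cd leap     # recursive checkout of the leap sources
    cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja
    cmake --build build
    cd build && ninja dev-install
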
--- .github/workflows/build.yaml | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ce94958a49..1d68245783 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -254,13 +254,14 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-beefy"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - - name: Download leap builddir - uses: actions/download-artifact@v3 + - name: checkout leap + uses: actions/checkout@v3 with: - name: ${{matrix.platform}}-build + submodules: recursive + path: leap - name: checkout cdt uses: actions/checkout@v3 with: @@ -276,12 +277,16 @@ jobs: run: | pwd ls -l - - name: Extract leap build and make dev-install + - name: leap build and make dev-install run: | - zstdcat build.tar.zst | tar x + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + cd leap + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja + cmake --build build cd build ninja dev-install - cd .. + cd ../.. - name: make install cdt run: | cd cdt From d897dfeb4f9cb6640cae560a7fdc31c360a054db Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 9 Jun 2023 17:04:18 -0400 Subject: [PATCH 018/191] Thread Safety Analysis - wip --- libraries/libfc/include/fc/mutex.hpp | 225 +++++++++++++++++++++++++++ plugins/net_plugin/CMakeLists.txt | 4 + plugins/net_plugin/net_plugin.cpp | 49 +++--- 3 files changed, 253 insertions(+), 25 deletions(-) create mode 100644 libraries/libfc/include/fc/mutex.hpp diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp new file mode 100644 index 0000000000..609b4d6f34 --- /dev/null +++ b/libraries/libfc/include/fc/mutex.hpp @@ -0,0 +1,225 @@ +#ifndef THREAD_SAFETY_ANALYSIS_MUTEX_HPP +#define THREAD_SAFETY_ANALYSIS_MUTEX_HPP + +// Enable thread safety attributes only with clang. +// The attributes can be safely erased when compiling with other compilers. +#if defined(__clang__) && (!defined(SWIG)) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) + +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) + +#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) + +#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) + +#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) + +#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) + +#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) + +#define RELEASE_GENERIC(...) THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE(...) 
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) + +#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) + +#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) + +#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +#include <mutex> +#include <shared_mutex> + +namespace fc { + +// Defines an annotated interface for mutexes. +// These methods can be implemented to use any internal mutex implementation. +class CAPABILITY("mutex") mutex { +private: + std::mutex mutex_; + +public: + // Acquire/lock this mutex exclusively. Only one thread can have exclusive + // access at any one time. Write operations to guarded data require an + // exclusive lock. + void lock() ACQUIRE() { mutex_.lock(); } + + // Release/unlock an exclusive mutex. + void unlock() RELEASE() { mutex_.unlock(); } + + // Try to acquire the mutex. Returns true on success, and false on failure. + bool try_lock() TRY_ACQUIRE(true) { return mutex_.try_lock(); } +}; + +// Defines an annotated interface for mutexes. +// These methods can be implemented to use any internal mutex implementation. +class CAPABILITY("shared_mutex") shared_mutex { +private: + std::shared_mutex mutex_; + +public: + // Acquire/lock this mutex exclusively. Only one thread can have exclusive + // access at any one time. Write operations to guarded data require an + // exclusive lock. + void lock() ACQUIRE() { mutex_.lock(); } + + // Acquire/lock this mutex for read operations, which require only a shared + // lock. This assumes a multiple-reader, single writer semantics. Multiple + // threads may acquire the mutex simultaneously as readers, but a writer + // must wait for all of them to release the mutex before it can acquire it + // exclusively. + void lock_shared() ACQUIRE_SHARED() { mutex_.lock_shared(); } + + // Release/unlock an exclusive mutex. + void unlock() RELEASE() { mutex_.unlock(); } + + // Release/unlock a shared mutex. + void unlock_shared() RELEASE_SHARED() { mutex_.unlock_shared(); } + + // Try to acquire the mutex. Returns true on success, and false on failure. + bool try_lock() TRY_ACQUIRE(true) { return mutex_.try_lock(); } + + // Try to acquire the mutex for read operations. + bool try_lock_shared() TRY_ACQUIRE_SHARED(true) { return mutex_.try_lock_shared(); } + + // Assert that this mutex is currently held by the calling thread. + // void AssertHeld() ASSERT_CAPABILITY(this); + + // Assert that this mutex is currently held for read operations. + // void AssertReaderHeld() ASSERT_SHARED_CAPABILITY(this); + + // For negative capabilities. + // const Mutex& operator!() const { return *this; } +}; + +// Tag types for selecting a constructor. +struct adopt_lock_t { +} inline constexpr adopt_lock = {}; +struct defer_lock_t { +} inline constexpr defer_lock = {}; +struct shared_lock_t { +} inline constexpr shared_lock = {}; +struct try_to_lock_t { +} inline constexpr try_to_lock = {}; + +// LockGuard is an RAII class that acquires a mutex in its constructor, and +// releases it in its destructor. +template <typename M> +class SCOPED_CAPABILITY lock_guard { +private: + M& mut; + +public: + // Acquire mu, implicitly acquire *this and associate it with mu.
+ lock_guard(M& mu) ACQUIRE(mu) + : mut(mu) { + mu.lock(); + } + + // Assume mu is held, implicitly acquire *this and associate it with mu. + lock_guard(M& mu, adopt_lock_t) REQUIRES(mu) + : mut(mu) {} + + ~lock_guard() RELEASE() { mut.unlock(); } +}; + +// unique_lock is an RAII class that acquires a mutex in its constructor, and +// releases it in its destructor. +template <typename M> +class SCOPED_CAPABILITY unique_lock { +private: + using mutex_type = M; + + M* mut; + bool locked; + +public: + unique_lock() noexcept + : mut(nullptr) + , locked(false) {} + + // Acquire mu, implicitly acquire *this and associate it with mu. + explicit unique_lock(M& mu) ACQUIRE(mu) + : mut(&mu) + , locked(true) { + mut->lock(); + } + + unique_lock(unique_lock&& o) noexcept + : mut(o.mut) + , locked(o.locked) { + o.locked = false; + o.mut = nullptr; + } + + // Assume mu is held, implicitly acquire *this and associate it with mu. + unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) + : mut(&mu) + , locked(true) {} + + // Assume mu is not held, implicitly acquire *this and associate it with mu. + unique_lock(M& mu, defer_lock_t) EXCLUDES(mu) + : mut(&mu) + , locked(false) {} + + // Release *this and all associated mutexes, if they are still held. + // There is no warning if the scope was already unlocked before. + ~unique_lock() RELEASE() { + if (locked) + mut->unlock(); + } + + // Acquire all associated mutexes exclusively. + void lock() ACQUIRE() { + mut->lock(); + locked = true; + } + + // Try to acquire all associated mutexes exclusively. + bool try_lock() TRY_ACQUIRE(true) { return locked = mut->try_lock(); } + + // Release all associated mutexes. Warn on double unlock. + void unlock() RELEASE() { + mut->unlock(); + locked = false; + } + + mutex_type* release() noexcept { + mutex_type* res = mut; + mut = nullptr; + locked = false; + return res; + } + + mutex_type* mutex() const noexcept { return mut; } + + bool owns_lock() const noexcept { return locked; } + + explicit operator bool() const noexcept { return locked; } +}; + +} // namespace fc + +#endif // THREAD_SAFETY_ANALYSIS_MUTEX_HPP diff --git a/plugins/net_plugin/CMakeLists.txt b/plugins/net_plugin/CMakeLists.txt index d204117ff7..8f40649ba6 100644 --- a/plugins/net_plugin/CMakeLists.txt +++ b/plugins/net_plugin/CMakeLists.txt @@ -3,6 +3,10 @@ add_library( net_plugin net_plugin.cpp ${HEADERS} ) +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + target_compile_options(net_plugin PUBLIC -Wthread-safety) +endif() + target_link_libraries( net_plugin chain_plugin producer_plugin appbase fc ) target_include_directories( net_plugin PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include") diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 572574cd4f..d8ac931d4e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include <fc/mutex.hpp> #include #include @@ -27,9 +28,7 @@ #include #include #include -#include #include -#include // should be defined for c++17, but clang++16 still has not implemented it #ifdef __cpp_lib_hardware_interference_size @@ -161,8 +160,8 @@ namespace eosio { >; alignas(hardware_destructive_interference_size) - mutable std::mutex unlinkable_blk_state_mtx; - unlinkable_block_state_index unlinkable_blk_state; + mutable fc::mutex unlinkable_blk_state_mtx; + unlinkable_block_state_index unlinkable_blk_state GUARDED_BY(unlinkable_blk_state_mtx); // 30 should
be plenty large enough as any unlinkable block that will be usable is likely to be usable // almost immediately (blocks came in from multiple peers out of order). 30 allows for one block per // producer round until lib. When queue larger than max, remove by block timestamp farthest in the past. @@ -171,7 +170,7 @@ namespace eosio { public: // returns block id of any block removed because of a full cache std::optional add_unlinkable_block( signed_block_ptr b, const block_id_type& id ) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); unlinkable_blk_state.insert( {id, std::move(b)} ); // does not insert if already there if (unlinkable_blk_state.size() > max_unlinkable_cache_size) { auto& index = unlinkable_blk_state.get(); @@ -184,7 +183,7 @@ namespace eosio { } unlinkable_block_state pop_possible_linkable_block(const block_id_type& blkid) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); auto& index = unlinkable_blk_state.get(); auto blk_itr = index.find( blkid ); if (blk_itr != index.end()) { @@ -196,7 +195,7 @@ namespace eosio { } void expire_blocks( uint32_t lib_num ) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); auto& stale_blk = unlinkable_blk_state.get(); stale_blk.erase( stale_blk.lower_bound( 1 ), stale_blk.upper_bound( lib_num ) ); } @@ -211,14 +210,14 @@ namespace eosio { }; alignas(hardware_destructive_interference_size) - std::mutex sync_mtx; - uint32_t sync_known_lib_num{0}; // highest known lib num from currently connected peers - uint32_t sync_last_requested_num{0}; // end block number of the last requested range, inclusive - uint32_t sync_next_expected_num{0}; // the next block number we need from peer + fc::mutex sync_mtx; + uint32_t sync_known_lib_num GUARDED_BY(sync_mtx) {0}; // highest known lib num from currently connected peers + uint32_t sync_last_requested_num GUARDED_BY(sync_mtx) {0}; // end block number of the last requested range, inclusive + uint32_t sync_next_expected_num GUARDED_BY(sync_mtx) {0}; // the next block number we need from peer connection_ptr sync_source; // connection we are currently syncing from - const uint32_t sync_req_span{0}; - const uint32_t sync_peer_limit{0}; + const uint32_t sync_req_span GUARDED_BY(sync_mtx) {0}; + const uint32_t sync_peer_limit GUARDED_BY(sync_mtx) {0}; alignas(hardware_destructive_interference_size) std::atomic sync_state{in_sync}; @@ -228,7 +227,7 @@ namespace eosio { constexpr static auto stage_str( stages s ); bool set_state( stages newstate ); bool is_sync_required( uint32_t fork_head_block_num ); - void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); + void request_next_chunk( fc::unique_lock&& g_sync, const connection_ptr& conn = connection_ptr() ) REQUIRES(sync_mtx); connection_ptr find_next_sync_node(); void start_sync( const connection_ptr& c, uint32_t target ); bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -1841,7 +1840,7 @@ namespace eosio { // called from c's connection strand void sync_manager::sync_reset_lib_num(const connection_ptr& c, bool closing) { - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); if( sync_state == in_sync ) { sync_source.reset(); } @@ -1873,11 +1872,11 @@ namespace eosio { } } - connection_ptr sync_manager::find_next_sync_node() { + connection_ptr sync_manager::find_next_sync_node() REQUIRES(sync_mtx) { fc_dlog(logger, "Number connections 
${s}, sync_next_expected_num: ${e}, sync_known_lib_num: ${l}", ("s", my_impl->connections.number_connections())("e", sync_next_expected_num)("l", sync_known_lib_num)); deque conns; - my_impl->connections.for_each_block_connection([&](const auto& c) { + my_impl->connections.for_each_block_connection([&](const auto& c) REQUIRES(sync_mtx) { if (c->should_sync_from(sync_next_expected_num, sync_known_lib_num)) { conns.push_back(c); } @@ -1921,7 +1920,7 @@ namespace eosio { } // call with g_sync locked, called from conn's connection strand - void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { + void sync_manager::request_next_chunk( fc::unique_lock&& g_sync, const connection_ptr& conn ) { auto chain_info = my_impl->get_chain_info(); fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, head: ${h}", @@ -1992,7 +1991,7 @@ namespace eosio { } ); } - bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { + bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) REQUIRES(sync_mtx) { fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) ("head", fork_head_block_num ) ); @@ -2003,7 +2002,7 @@ namespace eosio { // called from c's connection strand void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { - std::unique_lock g_sync( sync_mtx ); + fc::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } @@ -2026,7 +2025,7 @@ namespace eosio { // called from connection strand void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); peer_ilog( c, "reassign_fetch, our last req is ${cc}, next expected is ${ne}", ("cc", sync_last_requested_num)("ne", sync_next_expected_num) ); @@ -2169,7 +2168,7 @@ namespace eosio { } if( req.req_blocks.mode == catch_up ) { { - std::lock_guard g( sync_mtx ); + fc::lock_guard g( sync_mtx ); peer_ilog( c, "catch_up while in ${s}, fork head num = ${fhn} " "target LIB = ${lib} next_expected = ${ne}, id ${id}...", ("s", stage_str( sync_state ))("fhn", num)("lib", sync_known_lib_num) @@ -2234,7 +2233,7 @@ namespace eosio { // called from connection strand void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { c->block_status_monitor_.rejected(); - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); sync_last_requested_num = 0; if( c->block_status_monitor_.max_events_violated()) { peer_wlog( c, "block ${bn} not accepted, closing connection", ("bn", blk_num) ); @@ -2260,7 +2259,7 @@ namespace eosio { stages state = sync_state; peer_dlog( c, "state ${s}", ("s", stage_str( state )) ); if( state == head_catchup ) { - std::unique_lock g_sync( sync_mtx ); + fc::unique_lock g_sync( sync_mtx ); peer_dlog( c, "sync_manager in head_catchup state" ); sync_source.reset(); g_sync.unlock(); @@ -2294,7 +2293,7 @@ namespace eosio { send_handshakes(); } } else if( state == lib_catchup ) { - std::unique_lock g_sync( sync_mtx ); + fc::unique_lock g_sync( sync_mtx ); if( blk_applied && blk_num >= sync_known_lib_num ) { peer_dlog( c, "All caught up with last known last irreversible block resending handshake" ); set_state( in_sync ); From bde575eccc238c89091efbcfed39cbc921a966db Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 
9 Jun 2023 16:06:48 -0500 Subject: [PATCH 019/191] install missing CDT dependencies. --- .github/workflows/build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 1d68245783..278a0a8e09 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -292,6 +292,7 @@ jobs: cd cdt mkdir build cd build + apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release .. make -j $(nproc) make install From d024d9018c409667338e9b4ddf9a38665fd936a6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 16:07:20 -0500 Subject: [PATCH 020/191] Upgrade to beefy machine for these double builds. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 278a0a8e09..a5a14432d8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -312,7 +312,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-beefy"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Download leap builddir From 00640d79fd42ddd2e4a8d0c28584f306b93a7fba Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 16:09:57 -0500 Subject: [PATCH 021/191] install missing CDT dependencies. --- .github/workflows/build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a5a14432d8..fde2163486 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -345,6 +345,7 @@ jobs: cd cdt mkdir build cd build + apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="../../leap/build/lib/cmake/leap" .. make -j $(nproc) cd ../.. From e32ee2413e3d08a8250d17c094337f9c52840c20 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 16:15:32 -0500 Subject: [PATCH 022/191] This job no longer depends on Build job. --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index fde2163486..07db415ebe 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -248,8 +248,8 @@ jobs: libtester-make-dev-install-test: name: libtester make dev-install test - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [d] + if: always() && needs.d.result == 'success' strategy: fail-fast: false matrix: From f21cd8596014f7d1cb2902dd22200bb7645e1d2d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 9 Jun 2023 16:25:19 -0500 Subject: [PATCH 023/191] update archive listings before trying to install. --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 07db415ebe..7e31e2bb0f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -292,7 +292,7 @@ jobs: cd cdt mkdir build cd build - apt-get install -y pkg-config libcurl4-gnutls-dev + apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release .. 
make -j $(nproc) make install @@ -345,7 +345,7 @@ jobs: cd cdt mkdir build cd build - apt-get install -y pkg-config libcurl4-gnutls-dev + apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="../../leap/build/lib/cmake/leap" .. make -j $(nproc) cd ../.. From 7a8be73f510883558d915144015bee94f1a9ea30 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 9 Jun 2023 17:31:54 -0400 Subject: [PATCH 024/191] Fix one warning --- libraries/libfc/include/fc/mutex.hpp | 4 ++-- plugins/net_plugin/net_plugin.cpp | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp index 609b4d6f34..a331d2e985 100644 --- a/libraries/libfc/include/fc/mutex.hpp +++ b/libraries/libfc/include/fc/mutex.hpp @@ -167,7 +167,7 @@ class SCOPED_CAPABILITY unique_lock { mut->lock(); } - unique_lock(unique_lock&& o) noexcept + unique_lock(unique_lock&& o) noexcept ACQUIRE(o) : mut(o.mut) , locked(o.locked) { o.locked = false; @@ -206,7 +206,7 @@ class SCOPED_CAPABILITY unique_lock { locked = false; } - mutex_type* release() noexcept { + mutex_type* release() noexcept RETURN_CAPABILITY(this) { mutex_type* res = mut; mut = nullptr; locked = false; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d8ac931d4e..f1ecf3c3b9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1876,7 +1876,9 @@ namespace eosio { fc_dlog(logger, "Number connections ${s}, sync_next_expected_num: ${e}, sync_known_lib_num: ${l}", ("s", my_impl->connections.number_connections())("e", sync_next_expected_num)("l", sync_known_lib_num)); deque conns; - my_impl->connections.for_each_block_connection([&](const auto& c) REQUIRES(sync_mtx) { + my_impl->connections.for_each_block_connection([sync_next_expected_num = sync_next_expected_num, + sync_known_lib_num = sync_known_lib_num, + &conns](const auto& c) { if (c->should_sync_from(sync_next_expected_num, sync_known_lib_num)) { conns.push_back(c); } From 1508378ec1073bebe41db0935b1929557d7a5ba0 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 9 Jun 2023 22:20:42 -0400 Subject: [PATCH 025/191] Fix thread safety warnings for `sync_mtx` --- libraries/libfc/include/fc/mutex.hpp | 4 ++- plugins/net_plugin/net_plugin.cpp | 49 ++++++++++++++-------------- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp index a331d2e985..9184783199 100644 --- a/libraries/libfc/include/fc/mutex.hpp +++ b/libraries/libfc/include/fc/mutex.hpp @@ -167,13 +167,15 @@ class SCOPED_CAPABILITY unique_lock { mut->lock(); } +#if 0 unique_lock(unique_lock&& o) noexcept ACQUIRE(o) : mut(o.mut) , locked(o.locked) { o.locked = false; o.mut = nullptr; } - +#endif + // Assume mu is held, implicitly acquire *this and associate it with mu. 
unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) : mut(&mu) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f1ecf3c3b9..6d9dd0d1b4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -227,7 +227,7 @@ namespace eosio { constexpr static auto stage_str( stages s ); bool set_state( stages newstate ); bool is_sync_required( uint32_t fork_head_block_num ); - void request_next_chunk( fc::unique_lock&& g_sync, const connection_ptr& conn = connection_ptr() ) REQUIRES(sync_mtx); + void request_next_chunk( fc::mutex *m, const connection_ptr& conn = connection_ptr() ) RELEASE(sync_mtx); connection_ptr find_next_sync_node(); void start_sync( const connection_ptr& c, uint32_t target ); bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -1867,7 +1867,7 @@ namespace eosio { // if starting to sync need to always start from lib as we might be on our own fork uint32_t lib_num = my_impl->get_chain_lib_num(); sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); - request_next_chunk( std::move(g) ); + request_next_chunk( g.release() ); } } } @@ -1922,7 +1922,7 @@ namespace eosio { } // call with g_sync locked, called from conn's connection strand - void sync_manager::request_next_chunk( fc::unique_lock&& g_sync, const connection_ptr& conn ) { + void sync_manager::request_next_chunk( fc::mutex *, const connection_ptr& conn ) RELEASE(sync_mtx) { auto chain_info = my_impl->get_chain_info(); fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, head: ${h}", @@ -1932,6 +1932,7 @@ namespace eosio { fc_wlog( logger, "ignoring request, head is ${h} last req = ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, source connection ${c}", ("h", chain_info.head_num)("r", sync_last_requested_num)("e", sync_next_expected_num) ("k", sync_known_lib_num)("s", sync_req_span)("c", sync_source->connection_id) ); + sync_mtx.unlock(); return; } @@ -1957,31 +1958,31 @@ namespace eosio { sync_known_lib_num = chain_info.lib_num; sync_last_requested_num = 0; set_state( in_sync ); // probably not, but we can't do anything else - return; - } - - bool request_sent = false; - if( sync_last_requested_num != sync_known_lib_num ) { + sync_mtx.unlock(); + } else { + bool send_request = false; uint32_t start = sync_next_expected_num; uint32_t end = start + sync_req_span - 1; - if( end > sync_known_lib_num ) - end = sync_known_lib_num; - if( end > 0 && end >= start ) { - sync_last_requested_num = end; - sync_source = new_sync_source; - g_sync.unlock(); - request_sent = true; + if( sync_last_requested_num != sync_known_lib_num ) { + if( end > sync_known_lib_num ) + end = sync_known_lib_num; + if( end > 0 && end >= start ) { + sync_last_requested_num = end; + sync_source = new_sync_source; + send_request = true; + } + } + sync_mtx.unlock(); + if (send_request) { new_sync_source->strand.post( [new_sync_source, start, end, head_num=chain_info.head_num]() { peer_ilog( new_sync_source, "requesting range ${s} to ${e}, head ${h}", ("s", start)("e", end)("h", head_num) ); new_sync_source->request_sync_blocks( start, end ); - } ); + } ); + } else { + fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); + send_handshakes(); } } - if( !request_sent ) { - g_sync.unlock(); - fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); - send_handshakes(); - } } // static, thread safe @@ 
-2022,7 +2023,7 @@ namespace eosio { } sync_next_expected_num = std::max( chain_info.lib_num + 1, sync_next_expected_num ); - request_next_chunk( std::move( g_sync ), c ); + request_next_chunk( g_sync.release(), c ); } // called from connection strand @@ -2034,7 +2035,7 @@ namespace eosio { if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk( std::move(g) ); + request_next_chunk( g.release() ); } } @@ -2319,7 +2320,7 @@ namespace eosio { if (sync_next_expected_num > sync_last_requested_num && sync_last_requested_num < sync_known_lib_num) { fc_dlog(logger, "Requesting range ahead, head: ${h} blk_num: ${bn} sync_next_expected_num ${nen} sync_last_requested_num: ${lrn}", ("h", head)("bn", blk_num)("nen", sync_next_expected_num)("lrn", sync_last_requested_num)); - request_next_chunk(std::move(g_sync)); + request_next_chunk(g_sync.release()); } } From 337e96ca84420befbb29017f2d72ebf4a854294a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:18:51 -0400 Subject: [PATCH 026/191] Add thread safety directives for two other mutexes. --- plugins/net_plugin/net_plugin.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6d9dd0d1b4..cabeb30b72 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -247,12 +247,12 @@ namespace eosio { class dispatch_manager { alignas(hardware_destructive_interference_size) - mutable std::mutex blk_state_mtx; - peer_block_state_index blk_state; + mutable fc::mutex blk_state_mtx; + peer_block_state_index blk_state GUARDED_BY(blk_state_mtx); alignas(hardware_destructive_interference_size) - mutable std::mutex local_txns_mtx; - node_transaction_index local_txns; + mutable fc::mutex local_txns_mtx; + node_transaction_index local_txns GUARDED_BY(local_txns_mtx); unlinkable_block_state_cache unlinkable_block_cache; @@ -2333,7 +2333,7 @@ namespace eosio { bool dispatch_manager::add_peer_block( const block_id_type& blkid, uint32_t connection_id) { uint32_t block_num = block_header::num_from_id(blkid); - std::lock_guard g( blk_state_mtx ); + fc::lock_guard g( blk_state_mtx ); auto bptr = blk_state.get().find( std::make_tuple(block_num, std::ref(blkid), connection_id) ); bool added = (bptr == blk_state.end()); if( added ) { @@ -2344,14 +2344,14 @@ namespace eosio { bool dispatch_manager::peer_has_block( const block_id_type& blkid, uint32_t connection_id ) const { uint32_t block_num = block_header::num_from_id(blkid); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); const auto blk_itr = blk_state.get().find( std::make_tuple(block_num, std::ref(blkid), connection_id) ); return blk_itr != blk_state.end(); } bool dispatch_manager::have_block( const block_id_type& blkid ) const { uint32_t block_num = block_header::num_from_id(blkid); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); const auto& index = blk_state.get(); auto blk_itr = index.find( std::make_tuple(block_num, std::ref(blkid)) ); return blk_itr != index.end(); @@ -2360,7 +2360,7 @@ namespace eosio { void dispatch_manager::rm_block( const block_id_type& blkid ) { uint32_t block_num = block_header::num_from_id(blkid); fc_dlog( logger, "rm_block ${n}, id: ${id}", ("n", block_num)("id", blkid)); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); auto& index = blk_state.get(); auto p = index.equal_range( std::make_tuple(block_num, std::ref(blkid)) ); 
index.erase(p.first, p.second); @@ -2368,7 +2368,7 @@ namespace eosio { bool dispatch_manager::add_peer_txn( const transaction_id_type& id, const time_point_sec& trx_expires, uint32_t connection_id, const time_point_sec& now ) { - std::lock_guard g( local_txns_mtx ); + fc::lock_guard g( local_txns_mtx ); auto tptr = local_txns.get().find( std::make_tuple( std::ref( id ), connection_id ) ); bool added = (tptr == local_txns.end()); if( added ) { @@ -2384,7 +2384,7 @@ namespace eosio { } bool dispatch_manager::have_txn( const transaction_id_type& tid ) const { - std::lock_guard g( local_txns_mtx ); + fc::lock_guard g( local_txns_mtx ); const auto tptr = local_txns.get().find( tid ); return tptr != local_txns.end(); } @@ -2393,7 +2393,7 @@ namespace eosio { size_t start_size = 0, end_size = 0; fc::time_point_sec now{time_point::now()}; - std::unique_lock g( local_txns_mtx ); + fc::unique_lock g( local_txns_mtx ); start_size = local_txns.size(); auto& old = local_txns.get(); auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); @@ -2407,7 +2407,7 @@ namespace eosio { void dispatch_manager::expire_blocks( uint32_t lib_num ) { unlinkable_block_cache.expire_blocks( lib_num ); - std::lock_guard g( blk_state_mtx ); + fc::lock_guard g( blk_state_mtx ); auto& stale_blk = blk_state.get(); stale_blk.erase( stale_blk.lower_bound( 1 ), stale_blk.upper_bound( lib_num ) ); } From 5ec8c2a775a0ee82839ca94861a31f514957133d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:27:54 -0400 Subject: [PATCH 027/191] Add thread safety directives to some other mutexes. --- plugins/net_plugin/net_plugin.cpp | 66 +++++++++++++++---------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index cabeb30b72..5232240f68 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -322,8 +322,8 @@ namespace eosio { chain::flat_set supplied_peers; alignas(hardware_destructive_interference_size) - std::mutex connector_check_timer_mtx; - unique_ptr connector_check_timer; + fc::mutex connector_check_timer_mtx; + unique_ptr connector_check_timer GUARDED_BY(connector_check_timer_mtx); /// thread safe, only modified on startup std::chrono::milliseconds heartbeat_timeout{def_keepalive_interval*2}; @@ -427,12 +427,12 @@ namespace eosio { /** @} */ alignas(hardware_destructive_interference_size) - std::mutex expire_timer_mtx; - unique_ptr expire_timer; + fc::mutex expire_timer_mtx; + unique_ptr expire_timer GUARDED_BY(expire_timer_mtx); alignas(hardware_destructive_interference_size) - std::mutex keepalive_timer_mtx; - unique_ptr keepalive_timer; + fc::mutex keepalive_timer_mtx; + unique_ptr keepalive_timer GUARDED_BY(keepalive_timer_mtx); alignas(hardware_destructive_interference_size) std::atomic in_shutdown{false}; @@ -459,8 +459,8 @@ namespace eosio { private: alignas(hardware_destructive_interference_size) - mutable std::mutex chain_info_mtx; // protects chain_info_t - chain_info_t chain_info; + mutable fc::mutex chain_info_mtx; // protects chain_info_t + chain_info_t chain_info GUARDED_BY(chain_info_mtx); public: void update_chain_info(); @@ -611,31 +611,31 @@ namespace eosio { class queued_buffer : boost::noncopyable { public: void clear_write_queue() { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); while ( !_out_queue.empty() ) { 
_out_queue.pop_front(); } } uint32_t write_queue_size() const { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); return _write_queue_size; } bool is_out_queue_empty() const { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); return _out_queue.empty(); } bool ready_to_send() const { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } @@ -644,7 +644,7 @@ namespace eosio { bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, std::move(callback)} ); } else { @@ -658,7 +658,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); if( !_sync_write_queue.empty() ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -668,7 +668,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { - std::lock_guard g( _mtx ); + std::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -838,8 +838,8 @@ namespace eosio { block_status_monitor block_status_monitor_; alignas(hardware_destructive_interference_size) - std::mutex response_expected_timer_mtx; - boost::asio::steady_timer response_expected_timer; + fc::mutex response_expected_timer_mtx; + boost::asio::steady_timer response_expected_timer GUARDED_BY(response_expected_timer_mtx); alignas(hardware_destructive_interference_size) std::atomic no_retry{no_reason}; @@ -1729,14 +1729,14 @@ namespace eosio { // thread safe void connection::cancel_wait() { - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.cancel(); } // thread safe void connection::sync_wait() { connection_ptr c(shared_from_this()); - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { @@ -1747,7 +1747,7 @@ namespace eosio { // thread safe void connection::fetch_wait() { connection_ptr c( shared_from_this() ); - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { @@ -2983,12 +2983,12 @@ namespace eosio { connections.stop_conn_timer(); { - std::lock_guard g( expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); if( expire_timer ) expire_timer->cancel(); } { - std::lock_guard g( keepalive_timer_mtx ); + fc::lock_guard g( keepalive_timer_mtx ); if( keepalive_timer ) keepalive_timer->cancel(); } @@ -3002,7 +3002,7 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib_num = 0, head_num = 0; { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); chain_info.lib_num = lib_num = cc.last_irreversible_block_num(); chain_info.lib_id = cc.last_irreversible_block_id(); chain_info.head_num = head_num = cc.fork_db_head_block_num(); @@ -3012,17 +3012,17 @@ 
namespace eosio { } net_plugin_impl::chain_info_t net_plugin_impl::get_chain_info() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info; } uint32_t net_plugin_impl::get_chain_lib_num() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info.lib_num; } uint32_t net_plugin_impl::get_chain_head_num() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info.head_num; } @@ -3627,7 +3627,7 @@ namespace eosio { // thread safe void net_plugin_impl::start_expire_timer() { if( in_shutdown ) return; - std::lock_guard g( expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); expire_timer->expires_from_now( txn_exp_period); expire_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { if( !ec ) { @@ -3643,7 +3643,7 @@ namespace eosio { // thread safe void net_plugin_impl::ticker() { if( in_shutdown ) return; - std::lock_guard g( keepalive_timer_mtx ); + fc::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { my->ticker(); @@ -3665,7 +3665,7 @@ namespace eosio { void net_plugin_impl::start_monitors() { { - std::lock_guard g( expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); expire_timer = std::make_unique( my_impl->thread_pool.get_executor() ); } connections.start_conn_timer(); @@ -4058,7 +4058,7 @@ namespace eosio { } { - std::lock_guard g( my->keepalive_timer_mtx ); + fc::lock_guard g( my->keepalive_timer_mtx ); my->keepalive_timer = std::make_unique( my->thread_pool.get_executor() ); } @@ -4278,7 +4278,7 @@ namespace eosio { // called from any thread void connections_manager::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { - std::lock_guard g( connector_check_timer_mtx ); + fc::lock_guard g( connector_check_timer_mtx ); if (!connector_check_timer) { connector_check_timer = std::make_unique( my_impl->thread_pool.get_executor() ); } @@ -4291,7 +4291,7 @@ namespace eosio { } void connections_manager::stop_conn_timer() { - std::lock_guard g( connector_check_timer_mtx ); + fc::lock_guard g( connector_check_timer_mtx ); if (connector_check_timer) { connector_check_timer->cancel(); } From 25bb1554cf92eb85b035c05bd2668e11aaca7608 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:31:47 -0400 Subject: [PATCH 028/191] Add thread safety directives for queued_buffer class. 
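
With this change _mtx becomes an fc::mutex and every queue member is annotated
GUARDED_BY(_mtx), so the -Wthread-safety pass enabled for net_plugin earlier in this series
can reject unsynchronized access to the queues at compile time. A minimal sketch of the kind
of mistake the analysis now catches (class and member names are invented for illustration,
not part of this patch):

    class example {
       mutable fc::mutex mtx_;
       uint32_t count_ GUARDED_BY(mtx_) {0};
    public:
       void ok()  { fc::lock_guard g( mtx_ ); ++count_; } // accepted: mtx_ is held
       void bad() { ++count_; } // rejected with, roughly: writing variable 'count_'
                                // requires holding mutex 'mtx_' exclusively
    };
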
--- plugins/net_plugin/net_plugin.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 5232240f68..48c6934df2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -611,31 +611,31 @@ namespace eosio { class queued_buffer : boost::noncopyable { public: void clear_write_queue() { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); while ( !_out_queue.empty() ) { _out_queue.pop_front(); } } uint32_t write_queue_size() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); return _write_queue_size; } bool is_out_queue_empty() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); return _out_queue.empty(); } bool ready_to_send() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } @@ -644,7 +644,7 @@ namespace eosio { bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, std::move(callback)} ); } else { @@ -658,7 +658,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); if( !_sync_write_queue.empty() ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -668,7 +668,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -677,7 +677,7 @@ namespace eosio { private: struct queued_write; void fill_out_buffer( std::vector& bufs, - deque& w_queue ) { + deque& w_queue ) REQUIRES(_mtx) { while ( !w_queue.empty() ) { auto& m = w_queue.front(); bufs.emplace_back( m.buff->data(), m.buff->size() ); @@ -694,11 +694,11 @@ namespace eosio { }; alignas(hardware_destructive_interference_size) - mutable std::mutex _mtx; - uint32_t _write_queue_size{0}; - deque _write_queue; - deque _sync_write_queue; // sync_write_queue will be sent first - deque _out_queue; + mutable fc::mutex _mtx; + uint32_t _write_queue_size GUARDED_BY(_mtx) {0}; + deque _write_queue GUARDED_BY(_mtx); + deque _sync_write_queue GUARDED_BY(_mtx); // sync_write_queue will be sent first + deque _out_queue GUARDED_BY(_mtx); }; // queued_buffer From 902fdaa07f704510e07049613862a7e33ea77e98 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:35:32 -0400 Subject: [PATCH 029/191] Remove explicit template parameters ``. 
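
Since C++17, class template argument deduction infers the guard's mutex type from its
constructor argument, so the explicit parameter adds nothing, e.g. (illustrative snippet,
not part of the diff):

    std::mutex m;
    std::lock_guard g( m ); // deduced as std::lock_guard<std::mutex>

It also keeps these call sites source-compatible as the guarded members migrate between
std:: and fc:: mutex types in the surrounding patches.
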
--- plugins/net_plugin/net_plugin.cpp | 48 +++++++++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 48c6934df2..4c6d1db3f0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1004,7 +1004,7 @@ namespace eosio { bool incoming() const { return peer_address().empty(); } // thread safe because of peer_address bool incoming_and_handshake_received() const { if (!incoming()) return false; - std::lock_guard g_conn( conn_mtx ); + std::lock_guard g_conn( conn_mtx ); return !last_handshake_recv.p2p_address.empty(); } }; // class connection @@ -1160,7 +1160,7 @@ namespace eosio { log_remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); local_endpoint_ip = ec2 ? unknown : lep.address().to_string(); local_endpoint_port = ec2 ? unknown : std::to_string(lep.port()); - std::lock_guard g_conn( conn_mtx ); + std::lock_guard g_conn( conn_mtx ); remote_endpoint_ip = log_remote_endpoint_ip; } @@ -1212,7 +1212,7 @@ namespace eosio { stat.connecting = state() == connection_state::connecting; stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; - std::lock_guard g( conn_mtx ); + std::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; } @@ -1297,7 +1297,7 @@ namespace eosio { ++self->consecutive_immediate_connection_close; bool has_last_req = false; { - std::lock_guard g_conn( self->conn_mtx ); + std::lock_guard g_conn( self->conn_mtx ); has_last_req = self->last_req.has_value(); self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); @@ -1335,7 +1335,7 @@ namespace eosio { } if( logger.is_enabled( fc::log_level::debug ) ) { - std::unique_lock g_conn( conn_mtx ); + std::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation >= 1 ) { peer_dlog( this, "maybe truncating branch at = ${h}:${id}", ("h", block_header::num_from_id(last_handshake_recv.head_id))("id", last_handshake_recv.head_id) ); @@ -1414,7 +1414,7 @@ namespace eosio { if (closed()) return; strand.post( [c = shared_from_this()]() { - std::unique_lock g_conn( c->conn_mtx ); + std::unique_lock g_conn( c->conn_mtx ); if( c->populate_handshake( c->last_handshake_sent ) ) { static_assert( std::is_same_v<decltype( c->sent_handshake_count ), int16_t>, "INT16_MAX based on int16_t" ); if( c->sent_handshake_count == INT16_MAX ) c->sent_handshake_count = 1; // do not wrap @@ -1854,7 +1854,7 @@ namespace eosio { // Determine current LIB of remaining peers as our sync_known_lib_num.
uint32_t highest_lib_num = 0; my_impl->connections.for_each_block_connection( [&highest_lib_num]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + std::lock_guard g_conn( cc->conn_mtx ); if( cc->current() && cc->last_handshake_recv.last_irreversible_block_num > highest_lib_num ) { highest_lib_num = cc->last_handshake_recv.last_irreversible_block_num; } @@ -2159,7 +2159,7 @@ namespace eosio { request_message req; req.req_blocks.mode = catch_up; auto is_fork_head_greater = [num, &id, &req]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; return true; @@ -2182,7 +2182,7 @@ namespace eosio { return false; set_state( head_catchup ); { - std::lock_guard g_conn( c->conn_mtx ); + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = id; c->fork_head_num = num; } @@ -2191,7 +2191,7 @@ namespace eosio { } else { peer_ilog( c, "none notice while in ${s}, fork head num = ${fhn}, id ${id}...", ("s", stage_str( sync_state ))("fhn", num)("id", id.str().substr(8,16)) ); - std::lock_guard g_conn( c->conn_mtx ); + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -2223,7 +2223,7 @@ namespace eosio { } else if (msg.known_blocks.mode == last_irr_catch_up) { { c->peer_lib_num = msg.known_trx.pending; - std::lock_guard g_conn( c->conn_mtx ); + std::lock_guard g_conn( c->conn_mtx ); c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; } sync_reset_lib_num(c, false); @@ -2270,14 +2270,14 @@ namespace eosio { block_id_type null_id; bool set_state_to_head_catchup = false; my_impl->connections.for_each_block_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { - std::unique_lock g_cp_conn( cp->conn_mtx ); + std::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; block_id_type fork_head_id = cp->fork_head; g_cp_conn.unlock(); if( fork_head_id == null_id ) { // continue } else if( fork_head_num < blk_num || fork_head_id == blk_id ) { - std::lock_guard g_conn( c->conn_mtx ); + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { @@ -2445,7 +2445,7 @@ namespace eosio { // called from c's connection strand void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - std::unique_lock g( c->conn_mtx ); + std::unique_lock g( c->conn_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -2516,7 +2516,7 @@ namespace eosio { request_message last_req; block_id_type bid; { - std::lock_guard g_c_conn( c->conn_mtx ); + std::lock_guard g_c_conn( c->conn_mtx ); if( !c->last_req ) { return; } @@ -2535,7 +2535,7 @@ namespace eosio { return false; { - std::lock_guard guard( conn->conn_mtx ); + std::lock_guard guard( conn->conn_mtx ); if( conn->last_req ) { return false; } @@ -2546,7 +2546,7 @@ namespace eosio { conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); - std::lock_guard g_conn_conn( conn->conn_mtx ); + std::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); return true; @@ -2589,7 +2589,7 @@ namespace eosio { if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) { fc::microseconds connector_period = my_impl->connections.get_connector_period(); - std::lock_guard g( c->conn_mtx ); + 
std::lock_guard g( c->conn_mtx ); if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) { return true; // true so doesn't remove from valid connections } @@ -2669,7 +2669,7 @@ namespace eosio { if (conn->socket_is_open()) { if (conn->peer_address().empty()) { ++visitors; - std::lock_guard g_conn(conn->conn_mtx); + std::lock_guard g_conn(conn->conn_mtx); if (paddr_str == conn->remote_endpoint_ip) { ++from_addr; } @@ -2890,7 +2890,7 @@ namespace eosio { if( !my_impl->sync_master->syncing_from_peer() ) { // guard against peer thinking it needs to send us old blocks uint32_t lib_num = my_impl->get_chain_lib_num(); if( blk_num < lib_num ) { - std::unique_lock g( conn_mtx ); + std::unique_lock g( conn_mtx ); const auto last_sent_lib = last_handshake_sent.last_irreversible_block_num; g.unlock(); peer_ilog( this, "received block ${n} less than ${which}lib ${lib}", @@ -3082,7 +3082,7 @@ namespace eosio { peer_lib_num = msg.last_irreversible_block_num; peer_head_block_num = msg.head_num; - std::unique_lock g_conn( conn_mtx ); + std::unique_lock g_conn( conn_mtx ); last_handshake_recv = msg; g_conn.unlock(); @@ -3123,7 +3123,7 @@ namespace eosio { auto is_duplicate = [&](const auto& check) { if(check.get() == this) return false; - std::unique_lock g_check_conn( check->conn_mtx ); + std::unique_lock g_check_conn( check->conn_mtx ); fc_dlog( logger, "dup check: connected ${c}, ${l} =? ${r}", ("c", check->connected())("l", check->last_handshake_recv.node_id)("r", msg.node_id) ); if(check->connected() && check->last_handshake_recv.node_id == msg.node_id) { @@ -3312,7 +3312,7 @@ namespace eosio { org = 0; rec = 0; - std::unique_lock g_conn( conn_mtx ); + std::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation == 0 ) { g_conn.unlock(); send_handshake(); @@ -3343,7 +3343,7 @@ namespace eosio { case none: break; case last_irr_catch_up: { - std::unique_lock g_conn( conn_mtx ); + std::unique_lock g_conn( conn_mtx ); last_handshake_recv.head_num = msg.known_blocks.pending; g_conn.unlock(); break; From 836856204d197a51f6e998ab059d05c016d10d1d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:45:10 -0400 Subject: [PATCH 030/191] Add thread safety directives for conn_mtx --- plugins/net_plugin/net_plugin.cpp | 64 +++++++++++++++---------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4c6d1db3f0..65d5584550 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -845,14 +845,14 @@ namespace eosio { std::atomic no_retry{no_reason}; alignas(hardware_destructive_interference_size) - mutable std::mutex conn_mtx; //< mtx for last_req .. remote_endpoint_ip - std::optional last_req; - handshake_message last_handshake_recv; - handshake_message last_handshake_sent; - block_id_type fork_head; - uint32_t fork_head_num{0}; - fc::time_point last_close; - string remote_endpoint_ip; + mutable fc::mutex conn_mtx; //< mtx for last_req .. 
remote_endpoint_ip + std::optional last_req GUARDED_BY(conn_mtx); + handshake_message last_handshake_recv GUARDED_BY(conn_mtx); + handshake_message last_handshake_sent GUARDED_BY(conn_mtx); + block_id_type fork_head GUARDED_BY(conn_mtx); + uint32_t fork_head_num GUARDED_BY(conn_mtx) {0}; + fc::time_point last_close GUARDED_BY(conn_mtx); + string remote_endpoint_ip GUARDED_BY(conn_mtx); connection_status get_status()const; @@ -1004,7 +1004,7 @@ namespace eosio { bool incoming() const { return peer_address().empty(); } // thread safe because of peer_address bool incoming_and_handshake_received() const { if (!incoming()) return false; - std::lock_guard g_conn( conn_mtx ); + fc::lock_guard g_conn( conn_mtx ); return !last_handshake_recv.p2p_address.empty(); } }; // class connection @@ -1160,7 +1160,7 @@ namespace eosio { log_remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); local_endpoint_ip = ec2 ? unknown : lep.address().to_string(); local_endpoint_port = ec2 ? unknown : std::to_string(lep.port()); - std::lock_guard g_conn( conn_mtx ); + fc::lock_guard g_conn( conn_mtx ); remote_endpoint_ip = log_remote_endpoint_ip; } @@ -1212,7 +1212,7 @@ namespace eosio { stat.connecting = state() == connection_state::connecting; stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; - std::lock_guard g( conn_mtx ); + fc::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; } @@ -1297,7 +1297,7 @@ namespace eosio { ++self->consecutive_immediate_connection_close; bool has_last_req = false; { - std::lock_guard g_conn( self->conn_mtx ); + fc::lock_guard g_conn( self->conn_mtx ); has_last_req = self->last_req.has_value(); self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); @@ -1335,7 +1335,7 @@ namespace eosio { } if( logger.is_enabled( fc::log_level::debug ) ) { - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation >= 1 ) { peer_dlog( this, "maybe truncating branch at = ${h}:${id}", ("h", block_header::num_from_id(last_handshake_recv.head_id))("id", last_handshake_recv.head_id) ); @@ -1414,7 +1414,7 @@ namespace eosio { if (closed()) return; strand.post( [c = shared_from_this()]() { - std::unique_lock g_conn( c->conn_mtx ); + fc::unique_lock g_conn( c->conn_mtx ); if( c->populate_handshake( c->last_handshake_sent ) ) { static_assert( std::is_same_v<decltype( c->sent_handshake_count ), int16_t>, "INT16_MAX based on int16_t" ); if( c->sent_handshake_count == INT16_MAX ) c->sent_handshake_count = 1; // do not wrap @@ -1854,7 +1854,7 @@ namespace eosio { // Determine current LIB of remaining peers as our sync_known_lib_num.
uint32_t highest_lib_num = 0; my_impl->connections.for_each_block_connection( [&highest_lib_num]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + fc::lock_guard g_conn( cc->conn_mtx ); if( cc->current() && cc->last_handshake_recv.last_irreversible_block_num > highest_lib_num ) { highest_lib_num = cc->last_handshake_recv.last_irreversible_block_num; } @@ -2159,7 +2159,7 @@ namespace eosio { request_message req; req.req_blocks.mode = catch_up; auto is_fork_head_greater = [num, &id, &req]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + fc::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; return true; @@ -2182,7 +2182,7 @@ namespace eosio { return false; set_state( head_catchup ); { - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = id; c->fork_head_num = num; } @@ -2191,7 +2191,7 @@ namespace eosio { } else { peer_ilog( c, "none notice while in ${s}, fork head num = ${fhn}, id ${id}...", ("s", stage_str( sync_state ))("fhn", num)("id", id.str().substr(8,16)) ); - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -2223,7 +2223,7 @@ namespace eosio { } else if (msg.known_blocks.mode == last_irr_catch_up) { { c->peer_lib_num = msg.known_trx.pending; - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; } sync_reset_lib_num(c, false); @@ -2270,14 +2270,14 @@ namespace eosio { block_id_type null_id; bool set_state_to_head_catchup = false; my_impl->connections.for_each_block_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { - std::unique_lock g_cp_conn( cp->conn_mtx ); + fc::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; block_id_type fork_head_id = cp->fork_head; g_cp_conn.unlock(); if( fork_head_id == null_id ) { // continue } else if( fork_head_num < blk_num || fork_head_id == blk_id ) { - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { @@ -2445,7 +2445,7 @@ namespace eosio { // called from c's connection strand void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - std::unique_lock g( c->conn_mtx ); + fc::unique_lock g( c->conn_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -2516,7 +2516,7 @@ namespace eosio { request_message last_req; block_id_type bid; { - std::lock_guard g_c_conn( c->conn_mtx ); + fc::lock_guard g_c_conn( c->conn_mtx ); if( !c->last_req ) { return; } @@ -2535,7 +2535,7 @@ namespace eosio { return false; { - std::lock_guard guard( conn->conn_mtx ); + fc::lock_guard guard( conn->conn_mtx ); if( conn->last_req ) { return false; } @@ -2546,7 +2546,7 @@ namespace eosio { conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); - std::lock_guard g_conn_conn( conn->conn_mtx ); + fc::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); return true; @@ -2589,7 +2589,7 @@ namespace eosio { if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) { fc::microseconds connector_period = my_impl->connections.get_connector_period(); - std::lock_guard g( c->conn_mtx ); + fc::lock_guard g( 
conn_mtx ); if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) { return true; // true so doesn't remove from valid connections } @@ -2669,7 +2669,7 @@ namespace eosio { if (conn->socket_is_open()) { if (conn->peer_address().empty()) { ++visitors; - std::lock_guard g_conn(conn->conn_mtx); + fc::lock_guard g_conn(conn->conn_mtx); if (paddr_str == conn->remote_endpoint_ip) { ++from_addr; } @@ -2890,7 +2890,7 @@ namespace eosio { if( !my_impl->sync_master->syncing_from_peer() ) { // guard against peer thinking it needs to send us old blocks uint32_t lib_num = my_impl->get_chain_lib_num(); if( blk_num < lib_num ) { - std::unique_lock g( conn_mtx ); + fc::unique_lock g( conn_mtx ); const auto last_sent_lib = last_handshake_sent.last_irreversible_block_num; g.unlock(); peer_ilog( this, "received block ${n} less than ${which}lib ${lib}", @@ -3082,7 +3082,7 @@ namespace eosio { peer_lib_num = msg.last_irreversible_block_num; peer_head_block_num = msg.head_num; - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); last_handshake_recv = msg; g_conn.unlock(); @@ -3123,7 +3123,7 @@ namespace eosio { auto is_duplicate = [&](const auto& check) { if(check.get() == this) return false; - std::unique_lock g_check_conn( check->conn_mtx ); + fc::unique_lock g_check_conn( check->conn_mtx ); fc_dlog( logger, "dup check: connected ${c}, ${l} =? ${r}", ("c", check->connected())("l", check->last_handshake_recv.node_id)("r", msg.node_id) ); if(check->connected() && check->last_handshake_recv.node_id == msg.node_id) { @@ -3312,7 +3312,7 @@ namespace eosio { org = 0; rec = 0; - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation == 0 ) { g_conn.unlock(); send_handshake(); @@ -3343,7 +3343,7 @@ namespace eosio { case none: break; case last_irr_catch_up: { - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); last_handshake_recv.head_num = msg.known_blocks.pending; g_conn.unlock(); break; From 563bbbad1f50414e33b0ce3486adba577dcea29c Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sat, 10 Jun 2023 15:57:26 -0400 Subject: [PATCH 031/191] whitespace --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 65d5584550..2a8288b64a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -842,7 +842,7 @@ namespace eosio { boost::asio::steady_timer response_expected_timer GUARDED_BY(response_expected_timer_mtx); alignas(hardware_destructive_interference_size) - std::atomic<go_away_reason> no_retry{no_reason}; + std::atomic<go_away_reason> no_retry{no_reason}; alignas(hardware_destructive_interference_size) mutable fc::mutex conn_mtx; //< mtx for last_req .. remote_endpoint_ip From 4635c6c4a472bebdca61003f2bf3f46b59104436 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 07:54:38 -0500 Subject: [PATCH 032/191] Use absolute paths.
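(Editorial aside, not part of the series: the alignas(hardware_destructive_interference_size) lines kept intact by the hunk above place independently contended members on their own cache lines, so a write to one member does not invalidate the cache line holding its neighbor. A minimal self-contained sketch of the idea follows; the struct, member names, and the 64-byte fallback are assumptions for illustration.)

#include <atomic>
#include <cstdint>
#include <new>

#if defined(__cpp_lib_hardware_interference_size)
constexpr std::size_t cl_size = std::hardware_destructive_interference_size;
#else
constexpr std::size_t cl_size = 64; // assumed typical cache-line size
#endif

struct peer_counters {
   alignas(cl_size) std::atomic<uint64_t> blocks_in{0};  // updated by the read loop
   alignas(cl_size) std::atomic<uint64_t> blocks_out{0}; // updated by the write loop
};

int main() {
   peer_counters pc;
   pc.blocks_in.fetch_add(1, std::memory_order_relaxed);
   pc.blocks_out.fetch_add(1, std::memory_order_relaxed);
   static_assert(sizeof(peer_counters) >= 2 * cl_size, "counters occupy distinct cache lines");
}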
--- .github/workflows/build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 7e31e2bb0f..64f8800b89 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -335,6 +335,7 @@ jobs: run: | pwd ls -l + echo github workspace: ${{ github.workspace }} - name: Extract leap build run: | cd leap @@ -346,12 +347,12 @@ jobs: mkdir build cd build apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev - cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="../../leap/build/lib/cmake/leap" .. + cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. make -j $(nproc) cd ../.. - name: Build & Test reference-contracts run: | - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR="./cdt/build/lib/cmake/cdt" -Dleap_DIR="./leap/build/lib/cmake/leap" + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From 33a4496105844e6b15b8a392f26a8e0eb487458c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 08:18:51 -0500 Subject: [PATCH 033/191] double check dir structure --- .github/workflows/build.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 64f8800b89..3d4c831f13 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -341,6 +341,7 @@ jobs: cd leap zstdcat build.tar.zst | tar x cd .. + ls -lR ${{ github.workspace }}/leap/build/lib/cmake/leap - name: Build cdt run: | cd cdt @@ -350,6 +351,7 @@ jobs: cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. make -j $(nproc) cd ../.. 
+ ls -lR ${{ github.workspace }}/cdt/build/lib/cmake/cdt - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap From 6fdb21d79b456148b7e289963402af93c9eceda7 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 09:27:30 -0400 Subject: [PATCH 034/191] Add version check for enabling `-Wthread-safety` (version >= 14) --- plugins/net_plugin/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/CMakeLists.txt b/plugins/net_plugin/CMakeLists.txt index 8f40649ba6..ec459d1387 100644 --- a/plugins/net_plugin/CMakeLists.txt +++ b/plugins/net_plugin/CMakeLists.txt @@ -3,7 +3,7 @@ add_library( net_plugin net_plugin.cpp ${HEADERS} ) -if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 14.0) target_compile_options(net_plugin PUBLIC -Wthread-safety) endif() From 60d62888db008c2c33aa2e268cb621c877bd87c1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 08:37:58 -0500 Subject: [PATCH 035/191] double check dir structure --- .github/workflows/build.yaml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3d4c831f13..e9165cdc7d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -341,7 +341,12 @@ jobs: cd leap zstdcat build.tar.zst | tar x cd .. - ls -lR ${{ github.workspace }}/leap/build/lib/cmake/leap + ls -l ${{ github.workspace }} + ls -l ${{ github.workspace }}/leap + ls -l ${{ github.workspace }}/leap/build + ls -l ${{ github.workspace }}/leap/build/lib + ls -l ${{ github.workspace }}/leap/build/lib/cmake + ls -l ${{ github.workspace }}/leap/build/lib/cmake/leap - name: Build cdt run: | cd cdt @@ -351,7 +356,12 @@ jobs: cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. make -j $(nproc) cd ../.. - ls -lR ${{ github.workspace }}/cdt/build/lib/cmake/cdt + ls -l ${{ github.workspace }} + ls -l ${{ github.workspace }}/cdt + ls -l ${{ github.workspace }}/cdt/build + ls -l ${{ github.workspace }}/cdt/build/lib + ls -l ${{ github.workspace }}/cdt/build/lib/cmake + ls -l ${{ github.workspace }}/cdt/build/lib/cmake/cdt - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap From a8d176796b00f693c5383ccda272967d5919719a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 08:50:44 -0500 Subject: [PATCH 036/191] double check dir structure --- .github/workflows/build.yaml | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e9165cdc7d..aa9e8830d8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -338,30 +338,40 @@ jobs: echo github workspace: ${{ github.workspace }} - name: Extract leap build run: | + pwd + ls -l cd leap + pwd zstdcat build.tar.zst | tar x + ls -l cd .. 
- ls -l ${{ github.workspace }} - ls -l ${{ github.workspace }}/leap - ls -l ${{ github.workspace }}/leap/build - ls -l ${{ github.workspace }}/leap/build/lib - ls -l ${{ github.workspace }}/leap/build/lib/cmake - ls -l ${{ github.workspace }}/leap/build/lib/cmake/leap + pwd + ls -l + ls -l leap + ls -l leap/build + ls -l leap/build/lib + ls -l leap/build/lib/cmake + ls -l leap/build/lib/cmake/leap - name: Build cdt run: | + pwd + ls -l cd cdt + pwd mkdir build cd build + pwd apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. make -j $(nproc) cd ../.. - ls -l ${{ github.workspace }} - ls -l ${{ github.workspace }}/cdt - ls -l ${{ github.workspace }}/cdt/build - ls -l ${{ github.workspace }}/cdt/build/lib - ls -l ${{ github.workspace }}/cdt/build/lib/cmake - ls -l ${{ github.workspace }}/cdt/build/lib/cmake/cdt + pwd + ls -l + ls -l cdt + ls -l cdt/build + ls -l cdt/build/lib + ls -l cdt/build/lib/cmake + ls -l cdt/build/lib/cmake/cdt - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap From d954698e31ae15ae1e89836bf75876a72808190f Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 10:13:08 -0400 Subject: [PATCH 037/191] Whitespace cleanup --- libraries/libfc/include/fc/mutex.hpp | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp index 9184783199..a302535dd0 100644 --- a/libraries/libfc/include/fc/mutex.hpp +++ b/libraries/libfc/include/fc/mutex.hpp @@ -115,14 +115,9 @@ class CAPABILITY("shared_mutex") shared_mutex { }; // Tag types for selecting a constructor. -struct adopt_lock_t { -} inline constexpr adopt_lock = {}; -struct defer_lock_t { -} inline constexpr defer_lock = {}; -struct shared_lock_t { -} inline constexpr shared_lock = {}; -struct try_to_lock_t { -} inline constexpr try_to_lock = {}; +struct adopt_lock_t {} inline constexpr adopt_lock = {}; +struct defer_lock_t {} inline constexpr defer_lock = {}; +struct shared_lock_t {} inline constexpr shared_lock = {}; // LockGuard is an RAII class that acquires a mutex in its constructor, and // releases it in its destructor. @@ -177,7 +172,7 @@ class SCOPED_CAPABILITY unique_lock { #endif // Assume mu is held, implicitly acquire *this and associate it with mu. - unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) + unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) : mut(&mu) , locked(true) {} From 8df02aa8a31138d84bce70be779d4a0db36af48d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 09:17:25 -0500 Subject: [PATCH 038/191] Try install first then dev-install. --- .github/workflows/build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index aa9e8830d8..8956daded1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -285,6 +285,7 @@ jobs: cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja cmake --build build cd build + ninja install ninja dev-install cd ../.. 
- name: make install cdt run: | cd cdt mkdir build cd build apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release .. make -j $(nproc) make install cd ../.. From d055c409ed52ad9c7632856dd0d6bf0526788013 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 09:17:56 -0500 Subject: [PATCH 039/191] Try exporting leap and cdt dir to env variable for use in ref-contracts build. --- .github/workflows/build.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8956daded1..5fd2e17deb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -353,6 +353,9 @@ jobs: ls -l leap/build/lib ls -l leap/build/lib/cmake ls -l leap/build/lib/cmake/leap + cd leap/build/lib/cmake/leap + export LEAPDIR=$PWD + echo LEAPDIR = $LEAPDIR - name: Build cdt run: | pwd ls -l cd cdt pwd mkdir build cd build pwd apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. make -j $(nproc) cd ../.. ls -l ${{ github.workspace }} ls -l ${{ github.workspace }}/cdt ls -l ${{ github.workspace }}/cdt/build ls -l ${{ github.workspace }}/cdt/build/lib ls -l ${{ github.workspace }}/cdt/build/lib/cmake ls -l ${{ github.workspace }}/cdt/build/lib/cmake/cdt + cd cdt/build/lib/cmake/cdt + export CDTDIR=$PWD + echo CDTDIR = $CDTDIR - name: Build & Test reference-contracts run: | - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap + # cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=$CDTDIR -Dleap_DIR=$LEAPDIR cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From a0a1effbdd9ff49dc7c5907f93e6accf44676faf Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 10:21:18 -0400 Subject: [PATCH 040/191] Address PR comments --- libraries/libfc/include/fc/mutex.hpp | 7 +------ plugins/net_plugin/net_plugin.cpp | 10 +++++----- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp index a302535dd0..684274b90c 100644 --- a/libraries/libfc/include/fc/mutex.hpp +++ b/libraries/libfc/include/fc/mutex.hpp @@ -1,5 +1,4 @@ -#ifndef THREAD_SAFETY_ANALYSIS_MUTEX_HPP -#define THREAD_SAFETY_ANALYSIS_MUTEX_HPP +#pragma once // Enable thread safety attributes only with clang. // The attributes can be safely erased when compiling with other compilers. @@ -162,14 +161,12 @@ class SCOPED_CAPABILITY unique_lock { mut->lock(); } -#if 0 unique_lock(unique_lock&& o) noexcept ACQUIRE(o) : mut(o.mut) , locked(o.locked) { o.locked = false; o.mut = nullptr; } -#endif // Assume mu is held, implicitly acquire *this and associate it with mu.
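// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the tag-dispatch pattern behind the
// adopt-lock constructor that the comment above documents (the annotated
// constructor itself continues just below). Names here are hypothetical, not
// fc's; the shape mirrors std::adopt_lock_t and the tag types seen earlier.
#include <mutex>

struct adopt_lock_tag {} inline constexpr adopt_lock_v = {}; // same one-line shape as fc's adopt_lock_t

template <typename M>
class scoped_lock_like {
   M&   mut;
   bool locked;
public:
   explicit scoped_lock_like(M& mu) : mut(mu), locked(true) { mut.lock(); } // lock now
   scoped_lock_like(M& mu, adopt_lock_tag) : mut(mu), locked(true) {}       // caller already holds mu
   ~scoped_lock_like() { if (locked) mut.unlock(); }                        // unlock exactly once
};

int main() {
   std::mutex m;
   m.lock();                            // locked earlier on some other code path
   scoped_lock_like g(m, adopt_lock_v); // adopt ownership instead of double-locking
}
// --------------------------- original diff resumes --------------------------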
unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) @@ -218,5 +215,3 @@ class SCOPED_CAPABILITY unique_lock { }; } // namespace fc - -#endif // THREAD_SAFETY_ANALYSIS_MUTEX_HPP diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2a8288b64a..1c0471833d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -211,13 +211,13 @@ namespace eosio { alignas(hardware_destructive_interference_size) fc::mutex sync_mtx; - uint32_t sync_known_lib_num GUARDED_BY(sync_mtx) {0}; // highest known lib num from currently connected peers + uint32_t sync_known_lib_num GUARDED_BY(sync_mtx) {0}; // highest known lib num from currently connected peers uint32_t sync_last_requested_num GUARDED_BY(sync_mtx) {0}; // end block number of the last requested range, inclusive - uint32_t sync_next_expected_num GUARDED_BY(sync_mtx) {0}; // the next block number we need from peer - connection_ptr sync_source; // connection we are currently syncing from + uint32_t sync_next_expected_num GUARDED_BY(sync_mtx) {0}; // the next block number we need from peer + connection_ptr sync_source GUARDED_BY(sync_mtx); // connection we are currently syncing from - const uint32_t sync_req_span GUARDED_BY(sync_mtx) {0}; - const uint32_t sync_peer_limit GUARDED_BY(sync_mtx) {0}; + const uint32_t sync_req_span GUARDED_BY(sync_mtx) {0}; + const uint32_t sync_peer_limit GUARDED_BY(sync_mtx) {0}; alignas(hardware_destructive_interference_size) std::atomic<stages> sync_state{in_sync}; From 28ad1c34e4c84a6e752ec83f25b673eb379d87ca Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 10:39:06 -0400 Subject: [PATCH 041/191] Address PR comments --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1c0471833d..caab1ba264 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -216,8 +216,8 @@ namespace eosio { uint32_t sync_next_expected_num GUARDED_BY(sync_mtx) {0}; // the next block number we need from peer connection_ptr sync_source GUARDED_BY(sync_mtx); // connection we are currently syncing from - const uint32_t sync_req_span GUARDED_BY(sync_mtx) {0}; - const uint32_t sync_peer_limit GUARDED_BY(sync_mtx) {0}; + const uint32_t sync_req_span {0}; + const uint32_t sync_peer_limit {0}; alignas(hardware_destructive_interference_size) std::atomic<stages> sync_state{in_sync}; From 61b3d2e494a3853ca176f7bd81b12d45ce497f1f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 09:44:45 -0500 Subject: [PATCH 042/191] Still trying to get cdt_DIR and leap_DIR to work.
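(Editorial aside on the PATCH 041 hunk above, not part of the series: GUARDED_BY(sync_mtx) is dropped from sync_req_span and sync_peer_limit because const members initialized in the constructor never change afterwards, so unsynchronized readers race with nothing and need no lock. A small illustrative sketch with hypothetical names:)

#include <cstdint>
#include <cstdio>
#include <thread>

struct sync_config {
   const uint32_t sync_req_span; // written once, before any other thread can see the object
   explicit sync_config(uint32_t span) : sync_req_span(span) {}
};

int main() {
   sync_config cfg{100};
   // Safe: no writer exists after construction, so concurrent reads need no mutex.
   std::thread reader([&] { std::printf("span: %u\n", static_cast<unsigned>(cfg.sync_req_span)); });
   std::printf("span: %u\n", static_cast<unsigned>(cfg.sync_req_span));
   reader.join();
}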
--- .github/workflows/build.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5fd2e17deb..b951c8ab8e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -354,8 +354,8 @@ jobs: ls -l leap/build/lib/cmake ls -l leap/build/lib/cmake/leap cd leap/build/lib/cmake/leap - export LEAPDIR=$PWD - echo LEAPDIR = $LEAPDIR + export leap_DIR=$PWD + echo LEAPDIR = $leap_DIR - name: Build cdt run: | pwd @@ -377,12 +377,13 @@ jobs: ls -l cdt/build/lib/cmake ls -l cdt/build/lib/cmake/cdt cd cdt/build/lib/cmake/cdt - export CDTDIR=$PWD - echo CDTDIR = $CDTDIR + export cdt_DIR=$PWD + echo CDTDIR = $cdt_DIR - name: Build & Test reference-contracts run: | - # cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=${{ github.workspace }}/cdt/build/lib/cmake/cdt -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dcdt_DIR=$CDTDIR -Dleap_DIR=$LEAPDIR + echo leap_DIR = $leap_DIR + echo cdt_DIR = $cdt_DIR + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From ae03420dbac4fee2611641384fdcc8713aa36323 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 10:46:00 -0500 Subject: [PATCH 043/191] Install CDT from deb package instead of src build and install. --- .github/workflows/build.yaml | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b951c8ab8e..8235bcdf9f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -262,12 +262,6 @@ jobs: with: submodules: recursive path: leap - - name: checkout cdt - uses: actions/checkout@v3 - with: - repository: AntelopeIO/cdt - submodules: recursive - path: cdt - name: checkout reference-contracts uses: actions/checkout@v3 with: @@ -288,16 +282,19 @@ jobs: ninja install ninja dev-install cd ../.. - - name: make install cdt + - name: Download cdt + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: cdt + file: 'cdt_.*amd64.deb' + target: main + artifact-name: cdt_ubuntu_package_amd64 + token: ${{github.token}} + - name: Install cdt Packages run: | - cd cdt - mkdir build - cd build - apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev - cmake -DCMAKE_BUILD_TYPE=Release .. - make -j $(nproc) - make install - cd ../.. + apt install -y ./*.deb + rm ./*.deb - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On From 73f326f395454221ac86f940a206e1ff7c6cdde5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 10:46:33 -0500 Subject: [PATCH 044/191] Env variables not carrying over between steps. Reset leap_DIR and cdt_DIR here. 
--- .github/workflows/build.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8235bcdf9f..89aed3b557 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -378,8 +378,10 @@ jobs: echo CDTDIR = $cdt_DIR - name: Build & Test reference-contracts run: | - echo leap_DIR = $leap_DIR - echo cdt_DIR = $cdt_DIR + echo leap_DIR = $PWD/leap/build/lib/cmake/leap + export leap_DIR=$PWD/leap/build/lib/cmake/leap + echo cdt_DIR = $PWD/cdt/build/lib/cmake/cdt + export cdt_DIR=$PWD/cdt/build/lib/cmake/cdt cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build build -- -j $(nproc) cd build/tests From 61242561d70dd2ddbb77a688a521870ccb744294 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 11:51:07 -0400 Subject: [PATCH 045/191] Restore exception safety as discussed with Kevin. --- plugins/net_plugin/net_plugin.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index caab1ba264..183b10cf6e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1923,6 +1923,7 @@ namespace eosio { // call with g_sync locked, called from conn's connection strand void sync_manager::request_next_chunk( fc::mutex *, const connection_ptr& conn ) RELEASE(sync_mtx) { + fc::lock_guard g(sync_mtx, fc::adopt_lock); auto chain_info = my_impl->get_chain_info(); fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, head: ${h}", @@ -1932,7 +1933,6 @@ namespace eosio { fc_wlog( logger, "ignoring request, head is ${h} last req = ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, source connection ${c}", ("h", chain_info.head_num)("r", sync_last_requested_num)("e", sync_next_expected_num) ("k", sync_known_lib_num)("s", sync_req_span)("c", sync_source->connection_id) ); - sync_mtx.unlock(); return; } @@ -1958,7 +1958,6 @@ namespace eosio { sync_known_lib_num = chain_info.lib_num; sync_last_requested_num = 0; set_state( in_sync ); // probably not, but we can't do anything else - sync_mtx.unlock(); } else { bool send_request = false; uint32_t start = sync_next_expected_num; @@ -1972,7 +1971,6 @@ namespace eosio { send_request = true; } } - sync_mtx.unlock(); if (send_request) { new_sync_source->strand.post( [new_sync_source, start, end, head_num=chain_info.head_num]() { peer_ilog( new_sync_source, "requesting range ${s} to ${e}, head ${h}", ("s", start)("e", end)("h", head_num) ); From 8c73690b01c9c94c7f6b0f3cc93de934f259e599 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 11:58:31 -0500 Subject: [PATCH 046/191] Use github environment file and variable to pass env variable between steps. --- .github/workflows/build.yaml | 33 +++++++-------------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 89aed3b557..f1a4a1a011 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -334,6 +334,7 @@ jobs: pwd ls -l echo github workspace: ${{ github.workspace }} + echo pwd = $PWD - name: Extract leap build run: | pwd @@ -343,16 +344,8 @@ jobs: zstdcat build.tar.zst | tar x ls -l cd .. 
- pwd - ls -l - ls -l leap - ls -l leap/build - ls -l leap/build/lib - ls -l leap/build/lib/cmake - ls -l leap/build/lib/cmake/leap - cd leap/build/lib/cmake/leap - export leap_DIR=$PWD - echo LEAPDIR = $leap_DIR + echo "lead_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" + echo leap_DIR = "${{ env.leap_DIR }}" - name: Build cdt run: | pwd @@ -363,26 +356,14 @@ jobs: cd build pwd apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev - cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR=${{ github.workspace }}/leap/build/lib/cmake/leap .. + cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="${{ env.leap_DIR }}" .. make -j $(nproc) cd ../.. - pwd - ls -l - ls -l cdt - ls -l cdt/build - ls -l cdt/build/lib - ls -l cdt/build/lib/cmake - ls -l cdt/build/lib/cmake/cdt - cd cdt/build/lib/cmake/cdt - export cdt_DIR=$PWD - echo CDTDIR = $cdt_DIR + echo "cdt_DIR=$PWD/cdt/build/lib/cmake/cdt" >> "$GITHUB_ENV" + echo cdt_DIR = "${{ env.cdt_DIR }}" - name: Build & Test reference-contracts run: | - echo leap_DIR = $PWD/leap/build/lib/cmake/leap - export leap_DIR=$PWD/leap/build/lib/cmake/leap - echo cdt_DIR = $PWD/cdt/build/lib/cmake/cdt - export cdt_DIR=$PWD/cdt/build/lib/cmake/cdt - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -Dcdt_DIR="${{ env.cdt_DIR }}" cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From 30ffccc59b21a5bdacb4b7bb77c901fb1c73ee09 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 13:12:29 -0400 Subject: [PATCH 047/191] Do not pass the `mutex *` as suggested in PR comment. --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 183b10cf6e..de32e3c3c2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -227,7 +227,7 @@ namespace eosio { constexpr static auto stage_str( stages s ); bool set_state( stages newstate ); bool is_sync_required( uint32_t fork_head_block_num ); - void request_next_chunk( fc::mutex *m, const connection_ptr& conn = connection_ptr() ) RELEASE(sync_mtx); + void request_next_chunk( const connection_ptr& conn = connection_ptr() ) RELEASE(sync_mtx); connection_ptr find_next_sync_node(); void start_sync( const connection_ptr& c, uint32_t target ); bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -1867,7 +1867,8 @@ namespace eosio { // if starting to sync need to always start from lib as we might be on our own fork uint32_t lib_num = my_impl->get_chain_lib_num(); sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); - request_next_chunk( g.release() ); + g.release(); + request_next_chunk(); } } } @@ -1922,7 +1923,7 @@ namespace eosio { } // call with g_sync locked, called from conn's connection strand - void sync_manager::request_next_chunk( fc::mutex *, const connection_ptr& conn ) RELEASE(sync_mtx) { + void sync_manager::request_next_chunk( const connection_ptr& conn ) RELEASE(sync_mtx) { fc::lock_guard g(sync_mtx, fc::adopt_lock); auto chain_info = my_impl->get_chain_info(); @@ -2021,7 +2022,8 @@ namespace eosio { } sync_next_expected_num = std::max( chain_info.lib_num + 1, sync_next_expected_num ); - request_next_chunk( g_sync.release(), c ); + g_sync.release(); + request_next_chunk( c ); } // called from connection 
strand @@ -2033,7 +2035,8 @@ namespace eosio { if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk( g.release() ); + g.release(); + request_next_chunk(); } } @@ -2318,7 +2321,8 @@ namespace eosio { if (sync_next_expected_num > sync_last_requested_num && sync_last_requested_num < sync_known_lib_num) { fc_dlog(logger, "Requesting range ahead, head: ${h} blk_num: ${bn} sync_next_expected_num ${nen} sync_last_requested_num: ${lrn}", ("h", head)("bn", blk_num)("nen", sync_next_expected_num)("lrn", sync_last_requested_num)); - request_next_chunk(g_sync.release()); + g_sync.release(); + request_next_chunk(); } } From 72702596d2b793cf4df487ed6a4b2b2b19d06ed4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 12:15:15 -0500 Subject: [PATCH 048/191] Spelling correction. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f1a4a1a011..c34b78d0fc 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -344,7 +344,7 @@ jobs: zstdcat build.tar.zst | tar x ls -l cd .. - echo "lead_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" + echo "leap_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" echo leap_DIR = "${{ env.leap_DIR }}" - name: Build cdt run: | From 3a8584cf1e6f85e3257efc0287d793aab4ec4513 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 13:23:23 -0400 Subject: [PATCH 049/191] Hold the lock during `request_next_chunk` method as suggested --- plugins/net_plugin/net_plugin.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index de32e3c3c2..763a4c4e11 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -227,7 +227,7 @@ namespace eosio { constexpr static auto stage_str( stages s ); bool set_state( stages newstate ); bool is_sync_required( uint32_t fork_head_block_num ); - void request_next_chunk( const connection_ptr& conn = connection_ptr() ) RELEASE(sync_mtx); + void request_next_chunk( const connection_ptr& conn = connection_ptr() ) REQUIRES(sync_mtx); connection_ptr find_next_sync_node(); void start_sync( const connection_ptr& c, uint32_t target ); bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -1867,7 +1867,6 @@ namespace eosio { // if starting to sync need to always start from lib as we might be on our own fork uint32_t lib_num = my_impl->get_chain_lib_num(); sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); - g.release(); request_next_chunk(); } } @@ -1923,8 +1922,7 @@ namespace eosio { } // call with g_sync locked, called from conn's connection strand - void sync_manager::request_next_chunk( const connection_ptr& conn ) RELEASE(sync_mtx) { - fc::lock_guard g(sync_mtx, fc::adopt_lock); + void sync_manager::request_next_chunk( const connection_ptr& conn ) REQUIRES(sync_mtx) { auto chain_info = my_impl->get_chain_info(); fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, head: ${h}", @@ -2022,7 +2020,6 @@ namespace eosio { } sync_next_expected_num = std::max( chain_info.lib_num + 1, sync_next_expected_num ); - g_sync.release(); request_next_chunk( c ); } @@ -2035,7 +2032,6 @@ namespace eosio { if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - g.release(); 
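// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the REQUIRES shape this part of the
// series settles on. The caller acquires sync_mtx and keeps holding it across
// the helper; clang then rejects any call site that lacks the lock. TS and
// ts_mutex are the same simplified stand-ins used in the earlier sketch, not
// the real fc/mutex.hpp definitions.
#include <mutex>
#if defined(__clang__)
#  define TS(x) __attribute__((x))
#else
#  define TS(x)
#endif

class TS(capability("mutex")) ts_mutex {
   std::mutex m_;
public:
   void lock()   TS(acquire_capability()) { m_.lock(); }
   void unlock() TS(release_capability()) { m_.unlock(); }
};

class sync_manager_like {
   ts_mutex sync_mtx;
   unsigned last_requested TS(guarded_by(sync_mtx)) = 0;

   void request_next_chunk() TS(requires_capability(sync_mtx)) {
      ++last_requested; // ok: the analysis knows the caller holds sync_mtx
   }

public:
   void start_sync() {
      sync_mtx.lock();
      request_next_chunk(); // ok: called with the lock held
      sync_mtx.unlock();
      // request_next_chunk();
      // ^ clang -Wthread-safety: calling function requires holding mutex 'sync_mtx'
   }
};

int main() { sync_manager_like{}.start_sync(); }
// --------------------------- original hunk resumes --------------------------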
request_next_chunk(); } } @@ -2321,7 +2317,6 @@ if (sync_next_expected_num > sync_last_requested_num && sync_last_requested_num < sync_known_lib_num) { fc_dlog(logger, "Requesting range ahead, head: ${h} blk_num: ${bn} sync_next_expected_num ${nen} sync_last_requested_num: ${lrn}", ("h", head)("bn", blk_num)("nen", sync_next_expected_num)("lrn", sync_last_requested_num)); - g_sync.release(); request_next_chunk(); } } From 76129fd10f6d32e43cbe120530251bd96adbf100 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 13:26:34 -0400 Subject: [PATCH 050/191] Revert unnecessary logic changes. --- plugins/net_plugin/net_plugin.cpp | 33 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 763a4c4e11..d3bbbdc4cd 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1957,29 +1957,30 @@ namespace eosio { sync_known_lib_num = chain_info.lib_num; sync_last_requested_num = 0; set_state( in_sync ); // probably not, but we can't do anything else - } else { - bool send_request = false; + return; + } + + bool request_sent = false; + if( sync_last_requested_num != sync_known_lib_num ) { uint32_t start = sync_next_expected_num; uint32_t end = start + sync_req_span - 1; - if( sync_last_requested_num != sync_known_lib_num ) { - if( end > sync_known_lib_num ) - end = sync_known_lib_num; - if( end > 0 && end >= start ) { - sync_last_requested_num = end; - sync_source = new_sync_source; - send_request = true; - } - } - if (send_request) { + if( end > sync_known_lib_num ) + end = sync_known_lib_num; + if( end > 0 && end >= start ) { + sync_last_requested_num = end; + sync_source = new_sync_source; + request_sent = true; new_sync_source->strand.post( [new_sync_source, start, end, head_num=chain_info.head_num]() { peer_ilog( new_sync_source, "requesting range ${s} to ${e}, head ${h}", ("s", start)("e", end)("h", head_num) ); new_sync_source->request_sync_blocks( start, end ); - } ); - } else { - fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); - send_handshakes(); + } ); } } + if( !request_sent ) { + sync_source.reset(); + fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); + send_handshakes(); + } } // static, thread safe From 994b547642abe1bd60e624953144923966064ee4 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 13:27:16 -0400 Subject: [PATCH 051/191] Remove `fc::unique_lock` move constructor which I am not sure how to get right --- libraries/libfc/include/fc/mutex.hpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp index 684274b90c..518a9ccd8b 100644 --- a/libraries/libfc/include/fc/mutex.hpp +++ b/libraries/libfc/include/fc/mutex.hpp @@ -161,13 +161,6 @@ class SCOPED_CAPABILITY unique_lock { mut->lock(); } - unique_lock(unique_lock&& o) noexcept ACQUIRE(o) - : mut(o.mut) - , locked(o.locked) { - o.locked = false; - o.mut = nullptr; - } - From 45844e019b67aa3158a10c96b153440815000f1a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 12 Jun 2023 13:40:15 -0400 Subject: [PATCH 052/191] Fix a couple issues from the previous merge.
--- plugins/net_plugin/net_plugin.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 66e16f0d44..0711237194 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1989,7 +1989,6 @@ namespace eosio { } if( !request_sent ) { sync_source.reset(); - g_sync.unlock(); fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); send_handshakes(); } @@ -4089,7 +4088,7 @@ namespace eosio { } { - fc::lock_guard g( my->keepalive_timer_mtx ); + fc::lock_guard g( keepalive_timer_mtx ); keepalive_timer = std::make_unique<boost::asio::steady_timer>( thread_pool.get_executor() ); } From 0529db6b3d8051fab3660601e417a30fd6c5d142 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 13:25:00 -0500 Subject: [PATCH 053/191] Export the env variable in cdt build. --- .github/workflows/build.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c34b78d0fc..ebd2669a77 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -345,7 +345,7 @@ jobs: zstdcat build.tar.zst | tar x ls -l cd .. echo "leap_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" - echo leap_DIR = "${{ env.leap_DIR }}" + echo leap_DIR = $leap_DIR - name: Build cdt run: | pwd ls -l cd cdt pwd mkdir build cd build pwd apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev - cmake -DCMAKE_BUILD_TYPE=Release -Dleap_DIR="${{ env.leap_DIR }}" .. + export leap_DIR="${{ env.leap_DIR }}" + echo leap_DIR=$leap_DIR + cmake -DCMAKE_BUILD_TYPE=Release .. make -j $(nproc) cd ../.. echo "cdt_DIR=$PWD/cdt/build/lib/cmake/cdt" >> "$GITHUB_ENV" echo cdt_DIR = $cdt_DIR - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -Dcdt_DIR="${{ env.cdt_DIR }}" cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From 756d884d616e08d22b3f289210509f66a670cf59 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 13:36:17 -0500 Subject: [PATCH 054/191] Instead of building cdt from source, for this just install from deb package as well. --- .github/workflows/build.yaml | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ebd2669a77..bbcf8f5da2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -318,12 +318,6 @@ jobs: with: name: ${{matrix.platform}}-build path: leap - - name: checkout cdt - uses: actions/checkout@v3 - with: - repository: AntelopeIO/cdt - submodules: recursive - path: cdt - name: checkout reference-contracts uses: actions/checkout@v3 with: @@ -346,26 +340,22 @@ jobs: cd .. echo "leap_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" echo leap_DIR = $leap_DIR - - name: Build cdt + - name: Download cdt + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: cdt + file: 'cdt_.*amd64.deb' + target: main + artifact-name: cdt_ubuntu_package_amd64 + token: ${{github.token}} + - name: Install cdt Packages run: | - pwd - ls -l - cd cdt - pwd - mkdir build - cd build - pwd - apt-get update && apt-get install -y pkg-config libcurl4-gnutls-dev - export leap_DIR="${{ env.leap_DIR }}" - echo leap_DIR=$leap_DIR - cmake -DCMAKE_BUILD_TYPE=Release .. - make -j $(nproc) - cd ../..
- echo "cdt_DIR=$PWD/cdt/build/lib/cmake/cdt" >> "$GITHUB_ENV" - echo cdt_DIR = $cdt_DIR + apt install -y ./*.deb + rm ./*.deb - name: Build & Test reference-contracts run: | - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -Dcdt_DIR="${{ env.cdt_DIR }}" + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From d32f1f77cd548b2791c2b877a7463db4ac014a57 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 13:59:40 -0500 Subject: [PATCH 055/191] Try specifying LEAP_BUILD_DIR --- .github/workflows/build.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index bbcf8f5da2..74cc45847d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -339,7 +339,10 @@ jobs: ls -l cd .. echo "leap_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" + echo "LEAP_BUILD_DIR=$PWD/leap/build" >> "$GITHUB_ENV" + pwd echo leap_DIR = $leap_DIR + echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -355,6 +358,10 @@ jobs: rm ./*.deb - name: Build & Test reference-contracts run: | + export LEAP_BUILD_DIR="${{ env.LEAP_BUILD_DIR }}" + echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR + echo leap_DIR = "${{ env.leap_DIR }}" + ls $leap_DIR cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" cmake --build build -- -j $(nproc) cd build/tests From 2df6005091bb2bdbef076d566d658b13268a57cb Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 14:10:44 -0500 Subject: [PATCH 056/191] Try adding CMAKE_PREFIX_PATH. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 74cc45847d..3bb3dd6738 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -362,7 +362,7 @@ jobs: echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR echo leap_DIR = "${{ env.leap_DIR }}" ls $leap_DIR - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" + cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH=$LEAP_BUILD_DIR cmake --build build -- -j $(nproc) cd build/tests ctest --output-on-failure -j $(nproc) From 6f2ab3c589b0c98af1d8bb12f6f42b01548ca352 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 14:25:03 -0500 Subject: [PATCH 057/191] Trying if the dir structure has to match that from the leap Build job when using the build archive. --- .github/workflows/build.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3bb3dd6738..e665928f98 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -317,7 +317,6 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{matrix.platform}}-build - path: leap - name: checkout reference-contracts uses: actions/checkout@v3 with: @@ -333,13 +332,10 @@ jobs: run: | pwd ls -l - cd leap - pwd zstdcat build.tar.zst | tar x ls -l - cd .. 
- echo "leap_DIR=$PWD/leap/build/lib/cmake/leap" >> "$GITHUB_ENV" - echo "LEAP_BUILD_DIR=$PWD/leap/build" >> "$GITHUB_ENV" + echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" + echo "LEAP_BUILD_DIR=$PWD/build" >> "$GITHUB_ENV" pwd echo leap_DIR = $leap_DIR echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR From 1e78934af6b0f84bf037347281436cb104d45765 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Mon, 12 Jun 2023 14:36:02 -0500 Subject: [PATCH 058/191] Move reference-contracts build into the reference-contracts dir. --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e665928f98..be79e41c0f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -358,9 +358,9 @@ jobs: echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR echo leap_DIR = "${{ env.leap_DIR }}" ls $leap_DIR - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH=$LEAP_BUILD_DIR - cmake --build build -- -j $(nproc) - cd build/tests + cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH=$LEAP_BUILD_DIR + cmake --build reference-contracts/build -- -j $(nproc) + cd reference-contracts/build/tests ctest --output-on-failure -j $(nproc) all-passing: From 4abcae09c95936763c1f2bac0a4c4c570aae5ed3 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 12 Jun 2023 15:48:25 -0500 Subject: [PATCH 059/191] Support --retry-num-blocks in regression tests. Enable --transaction-retry-max-storage-size-gb on all non-producer nodes in regression tests. --- plugins/chain_plugin/chain_plugin.cpp | 2 +- tests/TestHarness/launcher.py | 2 + tests/TestHarness/transactions.py | 82 ++++++++++++++--------- tests/nodeos_chainbase_allocation_test.py | 6 +- tests/nodeos_voting_test.py | 28 ++++---- 5 files changed, 71 insertions(+), 49 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 1544853fbd..a102b7895e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -2193,7 +2193,7 @@ void api_base::send_transaction_gen(API &api, send_transaction_params_t params, retry = params.retry_trx; retry_num_blocks = params.retry_trx_num_blocks; - EOS_ASSERT( !retry || api.trx_retry.has_value(), unsupported_feature, "Transaction retry not enabled on node" ); + EOS_ASSERT( !retry || api.trx_retry.has_value(), unsupported_feature, "Transaction retry not enabled on node. 
transaction-retry-max-storage-size-gb is 0" ); EOS_ASSERT( !retry || (ptrx->expiration() <= api.trx_retry->get_max_expiration_time()), tx_exp_too_far_exception, "retry transaction expiration ${e} larger than allowed ${m}", ("e", ptrx->expiration())("m", api.trx_retry->get_max_expiration_time()) ); diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 805f30f47e..01acd0b7c7 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -510,6 +510,8 @@ def construct_command_line(self, instance: nodeDefinition): eosdcmd.extend(producer_keys) producer_names = list(sum([('--producer-name', p) for p in instance.producers], ())) eosdcmd.extend(producer_names) + else: + a(a(eosdcmd, '--transaction-retry-max-storage-size-gb'), '100') a(a(eosdcmd, '--plugin'), 'eosio::net_plugin') a(a(eosdcmd, '--plugin'), 'eosio::chain_api_plugin') diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index 566d3d0e8d..d90b9b32c7 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -11,18 +11,22 @@ from .testUtils import Utils class Transactions(NodeosQueries): + retry_num_blocks_default = 1 + def __init__(self, host, port, walletMgr=None): super().__init__(host, port, walletMgr) # Create & initialize account and return creation transactions. Return transaction json object - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs=''): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, silentErrors=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs='', retry_num_blocks=None): signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="system newaccount" + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" cmd=(f'{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} \'{account.ownerPublicKey}\' ' f'\'{account.activePublicKey}\' --stake-net "{stakeNet} {CORE_SYMBOL}" --stake-cpu ' - f'"{stakeCPU} {CORE_SYMBOL}" --buy-ram "{buyRAM} {CORE_SYMBOL}" {additionalArgs}') + f'"{stakeCPU} {CORE_SYMBOL}" --buy-ram "{buyRAM} {CORE_SYMBOL}" {additionalArgs} {retryStr}') msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) @@ -31,27 +35,30 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w trans = self.transferFunds(creatorAccount, account, NodeosQueries.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init") transId=NodeosQueries.getTransId(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False, sign=False): + def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, silentErrors=False,exitOnError=False, sign=False, retry_num_blocks=None): """Create account and return creation transactions. 
Return transaction json object. waitForTransBlock: wait on creation transaction id to appear in a block.""" signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="create account" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f"{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} {account.ownerPublicKey} " + f"{account.activePublicKey} {retryStr}") msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) if stakedDeposit > 0: - self.waitForTransactionInBlock(transId) # seems like account creation needs to be finlized before transfer can happen + if not waitForTransBlock: # account creation needs to be finalized before transfer can happen so wait if we haven't already + self.waitForTransactionInBlock(transId) trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init") self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign): assert isinstance(amountStr, str) @@ -96,6 +103,9 @@ def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry # Trasfer funds. Returns "transfer" json return object def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False, dontSend=False, expiration=90, skipSign=False): cmdArr = self.transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign) + if waitForTransBlock: + cmdArr.append('--retry-num-blocks') + cmdArr.append('1') trans=None start=time.perf_counter() try: @@ -118,7 +128,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans # Trasfer funds. 
Returns (popen, cmdArr) for checkDelayedOutput def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False, dontSend=False, expiration=90, skipSign=False): @@ -261,56 +271,62 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False, reportStatus=True, sign=False): + def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, silentErrors=True, exitOnError=False, reportStatus=True, sign=False, retry_num_blocks=None): if toAccount is None: toAccount=fromAccount signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system delegatebw" transferStr="--transfer" if transferTo else "" - cmd="%s -j %s %s %s \"%s %s\" \"%s %s\" %s" % ( - cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr=f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' + f'"{cpuQuantity} {CORE_SYMBOL}" {transferStr} {retryStr}') msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans, reportStatus=reportStatus) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False): + def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): if toAccount is None: toAccount=fromAccount signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system undelegatebw" - cmd="%s -j %s %s %s \"%s %s\" \"%s %s\"" % ( - cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr=f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' + f'"{cpuQuantity} {CORE_SYMBOL}" {retryStr}') msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def regproducer(self, producer, url, location, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False): + def regproducer(self, producer, url, location, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): signStr = NodeosQueries.sign_str(sign, [ producer.activePublicKey ]) 
- cmdDesc="system regproducer" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, producer.name, producer.activePublicKey, url, location) - msg="producer=%s" % (producer.name); - trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) + cmdDesc = "system regproducer" + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd = f'{cmdDesc} -j {signStr} {producer.name} {producer.activePublicKey} {url} {location} {retryStr}' + msg = f"producer={producer.name}" + trans = self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def vote(self, account, producers, waitForTransBlock=False, exitOnError=False, sign=False): + def vote(self, account, producers, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): signStr = NodeosQueries.sign_str(sign, [ account.activePublicKey ]) cmdDesc = "system voteproducer prods" - cmd="%s -j %s %s %s" % ( - cmdDesc, signStr, account.name, " ".join(producers)) - msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd = f'{cmdDesc} -j {signStr} {account.name} {" ".join(producers)} {retryStr}' + msg = "account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); + trans = self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans # Require producer_api_plugin def activatePreactivateFeature(self): diff --git a/tests/nodeos_chainbase_allocation_test.py b/tests/nodeos_chainbase_allocation_test.py index 8a78c793f3..5771428b80 100755 --- a/tests/nodeos_chainbase_allocation_test.py +++ b/tests/nodeos_chainbase_allocation_test.py @@ -43,15 +43,17 @@ pnodes=1, prodCount=1, totalProducers=1, - totalNodes=2, + totalNodes=3, loadSystemContract=False, specificExtraNodeosArgs={ 1:"--read-mode irreversible --plugin eosio::producer_api_plugin"}) producerNodeId = 0 irrNodeId = 1 + nonProdNodeId = 2 producerNode = cluster.getNode(producerNodeId) irrNode = cluster.getNode(irrNodeId) + nonProdNode = cluster.getNode(nonProdNodeId) # Create delayed transaction to create "generated_transaction_object" cmd = "create account -j eosio sample EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\ @@ -63,7 +65,7 @@ newProducerAcc = Account("newprod") newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - producerNode.createAccount(newProducerAcc, cluster.eosioAccount, waitForTransBlock=True) + nonProdNode.createAccount(newProducerAcc, cluster.eosioAccount, waitForTransBlock=True) setProdsStr = '{"schedule": [' setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}' diff --git a/tests/nodeos_voting_test.py 
b/tests/nodeos_voting_test.py index 7e6ab0368e..9963866766 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -139,7 +139,8 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v -totalNodes=4 +prodNodes=4 +totalNodes=5 cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details prodCount=args.prod_count @@ -156,7 +157,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): cluster.setWalletMgr(walletMgr) Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=prodNodes, totalNodes=totalNodes, totalProducers=prodNodes*21) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") @@ -182,13 +183,14 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) + nonProdNode=cluster.getNode(4) for i in range(0, totalNodes): node=cluster.getNode(i) node.producers=Cluster.parseProducers(i) for prod in node.producers: - trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, - waitForTransBlock=True if prod == node.producers[-1] else False, - silentErrors=False if prod == node.producers[-1] else True, exitOnError=True) + trans=nonProdNode.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, + waitForTransBlock=True if prod == node.producers[-1] else False, + silentErrors=False if prod == node.producers[-1] else True, exitOnError=True) node0=cluster.getNode(0) node1=cluster.getNode(1) @@ -200,9 +202,9 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, - waitForTransBlock=True if account == accounts[-1] else False, - stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, + waitForTransBlock=True if account == accounts[-1] else False, + stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) for account in accounts: Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) @@ -210,8 +212,8 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): waitForTransBlock=True if account == accounts[-1] else False) for account in accounts: - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, - waitForTransBlock=True if account == accounts[-1] else False, exitOnError=True) + trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, + waitForTransBlock=True if account == accounts[-1] else False, exitOnError=True) # containers for tracking producers prodsActive={} @@ -224,10 +226,10 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): #first account will vote for node0 producers, all others will vote for node1 producers node=node0 for account in accounts: - trans=node.vote(account, node.producers, waitForTransBlock=True) + 
trans=nonProdNode.vote(account, node.producers, waitForTransBlock=True if account == accounts[-1] else False) node=node1 - node.undelegatebw(account, 1.0000, 1.0000, waitForTransBlock=True, silentErrors=False, exitOnError=True) + nonProdNode.undelegatebw(account, 1.0000, 1.0000, waitForTransBlock=True, silentErrors=False, exitOnError=True) setActiveProducers(prodsActive, node1.producers) @@ -237,7 +239,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # first account will vote for node2 producers, all others will vote for node3 producers node1 for account in accounts: - trans=node.vote(account, node.producers, waitForTransBlock=True) + trans=nonProdNode.vote(account, node.producers, waitForTransBlock=True if account == accounts[-1] else False) node=node2 setActiveProducers(prodsActive, node2.producers) From 6265cdbf06975d0753a41eae3800e5d618d3af9f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 07:46:05 -0500 Subject: [PATCH 060/191] Clone leap to have src avail for build tree test. --- .github/workflows/build.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index be79e41c0f..1ed1eee2e1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -78,6 +78,7 @@ jobs: cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst + ls -l - name: Upload builddir uses: AntelopeIO/upload-artifact-large-chunks-action@v1 with: @@ -313,6 +314,10 @@ jobs: runs-on: ["self-hosted", "enf-x86-beefy"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: + - name: Clone leap + uses: actions/checkout@v3 + with: + submodules: recursive - name: Download leap builddir uses: actions/download-artifact@v3 with: From 3d1b726450c765bf51aebab528c4a420f9654301 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 08:09:47 -0500 Subject: [PATCH 061/191] Try using clone and builddir combo for dev-install variant. --- .github/workflows/build.yaml | 42 +++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 1ed1eee2e1..f79a99cd8e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -258,31 +258,26 @@ jobs: runs-on: ["self-hosted", "enf-x86-beefy"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - - name: checkout leap + - name: Clone leap uses: actions/checkout@v3 with: submodules: recursive - path: leap - - name: checkout reference-contracts - uses: actions/checkout@v3 + - name: Download leap builddir + uses: actions/download-artifact@v3 with: - repository: AntelopeIO/reference-contracts - path: reference-contracts - - name: Check directory structure + name: ${{matrix.platform}}-build + - name: Extract leap build run: | pwd ls -l - - name: leap build and make dev-install + zstdcat build.tar.zst | tar x + ls -l + - name: leap make dev-install run: | - # https://github.com/actions/runner/issues/2033 - chown -R $(id -u):$(id -g) $PWD - cd leap - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja - cmake --build build cd build ninja install ninja dev-install - cd ../.. + cd .. 
- name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -292,15 +287,28 @@ jobs: target: main artifact-name: cdt_ubuntu_package_amd64 token: ${{github.token}} + path: cdt - name: Install cdt Packages run: | + ls -l + cd cdt + ls -l apt install -y ./*.deb rm ./*.deb + - name: checkout reference-contracts + uses: actions/checkout@v3 + with: + repository: AntelopeIO/reference-contracts + path: reference-contracts + - name: Check directory structure + run: | + pwd + ls -l - name: Build & Test reference-contracts run: | - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On - cmake --build build -- -j $(nproc) - cd build/tests + cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake --build reference-contracts/build -- -j $(nproc) + cd reference-contracts/build/tests ctest --output-on-failure -j $(nproc) libtester-build-tree-test: From 70d7fa042204c96c505869850e578b4e2605065a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 10:18:34 -0500 Subject: [PATCH 062/191] Update dependency now that it is using the artifact. --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f79a99cd8e..3217b7f7bf 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -249,8 +249,8 @@ jobs: libtester-make-dev-install-test: name: libtester make dev-install test - needs: [d] - if: always() && needs.d.result == 'success' + needs: [d, Build] + if: always() && needs.Build.result == 'success' strategy: fail-fast: false matrix: From 1b9a4a19785b8acd875621970dfde8ed80dcaf3a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 10:35:06 -0500 Subject: [PATCH 063/191] Fix path not supported for artifact download action. Introduce removal of leap artifacts after install. --- .github/workflows/build.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3217b7f7bf..b827bd217f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -278,6 +278,11 @@ jobs: ninja install ninja dev-install cd .. + - name: Delete leap artifacts + run: | + ls -l + rm -r * + ls -l - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -287,11 +292,8 @@ jobs: target: main artifact-name: cdt_ubuntu_package_amd64 token: ${{github.token}} - path: cdt - name: Install cdt Packages run: | - ls -l - cd cdt ls -l apt install -y ./*.deb rm ./*.deb From 903a8d71e05903e197bc327e502d707065eff089 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 11:18:18 -0500 Subject: [PATCH 064/191] Try with lowtier machines. 
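
A condensed view of what the dev-install job now does, as plain shell (a sketch; step names and paths follow the workflow above, and the dev component contents are assumed, not verified):

    zstdcat build.tar.zst | tar x   # restore the builddir artifact from the Build job
    cd build
    ninja install                   # install the prebuilt tree
    ninja dev-install               # dev extras (headers, libtester, etc., assumed)

This is also why the job gains needs: [d, Build] above: the builddir artifact has to exist before it can be downloaded.
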
--- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b827bd217f..53bc0faa21 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -255,7 +255,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap @@ -321,7 +321,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap From cb18aea20c912c2c526d9fdd63692bd8b6c55bd3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 11:46:13 -0500 Subject: [PATCH 065/191] Attempt to build up from ubuntu base images to check dependency installation in deb packages. --- .github/workflows/build.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 53bc0faa21..476fe6fed0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -216,7 +216,8 @@ jobs: matrix: platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + # container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 @@ -234,6 +235,7 @@ jobs: - name: Install cdt and leap-dev Packages run: | apt install -y ./*.deb + apt-get -y install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 From d5bc51732e5f806ee843c8bce0c207c762ad9655 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 12:14:28 -0500 Subject: [PATCH 066/191] Run apt update and upgrade on base images before installing. --- .github/workflows/build.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 476fe6fed0..48744f17c1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -234,8 +234,9 @@ jobs: name: leap-dev-${{matrix.platform}}-amd64 - name: Install cdt and leap-dev Packages run: | + apt update && apt upgrade -y apt install -y ./*.deb - apt-get -y install cmake + apt -y install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 From 213eff9341e814dc34d93fa1aa41cfc76d5210ac Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 12:36:46 -0500 Subject: [PATCH 067/191] Move apt install step down to reference-contracts where it is required. Also install build-essential. 
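
On the local-package install pattern being iterated here, a minimal sketch of why the './' prefix matters (package names illustrative):

    apt-get update                # refresh indexes so dependency resolution works
    apt-get install -y ./*.deb    # './' tells apt these are local archive files;
                                  # their dependencies still come from the repos
    rm ./*.deb

Without the './' (or an absolute path), apt-get would try to resolve the names as repository packages rather than install the downloaded files.
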
--- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 48744f17c1..8b6ba9285e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -236,7 +236,6 @@ jobs: run: | apt update && apt upgrade -y apt install -y ./*.deb - apt -y install cmake rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 @@ -245,6 +244,7 @@ jobs: path: reference-contracts - name: Build & Test reference-contracts run: | + apt -y install cmake build-essential cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build build -- -j $(nproc) cd build/tests From 6eeb12b0caf28ee4165d18932c4eb6ecdd05ff3d Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 13:00:52 -0500 Subject: [PATCH 068/191] apt reported as not having stable CLI interface. Try using apt-get instead. --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8b6ba9285e..5ee543bd96 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -234,8 +234,8 @@ jobs: name: leap-dev-${{matrix.platform}}-amd64 - name: Install cdt and leap-dev Packages run: | - apt update && apt upgrade -y - apt install -y ./*.deb + apt-get update && apt-get upgrade -y + apt-get install -y ./*.deb rm ./*.deb - name: checkout reference-contracts uses: actions/checkout@v3 @@ -244,7 +244,7 @@ jobs: path: reference-contracts - name: Build & Test reference-contracts run: | - apt -y install cmake build-essential + apt-get -y install cmake build-essential cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build build -- -j $(nproc) cd build/tests From cb707a526c2e3754fc61f20805d0d1ba666119ef Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 13:20:30 -0500 Subject: [PATCH 069/191] Don't upgrade, just update list. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5ee543bd96..2dd5b4a52b 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -234,7 +234,7 @@ jobs: name: leap-dev-${{matrix.platform}}-amd64 - name: Install cdt and leap-dev Packages run: | - apt-get update && apt-get upgrade -y + apt-get update apt-get install -y ./*.deb rm ./*.deb - name: checkout reference-contracts From 4fb6c05667b513503712eef276878ac66a78ac9f Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 13:35:33 -0500 Subject: [PATCH 070/191] Set some env variables for install. --- .github/workflows/build.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2dd5b4a52b..2b44532907 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -235,6 +235,8 @@ jobs: - name: Install cdt and leap-dev Packages run: | apt-get update + export DEBIAN_FRONTEND='noninteractive' + export TZ='Etc/UTC' apt-get install -y ./*.deb rm ./*.deb - name: checkout reference-contracts From 781bf34868ca758302eb08a0f15aeba9ea8654c6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 14:21:49 -0500 Subject: [PATCH 071/191] Up the runner for the dev-install test. 
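
Background on the two exports added above (a sketch; tzdata is the usual offender, though any debconf-prompting package behaves the same on a bare base image):

    export DEBIAN_FRONTEND='noninteractive'   # suppress debconf dialogs during install
    export TZ='Etc/UTC'                       # pre-answer tzdata's timezone question
    apt-get install -y ./*.deb                # completes without blocking on a prompt
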
--- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2b44532907..8e4383cd44 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -260,7 +260,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to dev-install step container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap From 1a2f38ded69e9c49bedeb3ab9d24e858445bfc10 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 14:23:26 -0500 Subject: [PATCH 072/191] Begin cleanup of build.yaml --- .github/workflows/build.yaml | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8e4383cd44..ea3e66b74b 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -78,7 +78,6 @@ jobs: cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja cmake --build build tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - ls -l - name: Upload builddir uses: AntelopeIO/upload-artifact-large-chunks-action@v1 with: @@ -216,7 +215,6 @@ jobs: matrix: platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here - # container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Download cdt @@ -273,10 +271,7 @@ jobs: name: ${{matrix.platform}}-build - name: Extract leap build run: | - pwd - ls -l zstdcat build.tar.zst | tar x - ls -l - name: leap make dev-install run: | cd build @@ -285,9 +280,7 @@ jobs: cd .. 
- name: Delete leap artifacts run: | - ls -l rm -r * - ls -l - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -299,7 +292,6 @@ jobs: token: ${{github.token}} - name: Install cdt Packages run: | - ls -l apt install -y ./*.deb rm ./*.deb - name: checkout reference-contracts @@ -307,10 +299,6 @@ jobs: with: repository: AntelopeIO/reference-contracts path: reference-contracts - - name: Check directory structure - run: | - pwd - ls -l - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On @@ -342,23 +330,11 @@ jobs: with: repository: AntelopeIO/reference-contracts path: reference-contracts - - name: Check directory structure - run: | - pwd - ls -l - echo github workspace: ${{ github.workspace }} - echo pwd = $PWD - name: Extract leap build run: | - pwd - ls -l zstdcat build.tar.zst | tar x - ls -l echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" echo "LEAP_BUILD_DIR=$PWD/build" >> "$GITHUB_ENV" - pwd - echo leap_DIR = $leap_DIR - echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -374,11 +350,7 @@ jobs: rm ./*.deb - name: Build & Test reference-contracts run: | - export LEAP_BUILD_DIR="${{ env.LEAP_BUILD_DIR }}" - echo LEAP_BUILD_DIR = $LEAP_BUILD_DIR - echo leap_DIR = "${{ env.leap_DIR }}" - ls $leap_DIR - cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH=$LEAP_BUILD_DIR + cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH="${{ env.LEAP_BUILD_DIR }}" cmake --build reference-contracts/build -- -j $(nproc) cd reference-contracts/build/tests ctest --output-on-failure -j $(nproc) From 6ef42555d85a27448e4f24372242dd0ea8c4c02c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 16:18:01 -0500 Subject: [PATCH 073/191] Use midtier runner instead of beefy. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ea3e66b74b..c61ef531e4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -258,7 +258,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to dev-install step + runs-on: ["self-hosted", "enf-x86-midtier"] # not sure if this warrants a different machine, but start here due to dev-install step container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap From da2e36fd8db47ce2ccb7ed2ae1da77614b8703de Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 13 Jun 2023 17:02:00 -0500 Subject: [PATCH 074/191] midtier seems to be failing, up it to hightier and try. Fix documentation. 
--- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c61ef531e4..1a1250c062 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -258,7 +258,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-midtier"] # not sure if this warrants a different machine, but start here due to dev-install step + runs-on: ["self-hosted", "enf-x86-hightier"] # not sure if this warrants a different machine, but start here due to dev-install step container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap @@ -272,7 +272,7 @@ jobs: - name: Extract leap build run: | zstdcat build.tar.zst | tar x - - name: leap make dev-install + - name: leap dev-install run: | cd build ninja install From f069ef42df2860ea749e796d97ccc6c693c18ee5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 08:39:32 -0500 Subject: [PATCH 075/191] Try to install from cmake. --- .github/workflows/build.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 1a1250c062..ca11ebae8e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -274,10 +274,8 @@ jobs: zstdcat build.tar.zst | tar x - name: leap dev-install run: | - cd build - ninja install - ninja dev-install - cd .. + cmake --install build + cmake --install build --component dev - name: Delete leap artifacts run: | rm -r * From 4ec1aba9a6ee006e9360fceaf64045b62aae3c68 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 08:54:21 -0500 Subject: [PATCH 076/191] Probably doesn't need hightier any longer. Move down to lowtier. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ca11ebae8e..9498651839 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -258,7 +258,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-hightier"] # not sure if this warrants a different machine, but start here due to dev-install step + runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap From 0dbfb1a6edd3eb9ff93ad108493a415ba357dfc9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 11:26:08 -0500 Subject: [PATCH 077/191] Creating a shell of the initial pinned_build.yaml workflow to be merged quickly allowing development of actual workflow in follow on branch. Need to have workflow file merged before github will allow triggering it in a development branch. 
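
On the switch above from ninja targets to cmake --install (a sketch, assuming the dev-install target maps to an install component named 'dev', which is what the change implies):

    cmake --install build                    # stands in for 'ninja install'
    cmake --install build --component dev    # stands in for 'ninja dev-install'

cmake --install is generator-agnostic, so the step keeps working even if the build tree was configured with a generator other than Ninja.
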
--- .github/workflows/pinned_build.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/pinned_build.yaml diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml new file mode 100644 index 0000000000..78124fe5c6 --- /dev/null +++ b/.github/workflows/pinned_build.yaml @@ -0,0 +1,28 @@ +name: "Pinned Build" + +on: + workflow_dispatch: + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + d: + name: Discover Platforms + runs-on: ubuntu-latest + outputs: + missing-platforms: ${{steps.discover.outputs.missing-platforms}} + p: ${{steps.discover.outputs.platforms}} + steps: + - name: Discover Platforms + id: discover + uses: AntelopeIO/discover-platforms-action@v1 + with: + platform-file: .cicd/platforms.json + password: ${{secrets.GITHUB_TOKEN}} + package-name: builders From a886743005619c0ae0f3c0fe0d214201aaba6210 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 11:44:53 -0500 Subject: [PATCH 078/191] Initial work to run pinned_build script on base images. Build package and upload artifacts. --- .github/workflows/pinned_build.yaml | 42 +++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 78124fe5c6..bfb37faa24 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -26,3 +26,45 @@ jobs: platform-file: .cicd/platforms.json password: ${{secrets.GITHUB_TOKEN}} package-name: builders + + Build: + name: Build + needs: [d] + if: always() && needs.d.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu18, ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to building + container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + steps: + - name: Update and Install git + run: | + apt-get update + apt-get install -y git + git --version + - name: Clone leap + uses: actions/checkout@v3 + with: + submodules: recursive + - name: Install dependencies + run: | + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + ./scripts/install_deps.sh + - name: Build Pinned Build + run: | + ./scripts/pinned_build.sh deps build "$(nproc)" + cd build + cpack + ls -l + cd .. + - name: Upload package + uses: actions/upload-artifact@v3 + with: + name: leap-${{matrix.platform}}-amd64 + path: build/leap*.deb + - name: Run Parallel Tests + run: | + cd build + ctest --output-on-failure -j $(nproc) -LE "(nonparallelizable_tests|long_running_tests)" --timeout 420 From da6a0686390f1ac773b7eee833c795aba343e118 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 12:03:27 -0500 Subject: [PATCH 079/191] Script already runs cpack, remove. Break artifact check into own step for easier removal in future. --- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index bfb37faa24..1735844bc0 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -55,8 +55,8 @@ jobs: - name: Build Pinned Build run: | ./scripts/pinned_build.sh deps build "$(nproc)" + - name: Check artifacts # Note: This step is for testing and debugging and can be removed in the future. 
cd build - cpack ls -l cd .. - name: Upload package From 9047d1175b29d4108b66cd6d64a1e397642139c5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 12:05:12 -0500 Subject: [PATCH 080/191] For now run on github runner. --- .github/workflows/pinned_build.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 1735844bc0..d9211b485d 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -35,7 +35,8 @@ jobs: fail-fast: false matrix: platform: [ubuntu18, ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to building + # runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to building + runs-on: ubuntu-latest container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Update and Install git From 7c106605cc4ac138c3604c9fc659913a9e28cb4c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 12:16:11 -0500 Subject: [PATCH 081/191] Not using cached build environments so removing this job. --- .github/workflows/pinned_build.yaml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index d9211b485d..4f7b296373 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -12,25 +12,8 @@ defaults: shell: bash jobs: - d: - name: Discover Platforms - runs-on: ubuntu-latest - outputs: - missing-platforms: ${{steps.discover.outputs.missing-platforms}} - p: ${{steps.discover.outputs.platforms}} - steps: - - name: Discover Platforms - id: discover - uses: AntelopeIO/discover-platforms-action@v1 - with: - platform-file: .cicd/platforms.json - password: ${{secrets.GITHUB_TOKEN}} - package-name: builders - Build: name: Build - needs: [d] - if: always() && needs.d.result == 'success' strategy: fail-fast: false matrix: From 5523010cfd11f3a3ebf1a6767abfe4a430a52c66 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 13:00:00 -0500 Subject: [PATCH 082/191] Fix syntax and forgotten run command. --- .github/workflows/pinned_build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 4f7b296373..9cc644abc2 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -40,6 +40,7 @@ jobs: run: | ./scripts/pinned_build.sh deps build "$(nproc)" - name: Check artifacts # Note: This step is for testing and debugging and can be removed in the future. + run: | cd build ls -l cd .. From 74fb1615d8dfd97d2ca0ece3b7c8f90b59293855 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 13:02:32 -0500 Subject: [PATCH 083/191] Add pinned to artifact name and don't upload dev artifact by filtering out in regex. 
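
One caveat about the filtering in the next hunk: upload-artifact's path input is matched as a glob, not a regular expression, so regex syntax such as [^d]+ will not do what it suggests there. The same filtering is easier to reason about on the shell side (illustrative sketch; package names assumed):

    ls build/leap*.deb                  # every package the build produced
    ls build/leap*.deb | grep -v dev    # shell-side filter to drop the leap-dev package
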
--- .github/workflows/pinned_build.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 9cc644abc2..509035ca5a 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -47,8 +47,9 @@ jobs: - name: Upload package uses: actions/upload-artifact@v3 with: - name: leap-${{matrix.platform}}-amd64 - path: build/leap*.deb + name: leap-${{matrix.platform}}-pinned-amd64 + # Skip the leap-dev artifact + path: build/leap-[^d]+*.deb - name: Run Parallel Tests run: | cd build From 7b7027cbee8fd589a2e694a06f9061e92787a23b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 13:49:33 -0500 Subject: [PATCH 084/191] Not using cached build environments so removing this job. --- .github/workflows/pinned_build.yaml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 78124fe5c6..4d951aa34f 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -12,17 +12,3 @@ defaults: shell: bash jobs: - d: - name: Discover Platforms - runs-on: ubuntu-latest - outputs: - missing-platforms: ${{steps.discover.outputs.missing-platforms}} - p: ${{steps.discover.outputs.platforms}} - steps: - - name: Discover Platforms - id: discover - uses: AntelopeIO/discover-platforms-action@v1 - with: - platform-file: .cicd/platforms.json - password: ${{secrets.GITHUB_TOKEN}} - package-name: builders From e4b28a7854e52ed6aca5e9c61d4b6d0929a4ce7e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 13:54:13 -0500 Subject: [PATCH 085/191] Remove empty jobs section. --- .github/workflows/pinned_build.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 4d951aa34f..e1ddfcd2b5 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -10,5 +10,3 @@ permissions: defaults: run: shell: bash - -jobs: From 39a5ba846a24169558056504dc5570a1dcccc400 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 13:57:00 -0500 Subject: [PATCH 086/191] Have to have a job for github. Add temp job. --- .github/workflows/pinned_build.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index e1ddfcd2b5..637dcec161 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -10,3 +10,12 @@ permissions: defaults: run: shell: bash + +jobs: + Temp: + name: temp + runs-on: ubuntu-latest + steps: + - name: test + run: | + pwd From aa5ef2a6688f39e291c06869a31716c6d4301cfe Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 14:08:50 -0500 Subject: [PATCH 087/191] Try single method of discovering leap dir. Use environment variable for leap_DIR. Remove use of cache variables. 
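
Why exporting leap_DIR can stand in for the explicit -Dleap_DIR cache entry (hedged: this relies on CMake's config-mode search also consulting a <PackageName>_DIR environment variable, which is the behavior these commits appear to depend on):

    export leap_DIR="$PWD/build/lib/cmake/leap"   # directory holding the leap config package
    cmake -S reference-contracts -B reference-contracts/build \
          -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On   # find_package(leap) resolves via the env
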
--- .github/workflows/build.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9498651839..e09275aaa2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -348,7 +348,8 @@ jobs: rm ./*.deb - name: Build & Test reference-contracts run: | - cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -Dleap_DIR="${{ env.leap_DIR }}" -DCMAKE_PREFIX_PATH="${{ env.LEAP_BUILD_DIR }}" + export leap_DIR="${{ env.leap_DIR }}" + cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build reference-contracts/build -- -j $(nproc) cd reference-contracts/build/tests ctest --output-on-failure -j $(nproc) From 20f45e59230c23be5fddca470904179182a92829 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:27:18 -0500 Subject: [PATCH 088/191] Add conditional step to add updated apt repository for git. Ubuntu 18.04 installs git 2.17.1, but need 2.18+ for git submodules to work. --- .github/workflows/pinned_build.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 509035ca5a..53d48a0dfe 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -22,10 +22,14 @@ jobs: runs-on: ubuntu-latest container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: + - name: Conditionally update git repo + if: ${{ matrix.platform == "ubuntu18" }} + run: | + add-apt-repository ppa:git-core/ppa - name: Update and Install git run: | - apt-get update - apt-get install -y git + apt update + apt install -y git git --version - name: Clone leap uses: actions/checkout@v3 From f4b39ea29eaffa3256bb87e1426428f7b416977c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:30:35 -0500 Subject: [PATCH 089/191] Use single quotes. --- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 53d48a0dfe..f65162d8b6 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -23,7 +23,7 @@ jobs: container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Conditionally update git repo - if: ${{ matrix.platform == "ubuntu18" }} + if: ${{ matrix.platform == 'ubuntu18' }} run: | add-apt-repository ppa:git-core/ppa - name: Update and Install git From 78dd785035ef12c153495ce18cb39580cc02f7e1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:34:06 -0500 Subject: [PATCH 090/191] Need software-properties-common and a apt-get update to get add-apt-repository to work. 
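
For reference, the bootstrap sequence these three commits converge on for ubuntu:bionic (sketch):

    apt-get update
    apt-get install -y software-properties-common   # provides add-apt-repository
    add-apt-repository ppa:git-core/ppa             # stock bionic ships git 2.17.1
    apt-get update                                  # pick up the PPA's package index
    apt-get install -y git                          # 2.18+, so checkout's submodule handling works
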
--- .github/workflows/pinned_build.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index f65162d8b6..691f8fe8e7 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -25,11 +25,13 @@ jobs: - name: Conditionally update git repo if: ${{ matrix.platform == 'ubuntu18' }} run: | + apt-get install software-properties-common + apt-get update add-apt-repository ppa:git-core/ppa - name: Update and Install git run: | - apt update - apt install -y git + apt-get update + apt-get install -y git git --version - name: Clone leap uses: actions/checkout@v3 From 32de87b1853645c3714ed240d34fb072c381a1e5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:35:28 -0500 Subject: [PATCH 091/191] Update package lists first. --- .github/workflows/pinned_build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 691f8fe8e7..cec6d38eb0 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -25,6 +25,7 @@ jobs: - name: Conditionally update git repo if: ${{ matrix.platform == 'ubuntu18' }} run: | + apt-get update apt-get install software-properties-common apt-get update add-apt-repository ppa:git-core/ppa From feeee5eb8fe2afb11cfaff5a38367a37945d99b4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:37:38 -0500 Subject: [PATCH 092/191] Say yes. --- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index cec6d38eb0..4709fb0c32 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -26,7 +26,7 @@ jobs: if: ${{ matrix.platform == 'ubuntu18' }} run: | apt-get update - apt-get install software-properties-common + apt-get install -y software-properties-common apt-get update add-apt-repository ppa:git-core/ppa - name: Update and Install git From 9c0c1aa906ce2a7ae57652e56c1878abfbcd5c89 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 15:42:15 -0500 Subject: [PATCH 093/191] See if setting leap_DIR just once on GITHUB_ENV will suffice. Remove LEAP_BUILD_DIR as it is not used any longer. --- .github/workflows/build.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e09275aaa2..ebcffc6487 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -332,7 +332,6 @@ jobs: run: | zstdcat build.tar.zst | tar x echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" - echo "LEAP_BUILD_DIR=$PWD/build" >> "$GITHUB_ENV" - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -348,7 +347,6 @@ jobs: rm ./*.deb - name: Build & Test reference-contracts run: | - export leap_DIR="${{ env.leap_DIR }}" cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On cmake --build reference-contracts/build -- -j $(nproc) cd reference-contracts/build/tests From 449f37133b077ad2df745a15b695f0bcedbe496b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 14 Jun 2023 16:44:42 -0500 Subject: [PATCH 094/191] Skip wildcards in upload-artifact and capture artifact name with regex grep and store in env variable. 
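
A note on the GITHUB_ENV mechanism the next hunk leans on: values appended to the file named by $GITHUB_ENV only become visible to subsequent steps; in the step that writes them, ${{ env.NAME }} still expands to the old (empty) value, and a misspelled file name fails silently. Sketch (variable contents illustrative):

    echo "PINNED_BUILD_ARTIFACT=${pkg}" >> "$GITHUB_ENV"
    # Not populated yet in this same step; a later step, e.g. the
    # upload-artifact 'path' input, is where ${{ env.PINNED_BUILD_ARTIFACT }} resolves.
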
--- .github/workflows/pinned_build.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 4709fb0c32..2b2e7edf19 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -46,17 +46,16 @@ jobs: - name: Build Pinned Build run: | ./scripts/pinned_build.sh deps build "$(nproc)" - - name: Check artifacts # Note: This step is for testing and debugging and can be removed in the future. + - name: Check and select artifact run: | cd build - ls -l + echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GIHUB_ENV" cd .. - name: Upload package uses: actions/upload-artifact@v3 with: name: leap-${{matrix.platform}}-pinned-amd64 - # Skip the leap-dev artifact - path: build/leap-[^d]+*.deb + path: build/"${{ env.PINNED_BUILD_ARTIFACT }}" - name: Run Parallel Tests run: | cd build From 83c0bfc6afc5ed0a5659ce7799fbe55324dd274e Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Wed, 14 Jun 2023 23:55:47 -0500 Subject: [PATCH 095/191] Adapt cluster create account to use non-producing node if available. Add non-producing nodes to several tests to allow transaction retry. Adjust individual tests to use non-producing nodes for trx retry. --- tests/CMakeLists.txt | 4 ++-- tests/TestHarness/Cluster.py | 20 ++++++++++++-------- tests/TestHarness/Node.py | 2 ++ tests/compute_transaction_test.py | 2 +- tests/nodeos_contrl_c_test.py | 2 +- tests/nodeos_extra_packed_data_test.py | 6 ++++-- tests/nodeos_run_test.py | 13 +++++++------ tests/nodeos_snapshot_diff_test.py | 2 +- 8 files changed, 30 insertions(+), 21 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e1b4ccb92d..40c32c9a7a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -136,7 +136,7 @@ add_test(NAME read-only-trx-parallel-eos-vm-oc-test COMMAND tests/read_only_trx_ set_property(TEST read-only-trx-parallel-eos-vm-oc-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME subjective_billing_test COMMAND tests/subjective_billing_test.py -v -p 2 -n 4 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST subjective_billing_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME get_account_test COMMAND tests/get_account_test.py -v -p 2 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME get_account_test COMMAND tests/get_account_test.py -v -p 2 -n 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST get_account_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -175,7 +175,7 @@ set_tests_properties(db_modes_test PROPERTIES COST 6000) add_test(NAME release-build-test COMMAND tests/release-build.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME version-label-test COMMAND tests/version-label.sh "v${VERSION_FULL}" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME full-version-label-test COMMAND tests/full-version-label.sh "v${VERSION_FULL}" ${CMAKE_SOURCE_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME nested_container_multi_index_test COMMAND tests/nested_container_multi_index_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nested_container_multi_index_test COMMAND tests/nested_container_multi_index_test.py -n 2 WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nested_container_multi_index_test PROPERTY LABELS 
nonparallelizable_tests) add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index b46f0120b6..3c71722ca6 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -483,11 +483,13 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.unstartedNodes.append(node) time.sleep(delay) - startedNodes=totalNodes-unstartedNodes + self.startedNodesCount = totalNodes - unstartedNodes + self.productionNodesCount = pnodes + self.totalNodesCount = totalNodes - if self.nodes is None or startedNodes != len(self.nodes): + if self.nodes is None or self.startedNodesCount != len(self.nodes): Utils.Print("ERROR: Unable to validate %s instances, expected: %d, actual: %d" % - (Utils.EosServerName, startedNodes, len(self.nodes))) + (Utils.EosServerName, self.startedNodesCount, len(self.nodes))) return False if not self.biosNode or not Utils.waitForBool(self.biosNode.checkPulse, Utils.systemWaitTimeout): @@ -512,7 +514,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") - if not self.bootstrap(self.biosNode, startedNodes, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract): + if not self.bootstrap(self.biosNode, self.startedNodesCount, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract): Utils.Print("ERROR: Bootstrap failed.") return False @@ -872,11 +874,13 @@ def validateAccounts(self, accounts, testSysAccounts=True): node.validateAccounts(myAccounts) - def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000, validationNodeIndex=0): + def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000, validationNodeIndex=-1): """create account, verify account and return transaction id""" - assert(len(self.nodes) > validationNodeIndex) node=self.nodes[validationNodeIndex] - trans=node.createInitializeAccount(account, creator, stakedDeposit, waitForTransBlock=True, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM, exitOnError=True) + waitViaRetry = self.totalNodesCount > self.productionNodesCount + trans=node.createInitializeAccount(account, creator, stakedDeposit, waitForTransBlock=waitViaRetry, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM, exitOnError=True) + if not waitViaRetry: + node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) assert(node.verifyAccount(account)) return trans @@ -1379,7 +1383,7 @@ def cleanup(self): os.remove(f) # Create accounts, if account does not already exist, and validates that the last transaction is received on root node - def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): + def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=-1): if self.accounts is None: return True transId=None diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 8ae9da0633..1487972315 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -65,6 +65,7 @@ def __init__(self, host, port, nodeId: int, data_dir: Path, config_dir: Path, cm self.data_dir=data_dir self.config_dir=config_dir self.launch_time=launch_time + self.isProducer=False self.configureVersion() def configureVersion(self): @@ -447,6 +448,7 @@ def launchCmd(self, cmd: 
List[str], data_dir: Path, launch_time: str): popen.errfile = serr self.pid = popen.pid self.cmd = cmd + self.isProducer = '--producer-name' in self.cmd with pidf.open('w') as pidout: pidout.write(str(popen.pid)) try: diff --git a/tests/compute_transaction_test.py b/tests/compute_transaction_test.py index 7f8d234660..e257c3fb37 100755 --- a/tests/compute_transaction_test.py +++ b/tests/compute_transaction_test.py @@ -86,7 +86,7 @@ transferAmount="1000.0000 {0}".format(CORE_SYMBOL) - node.transferFunds(cluster.eosioAccount, account1, transferAmount, "fund account", waitForTransBlock=True) + npnode.transferFunds(cluster.eosioAccount, account1, transferAmount, "fund account", waitForTransBlock=True) preBalances = node.getEosBalances([account1, account2]) Print("Starting balances:") Print(preBalances) diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index c6bc9baf74..71e7ec8b3a 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -59,7 +59,7 @@ cluster.validateAccounts(None) prodNode = cluster.getNode(0) - nonProdNode = cluster.getNode(1) + nonProdNode = cluster.getNode(2) accounts=createAccountKeys(2) if accounts is None: diff --git a/tests/nodeos_extra_packed_data_test.py b/tests/nodeos_extra_packed_data_test.py index c9071c2fa1..3d38aa0691 100755 --- a/tests/nodeos_extra_packed_data_test.py +++ b/tests/nodeos_extra_packed_data_test.py @@ -29,6 +29,7 @@ dumpErrorDetails=args.dump_error_details dontLaunch=args.dont_launch pnodes=args.p +totalNodes=pnodes+1 sanityTest=args.sanity_test walletPort=args.wallet_port @@ -65,7 +66,7 @@ if pnodes > 3: specificExtraNodeosArgs[pnodes - 2] = "" - if cluster.launch(totalNodes=pnodes, + if cluster.launch(totalNodes=totalNodes, pnodes=pnodes, dontBootstrap=dontBootstrap, pfSetupPolicy=PFSetupPolicy.PREACTIVATE_FEATURE_ONLY, @@ -114,12 +115,13 @@ errorExit("Failed to import key for account %s" % (account.name)) node=cluster.getNode(0) + nonProdNode=cluster.getAllNodes()[-1] Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) Print("Create new account %s via %s" % (testerbAccount.name, cluster.defproduceraAccount.name)) - transId=node.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) + transId=nonProdNode.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, testerbAccount] diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 80346b9ae0..48e72a202b 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -62,7 +62,7 @@ abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') traceNodeosArgs=" --http-max-response-time-ms 990000 --trace-rpc-abi eosio.token=" + abs_path specificNodeosInstances={0: "bin/nodeos"} - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=traceNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: + if cluster.launch(totalNodes=2, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=traceNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: cmdError("launcher") errorExit("Failed to 
stand up eos cluster.") else: @@ -195,13 +195,13 @@ if len(noMatch) > 0: errorExit("FAILURE - wallet keys did not include %s" % (noMatch), raw=True) - node=cluster.getNode(0) + node=cluster.getNode(1) Print("Validating accounts before user accounts creation") cluster.validateAccounts(None) Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) - transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) + transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) Print("Create new account %s via %s" % (testerbAccount.name, cluster.defproduceraAccount.name)) transId=node.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) @@ -222,7 +222,7 @@ transferAmount="97.5321 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer") + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", waitForTransBlock=True) expectedAmount=transferAmount Print("Verify transfer, Expected: %s" % (expectedAmount)) @@ -234,7 +234,7 @@ transferAmount="0.0100 {0}".format(CORE_SYMBOL) Print("Force transfer funds %s from account %s to %s" % ( transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True) + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True, waitForTransBlock=True) expectedAmount="97.5421 {0}".format(CORE_SYMBOL) Print("Verify transfer, Expected: %s" % (expectedAmount)) @@ -260,7 +260,7 @@ transferAmount="97.5311 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % ( transferAmount, testeraAccount.name, currencyAccount.name)) - trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b") + trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b", waitForTransBlock=True) transId=Node.getTransId(trans) expectedAmount="98.0311 {0}".format(CORE_SYMBOL) # 5000 initial deposit @@ -293,6 +293,7 @@ Print("Currency Contract Tests") Print("verify no contract in place") Print("Get code hash for account %s" % (currencyAccount.name)) + node=cluster.getNode(0) codeHash=node.getAccountCodeHash(currencyAccount.name) if codeHash is None: cmdError("%s get code currency1111" % (ClientName)) diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index c302c0228c..4e0624030c 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -85,7 +85,7 @@ def removeState(nodeId): cluster.populateWallet(2, wallet) Print("Create test accounts for transactions.") - cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=0) + cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0) account1Name = cluster.accounts[0].name account2Name = cluster.accounts[1].name From 31fd1361aa6328de242dee6edb90397a2cb31efb Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 15 Jun 2023 08:00:00 -0500 Subject: [PATCH 096/191] Something is not matching up with expected dir or file structure. 
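
Stepping back to the large test-harness patch above: with retry_num_blocks plumbed through, waitForTransBlock no longer waits by polling; the harness instead emits cleos's transaction-retry flag, and the tests route such calls through non-producing nodes because that is what allows transaction retry. The resulting command shape (a sketch; account names, quantities, symbol, and retry count are illustrative):

    cleos system delegatebw alice alice "20000000.0000 SYS" "20000000.0000 SYS" --retry-num-blocks 2
    cleos system voteproducer prods alice defproducera defproducerb --retry-num-blocks 2
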
--- .github/workflows/pinned_build.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 2b2e7edf19..5eed53e7ad 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -46,8 +46,12 @@ jobs: - name: Build Pinned Build run: | ./scripts/pinned_build.sh deps build "$(nproc)" + ls -l + pwd - name: Check and select artifact run: | + pwd + ls -l cd build echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GIHUB_ENV" cd .. From 20e59a92bc1f41b2966b63c246fd3d78d1356d63 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 15 Jun 2023 10:49:04 -0500 Subject: [PATCH 097/191] Not finding deb packages. check dir structure some more. --- .github/workflows/pinned_build.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 5eed53e7ad..53672fa20d 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -53,6 +53,8 @@ jobs: pwd ls -l cd build + ls -l + ls | grep -E 'leap-[^d]+*.deb' echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GIHUB_ENV" cd .. - name: Upload package From b90a843acc3721f1193f17f37039a0477cd14274 Mon Sep 17 00:00:00 2001 From: Lin Huang Date: Thu, 15 Jun 2023 13:18:18 -0400 Subject: [PATCH 098/191] [4.0] Bump Leap version to 4.0.2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a842822d2..71c82da0ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 4) set(VERSION_MINOR 0) -set(VERSION_PATCH 1) +set(VERSION_PATCH 2) #set(VERSION_SUFFIX rc3) if(VERSION_SUFFIX) From b7b4554a18a77505699e29ad1a79f1232577826e Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 15 Jun 2023 12:35:43 -0500 Subject: [PATCH 099/191] Fix spelling causing issue with GITHUB_ENV. And trying again to get artifact name correctly. --- .github/workflows/pinned_build.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 53672fa20d..890b934894 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -55,12 +55,16 @@ jobs: cd build ls -l ls | grep -E 'leap-[^d]+*.deb' - echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GIHUB_ENV" + echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GITHUB_ENV" + ls | grep -E 'leap-[^d]+*.deb' | sed -e 's/-x86_64.deb/-pinned-amd64/' + echo "PINNED_BUILD_NAME="$(ls | grep -E 'leap-[^d]+*.deb' | sed -e 's/-x86_64.deb/-pinned-amd64/')"" >> "$GITHUB_ENV" + echo "env.PINNED_BUILD_ARTIFACT = ${{ env.PINNED_BUILD_ARTIFACT }}" + echo "env.PINNED_BUILD_NAME = ${{ env.PINNED_BUILD_NAME }}" cd .. - name: Upload package uses: actions/upload-artifact@v3 with: - name: leap-${{matrix.platform}}-pinned-amd64 + name: "${{ env.PINNED_BUILD_NAME }}" path: build/"${{ env.PINNED_BUILD_ARTIFACT }}" - name: Run Parallel Tests run: | From 0f25eaa33aff11a26dc8e2f614333c90a8f36fc3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 15 Jun 2023 14:13:06 -0500 Subject: [PATCH 100/191] Remove quotes from path for upload. 
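For context on the artifact selection being debugged in PATCHes 096-099 above: the shell pattern `leap-[^d]+*.deb` is a loose ERE whose apparent intent is to pick the main package while skipping `leap-dev`, and the sed call then derives the upload name from the filename. The same selection and rename in Python, with a made-up directory listing; this is a sketch of the intent, not the workflow's exact code:

```python
import re

# Hypothetical directory listing of a build tree; only the names matter.
files = ["leap-4.0.2-x86_64.deb", "leap-dev-4.0.2-x86_64.deb", "CMakeCache.txt"]

# Match the main leap package but not leap-dev, which is what the loose
# shell pattern 'leap-[^d]+*.deb' appears to be reaching for.
debs = [f for f in files if re.fullmatch(r"leap-[^d].*\.deb", f)]
artifact = debs[0]                                  # leap-4.0.2-x86_64.deb

# Mirror the sed step: '-x86_64.deb' becomes '-pinned-amd64'.
name = artifact.replace("-x86_64.deb", "-pinned-amd64")
print(artifact, "->", name)                         # leap-4.0.2-pinned-amd64
```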
--- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 890b934894..77f95856c1 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -65,7 +65,7 @@ jobs: uses: actions/upload-artifact@v3 with: name: "${{ env.PINNED_BUILD_NAME }}" - path: build/"${{ env.PINNED_BUILD_ARTIFACT }}" + path: build/${{ env.PINNED_BUILD_ARTIFACT }} - name: Run Parallel Tests run: | cd build From d8d46399e1437169d7e7e8ca83ab69beba25c1bf Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 15:07:30 -0500 Subject: [PATCH 101/191] Use default validation node in startup_catchup test. --- tests/nodeos_startup_catchup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index ddc21e2e36..c7ab4e0939 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -66,7 +66,7 @@ cluster.populateWallet(2, wallet) Print("Create test accounts for transactions.") - cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=0) + cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0) account1Name = cluster.accounts[0].name account2Name = cluster.accounts[1].name From f46e922cc741833e638c43a57b15c4c34d78ce27 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 15:22:14 -0500 Subject: [PATCH 102/191] Add retry node and remove explicit sleeps from trace_plugin_test. --- tests/trace_plugin_test.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/trace_plugin_test.py b/tests/trace_plugin_test.py index d4c23cc896..6fb0376413 100755 --- a/tests/trace_plugin_test.py +++ b/tests/trace_plugin_test.py @@ -11,7 +11,6 @@ testSuccessful = True class TraceApiPluginTest(unittest.TestCase): - sleep_s = 1 cluster=Cluster(defproduceraPrvtKey=None) walletMgr=WalletMgr(True) accounts = [] @@ -22,19 +21,18 @@ def startEnv(self) : account_names = ["alice", "bob", "charlie"] abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') traceNodeosArgs = " --trace-rpc-abi eosio.token=" + abs_path - self.cluster.launch(totalNodes=1, extraNodeosArgs=traceNodeosArgs) + self.cluster.launch(totalNodes=2, extraNodeosArgs=traceNodeosArgs) self.walletMgr.launch() testWalletName="testwallet" testWallet=self.walletMgr.create(testWalletName, [self.cluster.eosioAccount, self.cluster.defproduceraAccount]) self.cluster.validateAccounts(None) self.accounts=createAccountKeys(len(account_names)) - node = self.cluster.getNode(0) + node = self.cluster.getNode(1) for idx in range(len(account_names)): self.accounts[idx].name = account_names[idx] self.walletMgr.importKey(self.accounts[idx], testWallet) for account in self.accounts: - node.createInitializeAccount(account, self.cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000000, waitForTransBlock=True, exitOnError=True) - time.sleep(self.sleep_s) + node.createInitializeAccount(account, self.cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000000, waitForTransBlock=True if account == self.accounts[-1] else False, exitOnError=True) def get_block(self, params: str, node: Node) -> json: resource = "trace_api" @@ -61,7 +59,7 @@ def test_TraceApi(self) : self.assertEqual(node.getAccountEosBalanceStr(self.accounts[0].name), Utils.deduceAmount(expectedAmount, xferAmount)) 
self.assertEqual(node.getAccountEosBalanceStr(self.accounts[1].name), Utils.addAmount(expectedAmount, xferAmount)) - time.sleep(self.sleep_s) + node.waitForBlock(blockNum) # verify trans via node api before calling trace_api RPC blockFromNode = node.getBlock(blockNum) From 7bc12b1948314a47902cee2b2b3a5676988c34a1 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 15:47:53 -0500 Subject: [PATCH 103/191] Propagate waitForTransBlock through to transferFunds during account creation. Don't bother to revalidate eosio and producer accounts in subjective billing tests. --- tests/TestHarness/transactions.py | 5 +++-- tests/subjective_billing_test.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index d90b9b32c7..c00c7ad4ea 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -31,8 +31,9 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w transId=NodeosQueries.getTransId(trans) if stakedDeposit > 0: - self.waitForTransactionInBlock(transId) # seems like account creation needs to be finalized before transfer can happen - trans = self.transferFunds(creatorAccount, account, NodeosQueries.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init") + if not waitForTransBlock: # Wait for account creation to be finalized if we haven't already + self.waitForTransactionInBlock(transId) + trans = self.transferFunds(creatorAccount, account, NodeosQueries.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init", waitForTransBlock=waitForTransBlock) transId=NodeosQueries.getTransId(trans) return trans diff --git a/tests/subjective_billing_test.py b/tests/subjective_billing_test.py index 2e5d91dcac..57416307df 100755 --- a/tests/subjective_billing_test.py +++ b/tests/subjective_billing_test.py @@ -81,7 +81,7 @@ cluster.createAccountAndVerify(account2, cluster.eosioAccount, stakedDeposit=1000, stakeCPU=1) Print("Validating accounts after bootstrap") - cluster.validateAccounts([account1, account2]) + cluster.validateAccounts([account1, account2], testSysAccounts=False) node = cluster.getNode() From ae2679e164e8c70bb1fa4dea59f1a0e6e54ca4a5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 15 Jun 2023 16:13:03 -0500 Subject: [PATCH 104/191] Cleanup. --- .github/workflows/pinned_build.yaml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 77f95856c1..49fc85a87b 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -46,20 +46,11 @@ jobs: - name: Build Pinned Build run: | ./scripts/pinned_build.sh deps build "$(nproc)" - ls -l - pwd - name: Check and select artifact run: | - pwd - ls -l cd build - ls -l - ls | grep -E 'leap-[^d]+*.deb' echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GITHUB_ENV" - ls | grep -E 'leap-[^d]+*.deb' | sed -e 's/-x86_64.deb/-pinned-amd64/' echo "PINNED_BUILD_NAME="$(ls | grep -E 'leap-[^d]+*.deb' | sed -e 's/-x86_64.deb/-pinned-amd64/')"" >> "$GITHUB_ENV" - echo "env.PINNED_BUILD_ARTIFACT = ${{ env.PINNED_BUILD_ARTIFACT }}" - echo "env.PINNED_BUILD_NAME = ${{ env.PINNED_BUILD_NAME }}" cd .. - name: Upload package uses: actions/upload-artifact@v3 From 1ab17d06f3700d3a7735d05df97fdb9c51d22c9c Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 16:58:23 -0500 Subject: [PATCH 105/191] Use retry node in under_min_avail_ram test. 
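The transactions.py hunk in PATCH 103 above is the heart of these test fixes: the funding transfer now honors the caller's waitForTransBlock, and the explicit wait on the creation transaction happens only when nothing else will wait. A reduced sketch of that control flow; the node methods are stand-ins for the TestHarness calls, and the creation call is assumed to honor the same flag:

```python
def create_initialize_account(node, account, creator, staked_deposit,
                              wait_for_trans_block):
    # The creation transaction is assumed to honor the same flag.
    trans_id = node.create_account(account, creator, wait_for_trans_block)
    if staked_deposit > 0:
        # Creation must be final before funds can move; the explicit wait
        # is only needed when the creation call did not already wait.
        if not wait_for_trans_block:
            node.wait_for_transaction_in_block(trans_id)
        # Propagate the flag instead of always waiting, as in the diff.
        node.transfer_funds(creator, account, staked_deposit, "init",
                            wait_for_trans_block=wait_for_trans_block)
```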
--- tests/nodeos_under_min_avail_ram.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 5075385cbe..a16a776560 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -20,7 +20,8 @@ args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--wallet-port","--unshared"}) Utils.Debug=args.v -totalNodes=4 +pNodes=4 +totalNodes=5 cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details walletPort=args.wallet_port @@ -41,7 +42,7 @@ maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d --http-max-response-time-ms 990000 " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: + if cluster.launch(onlyBios=False, pnodes=pNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -68,7 +69,7 @@ nodes.append(cluster.getNode(2)) nodes.append(cluster.getNode(3)) numNodes=len(nodes) - + nonProdNode = cluster.getNode(4) for account in accounts: walletMgr.importKey(account, testWallet) @@ -76,21 +77,21 @@ # create accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=nodes[0].createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) transferAmount="70000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) - trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + trans=nonProdNode.delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) contractAccount=createAccountKeys(1)[0] contractAccount.name="contracttest" walletMgr.importKey(contractAccount, testWallet) Print("Create new account %s via %s" % (contractAccount.name, cluster.eosioAccount.name)) - trans=nodes[0].createInitializeAccount(contractAccount, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) + trans=nonProdNode.createInitializeAccount(contractAccount, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) transferAmount="90000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name)) - nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer", waitForTransBlock=True) - trans=nodes[0].delegatebw(contractAccount, 
1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) + nonProdNode.transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer", waitForTransBlock=True) + trans=nonProdNode.delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) contractDir="unittests/test-contracts/integration_test" wasmFile="integration_test.wasm" From 7f479b7d439454906daf33d299df6bdbd6d8c09f Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 17:22:40 -0500 Subject: [PATCH 106/191] Use non-production node for account creation in forked_chain_test. --- tests/nodeos_forked_chain_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 5d0bd9eff8..b998d15a59 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -222,7 +222,7 @@ def getMinHeadAndLib(prodNodes): # *** delegate bandwidth to accounts *** - node=prodNodes[0] + node=nonProdNode # create accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) From 5ecdf44471be933141c88adf34d37953f675625b Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Thu, 15 Jun 2023 19:11:16 -0500 Subject: [PATCH 107/191] Allow some specificExtraArgs to override existing nodeos args. --- tests/TestHarness/launcher.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index 01acd0b7c7..f87601c4f9 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -523,9 +523,19 @@ def construct_command_line(self, instance: nodeDefinition): i = self.args.specific_nums.index(instance.index) specifics = getattr(self.args, f'specific_{Utils.EosServerName}es')[i] if specifics[0] == "'" and specifics[-1] == "'": - eosdcmd.extend(shlex.split(specifics[1:-1])) + specificList = shlex.split(specifics[1:-1]) else: - eosdcmd.extend(shlex.split(specifics)) + specificList = shlex.split(specifics) + # Allow specific nodeos args to override existing args up to this point. + # Consider moving specific arg handling to the end to allow overriding all args. 
+ for arg in specificList: + if '-' in arg: + if arg in eosdcmd: + i = eosdcmd.index(arg) + if eosdcmd[i+1] != '-': + eosdcmd.pop(i+1) + eosdcmd.pop(i) + eosdcmd.extend(specificList) a(a(eosdcmd, '--config-dir'), str(instance.config_dir_name)) a(a(eosdcmd, '--data-dir'), str(instance.data_dir_name)) a(a(eosdcmd, '--genesis-json'), f'{instance.config_dir_name}/genesis.json') From d6cd2c9a7d3e6b24f3e03b2310e9e38bbcaffb2d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 15 Jun 2023 22:37:09 -0400 Subject: [PATCH 108/191] LEAP_PINNED_INSTALL_PREFIX knob to set pinned_build.sh CMAKE_INSTALL_PREFIX --- .github/workflows/pinned_build.yaml | 2 ++ scripts/pinned_build.sh | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 49fc85a87b..24ccf43e9b 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -44,6 +44,8 @@ jobs: chown -R $(id -u):$(id -g) $PWD ./scripts/install_deps.sh - name: Build Pinned Build + env: + LEAP_PINNED_INSTALL_PREFIX: /usr run: | ./scripts/pinned_build.sh deps build "$(nproc)" - name: Check and select artifact diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index 15159851ab..c63d940635 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -132,7 +132,7 @@ pushdir ${LEAP_DIR} # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE=${SCRIPT_DIR}/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${LLVM_DIR}/lib/cmake -DCMAKE_PREFIX_PATH=${BOOST_DIR}/bin ${SCRIPT_DIR}/.. +try cmake -DCMAKE_TOOLCHAIN_FILE=${SCRIPT_DIR}/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${LLVM_DIR}/lib/cmake -DCMAKE_PREFIX_PATH=${BOOST_DIR}/bin ${SCRIPT_DIR}/.. try make -j${JOBS} try cpack From cb61f1caa85d6be7deaa2184dba3b792193ace6c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 09:57:31 -0500 Subject: [PATCH 109/191] Bump to midtier. Remove comments. Make libtester tests required for passing. Trying libtester tests at midtier to see how it affects runtime to judge whether they can be made required.
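The `${LEAP_PINNED_INSTALL_PREFIX:-/usr/local}` expansion added in PATCH 108 above falls back to /usr/local when the variable is unset or empty. For readers less used to shell parameter expansion, the equivalent lookup in Python:

```python
import os

# ${LEAP_PINNED_INSTALL_PREFIX:-/usr/local}: the ':-' form also treats an
# empty value as unset, so 'or' is a closer match than a plain default.
prefix = os.environ.get("LEAP_PINNED_INSTALL_PREFIX") or "/usr/local"
print(f"-DCMAKE_INSTALL_PREFIX={prefix}")
```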
--- .github/workflows/build.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ebcffc6487..602476eb88 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -214,7 +214,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Download cdt @@ -258,7 +258,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap @@ -312,7 +312,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-lowtier"] # not sure if this warrants a better machine, but start here + runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} steps: - name: Clone leap @@ -354,9 +354,10 @@ jobs: all-passing: name: All Required Tests Passed - needs: [dev-package, tests, np-tests] + needs: [dev-package, tests, np-tests, libtester-build-tree-test, libtester-make-dev-install-test, libtester-deb-install-test] if: always() runs-on: ubuntu-latest steps: - - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' + - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || + needs.libtester-build-tree-test.result != 'success' || needs.libtester-make-dev-install-test.result != 'success' || needs.libtester-deb-install-test.result != 'success' run: false From 481a958f971741e8604e52ed900d92429e440806 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 12:58:50 -0500 Subject: [PATCH 110/191] Try to combine 3 libtester test cases into one using matrix. 
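This patch collapses three near-identical jobs into one job parameterized over a two-axis matrix; GitHub expands it to the cross product of platform and test kind. What that expansion and the per-combination container choice look like, sketched with itertools; the builder image names are placeholders for fromJSON(needs.d.outputs.p)[platform].image:

```python
from itertools import product

platforms = ["ubuntu20", "ubuntu22"]
tests = ["build-tree", "make-dev-install", "deb-install"]

# Placeholder for fromJSON(needs.d.outputs.p)[platform].image.
builder_image = {"ubuntu20": "builder:focal", "ubuntu22": "builder:jammy"}

for platform, test in product(platforms, tests):
    # deb-install exercises a stock OS image; the other two tests reuse
    # the builder image that produced the build tree.
    if test != "deb-install":
        image = builder_image[platform]
    else:
        image = "ubuntu:focal" if platform == "ubuntu20" else "ubuntu:jammy"
    print(f"{platform}/{test} -> {image}")
```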
--- .github/workflows/build.yaml | 135 ++++++++++------------------------- 1 file changed, 38 insertions(+), 97 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 602476eb88..00fbb56645 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -206,79 +206,62 @@ jobs: name: ${{matrix.platform}}-lr-logs path: '*-logs.tar.gz' - libtester-deb-install-test: - name: libtester deb install test + libtester-tests: + name: libtester tests needs: [d, Build, dev-package] if: always() && needs.dev-package.result == 'success' strategy: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] + test: ["build-tree", "make-dev-install", "deb-install"] runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + container: ${{ matrix.test != "deb-install" && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy') }} steps: - - name: Download cdt - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: cdt - file: 'cdt_.*amd64.deb' - target: main - artifact-name: cdt_ubuntu_package_amd64 - token: ${{github.token}} - - name: Download leap-dev - uses: actions/download-artifact@v3 - with: - name: leap-dev-${{matrix.platform}}-amd64 - - name: Install cdt and leap-dev Packages - run: | - apt-get update - export DEBIAN_FRONTEND='noninteractive' - export TZ='Etc/UTC' - apt-get install -y ./*.deb - rm ./*.deb - - name: checkout reference-contracts - uses: actions/checkout@v3 - with: - repository: AntelopeIO/reference-contracts - path: reference-contracts - - name: Build & Test reference-contracts - run: | - apt-get -y install cmake build-essential - cmake -S reference-contracts -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On - cmake --build build -- -j $(nproc) - cd build/tests - ctest --output-on-failure -j $(nproc) - libtester-make-dev-install-test: - name: libtester make dev-install test - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - steps: - - name: Clone leap + # LEAP + - if: ${{ matrix.test != "deb-install" }} + name: Clone leap uses: actions/checkout@v3 with: submodules: recursive - - name: Download leap builddir + - if: ${{ matrix.test != "deb-install" }} + name: Download leap builddir uses: actions/download-artifact@v3 with: name: ${{matrix.platform}}-build - - name: Extract leap build + - if: ${{ matrix.test != "deb-install" }} + name: Extract leap build run: | zstdcat build.tar.zst | tar x - - name: leap dev-install + - if: ${{ matrix.test == "build-tree" }} + name: Set leap_DIR env var + run: | + echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" + - if: ${{ matrix.test == "make-dev-install" }} + name: leap dev-install run: | cmake --install build cmake --install build --component dev - - name: Delete leap artifacts + - if: ${{ matrix.test == "make-dev-install" }} + name: Delete leap artifacts run: | rm -r * + - if: ${{ matrix.test == "deb-install" }} + name: Download leap-dev + uses: actions/download-artifact@v3 + with: + name: leap-dev-${{matrix.platform}}-amd64 + - if: ${{ matrix.test == "deb-install" }} + name: Install leap-dev Package + run: | + apt-get update + export DEBIAN_FRONTEND='noninteractive' + export TZ='Etc/UTC' + apt-get install -y 
./*.deb + rm ./*.deb + + # CDT - name: Download cdt uses: AntelopeIO/asset-artifact-download-action@v2 with: @@ -292,59 +275,17 @@ jobs: run: | apt install -y ./*.deb rm ./*.deb - - name: checkout reference-contracts - uses: actions/checkout@v3 - with: - repository: AntelopeIO/reference-contracts - path: reference-contracts - - name: Build & Test reference-contracts - run: | - cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On - cmake --build reference-contracts/build -- -j $(nproc) - cd reference-contracts/build/tests - ctest --output-on-failure -j $(nproc) - libtester-build-tree-test: - name: libtester build tree test - needs: [d, Build] - if: always() && needs.Build.result == 'success' - strategy: - fail-fast: false - matrix: - platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - steps: - - name: Clone leap - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Download leap builddir - uses: actions/download-artifact@v3 - with: - name: ${{matrix.platform}}-build + # Reference Contracts - name: checkout reference-contracts uses: actions/checkout@v3 with: repository: AntelopeIO/reference-contracts path: reference-contracts - - name: Extract leap build - run: | - zstdcat build.tar.zst | tar x - echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" - - name: Download cdt - uses: AntelopeIO/asset-artifact-download-action@v2 - with: - owner: AntelopeIO - repo: cdt - file: 'cdt_.*amd64.deb' - target: main - artifact-name: cdt_ubuntu_package_amd64 - token: ${{github.token}} - - name: Install cdt Packages + - if: ${{ matrix.test == "deb-install" }} + name: Install reference-contracts deps run: | - apt install -y ./*.deb - rm ./*.deb + apt-get -y install cmake build-essential - name: Build & Test reference-contracts run: | cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On From fd18f945861d37932ca766a2220cb2c471a0dea6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:21:36 -0500 Subject: [PATCH 111/191] Add configuration knobs and defaults for cdt. 
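This patch adds .cicd/defaults.json plus workflow_dispatch inputs that can override it at run time. The resolution order, file defaults first and explicit inputs second, sketched in Python around the JSON the diff introduces:

```python
import json

# The file body added as .cicd/defaults.json in this patch.
DEFAULTS_JSON = '{"cdt": {"target": "4", "prerelease": false}}'

def resolve_cdt(override_target="", override_prerelease="default"):
    cdt = json.loads(DEFAULTS_JSON)["cdt"]
    target, prerelease = cdt["target"], cdt["prerelease"]
    # workflow_dispatch inputs only win when actually supplied; the
    # 'default' choice leaves the defaults.json value in place.
    if override_target:
        target = override_target
    if override_prerelease in ("true", "false"):
        prerelease = override_prerelease == "true"
    return target, prerelease

print(resolve_cdt())                # ('4', False)
print(resolve_cdt("main", "true"))  # ('main', True)
```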
--- .cicd/defaults.json | 6 ++++++ .github/workflows/build.yaml | 29 ++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 .cicd/defaults.json diff --git a/.cicd/defaults.json b/.cicd/defaults.json new file mode 100644 index 0000000000..5c42656a8b --- /dev/null +++ b/.cicd/defaults.json @@ -0,0 +1,6 @@ +{ + "cdt":{ + "target":"4", + "prerelease":false + } + } \ No newline at end of file diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 00fbb56645..635028e116 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,6 +7,17 @@ on: - "release/*" pull_request: workflow_dispatch: + inputs: + override-cdt: + description: 'Override cdt target' + type: string + override-cdt-prerelease: + type: choice + description: Override cdt prelease + options: + - default + - true + - false permissions: packages: read @@ -218,6 +229,21 @@ jobs: runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != "deb-install" && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy') }} steps: + - name: Setup cdt & reference-contracts versions + id: versions + env: + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + run: | + DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) + echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT + echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT + + if [[ "${{inputs.override-cdt}}" != "" ]]; then + echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT + fi + if [[ "${{inputs.override-cdt-prerelease}}" == +(true|false) ]]; then + echo cdt-prerelease=${{inputs.override-cdt-prerelease}} >> $GITHUB_OUTPUT + fi # LEAP - if: ${{ matrix.test != "deb-install" }} @@ -268,7 +294,8 @@ jobs: owner: AntelopeIO repo: cdt file: 'cdt_.*amd64.deb' - target: main + target: '${{steps.versions.outputs.cdt-target}}' + prereleases: ${{fromJSON(steps.versions.outputs.cdt-prerelease)}} artifact-name: cdt_ubuntu_package_amd64 token: ${{github.token}} - name: Install cdt Packages From ddc341a8356ac118ed7452700ec055dc92f0f0f3 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:28:03 -0500 Subject: [PATCH 112/191] Fix use of double and single quotes. 
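Background for this quoting fix: inside a ${{ }} expression GitHub Actions accepts only single-quoted string literals, so the double-quoted 'deb-install' comparisons failed to parse. The surviving `&&`/`||` chain emulates a ternary the same way the old `and`/`or` idiom does in Python, including its falsy-operand pitfall:

```python
def ternary(cond, a, b):
    # 'cond && a || b' in a GitHub expression short-circuits like this.
    return cond and a or b

print(ternary(True, "builder-image", "ubuntu:jammy"))  # builder-image
print(ternary(True, "", "ubuntu:jammy"))               # ubuntu:jammy (pitfall)
```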
--- .github/workflows/build.yaml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 00fbb56645..a980903170 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -214,45 +214,45 @@ jobs: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - test: ["build-tree", "make-dev-install", "deb-install"] + test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{ matrix.test != "deb-install" && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy') }} + container: ${{ matrix.test != 'deb-install' && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy') }} steps: # LEAP - - if: ${{ matrix.test != "deb-install" }} + - if: ${{ matrix.test != 'deb-install' }} name: Clone leap uses: actions/checkout@v3 with: submodules: recursive - - if: ${{ matrix.test != "deb-install" }} + - if: ${{ matrix.test != 'deb-install' }} name: Download leap builddir uses: actions/download-artifact@v3 with: name: ${{matrix.platform}}-build - - if: ${{ matrix.test != "deb-install" }} + - if: ${{ matrix.test != 'deb-install' }} name: Extract leap build run: | zstdcat build.tar.zst | tar x - - if: ${{ matrix.test == "build-tree" }} + - if: ${{ matrix.test == 'build-tree' }} name: Set leap_DIR env var run: | echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" - - if: ${{ matrix.test == "make-dev-install" }} + - if: ${{ matrix.test == 'make-dev-install' }} name: leap dev-install run: | cmake --install build cmake --install build --component dev - - if: ${{ matrix.test == "make-dev-install" }} + - if: ${{ matrix.test == 'make-dev-install' }} name: Delete leap artifacts run: | rm -r * - - if: ${{ matrix.test == "deb-install" }} + - if: ${{ matrix.test == 'deb-install' }} name: Download leap-dev uses: actions/download-artifact@v3 with: name: leap-dev-${{matrix.platform}}-amd64 - - if: ${{ matrix.test == "deb-install" }} + - if: ${{ matrix.test == 'deb-install' }} name: Install leap-dev Package run: | apt-get update @@ -282,7 +282,7 @@ jobs: with: repository: AntelopeIO/reference-contracts path: reference-contracts - - if: ${{ matrix.test == "deb-install" }} + - if: ${{ matrix.test == 'deb-install' }} name: Install reference-contracts deps run: | apt-get -y install cmake build-essential From 1b78c4ab46137bfd6dfdfd6982e9451920435e58 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:30:48 -0500 Subject: [PATCH 113/191] Remove dangling close paren. 
--- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a980903170..470e813b0e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -216,7 +216,7 @@ jobs: platform: [ubuntu20, ubuntu22] test: [build-tree, make-dev-install, deb-install] runs-on: ["self-hosted", "enf-x86-midtier"] - container: ${{ matrix.test != 'deb-install' && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy') }} + container: ${{ matrix.test != 'deb-install' && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: # LEAP From c106b231880fa3effa8ddb095a964e7ca390b17c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:32:08 -0500 Subject: [PATCH 114/191] Fix libtester tests name in all-passing job now that all combined in matrix. --- .github/workflows/build.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 470e813b0e..0f75bf8b41 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -299,6 +299,5 @@ jobs: if: always() runs-on: ubuntu-latest steps: - - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || - needs.libtester-build-tree-test.result != 'success' || needs.libtester-make-dev-install-test.result != 'success' || needs.libtester-deb-install-test.result != 'success' + - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success' run: false From 429589b148338b03caf3fbe5bf40ed8be69030a6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:33:01 -0500 Subject: [PATCH 115/191] Fix all-passing job dependency list. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 0f75bf8b41..162e771384 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -295,7 +295,7 @@ jobs: all-passing: name: All Required Tests Passed - needs: [dev-package, tests, np-tests, libtester-build-tree-test, libtester-make-dev-install-test, libtester-deb-install-test] + needs: [dev-package, tests, np-tests, libtester-tests] if: always() runs-on: ubuntu-latest steps: From ec89e07bb4cfa066f009f577b97c4a2235b0a00c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 13:36:41 -0500 Subject: [PATCH 116/191] Fix indentation. 
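PATCHes 114 and 115 above repoint the required-tests gate at the combined libtester-tests job. The gate itself is only an all-of check over the upstream job results; reduced to Python, with stand-in values for needs.*.result:

```python
# Stand-in for the needs.*.result values handed to the gate job.
needs = {
    "dev-package": "success",
    "tests": "success",
    "np-tests": "success",
    "libtester-tests": "success",
}

# The step 'run: false' only runs, and fails the job, when this is False.
all_passing = all(result == "success" for result in needs.values())
print(all_passing)
```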
--- .github/workflows/build.yaml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d0e21ec5e1..51f0ac54c0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,17 +7,17 @@ on: - "release/*" pull_request: workflow_dispatch: - inputs: - override-cdt: - description: 'Override cdt target' - type: string - override-cdt-prerelease: - type: choice - description: Override cdt prelease - options: - - default - - true - - false + inputs: + override-cdt: + description: 'Override cdt target' + type: string + override-cdt-prerelease: + type: choice + description: Override cdt prelease + options: + - default + - true + - false permissions: packages: read From fa5edb27b3f519ebfadf1bd20f9ffdcd2b9c2636 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 14:08:14 -0500 Subject: [PATCH 117/191] Break version check out into its own job. --- .github/workflows/build.yaml | 46 ++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 51f0ac54c0..c3a9405643 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -42,6 +42,28 @@ jobs: platform-file: .cicd/platforms.json password: ${{secrets.GITHUB_TOKEN}} package-name: builders + v: + name: Discover Versions + runs-on: ubuntu-latest + outputs: + cdt-target: ${{steps.versions.outputs.cdt-target}} + cdt-prerelease: ${{steps.versions.outputs.cdt-prerelease}} + steps: + - name: Setup cdt versions + id: versions + env: + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + run: | + DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) + echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT + echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT + + if [[ "${{inputs.override-cdt}}" != "" ]]; then + echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT + fi + if [[ "${{inputs.override-cdt-prerelease}}" == +(true|false) ]]; then + echo cdt-prerelease=${{inputs.override-cdt-prerelease}} >> $GITHUB_OUTPUT + fi build-platforms: name: Build Platforms needs: d @@ -219,8 +241,8 @@ jobs: libtester-tests: name: libtester tests - needs: [d, Build, dev-package] - if: always() && needs.dev-package.result == 'success' + needs: [d, v, Build, dev-package] + if: always() && needs.v.result == 'success' && needs.dev-package.result == 'success' strategy: fail-fast: false matrix: @@ -229,22 +251,6 @@ jobs: runs-on: ["self-hosted", "enf-x86-midtier"] container: ${{ matrix.test != 'deb-install' && fromJSON(needs.d.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - - name: Setup cdt & reference-contracts versions - id: versions - env: - GH_TOKEN: ${{secrets.GITHUB_TOKEN}} - run: | - DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) - echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT - echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT - - if [[ "${{inputs.override-cdt}}" != "" ]]; then - echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT - fi - if [[ "${{inputs.override-cdt-prerelease}}" == +(true|false) ]]; then - 
echo cdt-prerelease=${{inputs.override-cdt-prerelease}} >> $GITHUB_OUTPUT - fi - # LEAP - if: ${{ matrix.test != 'deb-install' }} name: Clone leap @@ -294,8 +300,8 @@ jobs: owner: AntelopeIO repo: cdt file: 'cdt_.*amd64.deb' - target: '${{steps.versions.outputs.cdt-target}}' - prereleases: ${{fromJSON(steps.versions.outputs.cdt-prerelease)}} + target: '${{needs.v.outputs.cdt-target}}' + prereleases: ${{fromJSON(needs.v.outputs.cdt-prerelease)}} artifact-name: cdt_ubuntu_package_amd64 token: ${{github.token}} - name: Install cdt Packages From c419644df9626d37965359adf8c624d872f5213b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 14:10:26 -0500 Subject: [PATCH 118/191] Add missing newline at eof. --- .cicd/defaults.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/defaults.json b/.cicd/defaults.json index 5c42656a8b..45e05ac7b6 100644 --- a/.cicd/defaults.json +++ b/.cicd/defaults.json @@ -3,4 +3,4 @@ "target":"4", "prerelease":false } - } \ No newline at end of file + } From 93833b8e03d34a2af861919fbcc5da969040f9b7 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 14:14:55 -0500 Subject: [PATCH 119/191] Simplify naming and path matching for artifact. --- .github/workflows/pinned_build.yaml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 49fc85a87b..42214795b0 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -18,7 +18,6 @@ jobs: fail-fast: false matrix: platform: [ubuntu18, ubuntu20, ubuntu22] - # runs-on: ["self-hosted", "enf-x86-beefy"] # not sure if this warrants a different machine, but start here due to building runs-on: ubuntu-latest container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: @@ -46,17 +45,11 @@ jobs: - name: Build Pinned Build run: | ./scripts/pinned_build.sh deps build "$(nproc)" - - name: Check and select artifact - run: | - cd build - echo "PINNED_BUILD_ARTIFACT="$(ls | grep -E 'leap-[^d]+*.deb')"" >> "$GITHUB_ENV" - echo "PINNED_BUILD_NAME="$(ls | grep -E 'leap-[^d]+*.deb' | sed -e 's/-x86_64.deb/-pinned-amd64/')"" >> "$GITHUB_ENV" - cd .. - name: Upload package uses: actions/upload-artifact@v3 with: - name: "${{ env.PINNED_BUILD_NAME }}" - path: build/${{ env.PINNED_BUILD_ARTIFACT }} + name: leap-${{matrix.platform}}-pinned-amd64 + path: build/leap-3*.deb - name: Run Parallel Tests run: | cd build From 1ec98976b1eb197bd3d1f3dc0e8f551ab352f2ea Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 16 Jun 2023 14:42:43 -0500 Subject: [PATCH 120/191] Try newly added self hosted runner for pinned builds. 
--- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 42214795b0..7b8caf1de9 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu18, ubuntu20, ubuntu22] - runs-on: ubuntu-latest + runs-on: ["self-hosted", "enf-x86-hightier-long"] container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Conditionally update git repo From e865853415a5df514e83c8d7961c163fea2020aa Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 16 Jun 2023 14:52:49 -0500 Subject: [PATCH 121/191] Omit repeatable args from specificNodeosArg replacement. --- tests/TestHarness/launcher.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/TestHarness/launcher.py b/tests/TestHarness/launcher.py index f87601c4f9..570f6bfc8e 100644 --- a/tests/TestHarness/launcher.py +++ b/tests/TestHarness/launcher.py @@ -528,8 +528,23 @@ def construct_command_line(self, instance: nodeDefinition): specificList = shlex.split(specifics) # Allow specific nodeos args to override existing args up to this point. # Consider moving specific arg handling to the end to allow overriding all args. + repeatable = [ + # appbase + '--plugin', + # chain_plugin + '--checkpoint', '--profile-account', '--actor-whitelist', '--actor-blacklist', + '--contract-whitelist', '--contract-blacklist', '--action-blacklist', '--key-blacklist', + '--sender-bypass-whiteblacklist', '--trusted-producer', + # http_plugin + '--http-alias', + # net_plugin + '--p2p-peer-address', '--p2p-auto-bp-peer', '--peer-key', '--peer-private-key', + # producer_plugin + '--producer-name', '--signature-provider', '--greylist-account', '--disable-subjective-account-billing', + # trace_api_plugin + '--trace-rpc-abi'] for arg in specificList: - if '-' in arg: + if '-' in arg and arg not in repeatable: if arg in eosdcmd: i = eosdcmd.index(arg) if eosdcmd[i+1] != '-': From 237c235114c3189f361e401c32366ce3ae628d38 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 16 Jun 2023 15:46:02 -0500 Subject: [PATCH 122/191] Use non-production node for transferring funds in voting test. 
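PATCH 121 above refines the PATCH 107 override loop so repeatable nodeos flags are never deduplicated when node-specific args are merged in. A self-contained sketch of the resulting merge, with an abridged repeatable set and a toy command line; the value-popping test here uses startswith, which is what the diff's check appears to intend:

```python
import shlex

# Abridged; the diff lists the full set of repeatable nodeos flags.
REPEATABLE = {"--plugin", "--p2p-peer-address", "--trace-rpc-abi"}

def merge_args(eosdcmd, specifics):
    """Let node-specific args override earlier ones, except repeatable flags."""
    specific_list = shlex.split(specifics)
    for arg in specific_list:
        if "-" in arg and arg not in REPEATABLE and arg in eosdcmd:
            i = eosdcmd.index(arg)
            # Drop the old value too, unless the next token is another flag.
            if i + 1 < len(eosdcmd) and not eosdcmd[i + 1].startswith("-"):
                eosdcmd.pop(i + 1)
            eosdcmd.pop(i)
    return eosdcmd + specific_list

cmd = ["nodeos", "--plugin", "chain_plugin", "--max-transaction-time", "30"]
print(merge_args(cmd, "--max-transaction-time 475 --plugin net_plugin"))
```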
--- tests/nodeos_voting_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index 9963866766..b9aabaa288 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -208,13 +208,13 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): for account in accounts: Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", - waitForTransBlock=True if account == accounts[-1] else False) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", + waitForTransBlock=True if account == accounts[-1] else False) for account in accounts: trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True if account == accounts[-1] else False, exitOnError=True) - + # containers for tracking producers prodsActive={} for i in range(0, 4): From 9229b349b39dcba579df4a608c42a629b4d7ebeb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 16 Jun 2023 16:29:16 -0500 Subject: [PATCH 123/191] GH-1251 Use enum for eosvmoc_tierup instead of bool. --- libraries/chain/controller.cpp | 2 +- .../chain/include/eosio/chain/controller.hpp | 2 +- .../include/eosio/chain/wasm_interface.hpp | 8 ++- .../eosio/chain/wasm_interface_private.hpp | 5 +- libraries/chain/wasm_interface.cpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 49 +++++++++++++++++-- 6 files changed, 57 insertions(+), 11 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index d114a2b3a9..5fbc600603 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2681,7 +2681,7 @@ struct controller_impl { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const { - return ( conf.eosvmoc_tierup || conf.wasm_runtime == wasm_interface::vm_type::eos_vm_oc ); + return ( (conf.eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) || conf.wasm_runtime == wasm_interface::vm_type::eos_vm_oc ); } #endif diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 97bc488084..f90072bdb7 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -90,7 +90,7 @@ namespace eosio { namespace chain { wasm_interface::vm_type wasm_runtime = chain::config::default_wasm_runtime; eosvmoc::config eosvmoc_config; - bool eosvmoc_tierup = false; + wasm_interface::vm_oc_enable eosvmoc_tierup = wasm_interface::vm_oc_enable::oc_auto; db_read_mode read_mode = db_read_mode::HEAD; validation_mode block_validation_mode = validation_mode::FULL; diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 8d832a17ff..440863cd54 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -40,7 +40,13 @@ namespace eosio { namespace chain { } } - wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile); + enum class vm_oc_enable { + oc_auto, + oc_all, + oc_none + }; + + wasm_interface(vm_type vm, vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool 
profile); ~wasm_interface(); // initialize exec per thread diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 825861ac58..09160c4db7 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -41,7 +41,6 @@ namespace eosio { namespace chain { uint8_t vm_version = 0; }; struct by_hash; - struct by_first_block_num; struct by_last_block_num; #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED @@ -65,7 +64,7 @@ namespace eosio { namespace chain { }; #endif - wasm_interface_impl(wasm_interface::vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : db(d), wasm_runtime_time(vm) { + wasm_interface_impl(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : db(d), wasm_runtime_time(vm) { #ifdef EOSIO_EOS_VM_RUNTIME_ENABLED if(vm == wasm_interface::vm_type::eos_vm) runtime_interface = std::make_unique>(); @@ -86,7 +85,7 @@ namespace eosio { namespace chain { EOS_THROW(wasm_exception, "${r} wasm runtime not supported on this platform and/or configuration", ("r", vm)); #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc_tierup) { + if(eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); eosvmoc.emplace(data_dir, eosvmoc_config, d); } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index c66a514eec..6ceff2e68f 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -32,7 +32,7 @@ namespace eosio { namespace chain { - wasm_interface::wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) + wasm_interface::wasm_interface(vm_type vm, vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) ), vm( vm ) {} wasm_interface::~wasm_interface() {} diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 1544853fbd..a83d7d31ff 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -114,6 +114,43 @@ void validate(boost::any& v, } } +std::ostream& operator<<(std::ostream& os, wasm_interface::vm_oc_enable t) { + if (t == wasm_interface::vm_oc_enable::oc_auto) { + os << "auto"; + } else if (t == wasm_interface::vm_oc_enable::oc_all) { + os << "all"; + } else if (t == wasm_interface::vm_oc_enable::oc_none) { + os << "none"; + } + + return os; +} + +void validate(boost::any& v, + const std::vector& values, + wasm_interface::vm_oc_enable* /* target_type */, + int) +{ + using namespace boost::program_options; + + // Make sure no previous assignment to 'v' was made. + validators::check_first_occurrence(v); + + // Extract the first string from 'values'. If there is more than + // one string, it's an error, and exception will be thrown. 
+ std::string const& s = validators::get_single_string(values); + + if (s == "auto") { + v = boost::any(wasm_interface::vm_oc_enable::oc_auto); + } else if (s == "all" || s == "true" || s == "on") { + v = boost::any(wasm_interface::vm_oc_enable::oc_all); + } else if (s == "none" || s == "false" || s == "off") { + v = boost::any(wasm_interface::vm_oc_enable::oc_none); + } else { + throw validation_error(validation_error::invalid_option_value); + } +} + } // namespace chain using namespace eosio; @@ -203,6 +240,7 @@ chain_plugin::chain_plugin() app().register_config_type(); app().register_config_type(); app().register_config_type(); + app().register_config_type(); } chain_plugin::~chain_plugin() = default; @@ -227,7 +265,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip #ifdef EOSIO_EOS_VM_OC_DEVELOPER wasm_runtime_opt += delim + "\"eos-vm-oc\""; - wasm_runtime_desc += "\"eos-vm-oc\" : Unsupported. Instead, use one of the other runtimes along with the option enable-eos-vm-oc.\n"; + wasm_runtime_desc += "\"eos-vm-oc\" : Unsupported. Instead, use one of the other runtimes along with the option eos-vm-oc-enable.\n"; #endif wasm_runtime_opt += ")\n" + wasm_runtime_desc; @@ -334,7 +372,11 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip EOS_ASSERT(false, plugin_exception, ""); } }), "Number of threads to use for EOS VM OC tier-up") - ("eos-vm-oc-enable", bpo::bool_switch(), "Enable EOS VM OC tier-up runtime") + ("eos-vm-oc-enable", bpo::value()->default_value(chain::wasm_interface::vm_oc_enable::oc_auto), + "Enable EOS VM OC tier-up runtime ('auto', 'all', 'none').\n" + "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts and read-only trxs.\n" + "'all' - EOS VM OC tier-up is enabled for all contract execution.\n" + "'none' - EOS VM OC tier-up is completely disabled.\n") #endif ("enable-account-queries", bpo::value()->default_value(false), "enable queries to find accounts by various metadata.") ("max-nonprivileged-inline-action-size", bpo::value()->default_value(config::default_max_nonprivileged_inline_action_size), "maximum allowed size (in bytes) of an inline action for a nonprivileged account") @@ -907,8 +949,7 @@ void chain_plugin_impl::plugin_initialize(const variables_map& options) { chain_config->eosvmoc_config.cache_size = options.at( "eos-vm-oc-cache-size-mb" ).as() * 1024u * 1024u; if( options.count("eos-vm-oc-compile-threads") ) chain_config->eosvmoc_config.threads = options.at("eos-vm-oc-compile-threads").as(); - if( options["eos-vm-oc-enable"].as() ) - chain_config->eosvmoc_tierup = true; + chain_config->eosvmoc_tierup = options["eos-vm-oc-enable"].as(); #endif account_queries_enabled = options.at("enable-account-queries").as(); From 045a7e296a1d60b6d11f2e77b242aef3f9a83a33 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 16 Jun 2023 19:09:46 -0500 Subject: [PATCH 124/191] Add support for '--plugin' to cluster_launcher.py Required completing support for AppArgs 'action'. 
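This patch wires an argparse append action through AppArgs, so --plugin may be given repeatedly, then flattens the collected list into one nodeos argument string. The zip/join expression from the diff, shown standalone next to a simpler equivalent:

```python
import argparse

parser = argparse.ArgumentParser()
# action='append' collects every --plugin occurrence into a list.
parser.add_argument("--plugin", action="append", type=str)
args = parser.parse_args(["--plugin", "net_plugin", "--plugin", "chain_api_plugin"])

# The expression used in the cluster_launcher.py diff:
extra = "".join([i + j for i, j in zip([" --plugin "] * len(args.plugin), args.plugin)])
print(repr(extra))  # ' --plugin net_plugin --plugin chain_api_plugin'

# A simpler equivalent:
assert extra == "".join(f" --plugin {p}" for p in args.plugin)
```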
--- tests/TestHarness/TestHelper.py | 8 ++++---- tests/cluster_launcher.py | 13 ++++++++++--- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index 78ca01dc14..6f3a1244ae 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -19,15 +19,15 @@ def __init__(self, flag, help, type=None, default=None, choices=None, action=Non self.choices=choices self.action=action - def add(self, flag, type, help, default, choices=None): - arg=self.AppArg(flag, help, type=type, default=default, choices=choices) + def add(self, flag, type, help, default=None, action=None, choices=None): + arg=self.AppArg(flag, help, action=action, type=type, default=default, choices=choices) self.args.append(arg) - def add_bool(self, flag, help, action='store_true'): arg=self.AppArg(flag, help, action=action) self.args.append(arg) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" @@ -121,7 +121,7 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs(), suppres appArgsGrp = thParser.add_argument_group(title=None if suppressHelp else appArgsGrpTitle, description=None if suppressHelp else appArgsGrpdescription) for arg in applicationSpecificArgs.args: if arg.type is not None: - appArgsGrp.add_argument(arg.flag, type=arg.type, help=argparse.SUPPRESS if suppressHelp else arg.help, choices=arg.choices, default=arg.default) + appArgsGrp.add_argument(arg.flag, action=arg.action, type=arg.type, help=argparse.SUPPRESS if suppressHelp else arg.help, choices=arg.choices, default=arg.default) else: appArgsGrp.add_argument(arg.flag, help=argparse.SUPPRESS if suppressHelp else arg.help, action=arg.action) diff --git a/tests/cluster_launcher.py b/tests/cluster_launcher.py index f2d2973636..da6dcfe665 100755 --- a/tests/cluster_launcher.py +++ b/tests/cluster_launcher.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from TestHarness import Cluster, TestHelper, Utils, WalletMgr +from TestHarness.TestHelper import AppArgs ############################################################### # cluster_launcher @@ -13,9 +14,13 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"-p","-d","-s","--keep-logs" +appArgs = AppArgs() +appArgs.add(flag="--plugin",action='append',type=str,help="Run nodes with additional plugins") + +args=TestHelper.parse_args({"-p","-n","-d","-s","--keep-logs" ,"--dump-error-details","-v" - ,"--leave-running","--unshared"}) + ,"--leave-running","--unshared"}, + applicationSpecificArgs=appArgs) pnodes=args.p delay=args.d topo=args.s @@ -37,7 +42,9 @@ Print(f'producing nodes: {pnodes}, topology: {topo}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: + extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, + extraNodeosArgs=extraNodeosArgs) is False: errorExit("Failed to stand up eos cluster.") testSuccessful=True From 5dbcdc54c6709761cfe89eb8c69678006c777447 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 17 Jun 2023 09:12:02 -0500 Subject: [PATCH 125/191] GH-1315 Use magnitude safe time types --- plugins/net_plugin/net_plugin.cpp | 37 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp 
b/plugins/net_plugin/net_plugin.cpp index b61758dc44..ae28b7c829 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -607,8 +607,7 @@ namespace eosio { bool is_transactions_only_connection()const { return connection_type == transactions_only; } bool is_blocks_only_connection()const { return connection_type == blocks_only; } void set_heartbeat_timeout(std::chrono::milliseconds msec) { - std::chrono::system_clock::duration dur = msec; - hb_timeout = dur.count(); + hb_timeout = msec; } private: @@ -688,9 +687,9 @@ namespace eosio { tstamp xmt{0}; //!< transmit timestamp /** @} */ // timestamp for the lastest message - tstamp latest_msg_time{0}; - tstamp hb_timeout{std::chrono::milliseconds{def_keepalive_interval}.count()}; - tstamp latest_blk_time{0}; + std::chrono::system_clock::time_point latest_msg_time{std::chrono::system_clock::time_point::min()}; + std::chrono::milliseconds hb_timeout{std::chrono::milliseconds{def_keepalive_interval}}; + std::chrono::system_clock::time_point latest_blk_time{std::chrono::system_clock::time_point::min()}; bool connected(); bool current(); @@ -728,7 +727,7 @@ namespace eosio { */ /** \brief Check heartbeat time and send Time_message */ - void check_heartbeat( tstamp current_time ); + void check_heartbeat( std::chrono::system_clock::time_point current_time ); /** \brief Populate and queue time_message */ void send_time(); @@ -742,8 +741,8 @@ namespace eosio { * packet is placed on the send queue. Calls the kernel time of * day routine and converts to a (at least) 64 bit integer. */ - static tstamp get_time() { - return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + static std::chrono::nanoseconds get_time() { + return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); } /** @} */ @@ -1165,8 +1164,8 @@ namespace eosio { } // called from connection strand - void connection::check_heartbeat( tstamp current_time ) { - if( latest_msg_time > 0 ) { + void connection::check_heartbeat( std::chrono::system_clock::time_point current_time ) { + if( latest_msg_time > std::chrono::system_clock::time_point::min() ) { if( current_time > latest_msg_time + hb_timeout ) { no_retry = benign_other; if( !peer_address().empty() ) { @@ -1178,7 +1177,7 @@ namespace eosio { } return; } else { - const tstamp timeout = std::max(hb_timeout/2, 2*std::chrono::milliseconds(config::block_interval_ms).count()); + const std::chrono::milliseconds timeout = std::max(hb_timeout/2, 2*std::chrono::milliseconds(config::block_interval_ms)); if ( current_time > latest_blk_time + timeout ) { send_handshake(); return; @@ -1194,7 +1193,7 @@ namespace eosio { time_message xpkt; xpkt.org = rec; xpkt.rec = dst; - xpkt.xmt = get_time(); + xpkt.xmt = get_time().count(); org = xpkt.xmt; enqueue(xpkt); } @@ -1204,7 +1203,7 @@ namespace eosio { time_message xpkt; xpkt.org = msg.xmt; xpkt.rec = msg.dst; - xpkt.xmt = get_time(); + xpkt.xmt = get_time().count(); enqueue(xpkt); } @@ -1434,7 +1433,7 @@ namespace eosio { block_buffer_factory buff_factory; auto sb = buff_factory.get_send_buffer( b ); - latest_blk_time = get_time(); + latest_blk_time = std::chrono::system_clock::now(); enqueue_buffer( sb, no_reason, to_sync_queue); } @@ -2154,7 +2153,7 @@ namespace eosio { send_buffer_type sb = buff_factory.get_send_buffer( b ); cp->strand.post( [cp, bnum, sb{std::move(sb)}]() { - cp->latest_blk_time = cp->get_time(); + cp->latest_blk_time = std::chrono::system_clock::now(); bool has_block = cp->peer_lib_num 
>= bnum; if( !has_block ) { peer_dlog( cp, "bcast block ${b}", ("b", bnum) ); @@ -2574,14 +2573,14 @@ namespace eosio { // called from connection strand bool connection::process_next_message( uint32_t message_length ) { try { - latest_msg_time = get_time(); + latest_msg_time = std::chrono::system_clock::now(); // if next message is a block we already have, exit early auto peek_ds = pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); if( which == signed_block_which ) { - latest_blk_time = get_time(); + latest_blk_time = std::chrono::system_clock::now(); return process_next_block_message( message_length ); } else if( which == packed_transaction_which ) { @@ -3010,7 +3009,7 @@ namespace eosio { /* We've already lost however many microseconds it took to dispatch * the message, but it can't be helped. */ - msg.dst = get_time(); + msg.dst = get_time().count(); // If the transmit timestamp is zero, the peer is horribly broken. if(msg.xmt == 0) @@ -3384,7 +3383,7 @@ namespace eosio { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - tstamp current_time = connection::get_time(); + auto current_time = std::chrono::system_clock::now(); my->for_each_connection( [current_time]( auto& c ) { if( c->socket_is_open() ) { c->strand.post([c, current_time]() { From 6254908957244487122f8b4b0aa04cae940f63bb Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Sat, 17 Jun 2023 11:57:17 -0500 Subject: [PATCH 126/191] cluster_launcher needs to work without a plugin argument. --- tests/cluster_launcher.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/cluster_launcher.py b/tests/cluster_launcher.py index da6dcfe665..d2890c8177 100755 --- a/tests/cluster_launcher.py +++ b/tests/cluster_launcher.py @@ -42,7 +42,10 @@ Print(f'producing nodes: {pnodes}, topology: {topo}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') Print("Stand up cluster") - extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) + if args.plugin: + extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) + else: + extraNodeosArgs = None if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, extraNodeosArgs=extraNodeosArgs) is False: errorExit("Failed to stand up eos cluster.") From 4bda591042f2cc539e19d59e8dedba350a9a501c Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Sat, 17 Jun 2023 15:47:50 -0500 Subject: [PATCH 127/191] Use empty list instead of None for unused extraNodeosArgs. 
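Background for this and the preceding commit: the zip/join expression pairs one ' --plugin ' flag with each requested plugin, which only works when args.plugin is a list. When no --plugin argument is given, the attribute is presumably left as None by the parser (which lives outside this diff), and len(None) raises TypeError, hence the guard. A minimal sketch of the intended composition (plugin names are illustrative):

    # builds ' --plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin'
    plugins = ['eosio::chain_api_plugin', 'eosio::net_api_plugin']
    extraNodeosArgs = ''.join([i + j for i, j in zip([' --plugin '] * len(plugins), plugins)])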
--- tests/cluster_launcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cluster_launcher.py b/tests/cluster_launcher.py index d2890c8177..6509b8a85d 100755 --- a/tests/cluster_launcher.py +++ b/tests/cluster_launcher.py @@ -45,7 +45,7 @@ if args.plugin: extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) else: - extraNodeosArgs = None + extraNodeosArgs = [] if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, extraNodeosArgs=extraNodeosArgs) is False: errorExit("Failed to stand up eos cluster.") From b221c12fcf07378a3c09b3f8b40be5697d257755 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 08:41:30 -0500 Subject: [PATCH 128/191] GH-1315 Reset latest_msg_time and latest_blk_time so re-connect doesn't trip heartbeat timer --- plugins/net_plugin/net_plugin.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ae28b7c829..8d0a17454c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1050,6 +1050,8 @@ namespace eosio { if( !shutdown) my_impl->sync_master->sync_reset_lib_num( self->shared_from_this(), true ); peer_ilog( self, "closing" ); self->cancel_wait(); + self->latest_msg_time = std::chrono::system_clock::time_point::min(); + self->latest_blk_time = std::chrono::system_clock::time_point::min(); if( reconnect && !shutdown ) { my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); From a05077a16b00b7b6824abe97fc0f699ef21292ba Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 09:45:04 -0500 Subject: [PATCH 129/191] GH-1251 Refactor wasm_interface map into a wasm_interface_collection --- libraries/chain/controller.cpp | 68 ++++------------ .../chain/include/eosio/chain/controller.hpp | 2 +- .../eosio/chain/wasm_interface_collection.hpp | 78 +++++++++++++++++++ 3 files changed, 93 insertions(+), 55 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/wasm_interface_collection.hpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 5fbc600603..a93ee546bd 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -249,14 +250,11 @@ struct controller_impl { deep_mind_handler* deep_mind_logger = nullptr; bool okay_to_print_integrity_hash_on_stop = false; - std::thread::id main_thread_id; thread_local static platform_timer timer; // a copy for main thread and each read-only thread #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) thread_local static vm::wasm_allocator wasm_alloc; // a copy for main thread and each read-only thread #endif - wasm_interface wasmif; // used by main thread and all threads for EOSVMOC - std::mutex threaded_wasmifs_mtx; - std::unordered_map> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit + wasm_interface_collection wasm_if_collect; app_window_type app_window = app_window_type::write; typedef pair handler_key; @@ -315,8 +313,7 @@ struct controller_impl { chain_id( chain_id ), read_mode( cfg.read_mode ), thread_pool(), - main_thread_id( std::this_thread::get_id() ), - wasmif( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty() ) + wasm_if_collect( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, 
!conf.profile_accounts.empty() ) { fork_db.open( [this]( block_timestamp_type timestamp, const flat_set& cur_features, @@ -342,12 +339,7 @@ struct controller_impl { set_activation_handler(); self.irreversible_block.connect([this](const block_state_ptr& bsp) { - // producer_plugin has already asserted irreversible_block signal is - // called in write window - wasmif.current_lib(bsp->block_num); - for (auto& w: threaded_wasmifs) { - w.second->current_lib(bsp->block_num); - } + wasm_if_collect.current_lib(bsp->block_num); }); @@ -2679,31 +2671,6 @@ struct controller_impl { return (blog.first_block_num() != 0) ? blog.first_block_num() : fork_db.root()->block_num; } -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - bool is_eos_vm_oc_enabled() const { - return ( (conf.eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) || conf.wasm_runtime == wasm_interface::vm_type::eos_vm_oc ); - } -#endif - - // only called from non-main threads (read-only trx execution threads) - // when producer_plugin starts them - void init_thread_local_data() { - EOS_ASSERT( !is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if ( is_eos_vm_oc_enabled() ) - // EOSVMOC needs further initialization of its thread local data - wasmif.init_thread_local_data(); - else -#endif - { - std::lock_guard g(threaded_wasmifs_mtx); - // Non-EOSVMOC needs a wasmif per thread - threaded_wasmifs[std::this_thread::get_id()] = std::make_unique( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); - } - } - - bool is_on_main_thread() { return main_thread_id == std::this_thread::get_id(); }; - void set_to_write_window() { app_window = app_window_type::write; } @@ -2714,25 +2681,20 @@ struct controller_impl { return app_window == app_window_type::write; } + bool is_eos_vm_oc_enabled() const { + return wasm_if_collect.is_eos_vm_oc_enabled(); + } + + void init_thread_local_data() { + wasm_if_collect.init_thread_local_data(db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); + } + wasm_interface& get_wasm_interface() { - if ( is_on_main_thread() -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - || is_eos_vm_oc_enabled() -#endif - ) - return wasmif; - else - return *threaded_wasmifs[std::this_thread::get_id()]; + return wasm_if_collect.get_wasm_interface(); } void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { - // The caller of this function apply_eosio_setcode has already asserted that - // the transaction is not a read-only trx, which implies we are - // in write window. 
Safe to call threaded_wasmifs's code_block_num_last_used - wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - for (auto& w: threaded_wasmifs) { - w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - } + wasm_if_collect.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } block_state_ptr fork_db_head() const; @@ -3610,11 +3572,9 @@ vm::wasm_allocator& controller::get_wasm_allocator() { } #endif -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool controller::is_eos_vm_oc_enabled() const { return my->is_eos_vm_oc_enabled(); } -#endif std::optional controller::convert_exception_to_error_code( const fc::exception& e ) { const chain_exception* e_ptr = dynamic_cast( &e ); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f90072bdb7..385fbeb1a4 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -321,8 +321,8 @@ namespace eosio { namespace chain { #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) vm::wasm_allocator& get_wasm_allocator(); - bool is_eos_vm_oc_enabled() const; #endif + bool is_eos_vm_oc_enabled() const; static std::optional convert_exception_to_error_code( const fc::exception& e ); diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp new file mode 100644 index 0000000000..da49188401 --- /dev/null +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -0,0 +1,78 @@ +#pragma once +#include + +namespace eosio::chain { + + /** + * @class wasm_interface_collection manages the active wasm_interface to use for execution. 
+ */ + class wasm_interface_collection { + public: + wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, + const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) + : main_thread_id(std::this_thread::get_id()) + , wasm_runtime(vm) + , eosvmoc_tierup(eosvmoc_tierup) + , wasmif(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) + {} + + wasm_interface& get_wasm_interface() { + if (is_on_main_thread() || is_eos_vm_oc_enabled()) { + return wasmif; + } + return *threaded_wasmifs[std::this_thread::get_id()]; + } + + + // update current lib of all wasm interfaces + void current_lib(const uint32_t lib) { + // producer_plugin has already asserted irreversible_block signal is called in write window + wasmif.current_lib(lib); + for (auto& w: threaded_wasmifs) { + w.second->current_lib(lib); + } + } + + // only called from non-main threads (read-only trx execution threads) when producer_plugin starts them + void init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) { + EOS_ASSERT(!is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); + if (is_eos_vm_oc_enabled()) { + // EOSVMOC needs further initialization of its thread local data + wasmif.init_thread_local_data(); + } else { + std::lock_guard g(threaded_wasmifs_mtx); + // Non-EOSVMOC needs a wasmif per thread + threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile); + } + } + + bool is_eos_vm_oc_enabled() const { + return ((eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) || wasm_runtime == wasm_interface::vm_type::eos_vm_oc); + } + + void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { + // The caller of this function apply_eosio_setcode has already asserted that + // the transaction is not a read-only trx, which implies we are + // in write window. Safe to call threaded_wasmifs's code_block_num_last_used + wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + for (auto& w: threaded_wasmifs) { + w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + } + } + + private: + bool is_on_main_thread() { return main_thread_id == std::this_thread::get_id(); }; + + private: + const std::thread::id main_thread_id; + const wasm_interface::vm_type wasm_runtime; + const wasm_interface::vm_oc_enable eosvmoc_tierup; + + wasm_interface wasmif; // used by main thread and all threads for EOSVMOC + std::mutex threaded_wasmifs_mtx; + std::unordered_map> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit + }; + +} // eosio::chain From 0b3b3b61f98cca4f422b62dd923882811348aa2b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 10:52:54 -0500 Subject: [PATCH 130/191] GH-1315 Completely remove problematic tstamp type. 
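The time_message exchange below is the classic NTP clock-offset calculation: with org the local transmit time, rec the peer's receive time, xmt the peer's transmit time, and dst the local receive time (all in nanoseconds), the peer's clock offset is estimated as ((rec - org) + (xmt - dst)) / 2. A minimal sketch of the arithmetic, assuming already-normalized nanosecond counts (the helper name is illustrative):

    #include <cstdint>

    // NTP-style offset estimate: positive means the peer's clock is ahead of ours.
    // The round trip is assumed symmetric; asymmetry shows up as offset error.
    inline int64_t clock_offset_ns(int64_t org, int64_t rec, int64_t xmt, int64_t dst) {
       return ((rec - org) + (xmt - dst)) / 2;
    }

normalize_epoch_to_ns() exists because peers running releases before Leap 5.0 may report microsecond (or coarser) epochs; its thresholds are chosen so that any plausible wall-clock value is unambiguous in exactly one unit.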
--- .../include/eosio/net_plugin/protocol.hpp | 11 +-- plugins/net_plugin/net_plugin.cpp | 86 ++++++++++++------- 2 files changed, 58 insertions(+), 39 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 2e7245c180..5ca2ba1456 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -7,9 +7,6 @@ namespace eosio { using namespace chain; using namespace fc; - static_assert(sizeof(std::chrono::system_clock::duration::rep) >= 8, "system_clock is expected to be at least 64 bits"); - typedef std::chrono::system_clock::duration::rep tstamp; - struct chain_size_message { uint32_t last_irreversible_block_num = 0; block_id_type last_irreversible_block_id; @@ -83,10 +80,10 @@ namespace eosio { }; struct time_message { - tstamp org{0}; //!< origin timestamp - tstamp rec{0}; //!< receive timestamp - tstamp xmt{0}; //!< transmit timestamp - mutable tstamp dst{0}; //!< destination timestamp + int64_t org{0}; //!< origin timestamp, in nanoseconds + int64_t rec{0}; //!< receive timestamp, in nanoseconds + int64_t xmt{0}; //!< transmit timestamp, in nanoseconds + mutable int64_t dst{0}; //!< destination timestamp, in nanoseconds }; enum id_list_modes { diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8d0a17454c..0a29e161ae 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -51,6 +51,9 @@ namespace eosio { using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; + static constexpr int64_t block_interval_ns = + std::chrono::duration_cast(std::chrono::milliseconds(config::block_interval_ms)).count(); + const fc::string logger_name("net_plugin_impl"); fc::logger logger; std::string peer_log_format; @@ -121,9 +124,6 @@ namespace eosio { in_sync }; - static constexpr int64_t block_interval_ns = - std::chrono::duration_cast(std::chrono::milliseconds(config::block_interval_ms)).count(); - mutable std::mutex sync_mtx; uint32_t sync_known_lib_num{0}; uint32_t sync_last_requested_num{0}; @@ -681,10 +681,10 @@ namespace eosio { * @{ */ // Members set from network data - tstamp org{0}; //!< originate timestamp - tstamp rec{0}; //!< receive timestamp - tstamp dst{0}; //!< destination timestamp - tstamp xmt{0}; //!< transmit timestamp + std::chrono::nanoseconds org{0}; //!< origin timestamp. Time at the client when the request departed for the server. + // std::chrono::nanoseconds (not used) rec{0}; //!< receive timestamp. Time at the server when the request arrived from the client. + std::chrono::nanoseconds xmt{0}; //!< transmit timestamp, Time at the server when the response left for the client. + // std::chrono::nanoseconds (not used) dst{0}; //!< destination timestamp, Time at the client when the reply arrived from the server. /** @} */ // timestamp for the lastest message std::chrono::system_clock::time_point latest_msg_time{std::chrono::system_clock::time_point::min()}; @@ -1192,20 +1192,27 @@ namespace eosio { // called from connection strand void connection::send_time() { - time_message xpkt; - xpkt.org = rec; - xpkt.rec = dst; - xpkt.xmt = get_time().count(); - org = xpkt.xmt; - enqueue(xpkt); + if (org == std::chrono::nanoseconds{0}) { // do not send if there is already a time loop in progress + org = get_time(); + // xpkt.org == 0 means we are initiating a ping. Actual origin time is in xpkt.xmt. 
+ time_message xpkt{ + .org = 0, + .rec = 0, + .xmt = org.count(), + .dst = 0 }; + peer_dlog(this, "send init time_message: ${t}", ("t", xpkt)); + enqueue(xpkt); + } } // called from connection strand void connection::send_time(const time_message& msg) { - time_message xpkt; - xpkt.org = msg.xmt; - xpkt.rec = msg.dst; - xpkt.xmt = get_time().count(); + time_message xpkt{ + .org = msg.xmt, + .rec = msg.dst, + .xmt = get_time().count(), + .dst = 0 }; + peer_dlog( this, "send time_message: ${t}, org: ${o}", ("t", xpkt)("o", org.count()) ); enqueue(xpkt); } @@ -3005,38 +3012,53 @@ namespace eosio { close( retry ); // reconnect if wrong_version } + // some clients before leap 5.0 provided microsecond epoch instead of nanosecond epoch + std::chrono::nanoseconds normalize_epoch_to_ns(int64_t x) { + // 1686211688888 milliseconds - 2023-06-08T08:08:08.888, 5yrs from EOS genesis 2018-06-08T08:08:08.888 + // 1686211688888000 microseconds + // 1686211688888000000 nanoseconds + if (x >= 1686211688888000000) // nanoseconds + return std::chrono::nanoseconds{x}; + if (x >= 1686211688888000) // microseconds + return std::chrono::nanoseconds{x*1000}; + if (x >= 1686211688888) // milliseconds + return std::chrono::nanoseconds{x*1000*1000}; + if (x >= 1686211688) // seconds + return std::chrono::nanoseconds{x*1000*1000*1000}; + return std::chrono::nanoseconds{0}; // unknown or is zero + } + void connection::handle_message( const time_message& msg ) { - peer_ilog( this, "received time_message" ); + peer_dlog( this, "received time_message: ${t}, org: ${o}", ("t", msg)("o", org.count()) ); - /* We've already lost however many microseconds it took to dispatch - * the message, but it can't be helped. - */ + // We've already lost however many microseconds it took to dispatch the message, but it can't be helped. msg.dst = get_time().count(); // If the transmit timestamp is zero, the peer is horribly broken. if(msg.xmt == 0) return; /* invalid timestamp */ - if(msg.xmt == xmt) + auto msg_xmt = normalize_epoch_to_ns(msg.xmt); + if(msg_xmt == xmt) return; /* duplicate packet */ - xmt = msg.xmt; - rec = msg.rec; - dst = msg.dst; + xmt = msg_xmt; if( msg.org == 0 ) { send_time( msg ); return; // We don't have enough data to perform the calculation yet. } - double offset = (double(rec - org) + double(msg.xmt - dst)) / 2; - double NsecPerUsec{1000}; + if (org != std::chrono::nanoseconds{0}) { + auto rec = normalize_epoch_to_ns(msg.rec); + int64_t offset = (double((rec - org).count()) + double(msg_xmt.count() - msg.dst)) / 2.0; - if( logger.is_enabled( fc::log_level::all ) ) - logger.log( FC_LOG_MESSAGE( all, "Clock offset is ${o}ns (${us}us)", - ("o", offset)( "us", offset / NsecPerUsec ) ) ); - org = 0; - rec = 0; + if (std::abs(offset) > block_interval_ns) { + peer_wlog(this, "Clock offset is ${of}us, calculation: (rec ${r} - org ${o} + xmt ${x} - dst ${d})/2", + ("of", offset / 1000)("r", rec.count())("o", org.count())("x", msg_xmt.count())("d", msg.dst)); + } + } + org = std::chrono::nanoseconds{0}; std::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation == 0 ) { From 326aa030981143fc9cc262ed3f28460acd1aefe8 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 19 Jun 2023 12:11:21 -0500 Subject: [PATCH 131/191] extraNodeosArgs should be empty string when not used, not list. 
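Note: the launcher splices extraNodeosArgs into the nodeos command line as part of a string (an assumption based on how the harness composes commands elsewhere), so the neutral "unused" value must be the empty string rather than a list:

    extraNodeosArgs = ''
    cmd = "nodeos" + extraNodeosArgs   # '' concatenates cleanly; a list here would raise TypeError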
--- tests/cluster_launcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cluster_launcher.py b/tests/cluster_launcher.py index 6509b8a85d..d4db91c72e 100755 --- a/tests/cluster_launcher.py +++ b/tests/cluster_launcher.py @@ -45,7 +45,7 @@ if args.plugin: extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) else: - extraNodeosArgs = [] + extraNodeosArgs = '' if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, extraNodeosArgs=extraNodeosArgs) is False: errorExit("Failed to stand up eos cluster.") From c1673735cc7b125687f7923e538fe00817caa18e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 12:42:41 -0500 Subject: [PATCH 132/191] GH-1251 Fix failing tests --- libraries/testing/include/eosio/testing/tester.hpp | 2 ++ programs/leap-util/actions/snapshot.cpp | 1 + tests/read_only_trx_test.py | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index c07a1bda71..dfd00c9789 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -405,6 +405,8 @@ namespace eosio { namespace testing { cfg.eosvmoc_config.cache_size = 1024*1024*8; for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { + // don't use auto tier up for tests, since the point is to test diff vms + cfg.eosvmoc_tierup = chain::wasm_interface::vm_oc_enable::oc_none; if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm-jit")) diff --git a/programs/leap-util/actions/snapshot.cpp b/programs/leap-util/actions/snapshot.cpp index cfa105a7ce..7e98040f7a 100644 --- a/programs/leap-util/actions/snapshot.cpp +++ b/programs/leap-util/actions/snapshot.cpp @@ -79,6 +79,7 @@ int snapshot_actions::run_subcommand() { cfg.state_dir = state_dir; cfg.state_size = opt->db_size * 1024 * 1024; cfg.state_guard_size = opt->guard_size * 1024 * 1024; + cfg.eosvmoc_tierup = wasm_interface::vm_oc_enable::oc_none; // wasm not used, no use to fire up oc protocol_feature_set pfs = initialize_protocol_features( std::filesystem::path("protocol_features"), false ); try { diff --git a/tests/read_only_trx_test.py b/tests/read_only_trx_test.py index f6abf504ce..9461c64914 100755 --- a/tests/read_only_trx_test.py +++ b/tests/read_only_trx_test.py @@ -91,7 +91,7 @@ def startCluster(): specificExtraNodeosArgs[pnodes]+=" --read-only-threads " specificExtraNodeosArgs[pnodes]+=str(args.read_only_threads) if args.eos_vm_oc_enable: - specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable" + specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable all" if args.wasm_runtime: specificExtraNodeosArgs[pnodes]+=" --wasm-runtime " specificExtraNodeosArgs[pnodes]+=args.wasm_runtime From e7f14066940a46a3d389e4e37579a55b572e2fe3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 14:48:59 -0500 Subject: [PATCH 133/191] GH-1251 Fix tests to be able to run with default of --eos-vm-oc-enable=auto --- .../webassembly/runtimes/eos-vm-oc/code_cache.cpp | 6 ++++++ .../include/eosio/state_history/log.hpp | 2 +- plugins/chain_plugin/test/CMakeLists.txt | 2 +- plugins/producer_plugin/test/CMakeLists.txt | 2 +- plugins/producer_plugin/test/test_read_only_trx.cpp | 12 ++++++++++++ 
plugins/state_history_plugin/tests/CMakeLists.txt | 4 ++-- plugins/state_history_plugin/tests/main.cpp | 2 ++ plugins/state_history_plugin/tests/session_test.cpp | 2 -- tests/CMakeLists.txt | 2 +- 9 files changed, 26 insertions(+), 8 deletions(-) create mode 100644 plugins/state_history_plugin/tests/main.cpp diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 60cac3dc19..a43f8ac932 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -306,6 +306,12 @@ code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eos } void code_cache_base::set_on_disk_region_dirty(bool dirty) { + // tests can remove directory before destructor is called + if (!std::filesystem::exists(_cache_file_path)) { + wlog("Unable to sync code cache, cache file does not exist"); + return; + } + bip::file_mapping dirty_mapping(_cache_file_path.generic_string().c_str(), bip::read_write); bip::mapped_region dirty_region(dirty_mapping, bip::read_write); diff --git a/libraries/state_history/include/eosio/state_history/log.hpp b/libraries/state_history/include/eosio/state_history/log.hpp index a190c393ac..567294c2a2 100644 --- a/libraries/state_history/include/eosio/state_history/log.hpp +++ b/libraries/state_history/include/eosio/state_history/log.hpp @@ -460,7 +460,7 @@ class state_history_log { return get_block_id_i(block_num); } -#ifdef BOOST_TEST_MODULE +#ifdef BOOST_TEST fc::cfile& get_log_file() { return log;} #endif diff --git a/plugins/chain_plugin/test/CMakeLists.txt b/plugins/chain_plugin/test/CMakeLists.txt index ec397e710d..81473d42e9 100644 --- a/plugins/chain_plugin/test/CMakeLists.txt +++ b/plugins/chain_plugin/test/CMakeLists.txt @@ -5,5 +5,5 @@ add_executable( test_chain_plugin plugin_config_test.cpp main.cpp ) -target_link_libraries( test_chain_plugin chain_plugin eosio_testing) +target_link_libraries( test_chain_plugin chain_plugin eosio_testing eosio_chain_wrap ) add_test(NAME test_chain_plugin COMMAND plugins/chain_plugin/test/test_chain_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) \ No newline at end of file diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt index 2eee8c8b8f..043d56791c 100644 --- a/plugins/producer_plugin/test/CMakeLists.txt +++ b/plugins/producer_plugin/test/CMakeLists.txt @@ -5,5 +5,5 @@ add_executable( test_producer_plugin test_block_timing_util.cpp main.cpp ) -target_link_libraries( test_producer_plugin producer_plugin eosio_testing ) +target_link_libraries( test_producer_plugin producer_plugin eosio_testing eosio_chain_wrap ) add_test(NAME test_producer_plugin COMMAND plugins/producer_plugin/test/test_producer_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) \ No newline at end of file diff --git a/plugins/producer_plugin/test/test_read_only_trx.cpp b/plugins/producer_plugin/test/test_read_only_trx.cpp index 188bf7366c..e89dfc2d6a 100644 --- a/plugins/producer_plugin/test/test_read_only_trx.cpp +++ b/plugins/producer_plugin/test/test_read_only_trx.cpp @@ -187,10 +187,22 @@ BOOST_AUTO_TEST_CASE(with_1_read_only_threads) { test_trxs_common(specific_args); } +// test read-only trxs on 3 threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_3_read_only_threads) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=3", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + 
"--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=40000" }; + test_trxs_common(specific_args); +} + // test read-only trxs on 8 separate threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { std::vector specific_args = { "-p", "eosio", "-e", "--read-only-threads=8", + "--eos-vm-oc-enable=none", "--max-transaction-time=10", "--abi-serializer-max-time-ms=999", "--read-only-write-window-time-us=100000", diff --git a/plugins/state_history_plugin/tests/CMakeLists.txt b/plugins/state_history_plugin/tests/CMakeLists.txt index 3c018c7bc6..98ce935e2d 100644 --- a/plugins/state_history_plugin/tests/CMakeLists.txt +++ b/plugins/state_history_plugin/tests/CMakeLists.txt @@ -1,5 +1,5 @@ -add_executable( test_state_history session_test.cpp plugin_config_test.cpp) -target_link_libraries(test_state_history state_history_plugin Boost::unit_test_framework) +add_executable( test_state_history main.cpp session_test.cpp plugin_config_test.cpp) +target_link_libraries(test_state_history state_history_plugin eosio_testing eosio_chain_wrap Boost::unit_test_framework) target_include_directories( test_state_history PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" ) add_test(test_state_history test_state_history) \ No newline at end of file diff --git a/plugins/state_history_plugin/tests/main.cpp b/plugins/state_history_plugin/tests/main.cpp new file mode 100644 index 0000000000..e618f36999 --- /dev/null +++ b/plugins/state_history_plugin/tests/main.cpp @@ -0,0 +1,2 @@ +#define BOOST_TEST_MODULE state_history_plugin +#include \ No newline at end of file diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index 6dcc79d76e..9dccc8c0ec 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -1,5 +1,3 @@ - -#define BOOST_TEST_MODULE example #include #include diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e1b4ccb92d..50dc6e968b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -5,7 +5,7 @@ list(REMOVE_ITEM UNIT_TESTS ship_client.cpp) list(REMOVE_ITEM UNIT_TESTS ship_streamer.cpp) add_executable( plugin_test ${UNIT_TESTS} ) -target_link_libraries( plugin_test eosio_testing eosio_chain chainbase chain_plugin producer_plugin wallet_plugin fc state_history ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( plugin_test eosio_testing eosio_chain_wrap chainbase chain_plugin producer_plugin wallet_plugin fc state_history ${PLATFORM_SPECIFIC_LIBS} ) target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include From 34fa470be05a7724415cdb95507730abc48179fc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 18:11:45 -0500 Subject: [PATCH 134/191] GH-1251 Add prefix() method to name --- libraries/chain/include/eosio/chain/name.hpp | 35 ++++++++++++++++++++ unittests/misc_tests.cpp | 19 +++++++++++ 2 files changed, 54 insertions(+) diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index 20bdc51549..c1a5b423f0 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ b/libraries/chain/include/eosio/chain/name.hpp @@ -75,6 +75,41 @@ namespace eosio::chain { friend constexpr bool operator != ( const name& a, uint64_t b ) { return a.value != b; } constexpr explicit operator bool()const { return value != 0; } + + /** + * Returns the prefix. 
+ * for example: + * "eosio.any" -> "eosio" + * "eosio" -> "eosio" + */ + constexpr name prefix() const { + uint64_t result = value; + bool not_dot_character_seen = false; + uint64_t mask = 0xFull; + + // Get characters one-by-one in name in order from right to left + for (int32_t offset = 0; offset <= 59;) { + auto c = (value >> offset) & mask; + + if (!c) { // if this character is a dot + if (not_dot_character_seen) { // we found the rightmost dot character + result = (value >> offset) << offset; + break; + } + } else { + not_dot_character_seen = true; + } + + if (offset == 0) { + offset += 4; + mask = 0x1Full; + } else { + offset += 5; + } + } + + return name{ result }; + } }; // Each char of the string is encoded into 5-bit chunk and left-shifted diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 64427568a5..7a3b9c01b7 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -139,6 +139,25 @@ BOOST_AUTO_TEST_CASE(name_suffix_tests) BOOST_CHECK_EQUAL( name{name_suffix("abcdefhij.123"_n)}, name{"123"_n} ); } +BOOST_AUTO_TEST_CASE(name_prefix_tests) +{ + BOOST_CHECK_EQUAL("e"_n.prefix(), "e"_n); + BOOST_CHECK_EQUAL(""_n.prefix(), ""_n); + BOOST_CHECK_EQUAL("abcdefghijklm"_n.prefix(), "abcdefghijklm"_n); + BOOST_CHECK_EQUAL("abcdefghijkl"_n.prefix(), "abcdefghijkl"_n); + BOOST_CHECK_EQUAL("abc.xyz"_n.prefix(), "abc"_n); + BOOST_CHECK_EQUAL("abc.xyz.qrt"_n.prefix(), "abc.xyz"_n); + + BOOST_CHECK_EQUAL("eosio.any"_n.prefix(), "eosio"_n); + BOOST_CHECK_EQUAL("eosio"_n.prefix(), "eosio"_n); + BOOST_CHECK_EQUAL("eosio"_n.prefix(), config::system_account_name); + BOOST_CHECK_EQUAL("eosio."_n.prefix(), "eosio"_n); + BOOST_CHECK_EQUAL("eosio.evm"_n.prefix(), "eosio"_n); + BOOST_CHECK_NE("eosi"_n.prefix(), "eosio"_n); + BOOST_CHECK_NE("eosioeosio"_n.prefix(), "eosio"_n); + BOOST_CHECK_NE("eosioe"_n.prefix(), "eosio"_n); +} + /// Test processing of unbalanced strings BOOST_AUTO_TEST_CASE(json_from_string_test) {
From 360ffb5d7f43807cf94393ddeb05b2977bd85fbc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 18:12:30 -0500 Subject: [PATCH 135/191] GH-1251 Fix test --- tests/performance_tests/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index fd82bc6d15..a22b373c04 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -131,7 +131,7 @@ def configureValidationNodes(): if not self.prodsEnableTraceApi: validationNodeSpecificNodeosStr += "--plugin eosio::trace_api_plugin " if self.nonProdsEosVmOcEnable: - validationNodeSpecificNodeosStr += "--eos-vm-oc-enable " + validationNodeSpecificNodeosStr += "--eos-vm-oc-enable all " if validationNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : validationNodeSpecificNodeosStr for nodeId in self._validationNodeIds})
From 23f6db5f872d85e02e0db4b7a66a5ef2d539d6f2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 18:13:36 -0500 Subject: [PATCH 136/191] GH-1251 Remove unused --- libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index fb2abc65b7..f5bc4309a3 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ 
b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -184,7 +184,6 @@ namespace eosio { namespace chain { speculative_executed_adjusted_max_transaction_time // prev_billed_cpu_time_us > 0 }; tx_cpu_usage_exceeded_reason tx_cpu_usage_reason = tx_cpu_usage_exceeded_reason::account_cpu_limit; - fc::microseconds tx_cpu_usage_amount; }; } } From 1cc64a0cc3b53678acdd0bb2010404e5f77bc8ae Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 18:13:59 -0500 Subject: [PATCH 137/191] GH-1251 Update docs --- docs/01_nodeos/03_plugins/chain_plugin/index.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index c6fa5dd9dc..364cf20189 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -179,7 +179,16 @@ Config Options for eosio::chain_plugin: code cache --eos-vm-oc-compile-threads arg (=1) Number of threads to use for EOS VM OC tier-up - --eos-vm-oc-enable Enable EOS VM OC tier-up runtime + --eos-vm-oc-enable arg (=auto) Enable EOS VM OC tier-up runtime + ('auto', 'all', 'none'). + 'auto' - EOS VM OC tier-up is enabled + for eosio.* accounts and read-only + trxs. + 'all' - EOS VM OC tier-up is enabled + for all contract execution. + 'none' - EOS VM OC tier-up is + completely disabled. + --enable-account-queries arg (=0) enable queries to find accounts by various metadata. --max-nonprivileged-inline-action-size arg (=4096) From 025bc22dbf4292f7d8024aa636138bf95a2f41db Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 19 Jun 2023 18:15:19 -0500 Subject: [PATCH 138/191] GH-1251 Add initial implementation of should_use_eos_vm_oc() --- libraries/chain/apply_context.cpp | 9 +++++++++ .../chain/include/eosio/chain/apply_context.hpp | 2 ++ .../chain/include/eosio/chain/wasm_interface.hpp | 1 - .../include/eosio/chain/wasm_interface_private.hpp | 12 +++++++++++- libraries/chain/wasm_interface.cpp | 6 +++--- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 10aa7e9066..b884f06419 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -1094,4 +1094,13 @@ action_name apply_context::get_sender() const { return action_name(); } + +bool apply_context::should_use_eos_vm_oc()const { + return trx_context.is_read_only() + || receiver.prefix() == config::system_account_name // "eosio"_n + || trx_context.explicit_billed_cpu_time // validating block, todo: disable if producer + || false; // todo: could enable for p2p but may cause spam on producer +} + + } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 78a4fa0e0a..a4ce03392f 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -598,6 +598,8 @@ class apply_context { action_name get_sender() const; + bool should_use_eos_vm_oc()const; + /// Fields: public: diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 440863cd54..0d8abd5e43 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -76,7 +76,6 @@ namespace eosio { namespace chain { const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, 
apply_context& context)> substitute_apply; private: unique_ptr my; - vm_type vm; }; } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 09160c4db7..01ccc193e4 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -64,7 +64,12 @@ namespace eosio { namespace chain { }; #endif - wasm_interface_impl(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : db(d), wasm_runtime_time(vm) { + wasm_interface_impl(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, const chainbase::database& d, + const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) + : db(d) + , wasm_runtime_time(vm) + , eosvmoc_tierup(eosvmoc_tierup) + { #ifdef EOSIO_EOS_VM_RUNTIME_ENABLED if(vm == wasm_interface::vm_type::eos_vm) runtime_interface = std::make_unique>(); @@ -157,6 +162,10 @@ namespace eosio { namespace chain { return it->module; } + bool should_always_oc_tierup()const { + return wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc || eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all; + } + bool is_shutting_down = false; std::unique_ptr runtime_interface; @@ -177,6 +186,7 @@ namespace eosio { namespace chain { const chainbase::database& db; const wasm_interface::vm_type wasm_runtime_time; + const wasm_interface::vm_oc_enable eosvmoc_tierup; #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED std::optional eosvmoc; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 6ceff2e68f..4b0651c5bc 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -33,7 +33,7 @@ namespace eosio { namespace chain { wasm_interface::wasm_interface(vm_type vm, vm_oc_enable eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) - : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) ), vm( vm ) {} + : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) ) {} wasm_interface::~wasm_interface() {} @@ -41,7 +41,7 @@ namespace eosio { namespace chain { void wasm_interface::init_thread_local_data() { if (my->eosvmoc) my->eosvmoc->init_thread_local_data(); - else if (vm == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) + else if (my->wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) my->runtime_interface->init_thread_local_data(); } #endif @@ -90,7 +90,7 @@ namespace eosio { namespace chain { if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) return; #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(my->eosvmoc) { + if(my->eosvmoc && (my->should_always_oc_tierup() || context.should_use_eos_vm_oc())) { const chain::eosvmoc::code_descriptor* cd = nullptr; chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; try { From 4cc666efd6379911f6906788d3a1aed8c4629d37 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 16 Jun 2023 19:43:03 -0400 Subject: [PATCH 139/191] remove FC_USE_PTHREAD_NAME_NP & pthread_getname_np() --- 
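Note: set_thread_name() now always caches the name in a thread_local string and best-effort forwards it to the OS; the native call's signature differs per platform, which is what the CMake pthread_setname_np probe used to paper over. A minimal sketch of the pattern (illustrative function name; on Linux the kernel truncates names to 15 characters plus the terminator):

    #include <pthread.h>
    #include <string>

    static thread_local std::string this_thread_name;

    void set_thread_name_sketch(const std::string& name) {
       this_thread_name = name;                            // local copy, used for log reporting
    #if defined(__linux__) || defined(__FreeBSD__)
       pthread_setname_np(pthread_self(), name.c_str());   // two-argument form: names any thread handle
    #elif defined(__APPLE__)
       pthread_setname_np(name.c_str());                   // one-argument form: names the calling thread only
    #endif
    }

On the read side, get_thread_name() falls back to the executable's filename via boost::dll::program_location() instead of pthread_getname_np(), which is what lets the CMake feature detection go away entirely.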
.../include/eosio/chain/thread_utils.hpp | 2 +- .../chain/platform_timer_asio_fallback.cpp | 2 +- libraries/chain/platform_timer_kqueue.cpp | 2 +- .../runtimes/eos-vm-oc/code_cache.cpp | 2 +- libraries/libfc/CMakeLists.txt | 13 --------- .../libfc/include/fc/log/logger_config.hpp | 1 - libraries/libfc/src/log/gelf_appender.cpp | 2 +- libraries/libfc/src/log/logger_config.cpp | 29 +++++++++---------- .../resource_monitor_plugin.cpp | 2 +- plugins/trace_api_plugin/store_provider.cpp | 2 +- 10 files changed, 21 insertions(+), 36 deletions(-) diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 3a4f0f1d15..d3e9e8a261 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -104,7 +104,7 @@ namespace eosio { namespace chain { if (offset != std::string::npos) tn.erase(0, offset+2); tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i ); - fc::set_os_thread_name( tn ); + fc::set_thread_name( tn ); if ( init ) init(); } FC_LOG_AND_RETHROW() diff --git a/libraries/chain/platform_timer_asio_fallback.cpp b/libraries/chain/platform_timer_asio_fallback.cpp index a372dee8e2..3c861284fc 100644 --- a/libraries/chain/platform_timer_asio_fallback.cpp +++ b/libraries/chain/platform_timer_asio_fallback.cpp @@ -30,7 +30,7 @@ platform_timer::platform_timer() { std::promise p; auto f = p.get_future(); checktime_thread = std::thread([&p]() { - fc::set_os_thread_name("checktime"); + fc::set_thread_name("checktime"); checktime_ios = std::make_unique(); boost::asio::io_service::work work(*checktime_ios); p.set_value(); diff --git a/libraries/chain/platform_timer_kqueue.cpp b/libraries/chain/platform_timer_kqueue.cpp index ed7033c33e..3cb341a031 100644 --- a/libraries/chain/platform_timer_kqueue.cpp +++ b/libraries/chain/platform_timer_kqueue.cpp @@ -51,7 +51,7 @@ platform_timer::platform_timer() { FC_ASSERT(kevent64(kqueue_fd, &quit_event, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL) == 0, "failed to create quit event"); kevent_thread = std::thread([]() { - fc::set_os_thread_name("checktime"); + fc::set_thread_name("checktime"); while(true) { struct kevent64_s anEvent; int c = kevent64(kqueue_fd, NULL, 0, &anEvent, 1, 0, NULL); diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 60cac3dc19..2576b46f96 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -48,7 +48,7 @@ code_cache_async::code_cache_async(const std::filesystem::path data_dir, const e wait_on_compile_monitor_message(); _monitor_reply_thread = std::thread([this]() { - fc::set_os_thread_name("oc-monitor"); + fc::set_thread_name("oc-monitor"); _ctx.run(); }); } diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index ac86842034..e0dca99888 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -65,19 +65,6 @@ file( GLOB_RECURSE fc_headers ${CMAKE_CURRENT_SOURCE_DIR} *.hpp *.h ) add_library(fc ${fc_sources} ${fc_headers}) -function(detect_thread_name) - include(CheckSymbolExists) - list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) - list(APPEND CMAKE_REQUIRED_LIBRARIES "-pthread") - check_symbol_exists(pthread_setname_np pthread.h HAVE_PTHREAD_SETNAME_NP) - if(HAVE_PTHREAD_SETNAME_NP) - set_source_files_properties(src/log/logger_config.cpp PROPERTIES COMPILE_DEFINITIONS 
FC_USE_PTHREAD_NAME_NP) - endif() -endfunction() -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - detect_thread_name() -endif() - # Yuck: newer CMake files from boost iostreams will effectively target_link_libraries(Boost::iostreams z;bz2;lzma;zstd) # without first "finding" those libraries. This resolves to simple -lz -lbz2 etc: it'll look for those libraries in the linker's # library search path. This is most problematic on macOS where something like libzstd isn't in the standard search path. Historically diff --git a/libraries/libfc/include/fc/log/logger_config.hpp b/libraries/libfc/include/fc/log/logger_config.hpp index 5a4eeed2f8..2474a5eb0b 100644 --- a/libraries/libfc/include/fc/log/logger_config.hpp +++ b/libraries/libfc/include/fc/log/logger_config.hpp @@ -73,7 +73,6 @@ namespace fc { void configure_logging( const std::filesystem::path& log_config ); bool configure_logging( const logging_config& l ); - void set_os_thread_name( const std::string& name ); void set_thread_name( const std::string& name ); const std::string& get_thread_name(); } diff --git a/libraries/libfc/src/log/gelf_appender.cpp b/libraries/libfc/src/log/gelf_appender.cpp index 6022d35814..20b8e7b63d 100644 --- a/libraries/libfc/src/log/gelf_appender.cpp +++ b/libraries/libfc/src/log/gelf_appender.cpp @@ -137,7 +137,7 @@ namespace fc my->thread = std::thread([this] { try { - fc::set_os_thread_name("gelf"); + fc::set_thread_name("gelf"); my->io_context.run(); } catch (std::exception& ex) { fprintf(stderr, "GELF logger caught exception at %s:%d : %s\n", __FILE__, __LINE__, ex.what()); diff --git a/libraries/libfc/src/log/logger_config.cpp b/libraries/libfc/src/log/logger_config.cpp index 88b459666d..5b81090d75 100644 --- a/libraries/libfc/src/log/logger_config.cpp +++ b/libraries/libfc/src/log/logger_config.cpp @@ -10,6 +10,9 @@ #include #include +#define BOOST_DLL_USE_STD_FS +#include + namespace fc { log_config& log_config::get() { @@ -133,26 +136,22 @@ namespace fc { } static thread_local std::string thread_name; - void set_os_thread_name( const std::string& name ) { -#ifdef FC_USE_PTHREAD_NAME_NP - pthread_setname_np( pthread_self(), name.c_str() ); -#endif - } + void set_thread_name( const std::string& name ) { thread_name = name; +#if defined(__linux__) || defined(__FreeBSD__) + pthread_setname_np( pthread_self(), name.c_str() ); +#elif defined(__APPLE__) + pthread_setname_np( name.c_str() ); +#endif } const std::string& get_thread_name() { - if( thread_name.empty() ) { -#ifdef FC_USE_PTHREAD_NAME_NP - char thr_name[64]; - int rc = pthread_getname_np( pthread_self(), thr_name, 64 ); - if( rc == 0 ) { - thread_name = thr_name; + if(thread_name.empty()) { + try { + thread_name = boost::dll::program_location().filename().generic_string(); + } catch (...) { + thread_name = "unknown"; } -#else - static int thread_count = 0; - thread_name = std::string( "thread-" ) + std::to_string( thread_count++ ); -#endif } return thread_name; } diff --git a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp index cafebdded6..15cf0dcbdc 100644 --- a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp +++ b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp @@ -128,7 +128,7 @@ class resource_monitor_plugin_impl { } monitor_thread = std::thread( [this] { - fc::set_os_thread_name( "resmon" ); // console_appender uses 9 chars for thread name reporting. + fc::set_thread_name( "resmon" ); // console_appender uses 9 chars for thread name reporting. 
space_handler.space_monitor_loop(); ctx.run(); diff --git a/plugins/trace_api_plugin/store_provider.cpp b/plugins/trace_api_plugin/store_provider.cpp index 3677761a71..ae42e843da 100644 --- a/plugins/trace_api_plugin/store_provider.cpp +++ b/plugins/trace_api_plugin/store_provider.cpp @@ -297,7 +297,7 @@ namespace eosio::trace_api { void slice_directory::start_maintenance_thread(log_handler log) { _maintenance_thread = std::thread([this, log=std::move(log)](){ - fc::set_os_thread_name( "trace-mx" ); + fc::set_thread_name( "trace-mx" ); uint32_t last_lib = 0; while(true) { From 0e08cecdaca8633aa5e167acc2fa6f08c42a6bff Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 20 Jun 2023 08:50:31 -0400 Subject: [PATCH 140/191] on macOS do not consider EINVAL a fatal accept error --- libraries/libfc/include/fc/network/listener.hpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp index 42966b74c2..21a90a2c32 100644 --- a/libraries/libfc/include/fc/network/listener.hpp +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -146,6 +146,10 @@ struct listener : std::enable_shared_from_this { code == ENETUNREACH #ifdef ENONET || code == ENONET +#endif +#ifdef __APPLE__ + //guard against failure of asio's internal SO_NOSIGPIPE call after accept() + || code == EINVAL #endif ) { // according to https://man7.org/linux/man-pages/man2/accept.2.html, reliable application should From 41eaeef80eee1ec2742eec72f09da427a31b5f21 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 20 Jun 2023 14:05:53 -0400 Subject: [PATCH 141/191] Report transaction failed if trx was exhausted in non-producing mode So we restart a speculative block to retry it immediately, instead of waiting to receive a new block --- plugins/producer_plugin/producer_plugin.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 007559e9b9..022d3d5585 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -616,7 +616,7 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { - bool exhausted = false; + bool success = false; chain::controller& chain = chain_plug->chain(); try { const auto& id = trx->id(); @@ -648,13 +648,19 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Tue, 20 Jun 2023 14:49:40 -0400 Subject: [PATCH 142/191] Bump Leap version to 4.0.3 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 71c82da0ac..6e6e261491 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 4) set(VERSION_MINOR 0) -set(VERSION_PATCH 2) +set(VERSION_PATCH 3) #set(VERSION_SUFFIX rc3) if(VERSION_SUFFIX) From 197d37e92c12f9ffb62364b17482d210a2696769 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 20 Jun 2023 14:06:57 -0500 Subject: [PATCH 143/191] GH-1315 Fix merge issue --- plugins/net_plugin/net_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7ecb308add..47c9162d55 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1460,7 +1460,7 @@ namespace eosio { } - org = 0; + org = std::chrono::nanoseconds{0}; 
send_time(); } @@ -2263,7 +2263,7 @@ namespace eosio { c->close( false, true ); return; } - c->latest_blk_time = c->get_time(); + c->latest_blk_time = std::chrono::system_clock::now(); c->block_status_monitor_.accepted(); stages state = sync_state; peer_dlog( c, "state ${s}", ("s", stage_str( state )) ); @@ -3307,7 +3307,7 @@ namespace eosio { msg.dst = get_time().count(); if (msg.org != 0) { - if (msg.org == org) { + if (msg.org == org.count()) { auto ping = msg.dst - msg.org; peer_dlog(this, "send_time ping ${p}us", ("p", ping / 1000)); peer_ping_time_ns = ping; From f2123eae5bd500517c76e83163328cb9d2149b3f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 20 Jun 2023 14:36:25 -0500 Subject: [PATCH 144/191] GH-1251 Disable oc on producer when applying blocks --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/controller.cpp | 9 +++++++++ libraries/chain/include/eosio/chain/controller.hpp | 3 +++ .../chain/include/eosio/chain/wasm_interface.hpp | 10 ++++++++++ plugins/chain_plugin/chain_plugin.cpp | 12 ------------ plugins/producer_plugin/producer_plugin.cpp | 4 +++- 6 files changed, 26 insertions(+), 14 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index b884f06419..5706987cc0 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -1098,7 +1098,7 @@ action_name apply_context::get_sender() const { bool apply_context::should_use_eos_vm_oc()const { return trx_context.is_read_only() || receiver.prefix() == config::system_account_name // "eosio"_n - || trx_context.explicit_billed_cpu_time // validating block, todo: disable if producer + || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()) // validating/applying block || false; // todo: could enable for p2p but may cause spam on producer } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index a93ee546bd..77bcf58bdf 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -240,6 +240,7 @@ struct controller_impl { controller::config conf; const chain_id_type chain_id; // read by thread_pool threads, value will not be changed bool replaying = false; + bool producer_node = false; // true if node is configured as a block producer db_read_mode read_mode = db_read_mode::HEAD; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. 
auth checks) cannot be skipped std::optional subjective_cpu_leeway; @@ -3677,6 +3678,14 @@ void controller::replace_account_keys( name account, name permission, const publ rlm.verify_account_ram_usage(account); } +void controller::set_producer_node(bool is_producer_node) { + my->producer_node = is_producer_node; +} + +bool controller::is_producer_node()const { + return my->producer_node; +} + void controller::set_db_read_only_mode() { mutable_db().set_read_only_mode(); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 385fbeb1a4..c1d188ba1b 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -355,6 +355,9 @@ namespace eosio { namespace chain { void replace_producer_keys( const public_key_type& key ); void replace_account_keys( name account, name permission, const public_key_type& key ); + void set_producer_node(bool is_producer_node); + bool is_producer_node()const; + void set_db_read_only_mode(); void unset_db_read_only_mode(); void init_thread_local_data(); diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 0d8abd5e43..4d3964dda0 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -82,6 +82,16 @@ namespace eosio { namespace chain { namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); + inline std::ostream& operator<<(std::ostream& os, wasm_interface::vm_oc_enable t) { + if (t == wasm_interface::vm_oc_enable::oc_auto) { + os << "auto"; + } else if (t == wasm_interface::vm_oc_enable::oc_all) { + os << "all"; + } else if (t == wasm_interface::vm_oc_enable::oc_none) { + os << "none"; + } + return os; + } }} FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (eos_vm)(eos_vm_jit)(eos_vm_oc) ) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index a83d7d31ff..7dba9ef8d4 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -114,18 +114,6 @@ void validate(boost::any& v, } } -std::ostream& operator<<(std::ostream& os, wasm_interface::vm_oc_enable t) { - if (t == wasm_interface::vm_oc_enable::oc_auto) { - os << "auto"; - } else if (t == wasm_interface::vm_oc_enable::oc_all) { - os << "all"; - } else if (t == wasm_interface::vm_oc_enable::oc_none) { - os << "none"; - } - - return os; -} - void validate(boost::any& v, const std::vector& values, wasm_interface::vm_oc_enable* /* target_type */, diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 36ea7d22f0..8407df755b 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1026,7 +1026,9 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _options = &options; LOAD_VALUE_SET(options, "producer-name", _producers) - chain::controller& chain = chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); + + chain.set_producer_node(!_producers.empty()); if (options.count("signature-provider")) { const std::vector key_spec_pairs = options["signature-provider"].as>(); From 3e11f790c606bf07a8e31a8f13040bd7c1bf6100 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 20 Jun 2023 15:38:00 -0400 Subject: [PATCH 145/191] name/terminology update according to PR 
comments --- plugins/producer_plugin/producer_plugin.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 022d3d5585..e325fe5023 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -616,7 +616,7 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { - bool success = false; + bool exhausted = false; chain::controller& chain = chain_plug->chain(); try { const auto& id = trx->id(); @@ -648,18 +648,17 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Tue, 20 Jun 2023 15:53:54 -0400 Subject: [PATCH 146/191] Add `in_producing_mode()` and `in_speculating_mode()` members. --- plugins/producer_plugin/producer_plugin.cpp | 55 ++++++++++----------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index e325fe5023..1edc983623 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -449,7 +449,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id, const block_state_ptr& bsp) { auto& chain = chain_plug->chain(); - if ( _pending_block_mode == pending_block_mode::producing ) { + if ( in_producing_mode() ) { fc_wlog( _log, "dropped incoming block #${num} id: ${id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); return false; @@ -599,7 +599,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisprocess_incoming_transaction_async( result, persist_until_expired, return_failure_traces, next) ) { - if( self->_pending_block_mode == pending_block_mode::producing ) { + if( self->in_producing_mode() ) { self->schedule_maybe_produce_block( true ); } else { self->restart_speculative_block(); @@ -656,7 +656,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& weak_this, std::optional wake_up_time); std::optional calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const; + bool in_producing_mode() const { return _pending_block_mode == pending_block_mode::producing; } + bool in_speculating_mode() const { return _pending_block_mode == pending_block_mode::speculating; } }; void new_chain_banner(const eosio::chain::controller& db) @@ -1126,7 +1128,7 @@ void producer_plugin::resume() { // it is possible that we are only speculating because of this policy which we have now changed // re-evaluate that now // - if (my->_pending_block_mode == pending_block_mode::speculating) { + if (my->in_speculating_mode()) { my->abort_block(); fc_ilog(_log, "Producer resumed. 
Scheduling production."); my->schedule_production_loop(); @@ -1168,7 +1170,7 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { my->_incoming_defer_ratio = *options.incoming_defer_ratio; } - if (check_speculating && my->_pending_block_mode == pending_block_mode::speculating) { + if (check_speculating && my->in_speculating_mode()) { my->abort_block(); my->schedule_production_loop(); } @@ -1608,7 +1610,7 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const { } fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_point& block_time ) const { - if( _pending_block_mode == pending_block_mode::producing ) { + if( in_producing_mode() ) { bool last_block = ((block_timestamp_type( block_time ).slot % config::producer_repetitions) == config::producer_repetitions - 1); return block_time + fc::microseconds(last_block ? _last_block_time_offset_us : _produce_time_offset_us); } else { @@ -1617,7 +1619,7 @@ fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_po } bool producer_plugin_impl::should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const { - if( _pending_block_mode == pending_block_mode::producing ) { + if( in_producing_mode() ) { return deadline <= fc::time_point::now(); } // if we can produce then honor deadline so production starts on time @@ -1677,7 +1679,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { _pending_block_mode = pending_block_mode::speculating; } - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { // determine if our watermark excludes us from producing at this point if (current_watermark) { const block_timestamp_type block_timestamp{block_time}; @@ -1697,13 +1699,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } } - if (_pending_block_mode == pending_block_mode::speculating) { + if (in_speculating_mode()) { auto head_block_age = now - chain.head_block_time(); if (head_block_age > fc::seconds(5)) return start_block_result::waiting_for_block; } - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { const auto start_block_time = block_time - fc::microseconds( config::block_interval_us ); if( now < start_block_time ) { fc_dlog(_log, "Not producing block waiting for production window ${n} ${bt}", ("n", pending_block_num)("bt", block_time) ); @@ -1725,7 +1727,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { uint16_t blocks_to_confirm = 0; - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { // determine how many blocks this producer can confirm // 1) if it is not a producer from this node, assume no confirmations (we will discard this block anyway) // 2) if it is a producer on this node that has never produced, the conservative approach is to assume no @@ -1746,7 +1748,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { abort_block(); auto features_to_activate = chain.get_preactivated_protocol_features(); - if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { + if( in_producing_mode() && _protocol_features_to_activate.size() > 0 ) { bool drop_features_to_activate = false; try { chain.validate_protocol_features( _protocol_features_to_activate ); @@ -1794,7 +1796,7 @@ producer_plugin_impl::start_block_result 
producer_plugin_impl::start_block() { if( chain.is_building_block() ) { const auto& pending_block_signing_authority = chain.pending_block_signing_authority(); - if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { + if (in_producing_mode() && pending_block_signing_authority != scheduled_producer.authority) { elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } @@ -1818,7 +1820,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if( !process_unapplied_trxs( preprocess_deadline ) ) return start_block_result::exhausted; - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { auto scheduled_trx_deadline = preprocess_deadline; if (_max_scheduled_transaction_time_per_block_ms >= 0) { scheduled_trx_deadline = std::min( @@ -1875,7 +1877,7 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) } }); - if( exhausted && _pending_block_mode == pending_block_mode::producing ) { + if( exhausted && in_producing_mode() ) { fc_wlog( _log, "Unable to process all expired transactions in unapplied queue before deadline, " "Persistent expired ${persistent_expired}, Other expired ${other_expired}", ("persistent_expired", num_expired_persistent)("other_expired", num_expired_other) ); @@ -1978,7 +1980,7 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, bool except = except_ptr || (trace && trace->except); if (except) { - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); @@ -1999,7 +2001,7 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, ("entire_trace", get_trace(trace, except_ptr))); } } else { - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); @@ -2053,15 +2055,14 @@ producer_plugin_impl::push_transaction( const fc::time_point& block_deadline, fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); - bool disable_subjective_billing = ( _pending_block_mode == pending_block_mode::producing ) - || disable_subjective_enforcement; + bool disable_subjective_billing = in_producing_mode() || disable_subjective_enforcement; int64_t sub_bill = 0; if( !disable_subjective_billing ) sub_bill = _subjective_billing.get_subjective_bill( first_auth, fc::time_point::now() ); auto prev_billed_cpu_time_us = trx->billed_cpu_time_us; - if( _pending_block_mode == pending_block_mode::producing && prev_billed_cpu_time_us > 0 ) { + if( in_producing_mode() && prev_billed_cpu_time_us > 0 ) { const auto& rl = chain.get_resource_limits_manager(); if ( !_subjective_billing.is_account_disabled( first_auth ) && !rl.is_unlimited_cpu( first_auth ) ) { int64_t 
prev_billed_plus100_us = prev_billed_cpu_time_us + EOS_PERCENT( prev_billed_cpu_time_us, 100 * config::percent_1 ); @@ -2075,7 +2076,7 @@ producer_plugin_impl::push_transaction( const fc::time_point& block_deadline, if( trace->except ) { _time_tracker.add_fail_time(end - start); if( exception_is_exhausted( *trace->except ) ) { - if( _pending_block_mode == pending_block_mode::producing ) { + if( in_producing_mode() ) { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx->id())); } else { @@ -2141,10 +2142,8 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin int num_applied = 0, num_failed = 0, num_processed = 0; auto unapplied_trxs_size = _unapplied_transactions.size(); // unapplied and persisted do not have a next method to call - auto itr = (_pending_block_mode == pending_block_mode::producing) ? - _unapplied_transactions.unapplied_begin() : _unapplied_transactions.persisted_begin(); - auto end_itr = (_pending_block_mode == pending_block_mode::producing) ? - _unapplied_transactions.unapplied_end() : _unapplied_transactions.persisted_end(); + auto itr = in_producing_mode() ? _unapplied_transactions.unapplied_begin() : _unapplied_transactions.persisted_begin(); + auto end_itr = in_producing_mode() ? _unapplied_transactions.unapplied_end() : _unapplied_transactions.persisted_end(); while( itr != end_itr ) { if( should_interrupt_start_block( deadline, pending_block_num ) ) { exhausted = true; @@ -2406,10 +2405,10 @@ void producer_plugin_impl::schedule_production_loop() { } else if (result == start_block_result::waiting_for_production) { // scheduled in start_block() - } else if (_pending_block_mode == pending_block_mode::producing) { + } else if (in_producing_mode()) { schedule_maybe_produce_block( result == start_block_result::exhausted ); - } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ + } else if (in_speculating_mode() && !_producers.empty() && !production_disabled_by_policy()){ chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); @@ -2526,7 +2525,7 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); From dc9901203e5948c3c976798019e57aa6bb19abbb Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 20 Jun 2023 16:37:20 -0500 Subject: [PATCH 147/191] Update to use eos-system-contracts instead of reference-contracts. 
--- .github/workflows/build.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c3a9405643..e09f20a4fb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -310,20 +310,20 @@ jobs: rm ./*.deb # Reference Contracts - - name: checkout reference-contracts + - name: checkout eos-system-contracts uses: actions/checkout@v3 with: - repository: AntelopeIO/reference-contracts - path: reference-contracts + repository: eosnetworkfoundation/eos-system-contracts + path: eos-system-contracts - if: ${{ matrix.test == 'deb-install' }} - name: Install reference-contracts deps + name: Install eos-system-contracts deps run: | apt-get -y install cmake build-essential - - name: Build & Test reference-contracts + - name: Build & Test eos-system-contracts run: | - cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On - cmake --build reference-contracts/build -- -j $(nproc) - cd reference-contracts/build/tests + cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake --build eos-system-contracts/build -- -j $(nproc) + cd eos-system-contracts/build/tests ctest --output-on-failure -j $(nproc) all-passing: From a44ca9b0cd45587672104ab95f5f41d7ba68bb14 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 20 Jun 2023 16:47:33 -0500 Subject: [PATCH 148/191] Add control knob for selecting eos-system-contracts ref. Defaults to release/3.1 --- .cicd/defaults.json | 3 +++ .github/workflows/build.yaml | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.cicd/defaults.json b/.cicd/defaults.json index 45e05ac7b6..56c0051df9 100644 --- a/.cicd/defaults.json +++ b/.cicd/defaults.json @@ -2,5 +2,8 @@ "cdt":{ "target":"4", "prerelease":false + }, + "eos-system-contracts":{ + "ref":"release/3.1" } } diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e09f20a4fb..676639619e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -48,8 +48,9 @@ jobs: outputs: cdt-target: ${{steps.versions.outputs.cdt-target}} cdt-prerelease: ${{steps.versions.outputs.cdt-prerelease}} + eos-system-contracts-ref: ${{steps.versions.outputs.eos-system-contracts-ref}} steps: - - name: Setup cdt versions + - name: Setup cdt and eos-system-contracts versions id: versions env: GH_TOKEN: ${{secrets.GITHUB_TOKEN}} @@ -57,6 +58,7 @@ jobs: DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT + echo eos-system-contracts-ref=$(echo "$DEFAULTS_JSON" | jq -r '.eos-system-contracts.ref') >> $GITHUB_OUTPUT if [[ "${{inputs.override-cdt}}" != "" ]]; then echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT @@ -64,6 +66,9 @@ jobs: if [[ "${{inputs.override-cdt-prerelease}}" == +(true|false) ]]; then echo cdt-prerelease=${{inputs.override-cdt-prerelease}} >> $GITHUB_OUTPUT fi + if [[ "${{inputs.override-eos-system-contracts}}" != "" ]]; then + echo eos-system-contracts-ref=${{inputs.override-eos-system-contracts}} >> $GITHUB_OUTPUT + fi build-platforms: name: Build Platforms needs: d @@ -315,6 +320,7 @@ jobs: with: repository: eosnetworkfoundation/eos-system-contracts path: 
eos-system-contracts + ref: '${{needs.v.outputs.eos-system-contracts-ref}}' - if: ${{ matrix.test == 'deb-install' }} name: Install eos-system-contracts deps run: | apt-get -y install cmake build-essential From a3b84cec5e7e9133eb3f3cf85e8ba482d6afaa1c Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 20 Jun 2023 17:13:39 -0500 Subject: [PATCH 149/191] Add eos-system-contracts override input. --- .github/workflows/build.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 676639619e..3059298deb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -18,6 +18,9 @@ on: - default - true - false + override-eos-system-contracts: description: 'Override eos-system-contracts ref' type: string permissions: packages: read From 0b56986bba5fd9b9f850d0cd2e8fe6bf039b10b6 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Tue, 20 Jun 2023 17:35:20 -0500 Subject: [PATCH 150/191] Remove dash from json for jq. --- .cicd/defaults.json | 2 +- .github/workflows/build.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.cicd/defaults.json b/.cicd/defaults.json index 56c0051df9..fd637bc48a 100644 --- a/.cicd/defaults.json +++ b/.cicd/defaults.json @@ -3,7 +3,7 @@ "target":"4", "prerelease":false }, - "eos-system-contracts":{ + "eossystemcontracts":{ "ref":"release/3.1" } } diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3059298deb..ab573279bb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -61,7 +61,7 @@ jobs: DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT - echo eos-system-contracts-ref=$(echo "$DEFAULTS_JSON" | jq -r '.eos-system-contracts.ref') >> $GITHUB_OUTPUT + echo eos-system-contracts-ref=$(echo "$DEFAULTS_JSON" | jq -r '.eossystemcontracts.ref') >> $GITHUB_OUTPUT if [[ "${{inputs.override-cdt}}" != "" ]]; then echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT From c293d332f729490a8723fc5fe7b80fa87aa14a17 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 21 Jun 2023 11:34:38 -0400 Subject: [PATCH 151/191] Remove comment as per PR review. --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 1edc983623..461d6dab08 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -654,7 +654,7 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Wed, 21 Jun 2023 12:10:24 -0500 Subject: [PATCH 152/191] Disable leap version check to allow running with whatever version leap is currently at.
--- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ab573279bb..8fa80f6ab5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -330,7 +330,7 @@ jobs: apt-get -y install cmake build-essential - name: Build & Test eos-system-contracts run: | - cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On + cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off cmake --build eos-system-contracts/build -- -j $(nproc) cd eos-system-contracts/build/tests ctest --output-on-failure -j $(nproc) From 4c7772f096ca6d7f80d24cc92fb59ae02997ab69 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 21 Jun 2023 12:11:45 -0500 Subject: [PATCH 153/191] Currently eos-system-contracts 3.1 sets a CDT_VERSION_SOFT_MAX to 3, so update to compatible version for default. --- .cicd/defaults.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/defaults.json b/.cicd/defaults.json index fd637bc48a..4c514224f3 100644 --- a/.cicd/defaults.json +++ b/.cicd/defaults.json @@ -1,6 +1,6 @@ { "cdt":{ - "target":"4", + "target":"3", "prerelease":false }, "eossystemcontracts":{ "ref":"release/3.1" } } From 38439b3e69e1551683c45694f66743e513c54056 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Wed, 21 Jun 2023 14:29:35 -0500 Subject: [PATCH 154/191] Use option to disable SYSTEM_ENABLE_LEAP_VERSION_CHECK --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8fa80f6ab5..09e926b18d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -330,7 +330,7 @@ jobs: apt-get -y install cmake build-essential - name: Build & Test eos-system-contracts run: | - cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off + cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off -DSYSTEM_ENABLE_CDT_VERSION_CHECK=Off cmake --build eos-system-contracts/build -- -j $(nproc) cd eos-system-contracts/build/tests ctest --output-on-failure -j $(nproc) From 9faefa2629bbd932edc4f43231080f583ca838f9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 22 Jun 2023 07:49:12 -0500 Subject: [PATCH 155/191] GH-1251 Increase perf harness genesis to have 150ms max transaction time to match genesis used in other tests. This allows for setcode in slow ci/cd env.
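The genesis field is expressed in microseconds, so this raises the per-transaction CPU budget from 90 ms to 150 ms; setcode of a large contract can exceed 90 ms on a slow CI runner, and 150 ms matches the genesis files used by the other tests. A minimal C++ sketch of the unit arithmetic, assuming the microsecond convention used for CPU limits in the chain config (the constant names are illustrative, not from the source):

   #include <cstdint>

   // genesis "max_transaction_cpu_usage" values are in microseconds
   static constexpr uint32_t old_max_trx_cpu_us = 90'000;   // 90 ms
   static constexpr uint32_t new_max_trx_cpu_us = 150'000;  // 150 ms

   // 150'000 us / 1'000 us-per-ms gives the "150ms max transaction time" above
   static_assert(old_max_trx_cpu_us / 1'000 == 90);
   static_assert(new_max_trx_cpu_us / 1'000 == 150);

Only the one genesis value changes; the diff follows.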
--- tests/performance_tests/genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance_tests/genesis.json b/tests/performance_tests/genesis.json index 57474c902d..a215407af4 100644 --- a/tests/performance_tests/genesis.json +++ b/tests/performance_tests/genesis.json @@ -11,7 +11,7 @@ "context_free_discount_net_usage_den": 100, "max_block_cpu_usage": 500000, "target_block_cpu_usage_pct": 500, - "max_transaction_cpu_usage": 90000, + "max_transaction_cpu_usage": 150000, "min_transaction_cpu_usage": 0, "max_transaction_lifetime": 3600, "deferred_trx_expiration_window": 600, From 8fc11f0041f4c428aabf93c4d85aabe75d5228b0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 22 Jun 2023 08:08:58 -0500 Subject: [PATCH 156/191] GH-1251 Add additional allowed true/false alternatives --- plugins/chain_plugin/chain_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 7dba9ef8d4..6071f62273 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -130,9 +130,9 @@ void validate(boost::any& v, if (s == "auto") { v = boost::any(wasm_interface::vm_oc_enable::oc_auto); - } else if (s == "all" || s == "true" || s == "on") { + } else if (s == "all" || s == "true" || s == "on" || s == "yes" || s == "1") { v = boost::any(wasm_interface::vm_oc_enable::oc_all); - } else if (s == "none" || s == "false" || s == "off") { + } else if (s == "none" || s == "false" || s == "off" || s == "no" || s == "0") { v = boost::any(wasm_interface::vm_oc_enable::oc_none); } else { throw validation_error(validation_error::invalid_option_value); From 870673840b94532ee2c07bb70d4b1a1b71182b02 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 22 Jun 2023 08:22:40 -0500 Subject: [PATCH 157/191] GH-1251 Add additional comments --- libraries/chain/apply_context.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 5706987cc0..b11226d3a5 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -1094,12 +1094,18 @@ action_name apply_context::get_sender() const { return action_name(); } - +// Context | OC? 
+//------------------------------------------------------------------------------- +// Building block | baseline, OC for eosio.* +// Applying block | OC unless a producer, OC for eosio.* including producers +// Speculative API trx | baseline, OC for eosio.* +// Speculative P2P trx | baseline, OC for eosio.* +// Compute trx | baseline, OC for eosio.* +// Read only trx | OC bool apply_context::should_use_eos_vm_oc()const { return trx_context.is_read_only() - || receiver.prefix() == config::system_account_name // "eosio"_n - || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()) // validating/applying block - || false; // todo: could enable for p2p but may cause spam on producer + || receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC + || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()); // validating/applying block } From 02c43ca656671772aab51a685ca5d612dcc20cb9 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 09:30:09 -0500 Subject: [PATCH 158/191] Use enf-x86-beefy-long runner --- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 7b8caf1de9..7791ffff26 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: platform: [ubuntu18, ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-hightier-long"] + runs-on: ["self-hosted", "enf-x86-beefy-long"] container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - name: Conditionally update git repo From 9e82b160c759fb71f5e162f63d22ed659aa2286b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 11:45:48 -0500 Subject: [PATCH 159/191] Update artifact name that was changed in 3.2 --- .github/workflows/pinned_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 4cd29592d2..d0107016e7 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -51,7 +51,7 @@ jobs: uses: actions/upload-artifact@v3 with: name: leap-${{matrix.platform}}-pinned-amd64 - path: build/leap-3*.deb + path: build/leap_*.deb - name: Run Parallel Tests run: | cd build From b194862aa26d53a7569c75ad27d322b6b7cd411b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 13:08:06 -0500 Subject: [PATCH 160/191] Skip parallel tests on ubuntu18 (as done in build.yaml). 
--- .github/workflows/pinned_build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index d0107016e7..7ec42bda11 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -53,6 +53,7 @@ jobs: name: leap-${{matrix.platform}}-pinned-amd64 path: build/leap_*.deb - name: Run Parallel Tests + if: ${{ matrix.platform != 'ubuntu18' }} run: | cd build ctest --output-on-failure -j $(nproc) -LE "(nonparallelizable_tests|long_running_tests)" --timeout 420 From 98e3b81c21b97386f08a714bcbceab37de34db07 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 22 Jun 2023 14:50:22 -0500 Subject: [PATCH 161/191] GH-1251 Address peer review comments --- docs/01_nodeos/03_plugins/chain_plugin/index.md | 4 ++-- libraries/chain/apply_context.cpp | 6 +++--- libraries/chain/controller.cpp | 6 +++--- plugins/chain_plugin/chain_plugin.cpp | 2 +- unittests/misc_tests.cpp | 2 ++ 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index 364cf20189..c35055d6c7 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -182,8 +182,8 @@ Config Options for eosio::chain_plugin: --eos-vm-oc-enable arg (=auto) Enable EOS VM OC tier-up runtime ('auto', 'all', 'none'). 'auto' - EOS VM OC tier-up is enabled - for eosio.* accounts and read-only - trxs. + for eosio.* accounts, read-only trxs, + and applying blocks. 'all' - EOS VM OC tier-up is enabled for all contract execution. 'none' - EOS VM OC tier-up is diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index b11226d3a5..e0afd261bf 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -1103,9 +1103,9 @@ action_name apply_context::get_sender() const { // Compute trx | baseline, OC for eosio.* // Read only trx | OC bool apply_context::should_use_eos_vm_oc()const { - return trx_context.is_read_only() - || receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC - || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()); // validating/applying block + return receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC + || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()) // validating/applying block + || trx_context.is_read_only(); } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 77bcf58bdf..2a022612ef 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -240,7 +240,7 @@ struct controller_impl { controller::config conf; const chain_id_type chain_id; // read by thread_pool threads, value will not be changed bool replaying = false; - bool producer_node = false; // true if node is configured as a block producer + bool is_producer_node = false; // true if node is configured as a block producer db_read_mode read_mode = db_read_mode::HEAD; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. 
auth checks) cannot be skipped std::optional subjective_cpu_leeway; @@ -3679,11 +3679,11 @@ void controller::replace_account_keys( name account, name permission, const publ } void controller::set_producer_node(bool is_producer_node) { - my->producer_node = is_producer_node; + my->is_producer_node = is_producer_node; } bool controller::is_producer_node()const { - return my->producer_node; + return my->is_producer_node; } void controller::set_db_read_only_mode() { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 6071f62273..7a27f84678 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -362,7 +362,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip }), "Number of threads to use for EOS VM OC tier-up") ("eos-vm-oc-enable", bpo::value()->default_value(chain::wasm_interface::vm_oc_enable::oc_auto), "Enable EOS VM OC tier-up runtime ('auto', 'all', 'none').\n" - "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts and read-only trxs.\n" + "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts, read-only trxs, and applying blocks.\n" "'all' - EOS VM OC tier-up is enabled for all contract execution.\n" "'none' - EOS VM OC tier-up is completely disabled.\n") #endif diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 7a3b9c01b7..4f61553040 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -147,12 +147,14 @@ BOOST_AUTO_TEST_CASE(name_prefix_tests) BOOST_CHECK_EQUAL("abcdefghijkl"_n.prefix(), "abcdefghijkl"_n); BOOST_CHECK_EQUAL("abc.xyz"_n.prefix(), "abc"_n); BOOST_CHECK_EQUAL("abc.xyz.qrt"_n.prefix(), "abc.xyz"_n); + BOOST_CHECK_EQUAL("."_n.prefix(), ""_n); BOOST_CHECK_EQUAL("eosio.any"_n.prefix(), "eosio"_n); BOOST_CHECK_EQUAL("eosio"_n.prefix(), "eosio"_n); BOOST_CHECK_EQUAL("eosio"_n.prefix(), config::system_account_name); BOOST_CHECK_EQUAL("eosio."_n.prefix(), "eosio"_n); BOOST_CHECK_EQUAL("eosio.evm"_n.prefix(), "eosio"_n); + BOOST_CHECK_EQUAL(".eosio"_n.prefix(), ""_n); BOOST_CHECK_NE("eosi"_n.prefix(), "eosio"_n); BOOST_CHECK_NE("eosioeosio"_n.prefix(), "eosio"_n); BOOST_CHECK_NE("eosioe"_n.prefix(), "eosio"_n); From 67b43790bb4c2c77cd59abbd93a6d551995710e4 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 16:12:50 -0500 Subject: [PATCH 162/191] Fix lost quotes in merge. --- scripts/pinned_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index d44b3104aa..8d8823e77d 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -131,7 +131,7 @@ pushdir "${LEAP_DIR}" # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE=${SCRIPT_DIR}/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${LLVM_DIR}/lib/cmake -DCMAKE_PREFIX_PATH=${BOOST_DIR}/bin ${SCRIPT_DIR}/.. +try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" -DCMAKE_PREFIX_PATH="${BOOST_DIR}/bin" "${SCRIPT_DIR}/.." 
try make -j "${JOBS}" try cpack From 0861ef660a6d4ede8b53475c5456450ef4d1dd9a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 16:22:09 -0500 Subject: [PATCH 163/191] Update cdt target to version 4.x --- .cicd/defaults.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/defaults.json b/.cicd/defaults.json index 4c514224f3..fd637bc48a 100644 --- a/.cicd/defaults.json +++ b/.cicd/defaults.json @@ -1,6 +1,6 @@ { "cdt":{ - "target":"3", + "target":"4", "prerelease":false }, "eossystemcontracts":{ From 933c550ce90c8b2e0e4eb38d44d1b04466ca1a56 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 22 Jun 2023 19:11:35 -0500 Subject: [PATCH 164/191] Remove ubuntu18 support in pinned builds. --- .github/workflows/pinned_build.yaml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml index 7ec42bda11..54b87ee93e 100644 --- a/.github/workflows/pinned_build.yaml +++ b/.github/workflows/pinned_build.yaml @@ -17,17 +17,10 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu18, ubuntu20, ubuntu22] + platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-beefy-long"] - container: ${{ matrix.platform == 'ubuntu18' && 'ubuntu:bionic' || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} steps: - - name: Conditionally update git repo - if: ${{ matrix.platform == 'ubuntu18' }} - run: | - apt-get update - apt-get install -y software-properties-common - apt-get update - add-apt-repository ppa:git-core/ppa - name: Update and Install git run: | apt-get update @@ -53,7 +46,6 @@ jobs: name: leap-${{matrix.platform}}-pinned-amd64 path: build/leap_*.deb - name: Run Parallel Tests - if: ${{ matrix.platform != 'ubuntu18' }} run: | cd build ctest --output-on-failure -j $(nproc) -LE "(nonparallelizable_tests|long_running_tests)" --timeout 420 From d4ee46b7a12c0202b761cfb71bea4798e73e1f7d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 23 Jun 2023 11:37:10 -0500 Subject: [PATCH 165/191] GH-1251 Add is_applying_block() method --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/include/eosio/chain/apply_context.hpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index e0afd261bf..b61ee77bbe 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -1104,7 +1104,7 @@ action_name apply_context::get_sender() const { // Read only trx | OC bool apply_context::should_use_eos_vm_oc()const { return receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC - || (trx_context.explicit_billed_cpu_time && !control.is_producer_node()) // validating/applying block + || (is_applying_block() && !control.is_producer_node()) // validating/applying block || trx_context.is_read_only(); } diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index a4ce03392f..090531bfcb 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -598,6 +598,7 @@ class apply_context { action_name get_sender() const; + bool is_applying_block() const { return trx_context.explicit_billed_cpu_time; } bool should_use_eos_vm_oc()const; /// Fields: From dc00e24c98f08fcceca58b6cf51e288c3e49f779 Mon Sep 17 
00:00:00 2001 From: Kevin Heifner Date: Fri, 23 Jun 2023 11:38:11 -0500 Subject: [PATCH 166/191] GH-1251 Add integration test that verifies auto oc tierup --- libraries/chain/wasm_interface.cpp | 2 ++ tests/CMakeLists.txt | 4 +++- tests/TestHarness/Node.py | 10 ++++++++++ tests/read_only_trx_test.py | 29 ++++++++++++++++++++++++++--- 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 4b0651c5bc..5373f26612 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -105,6 +105,8 @@ namespace eosio { namespace chain { once_is_enough = true; } if(cd) { + if (!context.is_applying_block()) + tlog("speculatively executing ${h} with eos vm oc", ("h", code_hash)); my->eosvmoc->exec->execute(*cd, my->eosvmoc->mem, context); return; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 50dc6e968b..71954df6ca 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -132,8 +132,10 @@ add_test(NAME read-only-trx-basic-test COMMAND tests/read_only_trx_test.py -v -p set_property(TEST read-only-trx-basic-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME read-only-trx-parallel-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --read-only-threads 6 --num-test-runs 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST read-only-trx-parallel-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME read-only-trx-parallel-eos-vm-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable --read-only-threads 6 --num-test-runs 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME read-only-trx-parallel-eos-vm-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable all --read-only-threads 6 --num-test-runs 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST read-only-trx-parallel-eos-vm-oc-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME read-only-trx-parallel-no-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable none --read-only-threads 6 --num-test-runs 2 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST read-only-trx-parallel-no-oc-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME subjective_billing_test COMMAND tests/subjective_billing_test.py -v -p 2 -n 4 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST subjective_billing_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME get_account_test COMMAND tests/get_account_test.py -v -p 2 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 8ae9da0633..e4d55211ad 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -535,6 +535,16 @@ def findStderrFiles(path): files.sort() return files + def findInLog(self, searchStr): + dataDir=Utils.getNodeDataDir(self.nodeId) + files=Node.findStderrFiles(dataDir) + for file in files: + with open(file, 'r') as f: + for line in f: + if searchStr in line: + return True + return False + def analyzeProduction(self, specificBlockNum=None, thresholdMs=500): dataDir=Utils.getNodeDataDir(self.nodeId) files=Node.findStderrFiles(dataDir) diff --git a/tests/read_only_trx_test.py b/tests/read_only_trx_test.py index 9461c64914..ac86f281d3 100755 --- a/tests/read_only_trx_test.py +++ b/tests/read_only_trx_test.py @@ -22,7 +22,7 @@ appArgs=AppArgs() appArgs.add(flag="--read-only-threads", type=int, help="number of read-only 
threads", default=0) appArgs.add(flag="--num-test-runs", type=int, help="number of times to run the tests", default=1) -appArgs.add_bool(flag="--eos-vm-oc-enable", help="enable eos-vm-oc") +appArgs.add(flag="--eos-vm-oc-enable", type=str, help="specify eos-vm-oc-enable option", default="auto") appArgs.add(flag="--wasm-runtime", type=str, help="if set to eos-vm-oc, must compile with EOSIO_EOS_VM_OC_DEVELOPER", default="eos-vm-jit") args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" @@ -47,9 +47,12 @@ Utils.Debug=debug testSuccessful=False errorInThread=False +noOC = args.eos_vm_oc_enable == "none" +allOC = args.eos_vm_oc_enable == "all" random.seed(seed) # Use a fixed seed for repeatability. -cluster=Cluster(unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) +# all debuglevel so that "executing ${h} with eos vm oc" is logged +cluster=Cluster(loggingLevel="all", unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -61,6 +64,15 @@ userAccountName = "user" payloadlessAccountName = "payloadless" +def getCodeHash(node, account): + # Example get code result: code hash: 67d0598c72e2521a1d588161dad20bbe9f8547beb5ce6d14f3abd550ab27d3dc + cmd = f"get code {account}" + codeHash = node.processCleosCmd(cmd, cmd, silentErrors=False, returnType=ReturnType.raw) + if codeHash is None: errorExit(f"Unable to get code {account} from node {node.nodeId}") + else: codeHash = codeHash.split(' ')[2].strip() + if Utils.Debug: Utils.Print(f"{account} code hash: {codeHash}") + return codeHash + def startCluster(): global total_nodes global producerNode @@ -91,7 +103,8 @@ def startCluster(): specificExtraNodeosArgs[pnodes]+=" --read-only-threads " specificExtraNodeosArgs[pnodes]+=str(args.read_only_threads) if args.eos_vm_oc_enable: - specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable all" + specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable " + specificExtraNodeosArgs[pnodes]+=args.eos_vm_oc_enable if args.wasm_runtime: specificExtraNodeosArgs[pnodes]+=" --wasm-runtime " specificExtraNodeosArgs[pnodes]+=args.wasm_runtime @@ -107,6 +120,12 @@ def startCluster(): producerNode = cluster.getNode() apiNode = cluster.nodes[-1] + eosioCodeHash = getCodeHash(producerNode, "eosio") + # eosio.* should be using oc unless oc tierup disabled + Utils.Print(f"search: executing {eosioCodeHash} with eos vm oc") + found = producerNode.findInLog(f"executing {eosioCodeHash} with eos vm oc") + assert( found or (noOC and not found) ) + def deployTestContracts(): Utils.Print("create test accounts") testAccount = Account(testAccountName) @@ -243,6 +262,10 @@ def basicTests(): assert(results[0]) apiNode.waitForTransactionInBlock(results[1]['transaction_id']) + testAccountCodeHash = getCodeHash(producerNode, testAccountName) + found = producerNode.findInLog(f"executing {testAccountCodeHash} with eos vm oc") + assert( (allOC and found) or not found ) + # verify the return value (age) from read-only is the same as created. 
Print("Send a read-only Get transaction to verify previous Insert") results = sendTransaction(testAccountName, 'getage', {"user": userAccountName}, opts='--read') From 6008c728927f524e06a6d77463c5a5c165190c18 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 23 Jun 2023 13:06:37 -0500 Subject: [PATCH 167/191] GH-1251 Use eosio.token as eosio.system contract can not tierup in time to be found in logs on slow ci/cd machine --- libraries/chain/wasm_interface.cpp | 2 +- tests/read_only_trx_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 5373f26612..c7aeb46f8e 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -106,7 +106,7 @@ namespace eosio { namespace chain { } if(cd) { if (!context.is_applying_block()) - tlog("speculatively executing ${h} with eos vm oc", ("h", code_hash)); + tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); my->eosvmoc->exec->execute(*cd, my->eosvmoc->mem, context); return; } diff --git a/tests/read_only_trx_test.py b/tests/read_only_trx_test.py index ac86f281d3..3bc209c2c2 100755 --- a/tests/read_only_trx_test.py +++ b/tests/read_only_trx_test.py @@ -120,7 +120,7 @@ def startCluster(): producerNode = cluster.getNode() apiNode = cluster.nodes[-1] - eosioCodeHash = getCodeHash(producerNode, "eosio") + eosioCodeHash = getCodeHash(producerNode, "eosio.token") # eosio.* should be using oc unless oc tierup disabled Utils.Print(f"search: executing {eosioCodeHash} with eos vm oc") found = producerNode.findInLog(f"executing {eosioCodeHash} with eos vm oc") From 0997826de8c315c155deaebd96f15471d234ff14 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 23 Jun 2023 23:02:44 -0500 Subject: [PATCH 168/191] GH-1251 Use deque instead of unordered_set to process contracts in order. Prioritize eosio.*. 
--- .../chain/webassembly/eos-vm-oc/code_cache.hpp | 4 ++-- libraries/chain/wasm_interface.cpp | 2 +- .../webassembly/runtimes/eos-vm-oc/code_cache.cpp | 14 ++++++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp index fe7ff49788..1584c96406 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -80,7 +80,7 @@ class code_cache_base { //these are really only useful to the async code cache, but keep them here so //free_code can be shared - std::unordered_set _queued_compiles; + deque _queued_compiles; std::unordered_map _outstanding_compiles_and_poison; size_t _free_bytes_eviction_threshold; @@ -101,7 +101,7 @@ class code_cache_async : public code_cache_base { //If code is in cache: returns pointer & bumps to front of MRU list //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile //otherwise: return nullptr - const code_descriptor* const get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); + const code_descriptor* const get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); private: std::thread _monitor_reply_thread; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index c7aeb46f8e..ba7e268579 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -94,7 +94,7 @@ namespace eosio { namespace chain { const chain::eosvmoc::code_descriptor* cd = nullptr; chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; try { - cd = my->eosvmoc->cc.get_descriptor_for_code(code_hash, vm_version, context.control.is_write_window(), failure); + cd = my->eosvmoc->cc.get_descriptor_for_code(context.get_receiver(), code_hash, vm_version, context.control.is_write_window(), failure); } catch(...) { //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index a43f8ac932..cab0728030 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -106,7 +107,7 @@ std::tuple code_cache_async::consume_compile_thread_queue() { } -const code_descriptor* const code_cache_async::get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { +const code_descriptor* const code_cache_async::get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { //if there are any outstanding compiles, process the result queue now //When app is in write window, all tasks are running sequentially and read-only threads //are not running. Safe to update cache entries. 
@@ -156,13 +157,16 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const dig it->second = false; return nullptr; } - if(_queued_compiles.find(ct) != _queued_compiles.end()) { + if(std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), ct) != _queued_compiles.end()) { failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } if(_outstanding_compiles_and_poison.size() >= _threads) { - _queued_compiles.emplace(ct); + if (receiver.prefix() == chain::config::system_account_name) + _queued_compiles.push_front(ct); + else + _queued_compiles.push_back(ct); failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } @@ -383,7 +387,9 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve } //if it's in the queued list, erase it - _queued_compiles.erase({code_id, vm_version}); + auto i = std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), code_tuple{code_id, vm_version}); + if (i != _queued_compiles.cend()) + _queued_compiles.erase(i); //however, if it's currently being compiled there is no way to cancel the compile, //so instead set a poison boolean that indicates not to insert the code in to the cache From 9e2bab5cdf4768f0106315ad97b024a3fef10135 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 07:39:45 -0500 Subject: [PATCH 169/191] Fix unused warning --- tests/trx_generator/trx_provider.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index c9b5b8fc85..9e6a51f817 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -138,7 +138,6 @@ namespace eosio::testing { if (resp_json.is_object() && resp_json.get_object().contains("processed")) { const auto& processed = resp_json["processed"]; const auto& block_num = processed["block_num"].as_uint64(); - const auto& transaction_id = processed["id"].as_string(); const auto& block_time = processed["block_time"].as_string(); std::string status = "failed"; uint32_t net = 0; From fec0a7a8794620482ee77e258c9b7c37e9089e19 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 08:03:44 -0500 Subject: [PATCH 170/191] GH-1251 pass by reference --- .../include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp | 4 ++-- libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp index 1584c96406..29d432ebaf 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -39,7 +39,7 @@ struct config; class code_cache_base { public: - code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); ~code_cache_base(); const int& fd() const { return _cache_fd; } @@ -95,7 +95,7 @@ class code_cache_base { class code_cache_async : public code_cache_base { public: - code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const 
chainbase::database& db); ~code_cache_async(); //If code is in cache: returns pointer & bumps to front of MRU list diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index cab0728030..62a08756ce 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -39,7 +39,7 @@ static constexpr size_t descriptor_ptr_from_file_start = header_offset + offseto static_assert(sizeof(code_cache_header) <= header_size, "code_cache_header too big"); -code_cache_async::code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : +code_cache_async::code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : code_cache_base(data_dir, eosvmoc_config, db), _result_queue(eosvmoc_config.threads * 2), _threads(eosvmoc_config.threads) @@ -225,7 +225,7 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const return &*_cache_index.push_front(std::move(std::get(result.result))).first; } -code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : +code_cache_base::code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : _db(db), _cache_file_path(data_dir/"code_cache.bin") { From 57a1d719e8199efa09569ef3ede9637c1dc394ee Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 13:37:28 -0400 Subject: [PATCH 171/191] GH-1251 Use multiindex, process in order of last used, prioritize eosio.* --- .../chain/webassembly/eos-vm-oc/code_cache.hpp | 18 ++++++++++++++---- .../runtimes/eos-vm-oc/code_cache.cpp | 8 ++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp index 29d432ebaf..ec8c13b9ab 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -15,7 +15,6 @@ #include -#include namespace std { template<> struct hash { @@ -78,9 +77,20 @@ class code_cache_base { local::datagram_protocol::socket _compile_monitor_write_socket{_ctx}; local::datagram_protocol::socket _compile_monitor_read_socket{_ctx}; - //these are really only useful to the async code cache, but keep them here so - //free_code can be shared - deque _queued_compiles; + //these are really only useful to the async code cache, but keep them here so free_code can be shared + using queued_compilies_t = boost::multi_index_container< + code_tuple, + indexed_by< + sequenced<>, + hashed_unique, + composite_key< code_tuple, + member, + member + > + > + > + >; + queued_compilies_t _queued_compiles; std::unordered_map _outstanding_compiles_and_poison; size_t _free_bytes_eviction_threshold; diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 62a08756ce..88fe0f3929 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -157,7 +157,8 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const acc it->second = false; 
return nullptr; } - if(std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), ct) != _queued_compiles.end()) { + if(auto it = _queued_compiles.get().find(boost::make_tuple(std::ref(code_id), vm_version)); it != _queued_compiles.get().end()) { + _queued_compiles.relocate(_queued_compiles.begin(), _queued_compiles.project<0>(it)); failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } @@ -387,9 +388,8 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve } //if it's in the queued list, erase it - auto i = std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), code_tuple{code_id, vm_version}); - if (i != _queued_compiles.cend()) - _queued_compiles.erase(i); + if(auto i = _queued_compiles.get().find(boost::make_tuple(std::ref(code_id), vm_version)); i != _queued_compiles.get().end()) + _queued_compiles.get().erase(i); //however, if it's currently being compiled there is no way to cancel the compile, //so instead set a poison boolean that indicates not to insert the code in to the cache From d4fb84efd1ec2eec01cd773f0b28306e5469faf5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 14:27:25 -0400 Subject: [PATCH 172/191] Make members private --- libraries/libfc/include/fc/network/listener.hpp | 9 ++++++--- plugins/state_history_plugin/tests/session_test.cpp | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp index 6ebfc212a2..50aec37cf1 100644 --- a/libraries/libfc/include/fc/network/listener.hpp +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -61,8 +61,7 @@ struct listener_base { ///////////////////////////////////////////////////////////////////////////////////////////// template struct listener : listener_base, std::enable_shared_from_this> { - using endpoint_type = typename Protocol::endpoint; - + private: typename Protocol::acceptor acceptor_; boost::asio::deadline_timer accept_error_timer_; boost::posix_time::time_duration accept_timeout_; @@ -70,6 +69,8 @@ struct listener : listener_base, std::enable_shared_from_this, std::enable_shared_from_thisshared_from_this()](boost::system::error_code ec, auto&& peer_socket) { self->on_accept(ec, std::forward(peer_socket)); @@ -191,7 +194,7 @@ void create_listener(boost::asio::io_context& executor, logger& logger, boost::p has_unspecified_ipv6_only = ip_addr.is_unspecified() && ip_addr.is_v6(); if (has_unspecified_ipv6_only) { boost::asio::ip::v6_only option; - listener->acceptor_.get_option(option); + listener->acceptor().get_option(option); has_unspecified_ipv6_only &= option.value(); } diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index ca642f37e2..7d843ba8e1 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -167,7 +167,7 @@ struct test_server : mock_state_history_plugin { auto server = std::make_shared>( ship_ioc, logger, boost::posix_time::milliseconds(100), "", local_address, "", create_session); server->do_accept(); - local_address = server->acceptor_.local_endpoint(); + local_address = server->acceptor().local_endpoint(); } ~test_server() { From 9cdcef8dcb81e095ec780cb07ad8d20ca19857df Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 14:36:08 -0400 Subject: [PATCH 173/191] GH-1251 Remove MRU, just go with FIFO --- 
libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 88fe0f3929..40d94f6e35 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -158,7 +158,6 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { return nullptr; } if(auto it = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); it != _queued_compiles.get<by_hash>().end()) { - _queued_compiles.relocate(_queued_compiles.begin(), _queued_compiles.project<0>(it)); failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } From f51599cbfa630755496eb3d5fc687cca90ddbcbf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 15:19:06 -0400 Subject: [PATCH 174/191] GH-1251 Remove chain::config use from eos-vm-oc --- .../include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp | 2 +- libraries/chain/wasm_interface.cpp | 3 ++- .../chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp | 5 ++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp index ec8c13b9ab..d753a9dcaa 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -111,7 +111,7 @@ class code_cache_async : public code_cache_base { //If code is in cache: returns pointer & bumps to front of MRU list //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile //otherwise: return nullptr - const code_descriptor* const get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); + const code_descriptor* const get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); private: std::thread _monitor_reply_thread; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index ba7e268579..3cedae97eb 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -94,7 +94,8 @@ namespace eosio { namespace chain { const chain::eosvmoc::code_descriptor* cd = nullptr; chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; try { - cd = my->eosvmoc->cc.get_descriptor_for_code(context.get_receiver(), code_hash, vm_version, context.control.is_write_window(), failure); + const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; + cd = my->eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); } catch(...)
{ //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 40d94f6e35..46dd95ba25 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include @@ -107,7 +106,7 @@ std::tuple code_cache_async::consume_compile_thread_queue() { } -const code_descriptor* const code_cache_async::get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { +const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { //if there are any outstanding compiles, process the result queue now //When app is in write window, all tasks are running sequentially and read-only threads //are not running. Safe to update cache entries. @@ -163,7 +162,7 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const acc } if(_outstanding_compiles_and_poison.size() >= _threads) { - if (receiver.prefix() == chain::config::system_account_name) + if (high_priority) _queued_compiles.push_front(ct); else _queued_compiles.push_back(ct); From 66b2047686d9640d567f4cad95c4f6f23ba1101a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 26 Jun 2023 16:45:00 -0400 Subject: [PATCH 175/191] GH-1244 Verify bios relaunch and add a pause between relaunch of relay nodes for a better test --- tests/nodeos_retry_transaction_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/nodeos_retry_transaction_test.py b/tests/nodeos_retry_transaction_test.py index 7a5e061062..d1b82388da 100755 --- a/tests/nodeos_retry_transaction_test.py +++ b/tests/nodeos_retry_transaction_test.py @@ -88,7 +88,8 @@ Utils.Print("Bios node killed") # need bios to pass along blocks so api node can continue without its other peer, but drop trx which is the point of this test Utils.Print("Restart bios in drop transactions mode") - cluster.biosNode.relaunch("bios", addSwapFlags={"--p2p-accept-transactions": "false"}) + if not cluster.biosNode.relaunch(addSwapFlags={"--p2p-accept-transactions": "false"}): + Utils.errorExit("Failed to relaunch bios node") # *** create accounts to vote in desired producers *** @@ -270,6 +271,7 @@ def findTransInBlock(transId, transToBlock, node): if round % 3 == 0: relaunchTime = time.perf_counter() + time.sleep(1) # give time for transactions to be sent cluster.getNode(4).relaunch() cluster.getNode(6).relaunch() startRound = startRound - ( time.perf_counter() - relaunchTime ) From 9d1c673188365ab41734d2dd83ce9ceac2ab136a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 27 Jun 2023 01:45:14 -0400 Subject: [PATCH 176/191] GH-1244 Test cluster now uses defproducera as the producer after cluster launch --- tests/large-lib-test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/large-lib-test.py b/tests/large-lib-test.py index ffbeef8f9f..6c5138b4b1 100755 --- a/tests/large-lib-test.py +++ b/tests/large-lib-test.py @@ -69,6 +69,7 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag Print("Wait for producing {} blocks".format(numBlocksToProduceBeforeRelaunch)) 
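Stepping back to the scheduling change in PATCH 174 above: once the receiver check moves out of the cache, the cache only sees a high_priority flag, and the sequenced index then doubles as a two-level priority queue, since push_front lets eosio.* compiles jump ahead while everything else stays FIFO. A rough standalone sketch of that scheme follows, using a plain std::deque to keep it minimal (the job type and names are illustrative, not the eosio types):

    #include <deque>
    #include <iostream>
    #include <string>

    struct compile_job {
       std::string code_id;
       bool        high_priority;  // e.g. receiver is an eosio.* account
    };

    int main() {
       std::deque<compile_job> queued;

       auto enqueue = [&](compile_job j) {
          if (j.high_priority)
             queued.push_front(std::move(j));  // jumps the queue
          else
             queued.push_back(std::move(j));   // waits in FIFO order
       };

       enqueue({"user.one", false});
       enqueue({"user.two", false});
       enqueue({"eosio.token", true});

       while (!queued.empty()) {
          std::cout << "compiling " << queued.front().code_id << '\n';
          queued.pop_front();   // eosio.token, user.one, user.two
       }
    }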
producingNode.waitForBlock(numBlocksToProduceBeforeRelaunch, blockType=BlockType.lib) + producingNode.waitForProducer("defproducera") Print("Kill all node instances.") for clusterNode in cluster.nodes: @@ -83,8 +84,9 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag Utils.rmNodeDataDir(2) Print ("Relaunch all cluster nodes instances.") - # -e -p eosio for resuming production, skipGenesis=False for launch the same chain as before - relaunchNode(producingNode, chainArg="-e -p eosio --sync-fetch-span 5 ", skipGenesis=False) + # -e for resuming production, defproducera only producer at this point + # skipGenesis=False for launch the same chain as before + relaunchNode(producingNode, chainArg="-e --sync-fetch-span 5 ", skipGenesis=False) relaunchNode(speculativeNode1, chainArg="--sync-fetch-span 5 ") relaunchNode(speculativeNode2, chainArg="--sync-fetch-span 5 ", skipGenesis=False) From 13196783d4ed8ad61fdb2d83c79ddfaa7a5dd5a6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 27 Jun 2023 05:03:04 -0400 Subject: [PATCH 177/191] GH-1328 catch_up notice_message has 0 for known_blocks.pending --- plugins/net_plugin/net_plugin.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0a29e161ae..963f1a8bb8 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3089,14 +3089,14 @@ namespace eosio { } switch (msg.known_trx.mode) { case none: - break; - case last_irr_catch_up: - case catch_up : { + case last_irr_catch_up: { std::unique_lock g_conn( conn_mtx ); - last_handshake_recv.head_num = msg.known_blocks.pending; + last_handshake_recv.head_num = std::max(msg.known_blocks.pending, last_handshake_recv.head_num); g_conn.unlock(); break; } + case catch_up: + break; case normal: { my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); } From e4aeee11cc3075e95f2a3c0e1e8d1a8b52612958 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 27 Jun 2023 13:12:47 -0400 Subject: [PATCH 178/191] GH-1176 Add --keep-logs option and do not clean logs on test failure --- tests/plugin_http_api_test.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/tests/plugin_http_api_test.py b/tests/plugin_http_api_test.py index a559b912c7..33755e482f 100755 --- a/tests/plugin_http_api_test.py +++ b/tests/plugin_http_api_test.py @@ -8,6 +8,9 @@ import socket import re import shlex +import argparse +import sys +import signal from pathlib import Path from TestHarness import Account, Node, TestHelper, Utils, WalletMgr, ReturnType @@ -68,7 +71,11 @@ def createConfigDir(self): shutil.rmtree(self.config_dir) self.config_dir.mkdir() - # kill nodeos and keosd and clean up dirs + # kill nodeos. 
keosd shuts down automatically + def killNodes(self): + self.nodeos.kill(signal.SIGTERM) + + # clean up dirs def cleanEnv(self) : if self.data_dir.exists(): shutil.rmtree(Utils.DataPath) @@ -1536,11 +1543,24 @@ def setUpClass(self): @classmethod def tearDownClass(self): - self.cleanEnv(self) + global keepLogs + self.killNodes(self) + if unittest.TestResult().wasSuccessful() and not keepLogs: + self.cleanEnv(self) if __name__ == "__main__": test_category = True if os.environ.get("PLUGIN_HTTP_TEST_CATEGORY") == "ON" else False category_config = HttpCategoryConfig(test_category) + parser = argparse.ArgumentParser() + parser.add_argument('--keep-logs', action='store_true') + parser.add_argument('unittest_args', nargs=argparse.REMAINDER) + + args = parser.parse_args() + global keepLogs + keepLogs = args.keep_logs; + + # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) + sys.argv[1:] = args.unittest_args unittest.main() From 1abee9ae4904884adf31df77bf7008574dc1f650 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 02:43:17 -0400 Subject: [PATCH 179/191] GH-1328 fix merge issue --- plugins/net_plugin/net_plugin.cpp | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d6774e8b47..ac9f4cccdc 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3359,17 +3359,9 @@ namespace eosio { } switch (msg.known_trx.mode) { case none: -<<<<<<< HEAD - break; - case last_irr_catch_up: - case catch_up : { - fc::unique_lock g_conn( conn_mtx ); - last_handshake_recv.head_num = msg.known_blocks.pending; -======= case last_irr_catch_up: { - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); last_handshake_recv.head_num = std::max(msg.known_blocks.pending, last_handshake_recv.head_num); ->>>>>>> origin/release/4.0 g_conn.unlock(); break; } From d373b1db520952e1187591e54639d3927ed08886 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 03:10:43 -0400 Subject: [PATCH 180/191] GH-1251 No need to link to boost unit_test_framework, using header-only --- plugins/state_history_plugin/tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/tests/CMakeLists.txt b/plugins/state_history_plugin/tests/CMakeLists.txt index 98ce935e2d..c01c62df61 100644 --- a/plugins/state_history_plugin/tests/CMakeLists.txt +++ b/plugins/state_history_plugin/tests/CMakeLists.txt @@ -1,5 +1,5 @@ add_executable( test_state_history main.cpp session_test.cpp plugin_config_test.cpp) -target_link_libraries(test_state_history state_history_plugin eosio_testing eosio_chain_wrap Boost::unit_test_framework) +target_link_libraries(test_state_history state_history_plugin eosio_testing eosio_chain_wrap) target_include_directories( test_state_history PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" ) add_test(test_state_history test_state_history) \ No newline at end of file From da236cbd46f0cedbe3240b5430ae762d7dae92fb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 07:55:10 -0400 Subject: [PATCH 181/191] GH-1251 Update auto description --- plugins/chain_plugin/chain_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 7a27f84678..bc5416be06 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -362,7 +362,7 @@ void 
chain_plugin::set_program_options(options_description& cli, options_description& cfg) { }), "Number of threads to use for EOS VM OC tier-up") ("eos-vm-oc-enable", bpo::value<chain::wasm_interface::vm_oc_enable>()->default_value(chain::wasm_interface::vm_oc_enable::oc_auto), "Enable EOS VM OC tier-up runtime ('auto', 'all', 'none').\n" - "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts, read-only trxs, and applying blocks.\n" + "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts and read-only trxs, and for applying blocks except on producers.\n" "'all' - EOS VM OC tier-up is enabled for all contract execution.\n" "'none' - EOS VM OC tier-up is completely disabled.\n") #endif From 975ad1340cd3fc04a8b46edbf451df810f813027 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 08:03:47 -0400 Subject: [PATCH 182/191] GH-1251 Support case-insensitive options --- plugins/chain_plugin/chain_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index bc5416be06..5ae59c844c 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -126,7 +126,8 @@ void validate(boost::any& v, // Extract the first string from 'values'. If there is more than // one string, it's an error, and exception will be thrown. - std::string const& s = validators::get_single_string(values); + std::string s = validators::get_single_string(values); + boost::algorithm::to_lower(s); if (s == "auto") { v = boost::any(wasm_interface::vm_oc_enable::oc_auto); From 774fd21156a27e9983cf67ff2da5de0d84359873 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 08:11:43 -0400 Subject: [PATCH 183/191] GH-1251 Move setting of eosvmoc_tierup outside of loop --- libraries/testing/include/eosio/testing/tester.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index dfd00c9789..fef992a2f5 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -404,9 +404,10 @@ namespace eosio { namespace testing { cfg.contracts_console = true; cfg.eosvmoc_config.cache_size = 1024*1024*8; + // don't use auto tier up for tests, since the point is to test diff vms + cfg.eosvmoc_tierup = chain::wasm_interface::vm_oc_enable::oc_none; + for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - // don't use auto tier up for tests, since the point is to test diff vms - cfg.eosvmoc_tierup = chain::wasm_interface::vm_oc_enable::oc_none; if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm-jit")) From b260447db5ffe479898eb3dac307c60504e8250b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 11:30:44 -0400 Subject: [PATCH 184/191] GH-1251 Remove improper check for vm_type::eos_vm_oc --- .../chain/include/eosio/chain/wasm_interface_private.hpp | 4 ---- libraries/chain/wasm_interface.cpp | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 01ccc193e4..1181976285 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++
b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -162,10 +162,6 @@ namespace eosio { namespace chain { return it->module; } - bool should_always_oc_tierup()const { - return wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc || eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all; - } - bool is_shutting_down = false; std::unique_ptr runtime_interface; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index c7aeb46f8e..6173544363 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -90,7 +90,7 @@ namespace eosio { namespace chain { if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) return; #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(my->eosvmoc && (my->should_always_oc_tierup() || context.should_use_eos_vm_oc())) { + if(my->eosvmoc && (my->eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { const chain::eosvmoc::code_descriptor* cd = nullptr; chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; try { From a8f20cebf41e5b0706616d9defd13e5de95a3027 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 12:06:15 -0400 Subject: [PATCH 185/191] GH-1251 Restore #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED removed in refactor --- libraries/chain/controller.cpp | 5 ++++- .../chain/include/eosio/chain/controller.hpp | 2 ++ .../eosio/chain/wasm_interface_collection.hpp | 15 +++++++++++---- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2a022612ef..3bdbd3e807 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2682,9 +2682,11 @@ struct controller_impl { return app_window == app_window_type::write; } +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const { return wasm_if_collect.is_eos_vm_oc_enabled(); } +#endif void init_thread_local_data() { wasm_if_collect.init_thread_local_data(db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); @@ -3572,10 +3574,11 @@ vm::wasm_allocator& controller::get_wasm_allocator() { return my->wasm_alloc; } #endif - +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool controller::is_eos_vm_oc_enabled() const { return my->is_eos_vm_oc_enabled(); } +#endif std::optional controller::convert_exception_to_error_code( const fc::exception& e ) { const chain_exception* e_ptr = dynamic_cast( &e ); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index c1d188ba1b..1ffcc4e8b4 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -322,7 +322,9 @@ namespace eosio { namespace chain { #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) vm::wasm_allocator& get_wasm_allocator(); #endif +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const; +#endif static std::optional convert_exception_to_error_code( const fc::exception& e ); diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp index da49188401..8245406290 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -18,13 +18,15 @@ namespace eosio::chain { {} wasm_interface& 
get_wasm_interface() { - if (is_on_main_thread() || is_eos_vm_oc_enabled()) { + if (is_on_main_thread() +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + || is_eos_vm_oc_enabled() +#endif + ) return wasmif; - } return *threaded_wasmifs[std::this_thread::get_id()]; } - // update current lib of all wasm interfaces void current_lib(const uint32_t lib) { // producer_plugin has already asserted irreversible_block signal is called in write window @@ -38,19 +40,24 @@ namespace eosio::chain { void init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, bool profile) { EOS_ASSERT(!is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (is_eos_vm_oc_enabled()) { // EOSVMOC needs further initialization of its thread local data wasmif.init_thread_local_data(); - } else { + } else +#endif + { std::lock_guard g(threaded_wasmifs_mtx); // Non-EOSVMOC needs a wasmif per thread threaded_wasmifs[std::this_thread::get_id()] = std::make_unique(wasm_runtime, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile); } } +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const { return ((eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) || wasm_runtime == wasm_interface::vm_type::eos_vm_oc); } +#endif void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { // The caller of this function apply_eosio_setcode has already asserted that From 1d0aef60c0b7e7c8772aaf3b0f66023cb342c2e3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 14:23:54 -0400 Subject: [PATCH 186/191] GH-1302 Add more info to help --- docs/01_nodeos/03_plugins/chain_plugin/index.md | 12 ++++++++---- plugins/chain_plugin/chain_plugin.cpp | 6 ++++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index c6fa5dd9dc..458ff88f6f 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -192,13 +192,17 @@ Config Options for eosio::chain_plugin: feature. Setting above 0 enables this feature. --transaction-retry-interval-sec arg (=20) - How often, in seconds, to resend an - incoming transaction to network if not + How often, in seconds, to resend an + incoming transaction to network if not seen in a block. + Needs to be at least twice as large as + p2p-dedup-cache-expire-time-sec. --transaction-retry-max-expiration-sec arg (=120) - Maximum allowed transaction expiration - for retry transactions, will retry + Maximum allowed transaction expiration + for retry transactions, will retry transactions up to this value. + Should be larger than + transaction-retry-interval-sec. --transaction-finality-status-max-storage-size-gb arg Maximum size (in GiB) allowed to be allocated for the Transaction Finality diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 7e6d273396..6a08f37c79 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -335,9 +335,11 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("transaction-retry-max-storage-size-gb", bpo::value(), "Maximum size (in GiB) allowed to be allocated for the Transaction Retry feature. 
Setting above 0 enables this feature.") ("transaction-retry-interval-sec", bpo::value<uint32_t>()->default_value(20), - "How often, in seconds, to resend an incoming transaction to network if not seen in a block.") + "How often, in seconds, to resend an incoming transaction to network if not seen in a block.\n" + "Needs to be at least twice as large as p2p-dedup-cache-expire-time-sec.") ("transaction-retry-max-expiration-sec", bpo::value<uint32_t>()->default_value(120), - "Maximum allowed transaction expiration for retry transactions, will retry transactions up to this value.") + "Maximum allowed transaction expiration for retry transactions, will retry transactions up to this value.\n" + "Should be larger than transaction-retry-interval-sec.") ("transaction-finality-status-max-storage-size-gb", bpo::value(), "Maximum size (in GiB) allowed to be allocated for the Transaction Finality Status feature. Setting above 0 enables this feature.") ("transaction-finality-status-success-duration-sec", bpo::value()->default_value(config::default_max_transaction_finality_status_success_duration_sec), From 3971ff6159b1a69c9c94101da4494e3ef660d2a2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 15:31:55 -0400 Subject: [PATCH 187/191] GH-1289 Return std::optional for head_id to make it clearer when head not available --- libraries/chain/block_log.cpp | 12 ++++++------ libraries/chain/controller.cpp | 6 +++--- libraries/chain/include/eosio/chain/block_log.hpp | 2 +- tests/block_log.cpp | 3 ++- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index e16582b5d0..190f719872 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -463,7 +463,7 @@ namespace eosio { namespace chain { std::mutex mtx; signed_block_ptr head; - block_id_type head_id; + std::optional<block_id_type> head_id; virtual ~block_log_impl() = default; @@ -591,7 +591,7 @@ namespace eosio { namespace chain { } uint64_t get_block_pos(uint32_t block_num) final { - if (!(head && block_num <= block_header::num_from_id(head_id) && + if (!(head_id && block_num <= block_header::num_from_id(*head_id) && block_num >= working_block_file_first_block_num())) return block_log::npos; index_file.seek(sizeof(uint64_t) * (block_num - index_first_block_num())); @@ -804,7 +804,7 @@ namespace eosio { namespace chain { size_t copy_from_pos = get_block_pos(first_block_num); block_file.seek_end(-sizeof(uint32_t)); size_t copy_sz = block_file.tellp() - copy_from_pos; - const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head_id) - first_block_num + 1; + const uint32_t num_blocks_in_log = chain::block_header::num_from_id(*head_id) - first_block_num + 1; const size_t offset_bytes = copy_from_pos - copy_to_pos; const size_t offset_blocks = first_block_num - index_first_block_num; @@ -1121,7 +1121,7 @@ namespace eosio { namespace chain { if ((pos & prune_config.prune_threshold) != (end & prune_config.prune_threshold)) num_blocks_in_log = prune(fc::log_level::debug); else - num_blocks_in_log = chain::block_header::num_from_id(head_id) - first_block_number + 1; + num_blocks_in_log = chain::block_header::num_from_id(*head_id) - first_block_number + 1; fc::raw::pack(block_file, num_blocks_in_log); } @@ -1142,7 +1142,7 @@ namespace eosio { namespace chain { uint32_t prune(const fc::log_level& loglevel) { if (!head) return 0; - const uint32_t head_num = chain::block_header::num_from_id(head_id); + const uint32_t head_num = chain::block_header::num_from_id(*head_id); if
(head_num - first_block_number < prune_config.prune_blocks) return head_num - first_block_number + 1; @@ -1255,7 +1255,7 @@ namespace eosio { namespace chain { return my->head; } - block_id_type block_log::head_id() const { + std::optional<block_id_type> block_log::head_id() const { std::lock_guard g(my->mtx); return my->head_id; } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c264bba2b0..3af8070e78 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -416,10 +416,10 @@ struct controller_impl { void log_irreversible() { EOS_ASSERT( fork_db.root(), fork_database_exception, "fork database not properly initialized" ); - const block_id_type log_head_id = blog.head_id(); - const bool valid_log_head = !log_head_id.empty(); + const std::optional<block_id_type> log_head_id = blog.head_id(); + const bool valid_log_head = !!log_head_id; - const auto lib_num = valid_log_head ? block_header::num_from_id(log_head_id) : (blog.first_block_num() - 1); + const auto lib_num = valid_log_head ? block_header::num_from_id(*log_head_id) : (blog.first_block_num() - 1); auto root_id = fork_db.root()->id; diff --git a/libraries/chain/include/eosio/chain/block_log.hpp b/libraries/chain/include/eosio/chain/block_log.hpp index 611115cf39..10a2a598ad 100644 --- a/libraries/chain/include/eosio/chain/block_log.hpp +++ b/libraries/chain/include/eosio/chain/block_log.hpp @@ -67,7 +67,7 @@ namespace eosio { namespace chain { signed_block_ptr read_head()const; //use blocklog signed_block_ptr head()const; - block_id_type head_id()const; + std::optional<block_id_type> head_id()const; uint32_t first_block_num() const; diff --git a/tests/block_log.cpp b/tests/block_log.cpp index 9d55ada915..837bf04f37 100644 --- a/tests/block_log.cpp +++ b/tests/block_log.cpp @@ -67,7 +67,8 @@ struct block_log_fixture { void check_range_present(uint32_t first, uint32_t last) { BOOST_REQUIRE_EQUAL(log->first_block_num(), first); - BOOST_REQUIRE_EQUAL(eosio::chain::block_header::num_from_id(log->head_id()), last); + BOOST_REQUIRE(log->head_id()); + BOOST_REQUIRE_EQUAL(eosio::chain::block_header::num_from_id(*log->head_id()), last); if(enable_read) { for(auto i = first; i <= last; i++) { std::vector buff; From 9012e7f8f97a89a7ddaaa7ab64bd07ddbfe9ca86 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 28 Jun 2023 16:56:27 -0400 Subject: [PATCH 188/191] GH-1289 Combine head and head_id into one optional --- libraries/chain/block_log.cpp | 40 ++++++++++++++--------------------- 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 190f719872..f76e85298c 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -462,8 +462,7 @@ namespace eosio { namespace chain { inline static uint32_t default_initial_version = block_log::max_supported_version; std::mutex mtx; - signed_block_ptr head; - std::optional<block_id_type> head_id; + std::optional<std::pair<signed_block_ptr, block_id_type>> head; virtual ~block_log_impl() = default; @@ -482,16 +481,10 @@ namespace eosio { namespace chain { virtual signed_block_ptr read_head() = 0; void update_head(const signed_block_ptr& b, const std::optional<block_id_type>& id = {}) { - head = b; - if (id) { - head_id = *id; - } else { - if (head) { - head_id = b->calculate_id(); - } else { - head_id = {}; - } - } + if (b) + head = { b, id ? *id : b->calculate_id() }; + else + head = {}; } }; // block_log_impl @@ -504,7 +497,7 @@ namespace eosio { namespace chain { std::filesystem::remove(log_dir / "blocks.index"); } - uint32_t first_block_num() final { return head ? 
head->block_num() : first_block_number; } + uint32_t first_block_num() final { return head ? head->first->block_num() : first_block_number; } void append(const signed_block_ptr& b, const block_id_type& id, const std::vector& packed_block) final { update_head(b, id); } @@ -591,7 +584,7 @@ namespace eosio { namespace chain { } uint64_t get_block_pos(uint32_t block_num) final { - if (!(head_id && block_num <= block_header::num_from_id(*head_id) && + if (!(head && block_num <= block_header::num_from_id(head->second) && block_num >= working_block_file_first_block_num())) return block_log::npos; index_file.seek(sizeof(uint64_t) * (block_num - index_first_block_num())); @@ -707,7 +700,7 @@ namespace eosio { namespace chain { uint32_t num_blocks; this->block_file.seek_end(-sizeof(uint32_t)); fc::raw::unpack(this->block_file, num_blocks); - return this->head->block_num() - num_blocks + 1; + return this->head->first->block_num() - num_blocks + 1; } void reset(uint32_t first_bnum, std::variant&& chain_context, uint32_t version) { @@ -740,7 +733,6 @@ namespace eosio { namespace chain { this->reset(first_block_num, chain_id, block_log::max_supported_version); this->head.reset(); - head_id = {}; } void flush() final { @@ -804,7 +796,7 @@ namespace eosio { namespace chain { size_t copy_from_pos = get_block_pos(first_block_num); block_file.seek_end(-sizeof(uint32_t)); size_t copy_sz = block_file.tellp() - copy_from_pos; - const uint32_t num_blocks_in_log = chain::block_header::num_from_id(*head_id) - first_block_num + 1; + const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head->second) - first_block_num + 1; const size_t offset_bytes = copy_from_pos - copy_to_pos; const size_t offset_blocks = first_block_num - index_first_block_num; @@ -992,7 +984,7 @@ namespace eosio { namespace chain { block_file.close(); index_file.close(); - catalog.add(preamble.first_block_num, this->head->block_num(), block_file.get_file_path().parent_path(), + catalog.add(preamble.first_block_num, this->head->first->block_num(), block_file.get_file_path().parent_path(), "blocks"); using std::swap; @@ -1007,7 +999,7 @@ namespace eosio { namespace chain { preamble.ver = block_log::max_supported_version; preamble.chain_context = preamble.chain_id(); - preamble.first_block_num = this->head->block_num() + 1; + preamble.first_block_num = this->head->first->block_num() + 1; preamble.write_to(block_file); } @@ -1018,7 +1010,7 @@ namespace eosio { namespace chain { } void post_append(uint64_t pos) final { - if (head->block_num() % stride == 0) { + if (head->first->block_num() % stride == 0) { split_log(); } } @@ -1121,7 +1113,7 @@ namespace eosio { namespace chain { if ((pos & prune_config.prune_threshold) != (end & prune_config.prune_threshold)) num_blocks_in_log = prune(fc::log_level::debug); else - num_blocks_in_log = chain::block_header::num_from_id(*head_id) - first_block_number + 1; + num_blocks_in_log = chain::block_header::num_from_id(head->second) - first_block_number + 1; fc::raw::pack(block_file, num_blocks_in_log); } @@ -1142,7 +1134,7 @@ namespace eosio { namespace chain { uint32_t prune(const fc::log_level& loglevel) { if (!head) return 0; - const uint32_t head_num = chain::block_header::num_from_id(*head_id); + const uint32_t head_num = chain::block_header::num_from_id(head->second); if (head_num - first_block_number < prune_config.prune_blocks) return head_num - first_block_number + 1; @@ -1252,12 +1244,12 @@ namespace eosio { namespace chain { signed_block_ptr block_log::head() const { std::lock_guard 
g(my->mtx); - return my->head; + return my->head ? my->head->first : signed_block_ptr{}; } std::optional<block_id_type> block_log::head_id() const { std::lock_guard g(my->mtx); - return my->head_id; + return my->head ? my->head->second : std::optional<block_id_type>{}; } uint32_t block_log::first_block_num() const { From 46037d584079435b6b3386f75f4cd765972223e9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 29 Jun 2023 01:35:23 -0400 Subject: [PATCH 189/191] GH-1289 Use struct instead of std::pair --- libraries/chain/block_log.cpp | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index f76e85298c..082aeb02f6 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -462,7 +462,11 @@ namespace eosio { namespace chain { inline static uint32_t default_initial_version = block_log::max_supported_version; std::mutex mtx; - std::optional<std::pair<signed_block_ptr, block_id_type>> head; + struct signed_block_with_id { + signed_block_ptr ptr; + block_id_type id; + }; + std::optional<signed_block_with_id> head; virtual ~block_log_impl() = default; @@ -497,7 +501,7 @@ namespace eosio { namespace chain { std::filesystem::remove(log_dir / "blocks.index"); } - uint32_t first_block_num() final { return head ? head->first->block_num() : first_block_number; } + uint32_t first_block_num() final { return head ? head->ptr->block_num() : first_block_number; } void append(const signed_block_ptr& b, const block_id_type& id, const std::vector<char>& packed_block) final { update_head(b, id); } @@ -584,7 +588,7 @@ namespace eosio { namespace chain { } uint64_t get_block_pos(uint32_t block_num) final { - if (!(head && block_num <= block_header::num_from_id(head->second) && + if (!(head && block_num <= block_header::num_from_id(head->id) && block_num >= working_block_file_first_block_num())) return block_log::npos; index_file.seek(sizeof(uint64_t) * (block_num - index_first_block_num())); @@ -700,7 +704,7 @@ namespace eosio { namespace chain { uint32_t num_blocks; this->block_file.seek_end(-sizeof(uint32_t)); fc::raw::unpack(this->block_file, num_blocks); - return this->head->first->block_num() - num_blocks + 1; + return this->head->ptr->block_num() - num_blocks + 1; } void reset(uint32_t first_bnum, std::variant<genesis_state, chain_id_type>&& chain_context, uint32_t version) { @@ -796,7 +800,7 @@ namespace eosio { namespace chain { size_t copy_from_pos = get_block_pos(first_block_num); block_file.seek_end(-sizeof(uint32_t)); size_t copy_sz = block_file.tellp() - copy_from_pos; - const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head->second) - first_block_num + 1; + const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head->id) - first_block_num + 1; const size_t offset_bytes = copy_from_pos - copy_to_pos; const size_t offset_blocks = first_block_num - index_first_block_num; @@ -984,7 +988,7 @@ namespace eosio { namespace chain { block_file.close(); index_file.close(); - catalog.add(preamble.first_block_num, this->head->first->block_num(), block_file.get_file_path().parent_path(), + catalog.add(preamble.first_block_num, this->head->ptr->block_num(), block_file.get_file_path().parent_path(), "blocks"); using std::swap; @@ -999,7 +1003,7 @@ namespace eosio { namespace chain { preamble.ver = block_log::max_supported_version; preamble.chain_context = preamble.chain_id(); - preamble.first_block_num = this->head->first->block_num() + 1; + preamble.first_block_num = this->head->ptr->block_num() + 1; preamble.write_to(block_file); } @@ -1010,7 +1014,7 @@ namespace eosio {
namespace chain { } void post_append(uint64_t pos) final { - if (head->first->block_num() % stride == 0) { + if (head->ptr->block_num() % stride == 0) { split_log(); } } @@ -1113,7 +1117,7 @@ namespace eosio { namespace chain { if ((pos & prune_config.prune_threshold) != (end & prune_config.prune_threshold)) num_blocks_in_log = prune(fc::log_level::debug); else - num_blocks_in_log = chain::block_header::num_from_id(head->second) - first_block_number + 1; + num_blocks_in_log = chain::block_header::num_from_id(head->id) - first_block_number + 1; fc::raw::pack(block_file, num_blocks_in_log); } @@ -1134,7 +1138,7 @@ namespace eosio { namespace chain { uint32_t prune(const fc::log_level& loglevel) { if (!head) return 0; - const uint32_t head_num = chain::block_header::num_from_id(head->second); + const uint32_t head_num = chain::block_header::num_from_id(head->id); if (head_num - first_block_number < prune_config.prune_blocks) return head_num - first_block_number + 1; @@ -1244,12 +1248,12 @@ namespace eosio { namespace chain { signed_block_ptr block_log::head() const { std::lock_guard g(my->mtx); - return my->head ? my->head->first : signed_block_ptr{}; + return my->head ? my->head->ptr : signed_block_ptr{}; } std::optional<block_id_type> block_log::head_id() const { std::lock_guard g(my->mtx); - return my->head ? my->head->second : std::optional<block_id_type>{}; + return my->head ? my->head->id : std::optional<block_id_type>{}; } uint32_t block_log::first_block_num() const { From 7c43e0064a22f2866f1975bf5f42c1e46799076a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 29 Jun 2023 02:06:34 -0400 Subject: [PATCH 190/191] GH-1251 Add comment that log statement used in test --- libraries/chain/wasm_interface.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 6173544363..33bb863dbc 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -105,7 +105,7 @@ namespace eosio { namespace chain { once_is_enough = true; } if(cd) { - if (!context.is_applying_block()) + if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); my->eosvmoc->exec->execute(*cd, my->eosvmoc->mem, context); return; From cd744f5052681bf6f5ce02a406b53836b5e095b3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 29 Jun 2023 03:22:48 -0400 Subject: [PATCH 191/191] GH-1251 Fix tests to not destroy temp dir before controller which uses it --- .../chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp | 6 ------ plugins/chain_plugin/test/plugin_config_test.cpp | 2 +- plugins/producer_plugin/test/test_read_only_trx.cpp | 4 ++-- plugins/producer_plugin/test/test_trx_full.cpp | 2 +- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index a43f8ac932..60cac3dc19 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -306,12 +306,6 @@ code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eos } void code_cache_base::set_on_disk_region_dirty(bool dirty) { - // tests can remove directory before destructor is called - if (!std::filesystem::exists(_cache_file_path)) { - wlog("Unable to sync code cache, cache file does not exist"); - return; - } - bip::file_mapping
dirty_mapping(_cache_file_path.generic_string().c_str(), bip::read_write); bip::mapped_region dirty_region(dirty_mapping, bip::read_write); diff --git a/plugins/chain_plugin/test/plugin_config_test.cpp b/plugins/chain_plugin/test/plugin_config_test.cpp index ce308cc2e4..e43b0bfbd3 100644 --- a/plugins/chain_plugin/test/plugin_config_test.cpp +++ b/plugins/chain_plugin/test/plugin_config_test.cpp @@ -5,8 +5,8 @@ #include BOOST_AUTO_TEST_CASE(chain_plugin_default_tests) { - appbase::scoped_app app; fc::temp_directory tmp; + appbase::scoped_app app; auto tmp_path = tmp.path().string(); std::array args = { diff --git a/plugins/producer_plugin/test/test_read_only_trx.cpp b/plugins/producer_plugin/test/test_read_only_trx.cpp index e89dfc2d6a..19c25d0d60 100644 --- a/plugins/producer_plugin/test/test_read_only_trx.cpp +++ b/plugins/producer_plugin/test/test_read_only_trx.cpp @@ -52,8 +52,8 @@ BOOST_AUTO_TEST_SUITE(read_only_trxs) enum class app_init_status { failed, succeeded }; void test_configs_common(std::vector& specific_args, app_init_status expected_status) { - appbase::scoped_app app; fc::temp_directory temp; + appbase::scoped_app app; auto temp_dir_str = temp.path().string(); fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); @@ -92,8 +92,8 @@ BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { void test_trxs_common(std::vector& specific_args) { using namespace std::chrono_literals; - appbase::scoped_app app; fc::temp_directory temp; + appbase::scoped_app app; auto temp_dir_str = temp.path().string(); producer_plugin::set_test_mode(true); diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index b8c313bd29..34ddcc6ea9 100644 --- a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -99,9 +99,9 @@ BOOST_AUTO_TEST_SUITE(ordered_trxs_full) // Test verifies that transactions are processed, reported to caller, and not lost // even when blocks are aborted and some transactions fail. BOOST_AUTO_TEST_CASE(producer) { + fc::temp_directory temp; appbase::scoped_app app; - fc::temp_directory temp; auto temp_dir_str = temp.path().string(); {
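The test fixes in PATCH 191 rely on nothing more than C++ destruction order: locals are destroyed in reverse order of declaration, so declaring fc::temp_directory before appbase::scoped_app guarantees the directory, and the code cache file inside it, outlives the application and its controller. A minimal sketch of the rule with stand-in types (the names mirror the test fixtures; the bodies are illustrative, not the real classes):

    #include <iostream>

    struct temp_directory {            // stand-in for fc::temp_directory
       temp_directory()  { std::cout << "create temp dir\n"; }
       ~temp_directory() { std::cout << "remove temp dir\n"; }
    };

    struct scoped_app {                // stand-in for appbase::scoped_app
       scoped_app()  { std::cout << "start app, controller opens files in temp dir\n"; }
       ~scoped_app() { std::cout << "stop app, controller may still flush to temp dir\n"; }
    };

    int main() {
       temp_directory temp;  // declared first, so destroyed last
       scoped_app     app;   // destroyed first, while the temp dir still exists
    }                        // on exit: "stop app ..." then "remove temp dir"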