Merge pull request AntelopeIO#1342 from AntelopeIO/AntelopeIOGH-1251-auto-oc-opt

OC tierup compile in order
heifner authored Jun 29, 2023
2 parents 91d192b + f51599c commit aed53c0
Showing 3 changed files with 29 additions and 14 deletions.
24 changes: 17 additions & 7 deletions libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.hpp
@@ -15,7 +15,6 @@
 
 
 #include <thread>
-#include <shared_mutex>
 
 namespace std {
 template<> struct hash<eosio::chain::eosvmoc::code_tuple> {
@@ -39,7 +38,7 @@ struct config;
 
 class code_cache_base {
 public:
-code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db);
+code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db);
 ~code_cache_base();
 
 const int& fd() const { return _cache_fd; }
@@ -78,9 +77,20 @@ class code_cache_base {
 local::datagram_protocol::socket _compile_monitor_write_socket{_ctx};
 local::datagram_protocol::socket _compile_monitor_read_socket{_ctx};
 
-//these are really only useful to the async code cache, but keep them here so
-//free_code can be shared
-std::unordered_set<code_tuple> _queued_compiles;
+//these are really only useful to the async code cache, but keep them here so free_code can be shared
+using queued_compilies_t = boost::multi_index_container<
+   code_tuple,
+   indexed_by<
+      sequenced<>,
+      hashed_unique<tag<by_hash>,
+         composite_key< code_tuple,
+            member<code_tuple, digest_type, &code_tuple::code_id>,
+            member<code_tuple, uint8_t, &code_tuple::vm_version>
+         >
+      >
+   >
+>;
+queued_compilies_t _queued_compiles;
 std::unordered_map<code_tuple, bool> _outstanding_compiles_and_poison;
 
 size_t _free_bytes_eviction_threshold;
@@ -95,13 +105,13 @@
 
 class code_cache_async : public code_cache_base {
 public:
-code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db);
+code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db);
 ~code_cache_async();
 
 //If code is in cache: returns pointer & bumps to front of MRU list
 //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile
 //otherwise: return nullptr
-const code_descriptor* const get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure);
+const code_descriptor* const get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure);
 
 private:
 std::thread _monitor_reply_thread;
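The header change above replaces the unordered_set of queued compiles with a Boost.MultiIndex container that exposes two views over the same elements: a sequenced index that preserves insertion order (so compiles can be dispatched in the order they were requested, with high-priority entries pushed to the front), and a hashed index keyed on (code_id, vm_version) for the constant-time lookups that get_descriptor_for_code and free_code need. The standalone sketch below illustrates that layout only; it uses a std::string stand-in for digest_type and hypothetical sample values, so it mirrors the shape of the change rather than the project's actual header.

```cpp
// Illustrative sketch only: code_tuple here is a simplified stand-in (std::string
// instead of digest_type), and the sample account names are hypothetical.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/tuple/tuple.hpp>
#include <cstdint>
#include <iostream>
#include <string>

namespace bmi = boost::multi_index;

struct code_tuple {
   std::string code_id;   // the real type stores a digest_type here
   uint8_t     vm_version;
};

struct by_hash {};        // index tag, as in the diff

using queued_compiles_t = bmi::multi_index_container<
   code_tuple,
   bmi::indexed_by<
      bmi::sequenced<>,                      // view 1: insertion (compile) order
      bmi::hashed_unique<bmi::tag<by_hash>,  // view 2: O(1) lookup by (id, version)
         bmi::composite_key<code_tuple,
            bmi::member<code_tuple, std::string, &code_tuple::code_id>,
            bmi::member<code_tuple, uint8_t,     &code_tuple::vm_version>
         >
      >
   >
>;

int main() {
   queued_compiles_t queued;
   queued.push_back({"contract.a", 0});     // normal request goes to the back
   queued.push_front({"eosio.system", 0});  // high-priority request jumps the queue

   // constant-time membership test / erase through the hashed composite-key index
   auto& idx = queued.get<by_hash>();
   if(auto it = idx.find(boost::make_tuple(std::string("contract.a"), uint8_t{0})); it != idx.end())
      idx.erase(it);

   // the sequenced index still reflects dispatch order
   for(const auto& ct : queued)
      std::cout << ct.code_id << " v" << unsigned(ct.vm_version) << "\n";
}
```

The design point is that one container now serves both consumers: the dispatcher walks the sequenced view front to back, while lookups and removals go through the hashed view without scanning the queue.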
3 changes: 2 additions & 1 deletion libraries/chain/wasm_interface.cpp
@@ -94,7 +94,8 @@ namespace eosio { namespace chain {
 const chain::eosvmoc::code_descriptor* cd = nullptr;
 chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary;
 try {
-cd = my->eosvmoc->cc.get_descriptor_for_code(code_hash, vm_version, context.control.is_write_window(), failure);
+const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name;
+cd = my->eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure);
 }
 catch(...) {
 //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline
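In wasm_interface.cpp the caller now decides the priority before asking the cache for a descriptor: when the receiver's account name shares the system account's prefix, the compile request is flagged high priority so it is queued ahead of ordinary contracts. The snippet below is a hypothetical, self-contained approximation of that check using plain strings; the real code compares eosio::chain::name values via name::prefix() and chain::config::system_account_name.

```cpp
// Hypothetical approximation of the priority decision; the real check operates on
// eosio::chain::name values, not strings.
#include <string_view>

constexpr std::string_view system_account = "eosio";   // stand-in for chain::config::system_account_name

// Rough string analogue of name::prefix(): everything before the last '.',
// or the whole name when there is no dot ("eosio.token" -> "eosio").
std::string_view prefix(std::string_view account) {
   const auto pos = account.rfind('.');
   return pos == std::string_view::npos ? account : account.substr(0, pos);
}

bool is_high_priority(std::string_view receiver) {
   return prefix(receiver) == system_account;
}

// is_high_priority("eosio.token") == true, is_high_priority("alice") == false
```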
16 changes: 10 additions & 6 deletions libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp
@@ -38,7 +38,7 @@ static constexpr size_t descriptor_ptr_from_file_start = header_offset + offseto
 
 static_assert(sizeof(code_cache_header) <= header_size, "code_cache_header too big");
 
-code_cache_async::code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
+code_cache_async::code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
 code_cache_base(data_dir, eosvmoc_config, db),
 _result_queue(eosvmoc_config.threads * 2),
 _threads(eosvmoc_config.threads)
@@ -106,7 +106,7 @@ std::tuple<size_t, size_t> code_cache_async::consume_compile_thread_queue() {
 }
 
 
-const code_descriptor* const code_cache_async::get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) {
+const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) {
 //if there are any outstanding compiles, process the result queue now
 //When app is in write window, all tasks are running sequentially and read-only threads
 //are not running. Safe to update cache entries.
@@ -156,13 +156,16 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const dig
 it->second = false;
 return nullptr;
 }
-if(_queued_compiles.find(ct) != _queued_compiles.end()) {
+if(auto it = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); it != _queued_compiles.get<by_hash>().end()) {
 failure = get_cd_failure::temporary; // Compile might not be done yet
 return nullptr;
 }
 
 if(_outstanding_compiles_and_poison.size() >= _threads) {
-_queued_compiles.emplace(ct);
+if (high_priority)
+   _queued_compiles.push_front(ct);
+else
+   _queued_compiles.push_back(ct);
 failure = get_cd_failure::temporary; // Compile might not be done yet
 return nullptr;
 }
@@ -221,7 +224,7 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const
 return &*_cache_index.push_front(std::move(std::get<code_descriptor>(result.result))).first;
 }
 
-code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
+code_cache_base::code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
 _db(db),
 _cache_file_path(data_dir/"code_cache.bin")
 {
@@ -377,7 +380,8 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve
 }
 
 //if it's in the queued list, erase it
-_queued_compiles.erase({code_id, vm_version});
+if(auto i = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); i != _queued_compiles.get<by_hash>().end())
+   _queued_compiles.get<by_hash>().erase(i);
 
 //however, if it's currently being compiled there is no way to cancel the compile,
 //so instead set a poison boolean that indicates not to insert the code in to the cache
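Taken together, the changes mean queued tier-up compiles are now consumed in a well-defined order instead of whatever order an unordered_set happened to iterate in: normal requests append to the back, system-account requests are pushed to the front, and free_code can still drop a queued entry by hash. A hypothetical drain loop over such a queue might look like the sketch below; the types are simplified stand-ins (std::string instead of digest_type), and drain_queued_compiles and start_compile are invented names, not functions from the repository.

```cpp
// Hypothetical sketch of draining an ordered compile queue into a bounded set of
// in-flight compiles. Illustrative only; not the project's actual dispatch code.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/member.hpp>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

namespace bmi = boost::multi_index;

struct code_tuple {
   std::string code_id;      // simplified: the real code stores a digest_type
   uint8_t     vm_version;
   bool operator==(const code_tuple& o) const { return code_id == o.code_id && vm_version == o.vm_version; }
};

namespace std {
template<> struct hash<code_tuple> {   // mirrors the std::hash specialization in the header
   size_t operator()(const code_tuple& ct) const {
      return std::hash<std::string>{}(ct.code_id) ^ ct.vm_version;
   }
};
}

struct by_hash {};
using queued_compiles_t = bmi::multi_index_container<
   code_tuple,
   bmi::indexed_by<
      bmi::sequenced<>,
      bmi::hashed_unique<bmi::tag<by_hash>,
         bmi::composite_key<code_tuple,
            bmi::member<code_tuple, std::string, &code_tuple::code_id>,
            bmi::member<code_tuple, uint8_t,     &code_tuple::vm_version>>>>>;

void start_compile(const code_tuple&) { /* hand the job to a compile thread (omitted) */ }

// Promote queued entries to compile threads in queue order until every slot is busy.
void drain_queued_compiles(queued_compiles_t& queued,
                           std::unordered_map<code_tuple, bool>& outstanding_and_poison,
                           size_t compile_threads) {
   while(!queued.empty() && outstanding_and_poison.size() < compile_threads) {
      code_tuple ct = queued.front();            // front = oldest, or a high-priority entry
      queued.pop_front();
      outstanding_and_poison.emplace(ct, false); // false: not poisoned
      start_compile(ct);
   }
}
```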
