Skip to content

Commit

Permalink
Make shared execute segments a run-time constructor option
Browse files Browse the repository at this point in the history
  • Loading branch information
fwsGonzo committed Jun 23, 2024
1 parent 3679c4e commit 3c0183d
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 25 deletions.
7 changes: 7 additions & 0 deletions lib/libriscv/common.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,13 @@ namespace riscv
/// locality and also enables read-write arena if the CMake option is ON.
bool use_memory_arena = true;

/// @brief Enable sharing of execute segments between machines.
/// @details This allows multiple machines to share the same execute
/// segment, reducing memory usage and increasing performance.
/// When binary translation is enabled, the dynamically translated code is
/// also shared between machines. Note that sharing prevents some
/// machine-specific optimizations.
bool use_shared_execute_segments = true;

/// @brief Override a default-injected exit function with another function
/// that is found by looking up the provided symbol name in the current program.
/// Eg. if default_exit_function is "fast_exit", then the ELF binary must have
Expand Down
53 changes: 28 additions & 25 deletions lib/libriscv/decoder_cache.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,11 @@
#include "threaded_bytecodes.hpp"
#include "util/crc32.hpp"
#include <mutex>
#define RISCV_SHARED_EXECUTE_SEGMENTS // TODO: Make this a configuration option

namespace riscv
{
static constexpr bool VERBOSE_DECODER = false;

#ifdef RISCV_SHARED_EXECUTE_SEGMENTS
template <int W>
struct SharedExecuteSegments {
SharedExecuteSegments() = default;
Expand Down Expand Up @@ -60,7 +58,6 @@ namespace riscv
};
template <int W>
static SharedExecuteSegments<W> shared_execute_segments;
#endif

template <int W>
static bool is_regular_compressed(uint16_t instr) {
Expand Down Expand Up @@ -488,30 +485,38 @@ namespace riscv
// Get a free slot to reference the execute segment
auto& free_slot = this->next_execute_segment();

#ifdef RISCV_SHARED_EXECUTE_SEGMENTS
// In order to prevent others from creating the same execute segment
// we need to lock the shared execute segments mutex.
auto& segment = shared_execute_segments<W>.get_segment(hash);
std::scoped_lock lock(segment.mutex);

if (segment.segment != nullptr) {
free_slot = segment.segment;
return *free_slot;
}
#endif
if (options.use_shared_execute_segments)
{
// Lock this segment slot's mutex so that no other machine can
// concurrently create an execute segment with the same hash.
auto& segment = shared_execute_segments<W>.get_segment(hash);
std::scoped_lock lock(segment.mutex);

if (segment.segment != nullptr) {
free_slot = segment.segment;
return *free_slot;
}

// We need to create a new execute segment, as there is no shared
// execute segment with the same hash.
free_slot = std::move(current_exec);
// Store the hash in the decoder cache
free_slot->set_crc32c_hash(hash);
// We need to create a new execute segment, as there is no shared
// execute segment with the same hash.
free_slot = std::move(current_exec);
// Store the hash in the decoder cache
free_slot->set_crc32c_hash(hash);

this->generate_decoder_cache(options, *free_slot);
this->generate_decoder_cache(options, *free_slot);

#ifdef RISCV_SHARED_EXECUTE_SEGMENTS
// Share the execute segment in the shared execute segments
segment.unlocked_set(free_slot);
#endif
// Share the execute segment
shared_execute_segments<W>.get_segment(hash).unlocked_set(free_slot);
}
else
{
free_slot = std::move(current_exec);
// Store the hash in the decoder cache
free_slot->set_crc32c_hash(hash);

this->generate_decoder_cache(options, *free_slot);
}

return *free_slot;
}
Expand Down Expand Up @@ -559,9 +564,7 @@ namespace riscv
if (segment) {
[[maybe_unused]] const uint32_t hash = segment->crc32c_hash();
segment = nullptr;
#ifdef RISCV_SHARED_EXECUTE_SEGMENTS
shared_execute_segments<W>.remove_if_unique(hash);
#endif
}
}
}
Expand Down

0 comments on commit 3c0183d

Please sign in to comment.