
Commit

add MSHR for non-blocking cache & output mem trace
Oxyw committed Dec 28, 2023
1 parent e3e1412 commit 58c5197
Showing 7 changed files with 238 additions and 2 deletions.
4 changes: 3 additions & 1 deletion .gitignore
@@ -38,4 +38,6 @@
[Ff]ast[Dd]ebug

# Backup files
*~
*~

.vscode
4 changes: 4 additions & 0 deletions core/CPUTopology.cpp
@@ -202,8 +202,12 @@ olympia::CoreTopologySimple::CoreTopologySimple(){
"cpu.core*.lsu.ports.in_cache_lookup_ack"
},
{
/*
"cpu.core*.dcache.ports.out_lsu_lookup_req",
"cpu.core*.lsu.ports.in_cache_lookup_req"
*/
"cpu.core*.dcache.ports.out_lsu_lookup_req_mshr",
"cpu.core*.lsu.ports.in_cache_lookup_req_mshr"
},
{
"cpu.core*.dcache.ports.out_lsu_free_req",
70 changes: 69 additions & 1 deletion core/DCache.cpp
@@ -6,7 +6,9 @@ namespace olympia {
DCache::DCache(sparta::TreeNode *n, const CacheParameterSet *p) :
sparta::Unit(n),
l1_always_hit_(p->l1_always_hit),
cache_latency_(p->cache_latency) {
cache_latency_(p->cache_latency),
l1_line_bits_(p->l1_line_bits),
dcache_mshr_size_(p->dcache_mshr_size) {

in_lsu_lookup_req_.registerConsumerHandler
(CREATE_SPARTA_HANDLER_WITH_DATA(DCache, getInstsFromLSU_, MemoryAccessInfoPtr));
@@ -82,21 +84,87 @@ namespace olympia {
// that L2Cache can accept requests from DCache.
// Provide a corresponding backpressure mechanism up the pipeline.
if(!busy_) {
/*
busy_ = true;
cache_pending_inst_ = memory_access_info_ptr;
out_l2cache_req_.send(cache_pending_inst_->getInstPtr());
// Set the --dcache_l2cache_credits_ here.
*/
bool flag = true;
const InstPtr & inst_ptr = memory_access_info_ptr->getInstPtr();
uint64_t uid = inst_ptr->getUniqueID();
for (auto pending_inst_ : cache_pending_insts_)
{
uint64_t uid_tmp = pending_inst_->getInstPtr()->getUniqueID();
if(uid == uid_tmp)
{
flag = false;
break;
}
}
if(flag)
{
cache_pending_insts_.emplace_back(memory_access_info_ptr);
if (cache_pending_insts_.size() == dcache_mshr_size_) // MSHR full: no free entries left
busy_ = true;
//---
ILOG("pending instr phyAddr=0x"
<< std::hex << memory_access_info_ptr->getInstPtr()->getRAdr());
ILOG("the size of cache_pending_insts_ now: "
<< cache_pending_insts_.size());
//---
if (cache_pending_insts_.size() == 1)
out_l2cache_req_.send(cache_pending_insts_.back()->getInstPtr());
}
}
}
out_lsu_lookup_ack_.send(memory_access_info_ptr);
}

void DCache::getRespFromL2Cache_(const InstPtr &inst_ptr) {
/*
out_lsu_lookup_req_.send(cache_pending_inst_);
reloadCache_(inst_ptr->getRAdr());
cache_pending_inst_.reset();
busy_ = false;
*/
// find pending instrs in the same cache line
uint64_t phyAddr = inst_ptr->getRAdr();
uint64_t blockBaseAddr = ((phyAddr >> l1_line_bits_) << l1_line_bits_);
std::vector<MemoryAccessInfoPtr> mshr_insts_deallocate_;
for (auto cache_pending_inst_ : cache_pending_insts_)
{
uint64_t addr = cache_pending_inst_->getInstPtr()->getRAdr();
addr = ((addr >> l1_line_bits_) << l1_line_bits_);
if(addr == blockBaseAddr)
mshr_insts_deallocate_.emplace_back(cache_pending_inst_);
}
// send to lsu
out_lsu_lookup_req_mshr_.send(mshr_insts_deallocate_);
//---
ILOG("pending instr phyAddr=0x" << std::hex << phyAddr);
ILOG("before deallocate,the size of cache_pending_insts_: " << cache_pending_insts_.size());
ILOG("the size of mshr_insts_deallocate_: " << mshr_insts_deallocate_.size());
//---
// reload
reloadCache_(inst_ptr->getRAdr());
// deallocate entry in cache_pending_insts_
for (auto pending_inst_ : mshr_insts_deallocate_)
{
cache_pending_insts_.erase(
std::remove(cache_pending_insts_.begin(), cache_pending_insts_.end(), pending_inst_),
cache_pending_insts_.end());
}
//---
ILOG("after deallocate, the size of cache_pending_insts_: " << cache_pending_insts_.size());
//---
// reset
for (auto & pending_inst_ : mshr_insts_deallocate_)
pending_inst_.reset();
busy_ = false;
if(cache_pending_insts_.size())
out_l2cache_req_.send(cache_pending_insts_.front()->getInstPtr());
}

void DCache::getAckFromL2Cache_(const uint32_t &ack) {
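Note on the DCache changes above: the MSHR is simply the cache_pending_insts_ vector. getInstsFromLSU_ allocates one entry per missing access (deduplicated by instruction unique ID) and asserts busy_ once all dcache_mshr_size entries are in use, while getRespFromL2Cache_ deallocates every entry whose address falls in the refilled line and forwards the batch to the LSU. The self-contained sketch below (the MshrSketch type and its names are illustrative, not olympia code) captures that allocate/deallocate-by-line bookkeeping:

    // Minimal sketch of the MSHR bookkeeping, assuming 64-bit physical addresses.
    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct MshrSketch {
        uint64_t line_bits;   // block-offset bits (l1_line_bits), e.g. 6 for 64-byte lines
        uint64_t capacity;    // dcache_mshr_size
        std::vector<std::pair<uint64_t, uint64_t>> entries;  // {unique id, phys addr}

        uint64_t lineBase(uint64_t paddr) const { return (paddr >> line_bits) << line_bits; }

        // Allocate on a miss; returns false if the id is already tracked or the MSHR is full.
        bool allocate(uint64_t uid, uint64_t paddr) {
            for (const auto & e : entries)
                if (e.first == uid) return false;          // duplicate request
            if (entries.size() >= capacity) return false;  // full -> backpressure (busy_)
            entries.emplace_back(uid, paddr);
            return true;
        }

        // On a refill, remove and return every entry in the refilled cache line.
        std::vector<std::pair<uint64_t, uint64_t>> deallocateLine(uint64_t fill_paddr) {
            const uint64_t base = lineBase(fill_paddr);
            std::vector<std::pair<uint64_t, uint64_t>> woken;
            for (const auto & e : entries)
                if (lineBase(e.second) == base) woken.push_back(e);
            entries.erase(std::remove_if(entries.begin(), entries.end(),
                                         [&](const std::pair<uint64_t, uint64_t> & e)
                                         { return lineBase(e.second) == base; }),
                          entries.end());
            return woken;
        }
    };

    int main() {
        MshrSketch mshr{6, 16, {}};
        mshr.allocate(1, 0x1000);                  // primary miss
        mshr.allocate(2, 0x1008);                  // secondary miss, same 64-byte line
        auto woken = mshr.deallocateLine(0x1000);  // one refill wakes both
        return woken.size() == 2 ? 0 : 1;
    }

In the commit itself the entries are MemoryAccessInfoPtr objects, and the refill additionally calls reloadCache_ and issues the next out_l2cache_req_ for the front of the remaining queue.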
13 changes: 13 additions & 0 deletions core/DCache.hpp
@@ -26,6 +26,9 @@ namespace olympia
PARAMETER(uint32_t, l1_associativity, 8, "DL1 associativity (power of 2)")
PARAMETER(uint32_t, cache_latency, 1, "Assumed latency of the memory system")
PARAMETER(bool, l1_always_hit, false, "DL1 will always hit")
// non-blocking
PARAMETER(uint32_t, l1_line_bits, 6, "log2 of l1_line_size (cache-line offset bits)")
PARAMETER(uint32_t, dcache_mshr_size, 16, "number of MSHR entries")
};

static const char name[];
@@ -48,7 +51,13 @@ namespace olympia
bool busy_ = false;
uint32_t cache_latency_ = 0;
// Keep track of the instruction that causes current outstanding cache miss
/*
MemoryAccessInfoPtr cache_pending_inst_ = nullptr;
*/
// non-blocking
std::vector<MemoryAccessInfoPtr> cache_pending_insts_;
const uint32_t l1_line_bits_;
const uint32_t dcache_mshr_size_;

// Credit bool for sending miss request to L2Cache
uint32_t dcache_l2cache_credits_ = 0;
@@ -71,8 +80,12 @@ namespace olympia
sparta::DataOutPort<MemoryAccessInfoPtr> out_lsu_lookup_ack_{&unit_port_set_,
"out_lsu_lookup_ack", 0};

/*
sparta::DataOutPort<MemoryAccessInfoPtr> out_lsu_lookup_req_{&unit_port_set_,
"out_lsu_lookup_req", 1};
*/
sparta::DataOutPort<std::vector<MemoryAccessInfoPtr>> out_lsu_lookup_req_mshr_{&unit_port_set_,
"out_lsu_lookup_req_mshr", 1};

sparta::DataOutPort<InstPtr> out_l2cache_req_{&unit_port_set_, "out_l2cache_req", 0};

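On the two new DCache parameters: dcache_mshr_size bounds how many outstanding misses the cache tracks before it re-asserts busy_, and l1_line_bits is the number of block-offset bits, so it must equal log2 of the DL1 line size for the shift-based line matching in DCache.cpp to work. A small sanity check (illustrative only, assuming the default of 6 bits, i.e. 64-byte lines):

    // Checks the assumed relationship between l1_line_bits and the DL1 line size.
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t l1_line_bits = 6;                   // default from DCache.hpp above
        const uint32_t l1_line_size = 1u << l1_line_bits;  // 64 bytes per line
        assert(l1_line_size == 64);

        const uint64_t paddr = 0x12345ABCull;
        // The shift-based masking used by the MSHR equals a plain low-bit mask.
        const uint64_t line_base = (paddr >> l1_line_bits) << l1_line_bits;
        assert(line_base == (paddr & ~uint64_t(l1_line_size - 1)));
        return 0;
    }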
85 changes: 85 additions & 0 deletions core/LSU.cpp
@@ -68,8 +68,12 @@ namespace olympia
in_mmu_lookup_ack_.registerConsumerHandler(
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, getAckFromMMU_, MemoryAccessInfoPtr));

/*
in_cache_lookup_req_.registerConsumerHandler(
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, handleCacheReadyReq_, MemoryAccessInfoPtr));
*/
in_cache_lookup_req_mshr_.registerConsumerHandler(
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, handleCacheReadyReq_, std::vector<MemoryAccessInfoPtr>));

in_cache_lookup_ack_.registerConsumerHandler(
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, getAckFromCache_, MemoryAccessInfoPtr));
@@ -511,6 +515,7 @@ namespace olympia

void LSU::getAckFromCache_(const MemoryAccessInfoPtr & updated_memory_access_info_ptr) {}

/*
void LSU::handleCacheReadyReq_(const MemoryAccessInfoPtr & memory_access_info_ptr)
{
auto inst_ptr = memory_access_info_ptr->getInstPtr();
Expand Down Expand Up @@ -541,6 +546,42 @@ namespace olympia
uev_issue_inst_.schedule(sparta::Clock::Cycle(0));
}
}
*/
void LSU::handleCacheReadyReq_(const std::vector<MemoryAccessInfoPtr> & memory_access_info_ptr_vector)
{
if (cache_pending_inst_flushed_)
{
cache_pending_inst_flushed_ = false;
ILOG("BIU Ack for a flushed cache miss is received!");

// Schedule an instruction (re-)issue event
// Note: some younger load/store instruction(s) might have been blocked by
// this outstanding miss
updateIssuePriorityAfterCacheReload_(memory_access_info_ptr_vector, true);
if (isReadyToIssueInsts_())
{
uev_issue_inst_.schedule(sparta::Clock::Cycle(0));
}
return;
}

for (auto memory_access_info_ptr : memory_access_info_ptr_vector)
{
auto inst_ptr = memory_access_info_ptr->getInstPtr();
ILOG("Cache ready for " << memory_access_info_ptr);
//---
ILOG("Cache ready for phyAddr=0x" << std::hex << inst_ptr->getRAdr());
//---
updateIssuePriorityAfterCacheReload_(memory_access_info_ptr);
removeInstFromReplayQueue_(inst_ptr);
}

if (isReadyToIssueInsts_())
{
ILOG("Cache ready issue");
uev_issue_inst_.schedule(sparta::Clock::Cycle(0));
}
}

void LSU::handleCacheRead_()
{
@@ -759,6 +800,9 @@
if (cache_busy_)
{
cache_pending_inst_flushed_ = true;
//---
ILOG("cache_busy is true and cache_pending_inst_flushed_ is set true.");
//---
}

// Flush instruction issue queue
@@ -832,6 +876,22 @@
}
}

void LSU::mshr_appendReady_(const std::vector<LoadStoreInstInfoPtr> & replay_inst_ptrs)
{
for(auto replay_inst_ptr : replay_inst_ptrs)
{
ILOG("Appending to Ready ready queue event " << replay_inst_ptr->isInReadyQueue() << " "
<< replay_inst_ptr);
if (!replay_inst_ptr->isInReadyQueue()
&& !replay_inst_ptr->getReplayQueueIterator().isValid())
appendToReadyQueue_(replay_inst_ptr);
}
if (isReadyToIssueInsts_())
{
uev_issue_inst_.schedule(sparta::Clock::Cycle(0));
}
}

////////////////////////////////////////////////////////////////////////////////
// Regular Function/Subroutine Call
////////////////////////////////////////////////////////////////////////////////
@@ -1194,6 +1254,31 @@
inst_info_ptr->setPriority(LoadStoreInstInfo::IssuePriority::CACHE_RELOAD);
uev_append_ready_.preparePayload(inst_info_ptr)->schedule(sparta::Clock::Cycle(0));
}
// non-blocking
void LSU::updateIssuePriorityAfterCacheReload_(const std::vector<MemoryAccessInfoPtr> & mem_access_info_ptr_vector,
const bool is_flushed_inst)
{
std::vector<LoadStoreInstInfoPtr> inst_info_ptrs;
for (auto & mem_access_info_ptr : mem_access_info_ptr_vector)
{
const LoadStoreInstIterator & iter = mem_access_info_ptr->getIssueQueueIterator();
sparta_assert(
iter.isValid(),
"Attempt to rehandle cache lookup for instruction not yet in the issue queue! "
<< mem_access_info_ptr);

const LoadStoreInstInfoPtr & inst_info_ptr = *(iter);
inst_info_ptrs.emplace_back(inst_info_ptr);

// Update issue priority for this outstanding cache miss
if (inst_info_ptr->getState() != LoadStoreInstInfo::IssueState::ISSUED)
{
inst_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
}
inst_info_ptr->setPriority(LoadStoreInstInfo::IssuePriority::CACHE_RELOAD);
}
mshr_uev_append_ready_.preparePayload(inst_info_ptrs)->schedule(sparta::Clock::Cycle(0));
}

// Update issue priority after store instruction retires
void LSU::updateIssuePriorityAfterStoreInstRetire_(const InstPtr & inst_ptr)
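Why the LSU handler now takes a vector: a single L2 refill can satisfy every pending access that maps to the refilled line, so DCache hands the LSU the whole batch and mshr_appendReady_ plus the new updateIssuePriorityAfterCacheReload_ overload re-ready all of them at once. The toy trace below (illustrative, not olympia code; it assumes a cold cache and that all accesses arrive while their lines are still outstanding) shows how same-line misses coalesce into one refill while misses to other lines queue behind it:

    // Counts how many L2 refills a short access trace needs when same-line
    // misses are coalesced in the MSHR. Prints "5 accesses -> 3 L2 refills".
    #include <cstdint>
    #include <iostream>
    #include <set>

    int main() {
        const uint32_t line_bits = 6;  // 64-byte lines
        const uint64_t trace[] = {0x1000, 0x1008, 0x1040, 0x1010, 0x2000};
        std::set<uint64_t> outstanding_lines;  // lines already being fetched
        uint32_t l2_refills = 0;
        for (uint64_t paddr : trace) {
            const uint64_t line = (paddr >> line_bits) << line_bits;
            if (outstanding_lines.insert(line).second)  // primary miss for this line
                ++l2_refills;                           // secondaries just wait in the MSHR
        }
        std::cout << sizeof(trace) / sizeof(trace[0]) << " accesses -> "
                  << l2_refills << " L2 refills\n";
        return 0;
    }

Note that the DCache in this commit still issues refills one at a time (out_l2cache_req_ is sent only for the front pending entry, and again after each refill), so the MSHR removes blocking at the LSU interface rather than adding L2-level memory parallelism.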
14 changes: 14 additions & 0 deletions core/LSU.hpp
@@ -106,8 +106,12 @@ namespace olympia
sparta::DataInPort<MemoryAccessInfoPtr> in_mmu_lookup_ack_{&unit_port_set_,
"in_mmu_lookup_ack", 0};

/*
sparta::DataInPort<MemoryAccessInfoPtr> in_cache_lookup_req_{&unit_port_set_,
"in_cache_lookup_req", 1};
*/
sparta::DataInPort<std::vector<MemoryAccessInfoPtr>> in_cache_lookup_req_mshr_{&unit_port_set_,
"in_cache_lookup_req_mshr", 1};

sparta::DataInPort<MemoryAccessInfoPtr> in_cache_lookup_ack_{&unit_port_set_,
"in_cache_lookup_ack", 0};
@@ -165,6 +169,7 @@ namespace olympia
const int address_calculation_stage_;
const int mmu_lookup_stage_;
const int cache_lookup_stage_;
//const int mshr_process_stage_; // non-blocking
const int cache_read_stage_;
const int complete_stage_;

@@ -191,6 +196,10 @@ namespace olympia
&unit_event_set_, "append_ready",
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, appendReady_, LoadStoreInstInfoPtr)};

sparta::PayloadEvent<std::vector<LoadStoreInstInfoPtr>> mshr_uev_append_ready_{
&unit_event_set_, "mshr_append_ready",
CREATE_SPARTA_HANDLER_WITH_DATA(LSU, mshr_appendReady_, std::vector<LoadStoreInstInfoPtr>)};

////////////////////////////////////////////////////////////////////////////////
// Callbacks
////////////////////////////////////////////////////////////////////////////////
@@ -221,7 +230,10 @@

// Handle cache access request
void handleCacheLookupReq_();
/*
void handleCacheReadyReq_(const MemoryAccessInfoPtr & memory_access_info_ptr);
*/
void handleCacheReadyReq_(const std::vector<MemoryAccessInfoPtr> & memory_access_info_ptr_vector);
void getAckFromCache_(const MemoryAccessInfoPtr & updated_memory_access_info_ptr);

// Perform cache read
@@ -241,6 +253,7 @@ namespace olympia

// Instructions in the replay ready to issue
void appendReady_(const LoadStoreInstInfoPtr &);
void mshr_appendReady_(const std::vector<LoadStoreInstInfoPtr> &);

////////////////////////////////////////////////////////////////////////////////
// Regular Function/Subroutine Call
@@ -289,6 +302,7 @@

// Update issue priority after cache reload
void updateIssuePriorityAfterCacheReload_(const MemoryAccessInfoPtr &, const bool = false);
void updateIssuePriorityAfterCacheReload_(const std::vector<MemoryAccessInfoPtr> &, const bool = false);

// Update issue priority after store instruction retires
void updateIssuePriorityAfterStoreInstRetire_(const InstPtr &);
(diff for the remaining changed file not rendered)
