diff --git a/rtl/letc/core/letc_core_cache.sv b/rtl/letc/core/letc_core_cache.sv index d3d45c2..e2c4296 100644 --- a/rtl/letc/core/letc_core_cache.sv +++ b/rtl/letc/core/letc_core_cache.sv @@ -4,6 +4,7 @@ * * Copyright: * Copyright (C) 2024 John Jekel + Copyright (C) 2024 Eric Jessee * See the LICENSE file at the root of the project for licensing info. * * A simple write-through, direct-mapped cache @@ -231,6 +232,10 @@ logic [1:0] cache_state_next; always_ff @(posedge i_clk) begin if (!i_rst_n) begin cache_state_current <= CACHE_STATE_IDLE; + end else if (stage_limp.wen_nren) begin + //is this a good idea? I think the stage should never try to write + //while waiting on a cache miss. + cache_state_current <= CACHE_STATE_IDLE; end else begin cache_state_current <= cache_state_next; end @@ -280,53 +285,86 @@ always_comb begin //state dependent outputs end -//FIXME -assign axi_fsm_limp.wen_nren = 1'b0; -assign axi_fsm_limp.size = SIZE_WORD; +//write through logic +always_comb begin + if (stage_limp.wen_nren || stage_limp.bypass) begin + //no buffering for now. Directly connect stage and axi fsm + //limp interfaces together. + //if we add buffering, we would need to handle the bypass + //separately. + axi_fsm_limp.valid = stage_limp.valid; + stage_limp.ready = axi_fsm_limp.ready; + axi_fsm_limp.wen_nren = stage_limp.wen_nren; + axi_fsm_limp.size = stage_limp.size; + axi_fsm_limp.addr = stage_limp.addr; + stage_limp.rdata = axi_fsm_limp.rdata; + axi_fsm_limp.wdata = stage_limp.wdata; + end +end /* ------------------------------------------------------------------------------------------------ * Output Logic * --------------------------------------------------------------------------------------------- */ +//will never need the bypass signal to the axi fsm. +assign axi_fsm_limp_bypass = 1'b0; + +//refill fsm outputs always_comb begin - unique case (cache_state_current) - CACHE_STATE_IDLE: begin - addr_counter_load = hit ? 
1'b0 : 1'b1; - addr_counter_en = 1'b0; - sr_load = 1'b1; - tag_wen = 1'b0; - axi_fsm_limp.valid = 1'b0; - set_line_valid = 1'b0; - cache_line_wen = 1'b0; - end - CACHE_STATE_FILL: begin - addr_counter_load = 1'b0; - addr_counter_en = axi_fsm_limp.ready; - sr_load = 1'b0; - tag_wen = 1'b0; - axi_fsm_limp.valid = 1'b1; - set_line_valid = 1'b0; - cache_line_wen = axi_fsm_limp.ready; - end - CACHE_STATE_WRITE_TAG: begin - addr_counter_load = 1'b0; - addr_counter_en = 1'b0; - sr_load = 1'b0; - tag_wen = 1'b1; - axi_fsm_limp.valid = 1'b0; - set_line_valid = 1'b1; - cache_line_wen = 1'b0; - end - default: begin + if (!stage_limp.wen_nren) begin + //when read enabled, axi interface will always be read-enabled for + //word-sized access + axi_fsm_limp.wen_nren = 1'b0; + axi_fsm_limp.size = SIZE_WORD; + unique case (cache_state_current) + CACHE_STATE_IDLE: begin + addr_counter_load = hit ? 1'b0 : 1'b1; + addr_counter_en = 1'b0; + sr_load = 1'b1; + tag_wen = 1'b0; + axi_fsm_limp.valid = 1'b0; + set_line_valid = 1'b0; + cache_line_wen = 1'b0; + end + CACHE_STATE_FILL: begin + addr_counter_load = 1'b0; + addr_counter_en = axi_fsm_limp.ready; + sr_load = 1'b0; + tag_wen = 1'b0; + axi_fsm_limp.valid = 1'b1; + set_line_valid = 1'b0; + cache_line_wen = axi_fsm_limp.ready; + end + CACHE_STATE_WRITE_TAG: begin + addr_counter_load = 1'b0; + addr_counter_en = 1'b0; + sr_load = 1'b0; + tag_wen = 1'b1; + axi_fsm_limp.valid = 1'b0; + set_line_valid = 1'b1; + cache_line_wen = 1'b0; + end + default: begin + addr_counter_load = 1'b0; + addr_counter_en = 1'b0; + sr_load = 1'b0; + tag_wen = 1'b0; + axi_fsm_limp.valid = 1'b0; + set_line_valid = 1'b0; + cache_line_wen = 1'b0; + end + endcase + end else begin //write enabled + //axi fsm limp signals will be directly connected + //to the stage limp signals. 
state machine should do nothing addr_counter_load = 1'b0; addr_counter_en = 1'b0; sr_load = 1'b0; tag_wen = 1'b0; - axi_fsm_limp.valid = 1'b0; set_line_valid = 1'b0; cache_line_wen = 1'b0; - end - endcase + end end + /* ------------------------------------------------------------------------------------------------ * Assertions * --------------------------------------------------------------------------------------------- */ @@ -343,6 +381,11 @@ initial begin assert(CACHE_DEPTH > 0); end +//stage shouldn't try to write while waiting on a cache miss +// initial begin +// assert((cache_state_current == CACHE_STATE_IDLE) || !stage_limp.wen_nren); +// end + //TODO `endif //SIMULATION diff --git a/rtl/letc/core/letc_core_limp_if.sv b/rtl/letc/core/letc_core_limp_if.sv index 5982f48..6d4fe7e 100644 --- a/rtl/letc/core/letc_core_limp_if.sv +++ b/rtl/letc/core/letc_core_limp_if.sv @@ -33,12 +33,12 @@ interface letc_core_limp_if logic valid; logic ready; logic wen_nren;//Write enable and not read enable +logic bypass; size_e size; paddr_t addr; word_t rdata; word_t wdata; //TODO fault signal if unaligned, AXI errors, etc -//TODO bypass signal for direct memory access from stage /* ------------------------------------------------------------------------------------------------ * Modports * --------------------------------------------------------------------------------------------- */ @@ -47,6 +47,7 @@ modport requestor ( output valid, input ready, output wen_nren, + output bypass, output size, output addr, input rdata, @@ -57,6 +58,7 @@ modport servicer ( input valid, output ready, input wen_nren, + input bypass, input size, input addr, output rdata, diff --git a/verif/nonuvm/letc/core/cache/letc_core_cache_tb.sv b/verif/nonuvm/letc/core/cache/letc_core_cache_tb.sv index c61def8..7ea4c2a 100644 --- a/verif/nonuvm/letc/core/cache/letc_core_cache_tb.sv +++ b/verif/nonuvm/letc/core/cache/letc_core_cache_tb.sv @@ -58,6 +58,7 @@ letc_core_cache dut (.*); logic stage_limp_valid; 
logic stage_limp_ready; logic stage_limp_wen_nren; +logic stage_limp_bypass; size_e stage_limp_size; paddr_t stage_limp_addr; word_t stage_limp_rdata; @@ -66,6 +67,7 @@ word_t stage_limp_wdata; logic axi_fsm_limp_valid; logic axi_fsm_limp_ready; logic axi_fsm_limp_wen_nren; +logic axi_fsm_limp_bypass; size_e axi_fsm_limp_size; paddr_t axi_fsm_limp_addr; word_t axi_fsm_limp_rdata; @@ -75,6 +77,7 @@ always_comb begin stage_limp.valid = stage_limp_valid; stage_limp_ready = stage_limp.ready; stage_limp.wen_nren = stage_limp_wen_nren; + stage_limp_bypass = stage_limp.bypass; stage_limp.size = stage_limp_size; stage_limp.addr = stage_limp_addr; stage_limp_rdata = stage_limp.rdata; @@ -83,6 +86,7 @@ always_comb begin axi_fsm_limp_valid = axi_fsm_limp.valid; axi_fsm_limp.ready = axi_fsm_limp_ready; axi_fsm_limp_wen_nren = axi_fsm_limp.wen_nren; + axi_fsm_limp.bypass = axi_fsm_limp_bypass; axi_fsm_limp_size = axi_fsm_limp.size; axi_fsm_limp_addr = axi_fsm_limp.addr; axi_fsm_limp.rdata = axi_fsm_limp_rdata; @@ -116,6 +120,7 @@ default clocking cb @(posedge i_clk); output stage_limp_valid; input stage_limp_ready; output stage_limp_wen_nren;//Write enable and not read enable + output stage_limp_bypass; output stage_limp_size; output stage_limp_addr; input stage_limp_rdata; @@ -125,6 +130,7 @@ default clocking cb @(posedge i_clk); input axi_fsm_limp_valid; output axi_fsm_limp_ready; input axi_fsm_limp_wen_nren;//Write enable and not read enable + input axi_fsm_limp_bypass; input axi_fsm_limp_size; input axi_fsm_limp_addr; output axi_fsm_limp_rdata; @@ -144,6 +150,8 @@ initial begin cb.stage_limp_valid <= 1'b0; cb.axi_fsm_limp_ready <= 1'b0; cb.i_flush_cache <= 1'b0; + cb.stage_limp_wen_nren <= 1'b0; + cb.stage_limp_bypass <= 1'b0; //Reset things cb.i_rst_n <= 1'b0; @@ -182,18 +190,17 @@ initial begin //TODO more axi_fsm_limp_ready <= 1'b0; -`ifndef VERILATOR - //Verilator sometimes doesn't like deassign - deassign axi_fsm_limp_rdata; ///////////////////////////// - //Testing 
reads, with more complex backing memory latencies + //Testing write-through ///////////////////////////// - //TODO +`ifndef VERILATOR + //Verilator sometimes doesn't like deassign + deassign axi_fsm_limp_rdata; ///////////////////////////// - //Testing write-through + //Testing reads, with more complex backing memory latencies ///////////////////////////// //TODO