Skip to content

Commit

Permalink
Fix write invalidation and improve cache tb
Browse files Browse the repository at this point in the history
- Still need to make the tb more comprehensive though :)
  • Loading branch information
JZJisawesome committed May 7, 2024
1 parent 5e59a1d commit 797956d
Show file tree
Hide file tree
Showing 2 changed files with 92 additions and 8 deletions.
18 changes: 14 additions & 4 deletions rtl/letc/core/letc_core_cache.sv
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,9 @@ amd_lutram #(
);

//Valid Flops
//TODO optimization: still make a write-through cache but ALSO write to the cache as well to avoid future read misses to the same line
logic set_line_valid;
logic invalidate_line;
logic [CACHE_DEPTH-1:0] cache_line_valid;
always_ff @(posedge i_clk) begin
if (!i_rst_n) begin
Expand All @@ -133,13 +135,13 @@ always_ff @(posedge i_clk) begin
if (i_flush_cache) begin
cache_line_valid <= '0;
end else if (set_line_valid) begin //data ready from the axi fsm means the cache line is being written
//FIXME also need to invalidate the line if it's being written to due to write-through

//Since this is a write-through cache, and there is no need to invalidate lines
//for cache coherency for example, the only time a cache line can
//become valid is when we write to it; and then it can never become invalid
//again until the cache is flushed!
//become valid is when we refill it; and then it can never become invalid
//again until the cache is flushed or the memory the line corresponds to is written.
cache_line_valid[cache_write_index] <= 1'b1;
end else if (invalidate_line) begin
cache_line_valid[stage_index] <= 1'b0;
end
end
end
Expand Down Expand Up @@ -345,6 +347,7 @@ always_comb begin
sr_load = 1'b1;
tag_wen = 1'b0;
set_line_valid = 1'b0;
invalidate_line = 1'b0;
cache_line_wen = 1'b0;
end
CACHE_STATE_FILL: begin
Expand All @@ -353,6 +356,8 @@ always_comb begin
sr_load = 1'b0;
tag_wen = 1'b0;
set_line_valid = 1'b0;
//Since the tag isn't written until the end, we don't need to invalidate to prevent a hit
invalidate_line = 1'b0;
cache_line_wen = axi_fsm_limp.ready;
end
CACHE_STATE_WRITE_TAG: begin
Expand All @@ -361,6 +366,7 @@ always_comb begin
sr_load = 1'b0;
tag_wen = 1'b1;
set_line_valid = 1'b1;
invalidate_line = 1'b0;
cache_line_wen = 1'b0;
end
default: begin
Expand All @@ -381,6 +387,7 @@ always_comb begin
sr_load = 1'b0;
tag_wen = 1'b0;
set_line_valid = 1'b0;
invalidate_line = stage_limp.valid;//Passed-through request is actually valid
cache_line_wen = 1'b0;
end
end
Expand All @@ -401,6 +408,9 @@ initial begin
assert(CACHE_DEPTH > 0);
end

//We should never both validate and invalidate a cache line on the same cycle
assert property (@(posedge i_clk) disable iff (!i_rst_n) !(set_line_valid && invalidate_line));

//stage shouldn't try to write while waiting on a cache miss
// initial begin
// assert((cache_state_current == CACHE_STATE_IDLE) || !stage_limp.wen_nren);
Expand Down
82 changes: 78 additions & 4 deletions verif/nonuvm/letc/core/cache/letc_core_cache_tb.sv
Original file line number Diff line number Diff line change
Expand Up @@ -166,9 +166,9 @@ initial begin
/////////////////////////////

axi_fsm_limp_ready <= 1'b1;
assign axi_fsm_limp_rdata = ~axi_fsm_limp_addr[31:0];
assign axi_fsm_limp_rdata = 32'hEFEF5678 + axi_fsm_limp_addr[31:0];

//Fetch a word from 0, pretty simple
//Fetch a word from 32'hABCD1234, pretty simple
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b0;
stage_limp_size <= SIZE_WORD;
Expand All @@ -185,16 +185,90 @@ initial begin
end
end
timeout_counter = 0;
assert(stage_limp_rdata == ~stage_limp_addr[31:0]);
assert(stage_limp_rdata == (32'hEFEF5678 + axi_fsm_limp_addr[31:0]));

//TODO more
//Tiny break to mix things up :)
stage_limp_valid <= 1'b0;
##1;

//Fetch a byte from 32'hABCD1230, which should be on the same cache line
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b0;
stage_limp_size <= SIZE_BYTE;
stage_limp_addr <= 32'hABCD1230;
##1;//One cycle for inputs to take effect
assert(stage_limp_ready);//Should hit
assert(stage_limp_rdata == ((32'hEFEF5678 + axi_fsm_limp_addr[31:0]) & 32'hFF));

//Fetch a word from 32'h11111110
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b0;
stage_limp_size <= SIZE_WORD;
stage_limp_addr <= 32'h11111110;
##1;//One cycle for inputs to take effect
assert(!stage_limp_ready);//Upper bits completely different, should miss
while (!stage_limp_ready) begin
$display("Waiting for stage_limp_ready");
##1;
++timeout_counter;
if (timeout_counter > TIMEOUT) begin
$display("Timeout waiting for stage_limp_ready");
$fatal;
end
end
timeout_counter = 0;
assert(stage_limp_rdata == (32'hEFEF5678 + axi_fsm_limp_addr[31:0]));

//Fetch a halfword from 32'hABCD1200, which should be on the same cache line as the first word
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b0;
stage_limp_size <= SIZE_HALFWORD;
stage_limp_addr <= 32'hABCD1200;
##1;//One cycle for inputs to take effect
assert(stage_limp_ready);//Should hit
assert(stage_limp_rdata == ((32'hEFEF5678 + axi_fsm_limp_addr[31:0]) & 32'hFFFF));

/////////////////////////////
//Testing invalidation on writes and uncached reads
/////////////////////////////

//Write a halfword to 32'hABCD1202, which should invalidate the cache line
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b1;
stage_limp_size <= SIZE_HALFWORD;
stage_limp_addr <= 32'hABCD1202;
stage_limp_wdata <= 32'h3C3CA5A5;
##1;//One cycle for inputs to take effect
assert(stage_limp_ready);//Should write through and work immediately
assert(axi_fsm_limp_wdata == 32'h3C3CA5A5);//The write should be passed through

//Fetch a byte from 32'hABCD1230, which will miss due to the invalidation
stage_limp_valid <= 1'b1;
stage_limp_wen_nren <= 1'b0;
stage_limp_size <= SIZE_BYTE;
stage_limp_addr <= 32'hABCD1230;
##1;//One cycle for inputs to take effect
assert(!stage_limp_ready);
while (!stage_limp_ready) begin
$display("Waiting for stage_limp_ready");
##1;
++timeout_counter;
if (timeout_counter > TIMEOUT) begin
$display("Timeout waiting for stage_limp_ready");
$fatal;
end
end
timeout_counter = 0;
assert(stage_limp_rdata == ((32'hEFEF5678 + axi_fsm_limp_addr[31:0]) & 32'hFF));

axi_fsm_limp_ready <= 1'b0;

/////////////////////////////
//Testing write-through
/////////////////////////////

//TODO

`ifndef VERILATOR
//Verilator sometimes doesn't like deassign
deassign axi_fsm_limp_rdata;
Expand Down

0 comments on commit 797956d

Please sign in to comment.