 #include <stdio.h>
+#include <string.h>
 
 #include "common.h"
 #include "device.h"
@@ -180,11 +181,17 @@ static inline uint32_t read_rs2(const hart_t *vm, uint32_t insn)
     return vm->x_regs[decode_rs2(insn)];
 }
 
+static inline void icache_invalidate_all(hart_t *vm)
+{
+    memset(&vm->icache, 0, sizeof(vm->icache));
+}
+
 /* virtual addressing */
 
 void mmu_invalidate(hart_t *vm)
 {
-    vm->cache_fetch.n_pages = 0xFFFFFFFF;
+    vm->cache_fetch[0].n_pages = 0xFFFFFFFF;
+    vm->cache_fetch[1].n_pages = 0xFFFFFFFF;
     /* Invalidate all 8 sets × 2 ways for load cache */
     for (int set = 0; set < 8; set++) {
         for (int way = 0; way < 2; way++)
@@ -197,6 +204,7 @@ void mmu_invalidate(hart_t *vm)
             vm->cache_store[set].ways[way].n_pages = 0xFFFFFFFF;
         vm->cache_store[set].lru = 0; /* Reset LRU to way 0 */
     }
+    icache_invalidate_all(vm);
 }
 
 /* Invalidate MMU caches for a specific virtual address range.
@@ -226,10 +234,24 @@ void mmu_invalidate_range(hart_t *vm, uint32_t start_addr, uint32_t size)
         end_addr = UINT32_MAX;
     uint32_t end_vpn = (uint32_t) end_addr >> RV_PAGE_SHIFT;
 
-    /* Cache invalidation for fetch cache */
-    if (vm->cache_fetch.n_pages >= start_vpn &&
-        vm->cache_fetch.n_pages <= end_vpn)
-        vm->cache_fetch.n_pages = 0xFFFFFFFF;
+    /* Invalidate fetch cache: 2 entries */
+    for (int i = 0; i < 2; i++) {
+        if (vm->cache_fetch[i].n_pages >= start_vpn &&
+            vm->cache_fetch[i].n_pages <= end_vpn)
+            vm->cache_fetch[i].n_pages = 0xFFFFFFFF;
+    }
+
+    /* Invalidate I-cache: 256 blocks */
+    for (int i = 0; i < ICACHE_BLOCKS; i++) {
+        icache_block_t *blk = &vm->icache.block[i];
+        if (!blk->valid)
+            continue;
+
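+        /* Reconstruct the block's VPN from its tag and index. */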
+        uint32_t icache_vpn = (blk->tag << ICACHE_INDEX_BITS) | i;
+        icache_vpn >>= (RV_PAGE_SHIFT - ICACHE_OFFSET_BITS);
+        if (icache_vpn >= start_vpn && icache_vpn <= end_vpn)
+            blk->valid = false;
+    }
 
     /* Invalidate load cache: 8 sets × 2 ways */
     for (int set = 0; set < 8; set++) {
@@ -361,10 +383,38 @@ static void mmu_fence(hart_t *vm, uint32_t insn UNUSED)
 
 static void mmu_fetch(hart_t *vm, uint32_t addr, uint32_t *value)
 {
+    uint32_t idx = (addr >> ICACHE_OFFSET_BITS) & ICACHE_INDEX_MASK;
+    uint32_t tag = addr >> (ICACHE_OFFSET_BITS + ICACHE_INDEX_BITS);
+    icache_block_t *blk = &vm->icache.block[idx];
     uint32_t vpn = addr >> RV_PAGE_SHIFT;
-    if (unlikely(vpn != vm->cache_fetch.n_pages)) {
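+    /* The VPN's parity selects one of the two fetch-TLB entries. */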
+    uint32_t index = __builtin_parity(vpn) & 0x1;
+
+#ifdef MMU_CACHE_STATS
+    vm->cache_fetch[index].total_fetch++;
+#endif
+
+    /* I-cache lookup */
+    if (likely(blk->valid && blk->tag == tag)) {
+        /* I-cache hit */
+#ifdef MMU_CACHE_STATS
+        vm->cache_fetch[index].icache_hits++;
+#endif
+        uint32_t ofs = addr & ICACHE_BLOCK_MASK;
+        *value = *(const uint32_t *) (blk->base + ofs);
+        return;
+    }
+    /* I-cache miss */
+    else {
+#ifdef MMU_CACHE_STATS
+        vm->cache_fetch[index].icache_misses++;
+#endif
+    }
+
+    /* I-cache miss, 2-entry TLB lookup */
+    if (unlikely(vpn != vm->cache_fetch[index].n_pages)) {
+        /* TLB miss */
 #ifdef MMU_CACHE_STATS
-        vm->cache_fetch.misses++;
+        vm->cache_fetch[index].tlb_misses++;
 #endif
         mmu_translate(vm, &addr, (1 << 3), (1 << 6), false, RV_EXC_FETCH_FAULT,
                       RV_EXC_FETCH_PFAULT);
@@ -374,15 +424,24 @@ static void mmu_fetch(hart_t *vm, uint32_t addr, uint32_t *value)
         vm->mem_fetch(vm, addr >> RV_PAGE_SHIFT, &page_addr);
         if (vm->error)
             return;
-        vm->cache_fetch.n_pages = vpn;
-        vm->cache_fetch.page_addr = page_addr;
+        vm->cache_fetch[index].n_pages = vpn;
+        vm->cache_fetch[index].page_addr = page_addr;
     }
-#ifdef MMU_CACHE_STATS
+    /* TLB hit */
     else {
-        vm->cache_fetch.hits++;
-    }
+#ifdef MMU_CACHE_STATS
+        vm->cache_fetch[index].tlb_hits++;
 #endif
-    *value = vm->cache_fetch.page_addr[(addr >> 2) & MASK(RV_PAGE_SHIFT - 2)];
+    }
+
+    *value =
+        vm->cache_fetch[index].page_addr[(addr >> 2) & MASK(RV_PAGE_SHIFT - 2)];
+
+    /* fill into the I-cache */
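+    /* Remember the host pointer of this block so subsequent fetches can
+     * bypass the TLB lookup. */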
+    uint32_t block_off = (addr & RV_PAGE_MASK) & ~ICACHE_BLOCK_MASK;
+    blk->base = (const uint8_t *) vm->cache_fetch[index].page_addr + block_off;
+    blk->tag = tag;
+    blk->valid = true;
 }
 
 static void mmu_load(hart_t *vm,