pc64: set accessed and dirty bits in PTE to avoid write-back

We do not use the accessed and dirty bits in
page table entries, so we can just always set them.
This avoids the CPU having to atomically write
back to the page table when it sets these bits.
cinap_lenrek 2024-01-17 19:48:39 +00:00
parent 9c2e8e2b13
commit c2ec061689
3 changed files with 6 additions and 3 deletions
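
For context, a minimal sketch (not part of the commit) of the idea behind the change: leaf PTEs are installed with the accessed and dirty bits already set, so the MMU never needs a locked read-modify-write on the entry to record the first access or the first write. The bit positions match the architectural x86-64 A/D bits and the PTEACCESSED/PTEDIRTY defines added to mem.h below; the mkpte helper is hypothetical.

#include <stdint.h>

/* architectural x86-64 PTE bits; same positions as the PTEACCESSED and
 * PTEDIRTY defines added to mem.h in the diff below */
#define PTEVALID	(1ull<<0)
#define PTEWRITE	(1ull<<1)
#define PTEUSER		(1ull<<2)
#define PTEACCESSED	(1ull<<5)	/* set by the MMU on first access */
#define PTEDIRTY	(1ull<<6)	/* set by the MMU on first write */

/* hypothetical helper: build a leaf PTE with the accessed and dirty bits
 * pre-set, so the MMU never performs a locked write-back on the entry
 * just to record the first access or the first write */
uint64_t
mkpte(uint64_t pa, uint64_t flags)
{
	return pa | flags | PTEACCESSED | PTEDIRTY | PTEVALID;
}

A caller would then install, say, mkpte(pa, PTEUSER|PTEWRITE) into the leaf table, which is essentially what the putmmu and kmap hunks below now do with the kernel's own constants.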

@@ -124,7 +124,7 @@ TEXT _warp64<>(SB), 1, $-4
 	MOVL SI, AX /* PML4 */
 	MOVL AX, DX
-	ADDL $(PTSZ|PTEWRITE|PTEVALID), DX /* PDP at PML4 + PTSZ */
+	ADDL $(PTEACCESSED|PTEDIRTY|PTSZ|PTEWRITE|PTEVALID), DX /* PDP at PML4 + PTSZ */
 	MOVL DX, PML4O(0)(AX) /* PML4E for double-map */
 	MOVL DX, PML4O(KZERO)(AX) /* PML4E for KZERO */

@@ -156,6 +156,8 @@
 #define PTERONLY (0ull<<1)
 #define PTEKERNEL (0ull<<2)
 #define PTEUSER (1ull<<2)
+#define PTEACCESSED (1ull<<5)
+#define PTEDIRTY (1ull<<6)
 #define PTESIZE (1ull<<7)
 #define PTEGLOBAL (1ull<<8)
 #define PTENOEXEC ((uvlong)m->havenx<<63)

@@ -341,6 +341,7 @@ pmap(uintptr pa, uintptr va, vlong size)
 	flags = pa;
 	pa = PPN(pa);
 	flags -= pa;
+	flags |= PTEACCESSED|PTEDIRTY;
 	if(va >= KZERO)
 		flags |= PTEGLOBAL;
 	while(size > 0){
@@ -504,7 +505,7 @@ putmmu(uintptr va, uintptr pa, Page *)
 	if(pte == 0)
 		panic("putmmu: bug: va=%#p pa=%#p", va, pa);
 	old = *pte;
-	*pte = pa | PTEUSER;
+	*pte = pa | PTEACCESSED|PTEDIRTY|PTEUSER;
 	splx(x);
 	if(old & PTEVALID)
 		invlpg(va);
@@ -553,7 +554,7 @@ kmap(Page *page)
 	pte = mmuwalk(m->pml4, va, 0, 1);
 	if(pte == 0 || (*pte & PTEVALID) != 0)
 		panic("kmap: pa=%#p va=%#p", pa, va);
-	*pte = pa | PTEWRITE|PTENOEXEC|PTEVALID;
+	*pte = pa | PTEACCESSED|PTEDIRTY|PTEWRITE|PTENOEXEC|PTEVALID;
 	splx(x);
 	invlpg(va);
 	return (KMap*)va;