ref: c9f91d50154015ef31b6e63131847742893ffc91
parent: 8061f30e559569943324a666ee2fcc74048785cd
author: cinap_lenrek <[email protected]>
date: Sat May 31 23:13:58 EDT 2014
pc64: allocate palloc.pages from upages

the palloc.pages array takes around 5% of upages, which gives us:

16GB = ~0.8GB
32GB = ~1.6GB
64GB = ~3.2GB

we only have 2GB of address space above KZERO, so this will not work for long. instead, pageinit() was altered to accept preallocated memory in palloc.pages, and preallocpages() in pc64/main.c allocates the memory from upages, mapping it in the VMAP area (which has 512GB).

the drawback is that we can no longer poke at Page structures from /proc/n/mem, as the VMAP area is not accessible from it.
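for a rough sense of the numbers above, here is a minimal standalone sketch (hosted C, not kernel code) of the arithmetic. BY2PG matches pc64, but the 200-byte sizeof(Page) is an assumption inferred from the 16GB -> ~0.8GB ratio quoted above, not taken from the source:

#include <stdio.h>

/*
 * sketch of the ~5% figure: BY2PG matches pc64, PAGESZ is a
 * hypothetical sizeof(Page) chosen to reproduce the quoted ratios.
 */
enum {
	BY2PG	= 4096,
	PAGESZ	= 200,	/* assumed sizeof(Page) on amd64 */
};

int
main(void)
{
	double gib, npages, array;
	double ram[] = { 16, 32, 64 };	/* GB of upages */
	int i;

	gib = 1024.0*1024.0*1024.0;
	for(i = 0; i < 3; i++){
		npages = ram[i]*gib / BY2PG;	/* number of user pages */
		array = npages * PAGESZ;	/* memory needed for palloc.pages */
		printf("%.0fGB upages -> %.1fGB of Page structs\n",
			ram[i], array/gib);
	}
	/* with only 2GB above KZERO, the array soon stops fitting there */
	return 0;
}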
--- a/sys/src/9/pc64/fns.h
+++ b/sys/src/9/pc64/fns.h
@@ -146,7 +146,7 @@
void pcmspecialclose(int);
void (*_pcmspecialclose)(int);
void pcmunmap(int, PCMmap*);
-void pmap(uintptr *, uintptr, uintptr, int);
+void pmap(uintptr *, uintptr, uintptr, vlong);
void procrestore(Proc*);
void procsave(Proc*);
void procsetup(Proc*);
--- a/sys/src/9/pc64/main.c
+++ b/sys/src/9/pc64/main.c
@@ -231,8 +231,7 @@
* (probably ~300KB).
*/
kpages *= BY2PG;
- kpages -= conf.upages*sizeof(Page)
- + conf.nproc*sizeof(Proc)
+ kpages -= conf.nproc*sizeof(Proc)
+ conf.nimage*sizeof(Image)
+ conf.nswap
+ conf.nswppo*sizeof(Page*);
@@ -250,7 +249,45 @@
}
}
+/*
+ * The palloc.pages array takes around 5% of the amount
+ * of upages, which can be a large chunk out of the 2GB
+ * window above KZERO, so we allocate the array from
+ * upages and map it in the VMAP window before pageinit().
+ */
+static void
+preallocpages(void)
+{
+ Pallocmem *pm;
+ uintptr va;
+ vlong size;
+ ulong np;
+ int i;
+
+ np = 0;
+ for(i=0; i<nelem(palloc.mem); i++){
+ pm = &palloc.mem[i];
+ np += pm->npage;
+ }
+ size = (uvlong)np * BY2PG;
+ size += sizeof(Page) + BY2PG; /* round up */
+ size = (size / (sizeof(Page) + BY2PG)) * sizeof(Page);
+ size = PGROUND(size);
+
+ np = size/BY2PG;
+ for(i=0; i<nelem(palloc.mem); i++){
+ pm = &palloc.mem[i];
+ if((pm->base + size) <= VMAPSIZE && pm->npage >= np){
+ va = VMAP + pm->base;
+ pmap(m->pml4, pm->base | PTEGLOBAL|PTEWRITE|PTEVALID, va, size);
+ palloc.pages = (Page*)va;
+ pm->base += size;
+ pm->npage -= np;
+ break;
+ }
+ }
+}
+
void
machinit(void)
{
@@ -492,6 +529,7 @@
links();
conf.monitor = 1;
chandevreset();
+ preallocpages();
pageinit();
swapinit();
userinit();
--- a/sys/src/9/pc64/mem.h
+++ b/sys/src/9/pc64/mem.h
@@ -56,7 +56,7 @@
#define KTZERO (KZERO+1*MiB+64*KiB)
#define VMAP (0xffffff0000000000ull)
-#define VMAPSIZE (512*GiB)
+#define VMAPSIZE (512ull*GiB)
#define KMAP (0xfffffe8000000000ull)
#define KMAPSIZE (2*MiB)
--- a/sys/src/9/pc64/memory.c
+++ b/sys/src/9/pc64/memory.c
@@ -371,25 +371,6 @@
return sigscan(KADDR(0xe0000), 0x20000, signature);
}
-static void
-lowraminit(void)
-{
- uintptr pa, x;
-
- /*
- * Initialise the memory bank information for conventional memory
- * (i.e. less than 640KB). The base is the first location after the
- * bootstrap processor MMU information and the limit is obtained from
- * the BIOS data area.
- */
- x = PADDR(PGROUND((uintptr)end));
- pa = MemMin;
- if(x > pa)
- panic("kernel too big");
- mapfree(&rmapram, x, pa-x);
- memset(KADDR(x), 0, pa-x); /* keep us honest */
-}
-
typedef struct Emap Emap;
struct Emap
{
@@ -421,7 +402,7 @@
static void
map(uintptr base, uintptr len, int type)
{
- uintptr e, n, *pte, flags, maxkpa;
+ uintptr n, flags, maxkpa;
/*
* Split any call crossing MemMin to make below simpler.
@@ -433,7 +414,7 @@
}
/*
- * Let lowraminit and umbscan hash out the low MemMin.
+ * Let umbscan hash out the low MemMin.
*/
if(base < MemMin)
return;
@@ -497,21 +478,6 @@
break;
}
- /*
- * bottom MemMin is already mapped - just twiddle flags.
- * (not currently used - see above)
- */
- if(base < MemMin){
- e = base+len;
- base &= ~((uintptr)PGLSZ(1)-1);
- for(; base<e; base+=PGLSZ(1)){
- pte = mmuwalk(m->pml4, base+KZERO, 1, 0);
- if(pte != 0 && *pte & PTEVALID)
- *pte |= flags;
- }
- return;
- }
-
if(flags){
maxkpa = -KZERO;
if(base >= maxkpa)
@@ -595,7 +561,6 @@
uintptr lost;
umbscan();
- // lowraminit();
e820scan();
/*
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -282,13 +282,13 @@
}
void
-pmap(uintptr *pml4, uintptr pa, uintptr va, int size)
+pmap(uintptr *pml4, uintptr pa, uintptr va, vlong size)
{
uintptr *pte, *ptee, flags;
int z, l;
if((size <= 0) || va < VMAP)
- panic("pmap: pa=%#p va=%#p size=%d", pa, va, size);
+ panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
flags = pa;
pa = PPN(pa);
flags -= pa;
@@ -310,7 +310,7 @@
size += z;
continue;
}
- panic("pmap: pa=%#p va=%#p size=%d", pa, va, size);
+ panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
}
ptee = pte + ptecount(va, l);
while(size > 0 && pte < ptee){
--- a/sys/src/9/port/page.c
+++ b/sys/src/9/port/page.c
@@ -16,16 +16,19 @@
Page *p;
Pallocmem *pm;
vlong m, v, u;
- ulong np;
- np = 0;
- for(i=0; i<nelem(palloc.mem); i++){
- pm = &palloc.mem[i];
- np += pm->npage;
+ if(palloc.pages == nil){
+ ulong np;
+
+ np = 0;
+ for(i=0; i<nelem(palloc.mem); i++){
+ pm = &palloc.mem[i];
+ np += pm->npage;
+ }
+ palloc.pages = xalloc(np*sizeof(Page));
+ if(palloc.pages == nil)
+ panic("pageinit");
}
- palloc.pages = xalloc(np*sizeof(Page));
- if(palloc.pages == 0)
- panic("pageinit");
color = 0;
palloc.head = palloc.pages;
@@ -33,6 +36,7 @@
for(i=0; i<nelem(palloc.mem); i++){
pm = &palloc.mem[i];
for(j=0; j<pm->npage; j++){
+ memset(p, 0, sizeof *p);
p->prev = p-1;
p->next = p+1;
p->pa = pm->base+j*BY2PG;