shithub: mc

ref: 354cd74663da3fd2ba71d09ff88006f07cccd67b
parent: dc5040ca44220629fb40e077ae8127fc764f43db
parent: be7a20c98489d1c140cee53ef29e046025afa3ce
author: Ori Bernstein <[email protected]>
date: Tue Aug 7 05:53:47 EDT 2012

Merge branch 'master' of git+ssh://mimir.eigenstate.org/git/ori/libmyr

Conflicts:
	fmt.myr
	test.myr

--- a/alloc.myr
+++ b/alloc.myr
@@ -18,9 +18,9 @@
 const Zslab	= 0 castto(slab*)
 const Zchunk	= 0 castto(chunk*)
 
-const Slabsz 	= 4096	/* on the systems this supports, anyways... */
+const Slabsz 	= 1048576	/* 1 meg slabs */
 const Cachemax	= 16	/* maximum number of slabs in the cache */
-const Bucketmax	= 1024	/* Slabsz / 8; a balance. */
+const Bucketmax	= 32768	/* Slabsz / 32; a balance. */
 const Align	= 16	/* minimum allocation alignment */
 
 var buckets	: bucket[32] /* excessive */
@@ -35,6 +35,7 @@
 ;;
 
 type slab = struct
+	head	: byte* /* head of virtual addresses, so we don't leak address space */
 	next	: slab* /* the next slab on the chain */
 	freehd	: chunk*	/* the nodes we're allocating */
 	nfree	: size  /* the number of free nodes */
@@ -114,12 +115,19 @@
 		bkt.cache = s.next
 		bkt.ncache--
 	;;
-	p = mmap(Zbyte, Slabsz, Mprotrw, Mpriv | Manon, -1, 0)
+	/* tricky: we need power of two alignment, so we allocate double the
+	   needed size, chop off the unaligned ends, and waste the address
+	   space. Since the OS is "smart enough", this shouldn't actually
+	   cost us memory, and 64 bits of address space means that we're not
+	   going to have issues with running out of address space for a
+	   while. On a 32 bit system this would be a bad idea. */
+	p = mmap(Zbyte, Slabsz*2, Mprotrw, Mpriv | Manon, -1, 0)
 	if p == Mapbad
 		die("Unable to mmap")
 	;;
 
-	s = p castto(slab*)
+	s = align(p castto(intptr), Slabsz) castto(slab*)
+	s.head = p
 	s.nfree = bkt.nper
 	/* skip past the slab header */
 	off = align(sizeof(slab), Align)
@@ -174,7 +182,9 @@
 			s.next = bkt.cache
 			bkt.cache = s
 		else
-			munmap(s castto(byte*), Slabsz)
+			/* we mapped 2*Slabsz so we could align it,
+			 so we need to unmap the same */
+			munmap(s.head, Slabsz*2)
 		;;
 	;;
 	s.nfree++
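
The comment in the hunk above explains the over-map-and-align trick: mmap twice the slab size, round the returned pointer up to the next Slabsz boundary, and stash the raw mapping address in the slab header so the whole region can be unmapped later. A minimal standalone C sketch of the same idea (Linux-flavoured; slab_map, slab_unmap and SLABSZ are illustrative names, not libmyr's):

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define SLABSZ (1024 * 1024)    /* 1 meg slabs, as in the patch */

struct slab {
    void *head;     /* start of the raw mapping, needed for munmap */
    /* ... allocator bookkeeping would follow ... */
};

static void *slab_map(struct slab *s)
{
    void *p = mmap(NULL, SLABSZ * 2, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    s->head = p;
    /* round up to the next SLABSZ boundary; SLABSZ is a power of two */
    uintptr_t a = ((uintptr_t)p + SLABSZ - 1) & ~(uintptr_t)(SLABSZ - 1);
    return (void *)a;
}

static void slab_unmap(struct slab *s)
{
    /* we mapped 2*SLABSZ, so we unmap the same amount from the raw head */
    munmap(s->head, SLABSZ * 2);
}

int main(void)
{
    struct slab s;
    void *slab = slab_map(&s);
    printf("aligned slab at %p (raw mapping %p)\n", slab, s.head);
    slab_unmap(&s);
    return 0;
}

Since anonymous pages are only backed once touched, the discarded halves of the double-sized mapping cost address space rather than memory, which is what the comment means by this being cheap on 64-bit and a bad idea on 32-bit.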
--- a/bld.sh
+++ b/bld.sh
@@ -44,6 +44,7 @@
 MYR="types.myr \
     sys-$SYS.myr \
     die.myr \
+    varargs.myr \
     alloc.myr\
     utf.myr \
     fmt.myr \
--- a/fmt.myr
+++ b/fmt.myr
@@ -2,9 +2,11 @@
 use "sys.use"
 use "types.use"
 use "utf.use"
+use "varargs.use"
 
 pkg std =
 	const bfmt	: (buf : byte[,], fmt : byte[,], args:... -> size)
+	const bfmtv	: (buf : byte[,], fmt : byte[,], ap:valist -> size)
 	const put	: (fmt : byte[,], args:... -> size)
 ;;
 
@@ -12,17 +14,24 @@
 	var buf : byte[2048]
 	var n
 	
-	n = bfmt(buf[0,2048], fmt, args)
+	n = bfmtv(buf[0,2048], fmt, vastart(&args))
 	write(1, buf[0,n])
 	-> n
 }
 
 const bfmt = {buf, fmt, args
+	-> bfmtv(buf, fmt, vastart(&args))
+}
+
+const bfmtv = {buf, fmt, ap
 	var c
 	var n
-	var ap
-	var i_val;
-	var s_val;
+	var s_val : byte[,]
+	var b_val : int8
+	var w_val : int16
+	var i_val : int32
+	var l_val : int64
+	var p_val : byte*
 
 	n = 0
 	while fmt.len
@@ -29,35 +38,31 @@
 		(c, fmt) = striter(fmt)
 		if c == '%'
 			(c, fmt) = striter(fmt)
-			ap = &args
 			match c
 			's':
-				s_val = *(ap castto(byte[,]*))
+				(s_val, ap) = vanext(ap)
 				n += strfmt(buf[n, buf.len], s_val)
 				;;
 			/* format integers */
 			'b':
-				i_val = *(ap castto(int8*)) castto(int64)
-				n += intfmt(buf[n, buf.len], i_val, 10)
+				(b_val, ap) = vanext(ap)
+				n += intfmt(buf[n, buf.len], b_val castto(int64), 10)
 				;;
-			/*
 			'w':
-				i_val = *(ap castto(int16*)) castto(int64)
-				n += intfmt(buf[n, buf.len], i_val, 10)
+				(w_val, ap) = vanext(ap)
+				n += intfmt(buf[n, buf.len], w_val castto(int64), 10)
 				;;
-			*/
 			'i':
-				i_val = *(ap castto(int32*)) castto(int64)
-				n += intfmt(buf[n, buf.len], i_val, 10)
+				(i_val, ap) = vanext(ap)
+				n += intfmt(buf[n, buf.len], i_val castto(int64), 10)
 				;;
 			'l':
-				i_val = *(ap castto(int64*))
-				n += intfmt(buf[n, buf.len], i_val, 10)
+				(l_val, ap) = vanext(ap)
+				n += intfmt(buf[n, buf.len], l_val, 10)
 				;;
 			'p':
-				n += strfmt(buf[n,buf.len], "0x")
-				i_val = *(ap castto(int64*))
-				n += intfmt(buf[n, buf.len], i_val, 16)
+				(p_val, ap) = vanext(ap)
+				n += intfmt(buf[n, buf.len], p_val castto(int64), 16)
 				;;
 			;;
 		else
@@ -113,3 +118,4 @@
 		-> b
 	;;
 }
+
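
Splitting bfmt into a thin variadic wrapper and a valist-taking worker (bfmtv) follows the same shape as C's printf/vprintf pair: the entry point captures its ... arguments with vastart and hands the cursor to the worker, so callers like put can reuse the worker with their own list. A minimal C sketch of that structure, using standard stdarg rather than libmyr's valist (logmsg/logmsgv are made-up names):

#include <stdarg.h>
#include <stdio.h>

/* worker: formats from an already-captured argument list */
static int logmsgv(const char *fmt, va_list ap)
{
    return vfprintf(stderr, fmt, ap);
}

/* variadic wrapper: captures its own arguments and delegates */
static int logmsg(const char *fmt, ...)
{
    va_list ap;
    int n;

    va_start(ap, fmt);
    n = logmsgv(fmt, ap);
    va_end(ap);
    return n;
}

int main(void)
{
    logmsg("format output %d %d %s\n", 123, 321, "asdf");
    return 0;
}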
--- a/hello.myr
+++ /dev/null
@@ -1,19 +1,0 @@
-use "sys.use"
-use "alloc.use"
-extern const printf : (str:byte*, ap:... -> void)
-const main = {
-	var x : byte*[1024]
-	var sz
-	var i
-
-	for sz = 1; sz < 65536; sz *= 2
-		for i = 0; i < 1024; i++
-			x[i] = std.bytealloc(sz)
-		;;
-		for i = 0; i < 1024; i++
-			std.bytefree(x[i], sz)
-		;;
-	;;
-	std.write(1, "Hello world\n")
-	printf("some string %d %d \n" castto(byte*), 123, 456)
-}
--- a/test.myr
+++ b/test.myr
@@ -25,7 +25,7 @@
 	;;
 	std.write(1, "Hello, 世界\n")
 	chartypes()
-	std.put("format output %b\n", 12)
+	std.put("format output %i %i %s\n", 123, 321, "asdf")
 }
 
 const chartypes = {
--- /dev/null
+++ b/varargs.myr
@@ -1,0 +1,35 @@
+use "types.use"
+
+pkg std =
+	type valist
+
+	const vastart	: (args : ...* -> valist)
+	generic vanext	: (ap : valist -> [@a, valist])
+;;
+
+type valist = byte*
+
+const vastart = {args
+	-> args castto(valist)
+}
+
+generic vanext = {ap -> [@a, valist]
+	var v : @a
+	var sz
+	var p
+
+	if sizeof(@a) > 8
+		sz = 8
+	else
+		sz = sizeof(@a)
+	;;
+
+	p = ap castto(intptr)
+	p = (p + sz - 1) & ~(sz - 1)
+	ap = p castto(valist)
+
+	v = *(ap castto(@a*))
+
+	ap = ((p castto(intptr)) + sz) castto(valist)
+	-> (v, ap)
+}
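
vanext above walks a byte cursor over the spilled ... arguments: round the cursor up to the argument's size (capped at 8), load the value, then step past it. A small C sketch of that cursor arithmetic for word-sized-and-smaller arguments, with a hand-packed argument area standing in for the caller's spill region (all names here are illustrative, not libmyr's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef unsigned char *valist;

/* Pull one value of 'sz' bytes off the cursor, mirroring generic vanext. */
static valist vanext(valist ap, void *out, size_t sz)
{
    size_t align = sz > 8 ? 8 : sz;
    uintptr_t p = (uintptr_t)ap;
    p = (p + align - 1) & ~(uintptr_t)(align - 1);  /* align the cursor */
    memcpy(out, (void *)p, sz);                     /* read the value */
    return (valist)(p + sz);                        /* advance past it */
}

int main(void)
{
    /* pack an int32 followed by an int64 the way a caller might spill them */
    uint64_t slots[2] = {0};                 /* 8-byte aligned backing store */
    unsigned char *area = (unsigned char *)slots;
    int32_t a = 123;
    int64_t b = 321;
    memcpy(area, &a, sizeof a);
    memcpy(area + 8, &b, sizeof b);          /* int64 lands in the next 8-byte slot */

    valist ap = area;
    int32_t x;
    int64_t y;
    ap = vanext(ap, &x, sizeof x);
    ap = vanext(ap, &y, sizeof y);
    printf("%d %lld\n", (int)x, (long long)y);
    return 0;
}

The 8-byte cap on the alignment step mirrors the Myrddin code, which never aligns the cursor to more than a machine word regardless of the argument's type.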