src/libmawk/zmalloc.c @debian/1.0.0-1

/********************************************
zmalloc.c

libmawk changes (C) 2009-2012, Tibor 'Igor2' Palinkas;
based on mawk code coming with the below copyright:

copyright 1991, Michael D. Brennan

This is a source file for mawk, an implementation of
the AWK programming language.

Mawk is distributed without warranty under the terms of
the GNU General Public License, version 2, 1991.
********************************************/

#include  "mawk.h"
#include  "zmalloc.h"
#include  "memory.h"

/*
  mawk_zmalloc() gets memory from malloc() in CHUNKS of 2048 bytes
  and cuts these chunks into smaller pieces that are multiples
  of eight bytes.  When a piece is returned via mawk_zfree(), it goes
  on a linked linear list indexed by its size.  The lists are kept in
  an array, pool[].

  E.g., if you ask for 22 bytes with p = mawk_zmalloc(22), you actually get
  a piece of size 24.  When you free it with mawk_zfree(p, 22), it is added
  to the list at pool[2].


  In more detail:
  When a block-allocated area is freed, it is put on the pool list for its
  block size (pool[blocks-1]), which is a linked list. When a new request
  comes in, we first try to serve it from the pool.

  If that fails: there is a single partially used chunk, whose unclaimed
  tail starts at MAWK->avail and is MAWK->amt_avail blocks long; the request
  is cut from the front of that unclaimed tail.  If that is impossible (not
  enough unclaimed blocks left), the unclaimed part of ->avail is put in the
  pool and a new ->avail chunk is opened.

  For corner cases see the comments in the code below.
*/
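/* A minimal usage sketch of the interface described above.  The code below
   is illustrative only and not part of libmawk (the function name is made
   up); it shows the contract that the same size must be passed to
   mawk_zfree() that was passed to mawk_zmalloc(), because no size header
   is stored with the allocation. */
#if 0
static void zmalloc_usage_sketch(mawk_state_t *MAWK)
{
	PTR p;

	p = mawk_zmalloc(MAWK, 22);	/* rounded up to 24 bytes (3 blocks) */
	/* ... use p as 22 (really 24) bytes of storage ... */
	mawk_zfree(MAWK, p, 22);	/* goes back on the list at pool[2] */
}
#endif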

#define	 CHUNK		256
 /* number of blocks to get from malloc */
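/* Assuming each ZBLOCK is 8 bytes (ZBLOCKSZ == 8), as the 2048-byte figure
   in the header comment implies, one chunk is CHUNK * ZBLOCKSZ =
   256 * 8 = 2048 bytes. */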

static void out_of_mem(mawk_state_t * MAWK);


static void out_of_mem(mawk_state_t * MAWK)
{
	static const char out[] = "out of memory";

	if (MAWK->mawk_state == EXECUTION)
		mawk_rt_error(MAWK, out);
	else {
		/* I don't think this will ever happen */
		mawk_compile_error(MAWK, out);
		mawk_exit(MAWK, 2);
	}
}

/* mawk_zmalloc() is a macro wrapper around mawk_bmalloc() ("block malloc") */
PTR mawk_bmalloc(mawk_state_t *MAWK, register unsigned blocks)
{
	register ZBLOCK *p;

	if (blocks > POOLSZ) {
		/* request is larger than the largest pooled size: allocate it
		   directly with malloc() */
		p = (ZBLOCK *) mawk_malloc(MAWK, blocks << ZSHIFT);
		if (!p)
			out_of_mem(MAWK);
		return (PTR) p;
	}

	if ((p = MAWK->pool[blocks - 1])) {
		/* remove the first item of the linked list and return it */
		MAWK->pool[blocks - 1] = p->link;
		return (PTR) p;
	}

	if (blocks > MAWK->amt_avail) {
		if (MAWK->amt_avail != 0) {
			/* the request is bigger than what is left of the current chunk;
			   mark the remaining blocks of the chunk free and put them in the
			   corresponding pool. We can do this because 'blocks' is small
			   enough to be pooled and the unclaimed area is even smaller,
			   so there is certainly a pool list for it. */

			MAWK->avail->link = MAWK->pool[--MAWK->amt_avail];
			MAWK->pool[MAWK->amt_avail] = MAWK->avail;
		}

		/* no unclaimed memory is available in the last chunk, alloc a new chunk */

		if ((MAWK->avail = (ZBLOCK *) mawk_malloc(MAWK, CHUNK * ZBLOCKSZ)) == NULL) {
			/* if we get here, we are almost out of memory: we could not
			   allocate a whole new chunk. Try to satisfy the current request
			   with a plain malloc(), as the request may be smaller than a
			   chunk.

			   WARNING: this means pool[] may end up holding not only blocks
			   carved from chunks but plain malloc()ed blocks as well (once
			   such a block is zfree'd, it is pooled like any other small
			   block). */
			MAWK->amt_avail = 0;
			p = mawk_malloc(MAWK, blocks << ZSHIFT);
			if (!p)
				out_of_mem(MAWK);
			return (PTR) p;
		}
		else {
			/* we have a new chunk to play with */
			MAWK->amt_avail = CHUNK;
		}
	}

	/* cut p from the unclaimed tail of the avail chunk - by now we have
	   made sure there are enough unclaimed blocks left */
	p = MAWK->avail;
	MAWK->avail += blocks;
	MAWK->amt_avail -= blocks;
	return (PTR) p;
}
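
/* Note on sizes: the mawk_zmalloc() macro in zmalloc.h converts a byte count
   into the block count passed here.  The exact macro lives in the header; a
   rounding of the form below is assumed for illustration only:

	blocks = (bytes + ZBLOCKSZ - 1) >> ZSHIFT;

   i.e. requests are rounded up to the next multiple of ZBLOCKSZ, which is
   why a 22-byte request occupies 24 bytes. */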

void mawk_bfree(mawk_state_t *MAWK, register PTR p, register unsigned blocks)
{
	if (blocks > POOLSZ)
		mawk_free(MAWK, p);
	else {
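		/* small block: push it on the free list for its size
		   (pool[n-1] holds free blocks that are n ZBLOCKs long) */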
		((ZBLOCK *) p)->link = MAWK->pool[--blocks];
		MAWK->pool[blocks] = (ZBLOCK *) p;
	}
}

PTR mawk_zrealloc(mawk_state_t *MAWK, register PTR p, unsigned old_size, unsigned new_size)
{
	register PTR q;

	if (new_size > (POOLSZ << ZSHIFT) && old_size > (POOLSZ << ZSHIFT)) {
		/* neither the old nor the new size fits the pool, so the block came
		   from a plain malloc(): just realloc() it */
		if (!(q = mawk_realloc(MAWK, p, new_size)))
			out_of_mem(MAWK);
	}
	else {
		/* at least one of the sizes is pool-sized: zmalloc a new block,
		   copy the payload over, then zfree the old one */
		q = mawk_zmalloc(MAWK, new_size);
		if (p != NULL) {
			memcpy(q, p, old_size < new_size ? old_size : new_size);
			mawk_zfree(MAWK, p, old_size);
		}
	}
	return q;
}
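
/* Usage sketch for mawk_zrealloc() (illustrative only; the variable names
   are hypothetical).  Unlike realloc(), the caller must also supply the old
   size, because pooled allocations carry no size header.  The block may
   move; the first min(old_len, new_len) bytes are preserved: */
#if 0
	buf = (char *) mawk_zrealloc(MAWK, buf, old_len, new_len);
	old_len = new_len;
#endif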

char *mawk_zstrclone(mawk_state_t *MAWK, const char *s)
{
	int l;
	char *ret;

	if (s == NULL)
		return NULL;

	l = strlen(s);
	ret = mawk_zmalloc(MAWK, l+1);
	memcpy(ret, s, l+1);
	return ret;
}
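
/* Usage sketch (illustrative only): a string cloned with mawk_zstrclone()
   is released with mawk_zfree(), using strlen()+1 as the size to match the
   allocation made above. */
#if 0
	char *copy = mawk_zstrclone(MAWK, "example");
	/* ... */
	mawk_zfree(MAWK, copy, strlen(copy) + 1);
#endif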


#ifndef	 __GNUC__
/* pacifier for Bison; this is really dead code */
PTR alloca(unsigned sz)
{
	/* hell just froze over */
	exit(100);
	return (PTR) 0;
}
#endif