/* $Id: q_malloc.c 5990 2009-08-20 06:58:17Z bogdan_iancu $
 *
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of opensips, a free SIP server.
 *
 * opensips is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * opensips is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * History:
 * --------
 *  ????-??-??  created by andrei
 *  2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
 *  2003-06-29  added qm_realloc (andrei)
 *  2004-07-19  fragments book keeping code and support for 64 bits
 *               memory blocks (64 bits machine & size >= 2^32) (andrei)
 *              GET_HASH s/ 4Gb mem., switched to long (andrei)
 *  2005-03-02  added qm_info() (andrei)
 *  2005-12-12  fixed realloc shrink real_used & used accounting;
 *              fixed initial size (andrei)
 */

#include "precompile.h"
#include "q_malloc.h"

/* useful macros */

/* the qm_frag_end footer sits right after the fragment's payload */
#define FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))

#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))

#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )

#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

#define ROUNDTO_MASK	(~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)	(((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)

/*
#define ROUNDUP(s)	(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/

/* finds the hash value for s, s=ROUNDTO multiple */
#define GET_HASH(s)	( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
			(unsigned long)(s)/ROUNDTO: \
			QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
				QM_MALLOC_OPTIMIZE_FACTOR+1 )

#define UN_HASH(h)	( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
			(unsigned long)(h)*ROUNDTO: \
			1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
				QM_MALLOC_OPTIMIZE_FACTOR-1)\
			)

/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)	(1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE


/* computes hash number for big buckets */
__inline static unsigned long big_hash_idx(unsigned long s)
{
	int idx;
	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
	 * index = i such that 2^(i+1) > s >= 2^i,
	 * i.e. the position of the most significant set bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
	return idx;
}


/* inserts a free fragment into its size-ordered free-list bucket */
static __inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag* f;
	struct qm_frag* prev;
	int hash;

	hash=GET_HASH(frag->size);
	/* keep each bucket sorted by size: stop at the first fragment
	 * at least as big as the one being inserted */
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
			f=f->u.nxt_free){
		if (frag->size <= f->size) break;
	}
	/* insert it here */
	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	frag->u.nxt_free=f;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;
}
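
/* A minimal sketch of what the two-tier bucket scheme above yields: sizes up
 * to QM_MALLOC_OPTIMIZE map linearly, one bucket per ROUNDTO step, while
 * larger sizes share one bucket per power of two. Illustrative only, not part
 * of the allocator: the QM_HASH_DEMO guard is a hypothetical flag introduced
 * here, and the exact numbers printed depend on the ROUNDTO,
 * QM_MALLOC_OPTIMIZE and QM_MALLOC_OPTIMIZE_FACTOR values in q_malloc.h. */
#ifdef QM_HASH_DEMO
#include <stdio.h>
static void qm_hash_demo(void)
{
	unsigned long s;

	/* walk some rounded sizes and show which bucket each lands in;
	 * UN_HASH(GET_HASH(s)) is the smallest size held by that bucket */
	for (s=ROUNDTO; s<=16*QM_MALLOC_OPTIMIZE; s*=2)
		printf("size %6lu -> hash %4lu (bucket min size %lu)\n",
			s, GET_HASH(s), UN_HASH(GET_HASH(s)));
}
#endif /* QM_HASH_DEMO */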
/* init malloc and return a qm_block */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct qm_block* qm;
	unsigned long init_overhead;
	int h;

	/* make address and size a multiple of ROUNDTO */
	start=(char*)ROUNDUP((unsigned long) address);
	if (size<(unsigned long)(start-address)) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);

	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!! */
		return 0;
	}
	end=start+size;
	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->size=size;
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;
	size-=init_overhead;

	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment */
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

	/* init free_hash*: every bucket starts as an empty circular list */
	for (h=0; h<QM_HASH_SIZE; h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;
	}

	/* link initial fragment into the free list */
	qm_insert_free(qm, qm->first_frag);

	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	  qm->last_frag_end->prev_free=&(qm->free_lst);
	*/

	return qm;
}


static __inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag *prev;
	struct qm_frag *next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;
}


static __inline struct qm_frag* qm_find_free(struct qm_block* qm,
						unsigned long size, int* h)
{
	int hash;
	struct qm_frag* f;

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
			if (f->size>=size){ *h=hash; return f; }
		}
		/* try in a bigger bucket */
	}
	/* not found */
	return 0;
}


/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already! */
static __inline int split_frag(struct qm_block* qm, struct qm_frag* f,
				unsigned long new_size)
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is
											big enough */
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/* split the fragment */
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
		/* reinsert n in free list */
		qm_insert_free(qm, n);
		return 0;
	}else{
		/* we cannot split this fragment any more */
		return -1;
	}
}


void* qm_malloc(struct qm_block* qm, unsigned long size)
{
	struct qm_frag* f;
	int hash;

	/* round the requested size up to a ROUNDTO multiple */
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;

	/* search for a suitable free frag */
	if ((f=qm_find_free(qm, size, &hash))!=0){
		/* we found it! */
		/* detach it from the free list */
		qm_detach_free(qm, f);
		/* mark it as "busy" */
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
		split_frag(qm, f, size);
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}


void qm_free(struct qm_block* qm, void* p)
{
	struct qm_frag* f;
	struct qm_frag* prev;
	struct qm_frag* next;
	unsigned long size;

	if (p==0) {
		return;
	}
	prev=next=0;
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

	/* join adjacent free fragments if possible */
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
		/* join */
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}

	if (f > qm->first_frag){
		prev=FRAG_PREV(f);
		/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
			sizeof(struct qm_frag_end))->size); */
		if (prev->u.is_free){
			/* join */
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	f->size=size;
	FRAG_END(f)->size=f->size;
	qm_insert_free(qm, f);
}
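
/* A minimal usage sketch: carve a pool out of a static buffer, allocate from
 * it, and release. Illustrative only; QM_USAGE_DEMO is a hypothetical guard
 * introduced here and the pool size is arbitrary. In opensips proper the
 * buffer would be a private or shared memory segment. */
#ifdef QM_USAGE_DEMO
static char qm_demo_pool[64*1024];

static int qm_usage_demo(void)
{
	struct qm_block* qm;
	void* p;

	/* qm_malloc_init rounds the address up and the size down to ROUNDTO
	 * multiples, then lays out its control structures in place */
	qm=qm_malloc_init(qm_demo_pool, sizeof(qm_demo_pool));
	if (qm==0) return -1;

	p=qm_malloc(qm, 123); /* internally rounded up to a ROUNDTO multiple */
	if (p==0) return -1;
	qm_free(qm, p); /* coalesces with free neighbours when possible */
	return 0;
}
#endif /* QM_USAGE_DEMO */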
/* simple spin lock protecting the block: spin briefly, then yield the cpu
 * (uses the win32 Interlocked* primitives, matching the precompile.h port) */
void qm_lock(struct qm_block* qm)
{
	DWORD i;
	DWORD spin = 256;

	while (InterlockedCompareExchange((LONG*)&qm->lock, 1, 0) == 1) {
		/* reset the spin counter on every retry; otherwise we would
		 * stop spinning (and always Sleep) after the first round */
		for (i = 0; i < spin; ++i)
			if (qm->lock == 0)
				break;
		if (i >= spin)
			Sleep(1); /* yield cpu */
	}
}

void qm_unlock(struct qm_block* qm)
{
	InterlockedExchange((LONG*)&qm->lock, 0);
}
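
/* A sketch of how the lock is meant to wrap allocator calls when a block is
 * shared between threads; illustrative only (QM_LOCK_DEMO is a hypothetical
 * guard introduced here). The allocator itself does no locking, so every
 * qm_malloc/qm_free on a shared block must happen under qm_lock. */
#ifdef QM_LOCK_DEMO
static void* qm_malloc_locked(struct qm_block* qm, unsigned long size)
{
	void* p;

	qm_lock(qm);
	p=qm_malloc(qm, size);
	qm_unlock(qm);
	return p;
}

static void qm_free_locked(struct qm_block* qm, void* p)
{
	qm_lock(qm);
	qm_free(qm, p);
	qm_unlock(qm);
}
#endif /* QM_LOCK_DEMO */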