q_malloc.c

/* $Id: q_malloc.c 5990 2009-08-20 06:58:17Z bogdan_iancu $
 *
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of opensips, a free SIP server.
 *
 * opensips is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version
 *
 * opensips is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * History:
 * --------
 *  ????-??-??  created by andrei
 *  2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
 *  2003-06-29  added qm_realloc (andrei)
 *  2004-07-19  fragment bookkeeping code and support for 64 bit
 *               memory blocks (64 bit machine & size>=2^32) (andrei)
 *              GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
 *  2005-03-02  added qm_info() (andrei)
 *  2005-12-12  fixed realloc shrink real_used & used accounting;
 *              fixed initial size (andrei)
 */
#include "precompile.h"  /* assumed to provide <windows.h>
                          * (InterlockedCompareExchange, InterlockedExchange,
                          * Sleep) used by qm_lock()/qm_unlock() below */
#include <string.h>      /* memset */

#include "q_malloc.h"
/* useful macros */
#define FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))

#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))

#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )

#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

#define ROUNDTO_MASK	(~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)	(((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)

/*
#define ROUNDUP(s)	(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/

/* finds the hash value for s, s=ROUNDTO multiple */
#define GET_HASH(s)	( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
			(unsigned long)(s)/ROUNDTO: \
			QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
			QM_MALLOC_OPTIMIZE_FACTOR+1 )

#define UN_HASH(h)	( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
			(unsigned long)(h)*ROUNDTO: \
			1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
				QM_MALLOC_OPTIMIZE_FACTOR-1)\
			)
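
/* Worked example (illustrative; assumes the usual q_malloc.h values
 * ROUNDTO=8, QM_MALLOC_OPTIMIZE_FACTOR=14, QM_MALLOC_OPTIMIZE=16384,
 * which this file does not show): sizes up to QM_MALLOC_OPTIMIZE get one
 * bucket per ROUNDTO step, e.g. GET_HASH(64)==8, while bigger sizes share
 * power-of-two buckets, e.g. GET_HASH(32768)==2048+15-14+1==2050.
 * UN_HASH(2050)==1UL<<15==32768 recovers the smallest size mapping to
 * that bucket. */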

/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)	(1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE

/* computes hash number for big buckets */
__inline static unsigned long big_hash_idx(unsigned long s)
{
	int idx;

	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
	 * index = i such that 2^(i+1) > s >= 2^i,
	 * i.e. the position of the highest set bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))); s<<=1, idx--);
	return idx;
}
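
/* e.g. big_hash_idx(0x10)==4 and big_hash_idx(0x1234)==12: the loop
 * simply locates the most significant set bit of s */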

static __inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag* f;
	struct qm_frag* prev;
	int hash;

	hash=GET_HASH(frag->size);
	/* each bucket is kept ordered by size, smallest first */
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
			f=f->u.nxt_free){
		if (frag->size <= f->size) break;
	}
	/* insert it here, just before f */
	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	frag->u.nxt_free=f;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;
}

/* init malloc and return a qm_block */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct qm_block* qm;
	unsigned long init_overhead;
	int h;

	/* make address and size multiples of ROUNDTO */
	start=(char*)ROUNDUP((unsigned long)address);
	if (size<(unsigned long)(start-address)) return 0;
	size-=(start-address);
	if (size<(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!! */
		return 0;
	}
	end=start+size;
	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->size=size;
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;
	size-=init_overhead;

	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment */
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

	/* init free_hash* */
	for (h=0; h<QM_HASH_SIZE; h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;
	}
	/* link initial fragment into the free list */
	qm_insert_free(qm, qm->first_frag);
	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	  qm->last_frag_end->prev_free=&(qm->free_lst);
	*/
	return qm;
}
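
#if 0
/* Illustrative sketch, not part of the original file; the pool buffer and
 * function name are hypothetical. qm_malloc_init() rounds the start address
 * up and the size down to ROUNDTO multiples, so a raw, possibly unaligned
 * region is acceptable; it returns 0 if the region cannot even hold the
 * control structures. */
static char example_pool[1024*1024];

static struct qm_block* example_init(void)
{
	struct qm_block* qm;

	qm=qm_malloc_init(example_pool, sizeof(example_pool));
	if (qm==0){
		/* region too small for the qm_block + first fragment */
		return 0;
	}
	return qm; /* ready for qm_malloc()/qm_free() */
}
#endif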

static __inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag* prev;
	struct qm_frag* next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;
}

static __inline struct qm_frag* qm_find_free(struct qm_block* qm,
						unsigned long size,
						int* h)
{
	int hash;
	struct qm_frag* f;

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
			if (f->size>=size){ *h=hash; return f; }
		}
		/* try in a bigger bucket */
	}
	/* not found */
	return 0;
}

/* returns 0 on success, -1 on error;
 * new_size <= f->size and already rounded up to a ROUNDTO multiple! */
static __inline
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest>(FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){ /* the residue fragm. is big enough */
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/* split the fragment */
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
		/* reinsert n in free list */
		qm_insert_free(qm, n);
		return 0;
	}else{
		/* we cannot split this fragment any more */
		return -1;
	}
}
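
/* Worked example (illustrative; on a plain LP64 build without debug fields
 * FRAG_OVERHEAD would be 32 bytes, though the real value depends on
 * q_malloc.h): splitting a 256-byte free fragment for a 64-byte request
 * leaves rest==192 >= FRAG_OVERHEAD+new_size, so f shrinks to 64 bytes and
 * a new free fragment of 192-FRAG_OVERHEAD==160 bytes is inserted after
 * it. */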

void* qm_malloc(struct qm_block* qm, unsigned long size)
{
	struct qm_frag* f;
	int hash;

	/* size must be a multiple of ROUNDTO */
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;
	/* search for a suitable free frag */
	if ((f=qm_find_free(qm, size, &hash))!=0){
		/* we found it! */
		/* detach it from the free list */
		qm_detach_free(qm, f);
		/* mark it as "busy" */
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
		split_frag(qm, f, size);
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}

void qm_free(struct qm_block* qm, void* p)
{
	struct qm_frag* f;
	struct qm_frag* prev;
	struct qm_frag* next;
	unsigned long size;

	if (p==0) {
		return;
	}
	prev=next=0;
	f=(struct qm_frag*)((char*)p-sizeof(struct qm_frag));
	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

	/* join adjacent free fragments if possible */
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
		/* join with the next fragment */
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}
	if (f > qm->first_frag){
		prev=FRAG_PREV(f);
		/* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
			sizeof(struct qm_frag_end))->size); */
		if (prev->u.is_free){
			/* join with the previous fragment */
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	f->size=size;
	FRAG_END(f)->size=f->size;
	qm_insert_free(qm, f);
}
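
#if 0
/* Illustrative sketch (hypothetical caller): a round trip through the
 * allocator. The request is rounded up to a ROUNDTO multiple inside
 * qm_malloc(); qm_free(qm, 0) is a no-op, mirroring free(NULL), and
 * freeing coalesces the fragment with its free neighbours. */
static void example_round_trip(struct qm_block* qm)
{
	char* buf;

	buf=(char*)qm_malloc(qm, 100);
	if (buf==0)
		return; /* no free fragment large enough */
	memset(buf, 0, 100);
	qm_free(qm, buf);
}
#endif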

void qm_lock(struct qm_block* qm)
{
	DWORD i;
	DWORD spin = 256;

	while (InterlockedCompareExchange((LONG*)&qm->lock, 1, 0) == 1) {
		/* lock is taken: spin-read up to `spin` times before yielding;
		 * the counter must be reset on every attempt */
		for (i = 0; i < spin; ++i)
			if (*(volatile LONG*)&qm->lock == 0) /* volatile read, so the
							compiler cannot hoist it */
				break;
		if (i >= spin)
			Sleep(1); /* yield cpu */
	}
}

void qm_unlock(struct qm_block* qm)
{
	InterlockedExchange((LONG*)&qm->lock, 0);
}
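
#if 0
/* Illustrative sketch: qm_malloc()/qm_free() themselves are not thread
 * safe, so a multi-threaded caller would presumably bracket them with the
 * spinlock above (an assumption; this file does not show the callers). */
static void* example_locked_alloc(struct qm_block* qm, unsigned long size)
{
	void* p;

	qm_lock(qm);
	p=qm_malloc(qm, size);
	qm_unlock(qm);
	return p;
}
#endif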