/* $Id: q_malloc.c 5990 2009-08-20 06:58:17Z bogdan_iancu $
 *
 * Copyright (C) 2001-2003 FhG Fokus
 *
 * This file is part of opensips, a free SIP server.
 *
 * opensips is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * opensips is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * History:
 * --------
 *  ????-??-??  created by andrei
 *  2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
 *  2003-06-29  added qm_realloc (andrei)
 *  2004-07-19  fragments book keeping code and support for 64 bit
 *              memory blocks (64 bit machine & size >= 2^32) (andrei)
 *              GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
 *  2005-03-02  added qm_info() (andrei)
 *  2005-12-12  fixed realloc shrink real_used & used accounting;
 *              fixed initial size (andrei)
 */
#include "precompile.h"
#include "q_malloc.h"
#include "memutil.h"
#include <winpr/wtypes.h>
/* useful macros */
#define FRAG_END(f) \
    ((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
    (f)->size))

#define FRAG_NEXT(f) \
    ((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
    sizeof(struct qm_frag_end)))

#define FRAG_PREV(f) \
    ( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
    ((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
    sizeof(struct qm_frag) ) )

#define PREV_FRAG_END(f) \
    ((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

#define FRAG_OVERHEAD (sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

#define ROUNDTO_MASK (~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

/*
#define ROUNDUP(s)   (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s) (((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/

/* finds the hash value for s, s = ROUNDTO multiple */
#define GET_HASH(s) ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
    (unsigned long)(s)/ROUNDTO: \
    QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
    QM_MALLOC_OPTIMIZE_FACTOR+1 )

#define UN_HASH(h) ( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
    (unsigned long)(h)*ROUNDTO: \
    1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
    QM_MALLOC_OPTIMIZE_FACTOR-1)\
    )
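
/* Illustrative bucketing example (added comment, not from the original
 * source). Assuming the stock opensips constants ROUNDTO==8,
 * QM_MALLOC_OPTIMIZE==16384 (2^14) and QM_MALLOC_OPTIMIZE_FACTOR==14 (the
 * actual values come from q_malloc.h and may differ in this port):
 *   - sizes <= 16384 get one bucket per ROUNDTO step:
 *       GET_HASH(8)==1, GET_HASH(16)==2, ..., GET_HASH(16384)==2048
 *   - bigger sizes share one bucket per power of two:
 *       GET_HASH(16392)==2049 (sizes around 2^14..2^15),
 *       GET_HASH(40000)==2050 (sizes around 2^15..2^16)
 *   - UN_HASH(2049)==1UL<<14==16384, the smallest size of that bucket. */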

/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE

/* computes the hash number for big buckets */
__inline static unsigned long big_hash_idx(unsigned long s)
{
    int idx;
    /* s is rounded => s = k*2^n (ROUNDTO=2^n)
     * idx = i such that 2^(i+1) > s >= 2^i
     *
     * => idx = position of the most significant non-null bit in s */
    idx=sizeof(long)*8-1;
    for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
    return idx;
}

/* inserts a free fragment into its size bucket, keeping the bucket list
 * sorted by increasing fragment size */
static __inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
{
    struct qm_frag* f;
    struct qm_frag* prev;
    int hash;

    hash=GET_HASH(frag->size);
    for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
        f=f->u.nxt_free){
        if (frag->size <= f->size) break;
    }
    /* insert it here */
    prev=FRAG_END(f)->prev_free;
    prev->u.nxt_free=frag;
    FRAG_END(frag)->prev_free=prev;
    frag->u.nxt_free=f;
    FRAG_END(f)->prev_free=frag;
    qm->free_hash[hash].no++;
}
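
/* Note (added for clarity): the free list is doubly linked, but the two link
 * pointers live in different places: the forward pointer (u.nxt_free) sits in
 * the qm_frag header at the start of the fragment, while the back pointer
 * (prev_free) sits in the qm_frag_end trailer reached via FRAG_END(). */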

/* initializes a qm_block over the caller supplied memory region
 * [address, address+size); returns the block handle or 0 if the region
 * is too small */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
{
    char* start;
    char* end;
    struct qm_block* qm;
    unsigned long init_overhead;
    int h;

    /* make address and size multiple of 8 */
    start=(char*)ROUNDUP((unsigned long) address);
    if (size<(unsigned long)(start-address)) return 0;
    size-=(start-address);
    if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
    size=ROUNDDOWN(size);

    init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
        sizeof(struct qm_frag_end);
    if (size < init_overhead)
    {
        /* not enough mem to create our control structures !!! */
        return 0;
    }
    end=start+size;
    qm=(struct qm_block*)start;
    memset(qm, 0, sizeof(struct qm_block));
    qm->size=size;
    qm->real_used=init_overhead;
    qm->max_real_used=qm->real_used;
    size-=init_overhead;

    qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
    qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
    /* init initial fragment */
    qm->first_frag->size=size;
    qm->last_frag_end->size=size;

    /* init free_hash* */
    for (h=0; h<QM_HASH_SIZE;h++){
        qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
        qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
        qm->free_hash[h].head.size=0;
        qm->free_hash[h].tail.size=0;
    }

    /* link initial fragment into the free list */
    qm_insert_free(qm, qm->first_frag);
    /*qm->first_frag->u.nxt_free=&(qm->free_lst);
      qm->last_frag_end->prev_free=&(qm->free_lst);
    */
    return qm;
}
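
/* Minimal usage sketch (illustrative only, not part of the original file):
 * the backing region can come from any source, e.g. a static buffer or a
 * file mapping; the names below are hypothetical.
 *
 *   static char pool_buf[1024*1024];
 *   struct qm_block* pool;
 *   void* p;
 *
 *   pool = qm_malloc_init(pool_buf, sizeof(pool_buf));
 *   if (pool) {
 *       p = qm_malloc(pool, 128);   // allocate 128 bytes from the pool
 *       qm_free(pool, p);           // return them; neighbours are coalesced
 *   }
 */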

/* unlinks a fragment from its free list */
static __inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
{
    struct qm_frag *prev;
    struct qm_frag *next;

    prev=FRAG_END(frag)->prev_free;
    next=frag->u.nxt_free;
    prev->u.nxt_free=next;
    FRAG_END(next)->prev_free=prev;
}

/* searches the free hash for the first fragment that can hold size bytes,
 * starting from the matching bucket and walking up to bigger ones;
 * returns the fragment and its bucket in *h, or 0 if none was found */
static __inline struct qm_frag* qm_find_free(struct qm_block* qm,
                                             unsigned long size,
                                             int* h)
{
    int hash;
    struct qm_frag* f;

    for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
        for (f=qm->free_hash[hash].head.u.nxt_free;
             f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
            if (f->size>=size){ *h=hash; return f; }
        }
        /* try in a bigger bucket */
    }
    /* not found */
    return 0;
}

/* splits fragment f so that its used part is new_size bytes and the rest
 * becomes a new free fragment;
 * returns 0 on success, -1 on error;
 * new_size < f->size & already rounded-up! */
static __inline
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
{
    unsigned long rest;
    struct qm_frag* n;
    struct qm_frag_end* end;

    rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
    if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
        (rest>=(FRAG_OVERHEAD+new_size))){ /* the residue fragm. is big enough */
#else
    if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
        f->size=new_size;
        /* split the fragment */
        end=FRAG_END(f);
        end->size=new_size;
        n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
        n->size=rest-FRAG_OVERHEAD;
        FRAG_END(n)->size=n->size;
        FRAG_CLEAR_USED(n); /* never used */
        qm->real_used+=FRAG_OVERHEAD;
        /* reinsert n in free list */
        qm_insert_free(qm, n);
        return 0;
    }else{
        /* we cannot split this fragment any more */
        return -1;
    }
}
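
/* Worked example of the split (illustrative, assuming FRAG_OVERHEAD of 32
 * bytes; the real value depends on the struct layouts in q_malloc.h):
 * splitting a 1024-byte free fragment for a 104-byte request leaves
 * rest = 1024-104 = 920 bytes, which is >= FRAG_OVERHEAD+new_size, so the
 * caller keeps 104 bytes and a new free fragment of 920-32 = 888 bytes is
 * inserted back into the free hash. */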

/* allocates size bytes from the pool; returns the payload pointer or 0 if
 * no suitable free fragment exists */
void* qm_malloc(struct qm_block* qm, unsigned long size)
{
    struct qm_frag* f;
    int hash;

    /* size must be a multiple of 8 */
    size=ROUNDUP(size);
    if (size>(qm->size-qm->real_used)) return 0;

    /* search for a suitable free frag */
    if ((f=qm_find_free(qm, size, &hash))!=0){
        /* we found it! */
        /* detach it from the free list */
        qm_detach_free(qm, f);
        /* mark it as "busy" */
        f->u.is_free=0;
        qm->free_hash[hash].no--;
        /* we ignore split return */
        split_frag(qm, f, size);
        qm->real_used+=f->size;
        qm->used+=f->size;
        if (qm->max_real_used<qm->real_used)
            qm->max_real_used=qm->real_used;
        return (char*)f+sizeof(struct qm_frag);
    }
    return 0;
}

/* returns a pointer obtained from qm_malloc to the pool, coalescing it with
 * free neighbouring fragments where possible; a NULL pointer is ignored */
void qm_free(struct qm_block* qm, void* p)
{
    struct qm_frag* f;
    struct qm_frag* prev;
    struct qm_frag* next;
    unsigned long size;

    if (p==0) {
        return;
    }
    prev=next=0;
    f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
    size=f->size;
    qm->used-=size;
    qm->real_used-=size;

    /* join fragments if possible */
    next=FRAG_NEXT(f);
    if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
        /* join with the next fragment */
        qm_detach_free(qm, next);
        size+=next->size+FRAG_OVERHEAD;
        qm->real_used-=FRAG_OVERHEAD;
        qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
    }

    if (f > qm->first_frag){
        prev=FRAG_PREV(f);
        /* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
           sizeof(struct qm_frag_end))->size); */
        if (prev->u.is_free){
            /* join with the previous fragment */
            qm_detach_free(qm, prev);
            size+=prev->size+FRAG_OVERHEAD;
            qm->real_used-=FRAG_OVERHEAD;
            qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
            f=prev;
        }
    }
    f->size=size;
    FRAG_END(f)->size=f->size;
    qm_insert_free(qm, f);
}

/* simple spin lock around the pool, implemented with Win32 interlocked
 * operations; spins briefly, then yields the CPU with Sleep(1) */
void qm_lock(struct qm_block* qm)
{
    DWORD i;
    DWORD spin = 256;

    while (InterlockedCompareExchange((LONG*)&qm->lock, 1, 0) != 0) {
        /* spin a little before sleeping; reset the counter on every retry */
        for (i = 0; i < spin; ++i)
            if (qm->lock == 0)
                break;
        if (i >= spin)
            Sleep(1); /* yield cpu */
    }
}

void qm_unlock(struct qm_block* qm)
{
    InterlockedExchange((LONG*)&qm->lock, 0);
}
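
/* Concurrency note (illustrative, not from the original source): the
 * allocator itself does no internal locking, so when the pool is shared
 * between threads every qm_malloc/qm_free call must be bracketed by the
 * lock, e.g.:
 *
 *   qm_lock(pool);
 *   p = qm_malloc(pool, n);
 *   qm_unlock(pool);
 *
 * where `pool` and `n` are hypothetical caller variables. */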