XCache is a fast, stable PHP opcode cacher that has been proven and is now running on production servers under high load. https://xcache.lighttpd.net/

#define _xc_allocator_t _xc_allocator_bestfit_t
#define _xc_allocator_block_t _xc_allocator_bestfit_block_t
#include "xc_allocator.h"
#undef _xc_allocator_t
#undef _xc_allocator_block_t

#ifdef TEST
# include <limits.h>
# include <stdio.h>
# define XCACHE_DEBUG
typedef int zend_bool;
# define ZEND_ATTRIBUTE_PTR_FORMAT(a, b, c)
# define zend_error(type, error) fprintf(stderr, "%s", error)
#else
# include <php.h>
#endif

#ifdef XCACHE_DEBUG
# define ALLOC_DEBUG_BLOCK_CHECK
#endif

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "xc_shm.h"
#include "util/xc_align.h"
#include "util/xc_trace.h"

#if 0
#undef ALLOC_DEBUG_BLOCK_CHECK
#endif

#define CHAR_PTR(p) ((char *) (p))
#define PADD(p, a) (CHAR_PTR(p) + a)
#define PSUB(p1, p2) (CHAR_PTR(p1) - CHAR_PTR(p2))

/* {{{ allocator */
typedef struct _xc_allocator_bestfit_block_t xc_allocator_bestfit_block_t;
struct _xc_allocator_bestfit_block_t {
#ifdef ALLOC_DEBUG_BLOCK_CHECK
	unsigned int magic;
#endif
	xc_memsize_t size;                  /* reserved even after alloc */
	xc_allocator_bestfit_block_t *next; /* not used after alloc */
};

typedef struct _xc_allocator_bestfit_t {
	const xc_allocator_vtable_t *vtable;
	xc_shm_t *shm;
	xc_memsize_t size;
	xc_memsize_t avail;                        /* total free */
	xc_allocator_bestfit_block_t headblock[1]; /* just as a pointer to the first block */
} xc_allocator_bestfit_t;

#ifndef XtOffsetOf
# include <linux/stddef.h>
# define XtOffsetOf(s_type, field) offsetof(s_type, field)
#endif

#define SizeOf(type, field) sizeof(((type *) 0)->field)
#define BLOCK_HEADER_SIZE() (ALIGN(XtOffsetOf(xc_allocator_bestfit_block_t, size) + SizeOf(xc_allocator_bestfit_block_t, size)))

#define BLOCK_MAGIC ((unsigned int) 0x87655678)
/* }}} */
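/* Layout note: the shared memory segment starts with the ALIGNed
 * xc_allocator_bestfit_t header and the blocks follow it. headblock is a
 * size-0 sentinel whose next pointer starts the free list, which is kept
 * sorted by address. BLOCK_HEADER_SIZE() covers the fields up to and
 * including size only, so the space occupied by next becomes part of the
 * caller-visible payload once a block is allocated.
 */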
static inline void xc_block_setup(xc_allocator_bestfit_block_t *b, xc_memsize_t size, xc_allocator_bestfit_block_t *next) /* {{{ */
{
#ifdef ALLOC_DEBUG_BLOCK_CHECK
	b->magic = BLOCK_MAGIC;
#endif
	b->size = size;
	b->next = next;
}
/* }}} */

#ifdef ALLOC_DEBUG_BLOCK_CHECK
static void xc_block_check(xc_allocator_bestfit_block_t *b) /* {{{ */
{
	if (b->magic != BLOCK_MAGIC) {
		fprintf(stderr, "0x%X != 0x%X magic wrong\n", b->magic, BLOCK_MAGIC);
	}
}
/* }}} */
#else
# define xc_block_check(b) do { } while (0)
#endif
static XC_ALLOCATOR_MALLOC(xc_allocator_bestfit_malloc) /* {{{ */
{
	xc_allocator_bestfit_block_t *prev, *cur;
	xc_allocator_bestfit_block_t *newb, *b;
	xc_memsize_t realsize;
	xc_memsize_t minsize;
	void *p;

	/* [xc_allocator_bestfit_block_t:size|size] */
	realsize = BLOCK_HEADER_SIZE() + size;
	/* realsize is ALIGNed so the next block starts at an ALIGNed address */
	realsize = ALIGN(realsize);
	TRACE("avail: %lu (%luKB). Allocate size: %lu realsize: %lu (%luKB)"
			, allocator->avail, allocator->avail / 1024
			, size
			, realsize, realsize / 1024
			);

	do {
		p = NULL;
		if (allocator->avail < realsize) {
			TRACE("%s", " oom");
			break;
		}

		b = NULL;
		minsize = ULONG_MAX;
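
		/* Best-fit scan over the address-ordered free list: an exact-size
		 * match ends the search immediately; otherwise remember the
		 * smallest block that is still big enough to be split into the
		 * allocation plus a new free block.
		 */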
		/* prev|cur */
		for (prev = allocator->headblock; prev->next; prev = cur) {
			/* while (prev->next != 0) { */
			cur = prev->next;
			xc_block_check(cur);
			if (cur->size == realsize) {
				/* found a perfect fit, stop searching */
				b = prev;
				break;
			}
			/* make sure we can split the block */
			else if (cur->size > (sizeof(xc_allocator_bestfit_block_t) + realsize) &&
					cur->size < minsize) {
				/* cur is acceptable and smaller */
				b = prev;
				minsize = cur->size;
			}
			prev = cur;
		}

		if (b == NULL) {
			TRACE("%s", " no fit chunk");
			break;
		}
		prev = b;
		cur = prev->next;
		p = PADD(cur, BLOCK_HEADER_SIZE());

		/* update the block header */
		allocator->avail -= realsize;

		/* perfect fit, just unlink */
		if (cur->size == realsize) {
			prev->next = cur->next;
			TRACE(" perfect fit. Got: %p", p);
			break;
		}

		/* make a new free block after the alloced space */
		/* save it, as it might be overwritten by newb (cur->size is ok) */
		b = cur->next;

		/* prev|cur |next=b */
		newb = (xc_allocator_bestfit_block_t *) PADD(cur, realsize);
		xc_block_setup(newb, cur->size - realsize, b);
		cur->size = realsize;
		/* prev|cur|newb|next
		 *           `---^
		 */
		TRACE(" -> avail: %lu (%luKB). new next: %p offset: %lu %luKB. Got: %p"
				, allocator->avail, allocator->avail / 1024
				, newb
				, PSUB(newb, allocator), PSUB(newb, allocator) / 1024
				, p
				);
		prev->next = newb;
		/* prev|cur|newb|next
		 *    `-----^
		 */
	} while (0);

	return p;
}
/* }}} */
static XC_ALLOCATOR_FREE(xc_allocator_bestfit_free) /* {{{ return block size freed */
{
	xc_allocator_bestfit_block_t *cur, *b;
	int size;

	cur = (xc_allocator_bestfit_block_t *) (CHAR_PTR(p) - BLOCK_HEADER_SIZE());
	TRACE("freeing: %p, size=%lu", p, cur->size);
	xc_block_check(cur);
	assert((char *) allocator < (char *) cur && (char *) cur < (char *) allocator + allocator->size);
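
	/* The free list is address-ordered: walk to the last free block that
	 * sits before cur, relink cur there, then merge cur with whichever
	 * neighbours are physically adjacent.
	 */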
	/* find the free block right before p */
	b = allocator->headblock;
	while (b->next != 0 && b->next < cur) {
		b = b->next;
	}

	/* restore block */
	cur->next = b->next;
	b->next = cur;
	size = cur->size;
	TRACE(" avail %lu (%luKB)", allocator->avail, allocator->avail / 1024);
	allocator->avail += size;

	/* combine prev|cur */
	if (PADD(b, b->size) == (char *) cur) {
		b->size += cur->size;
		b->next = cur->next;
		cur = b;
		TRACE("%s", " combine prev");
	}

	/* combine cur|next */
	b = cur->next;
	if (PADD(cur, cur->size) == (char *) b) {
		cur->size += b->size;
		cur->next = b->next;
		TRACE("%s", " combine next");
	}
	TRACE(" -> avail %lu (%luKB)", allocator->avail, allocator->avail / 1024);
	return size;
}
/* }}} */
static XC_ALLOCATOR_CALLOC(xc_allocator_bestfit_calloc) /* {{{ */
{
	xc_memsize_t realsize = memb * size;
	void *p = xc_allocator_bestfit_malloc(allocator, realsize);

	if (p) {
		memset(p, 0, realsize);
	}
	return p;
}
/* }}} */
static XC_ALLOCATOR_REALLOC(xc_allocator_bestfit_realloc) /* {{{ */
{
	void *newp = xc_allocator_bestfit_malloc(allocator, size);

	if (p && newp) {
		/* copy at most the old payload so growing does not read past the old block */
		xc_allocator_bestfit_block_t *oldb = (xc_allocator_bestfit_block_t *) (CHAR_PTR(p) - BLOCK_HEADER_SIZE());
		xc_memsize_t oldsize = oldb->size - BLOCK_HEADER_SIZE();

		memcpy(newp, p, oldsize < size ? oldsize : size);
		xc_allocator_bestfit_free(allocator, p);
	}
	return newp;
}
/* }}} */
static XC_ALLOCATOR_AVAIL(xc_allocator_bestfit_avail) /* {{{ */
{
	return allocator->avail;
}
/* }}} */

static XC_ALLOCATOR_SIZE(xc_allocator_bestfit_size) /* {{{ */
{
	return allocator->size;
}
/* }}} */

static XC_ALLOCATOR_FREEBLOCK_FIRST(xc_allocator_bestfit_freeblock_first) /* {{{ */
{
	return allocator->headblock->next;
}
/* }}} */

static XC_ALLOCATOR_FREEBLOCK_NEXT(xc_allocator_bestfit_freeblock_next) /* {{{ */
{
	return block->next;
}
/* }}} */

static XC_ALLOCATOR_BLOCK_SIZE(xc_allocator_bestfit_block_size) /* {{{ */
{
	return block->size;
}
/* }}} */

static XC_ALLOCATOR_BLOCK_OFFSET(xc_allocator_bestfit_block_offset) /* {{{ */
{
	return ((char *) block) - ((char *) allocator);
}
/* }}} */
static XC_ALLOCATOR_INIT(xc_allocator_bestfit_init) /* {{{ */
{
	xc_allocator_bestfit_block_t *b;
#define MINSIZE (ALIGN(sizeof(xc_allocator_bestfit_t)) + sizeof(xc_allocator_bestfit_block_t))
	/* requires at least the header and 1 tail block */
	if (size < MINSIZE) {
		fprintf(stderr, "xc_allocator_bestfit_init requires at least %lu bytes\n", (unsigned long) MINSIZE);
		return NULL;
	}
	TRACE("size=%lu", size);
	allocator->shm = shm;
	allocator->size = size;
	allocator->avail = size - MINSIZE;

	/* sentinel pointing to the first block, right after the ALIGNed header */
	b = allocator->headblock;
	xc_block_setup(b, 0, (xc_allocator_bestfit_block_t *) PADD(allocator, ALIGN(sizeof(xc_allocator_bestfit_t))));

	/* first block */
	b = b->next;
	xc_block_setup(b, allocator->avail, 0);
#undef MINSIZE

	return allocator;
}
/* }}} */

static XC_ALLOCATOR_DESTROY(xc_allocator_bestfit_destroy) /* {{{ */
{
}
/* }}} */
static xc_allocator_vtable_t xc_allocator_bestfit = XC_ALLOCATOR_VTABLE(allocator_bestfit);

void xc_allocator_bestfit_register() /* {{{ */
{
	if (xc_allocator_register("bestfit", &xc_allocator_bestfit) == 0) {
		zend_error(E_ERROR, "XCache: failed to register allocator 'bestfit'");
	}
}
/* }}} */