mem.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004
  1. /**
  2. * @file
  3. * Dynamic memory manager
  4. *
  5. * This is a lightweight replacement for the standard C library malloc().
  6. *
  7. * If you want to use the standard C library malloc() instead, define
  8. * MEM_LIBC_MALLOC to 1 in your lwipopts.h
  9. *
  10. * To let mem_malloc() use pools (prevents fragmentation and is much faster than
  11. * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
  12. * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
  13. * of pools like this (more pools can be added between _START and _END):
  14. *
  15. * Define three pools with sizes 256, 512, and 1512 bytes
  16. * LWIP_MALLOC_MEMPOOL_START
  17. * LWIP_MALLOC_MEMPOOL(20, 256)
  18. * LWIP_MALLOC_MEMPOOL(10, 512)
  19. * LWIP_MALLOC_MEMPOOL(5, 1512)
  20. * LWIP_MALLOC_MEMPOOL_END
  21. */
  22. /*
  23. * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
  24. * All rights reserved.
  25. *
  26. * Redistribution and use in source and binary forms, with or without modification,
  27. * are permitted provided that the following conditions are met:
  28. *
  29. * 1. Redistributions of source code must retain the above copyright notice,
  30. * this list of conditions and the following disclaimer.
  31. * 2. Redistributions in binary form must reproduce the above copyright notice,
  32. * this list of conditions and the following disclaimer in the documentation
  33. * and/or other materials provided with the distribution.
  34. * 3. The name of the author may not be used to endorse or promote products
  35. * derived from this software without specific prior written permission.
  36. *
  37. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  38. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  39. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  40. * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  41. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  42. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  43. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  44. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  45. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
  46. * OF SUCH DAMAGE.
  47. *
  48. * This file is part of the lwIP TCP/IP stack.
  49. *
  50. * Author: Adam Dunkels <adam@sics.se>
  51. * Simon Goldschmidt
  52. *
  53. */
  54. #include "lwip/opt.h"
  55. #include "lwip/mem.h"
  56. #include "lwip/def.h"
  57. #include "lwip/sys.h"
  58. #include "lwip/stats.h"
  59. #include "lwip/err.h"
  60. #include <stdio.h> /* snprintf */
  61. #include <string.h>
  62. #if MEM_LIBC_MALLOC
  63. #include <stdlib.h> /* for malloc()/free() */
  64. #endif
  65. /* This is overridable for tests only... */
  66. #ifndef LWIP_MEM_ILLEGAL_FREE
  67. #define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0)
  68. #endif
  69. #define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x))
  70. #define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
  71. #define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))
  72. #if MEM_OVERFLOW_CHECK
  73. #define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED
  74. #define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
  75. #else
  76. #define MEM_SANITY_OFFSET 0
  77. #define MEM_SANITY_OVERHEAD 0
  78. #endif
  79. #if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
  80. /**
  81. * Check if a mep element was victim of an overflow or underflow
  82. * (e.g. the restricted area after/before it has been altered)
  83. *
  84. * @param p the mem element to check
  85. * @param size allocated size of the element
  86. * @param descr1 description of the element source shown on error
  87. * @param descr2 description of the element source shown on error
  88. */
  89. void
  90. mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
  91. {
  92. #if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
  93. u16_t k;
  94. u8_t *m;
  95. #if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  96. m = (u8_t *)p + size;
  97. for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
  98. if (m[k] != 0xcd) {
  99. char errstr[128];
  100. snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
  101. LWIP_ASSERT(errstr, 0);
  102. }
  103. }
  104. #endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  105. #if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  106. m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  107. for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
  108. if (m[k] != 0xcd) {
  109. char errstr[128];
  110. snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
  111. LWIP_ASSERT(errstr, 0);
  112. }
  113. }
  114. #endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
  115. #else
  116. LWIP_UNUSED_ARG(p);
  117. LWIP_UNUSED_ARG(desc);
  118. LWIP_UNUSED_ARG(descr);
  119. #endif
  120. }
  121. /**
  122. * Initialize the restricted area of a mem element.
  123. */
  124. void
  125. mem_overflow_init_raw(void *p, size_t size)
  126. {
  127. #if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
  128. u8_t *m;
  129. #if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  130. m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  131. memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
  132. #endif
  133. #if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  134. m = (u8_t *)p + size;
  135. memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
  136. #endif
  137. #else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  138. LWIP_UNUSED_ARG(p);
  139. LWIP_UNUSED_ARG(desc);
  140. #endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  141. }
  142. #endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
  143. #if MEM_CUSTOM_ALLOCATOR || MEM_USE_POOLS
/** mem_init is not used when using pools instead of a heap or using
 * C library malloc().
 */
void
mem_init(void)
{
  /* Intentionally empty: the underlying allocator (memory pools or the
     custom/libc malloc) performs its own initialization elsewhere. */
}
/** mem_trim is not used when using pools instead of a heap or using
 * C library malloc(): we can't free part of a pool element, and the stack
 * does not support mem_trim() returning a different pointer.
 *
 * @param mem pointer previously returned by mem_malloc()
 * @param size requested new size (ignored in this configuration)
 * @return always the unmodified 'mem' pointer ("no change")
 */
void *
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
  161. #endif /* MEM_CUSTOM_ALLOCATOR || MEM_USE_POOLS */
  162. #if MEM_CUSTOM_ALLOCATOR
  163. #if LWIP_STATS && MEM_STATS
  164. #define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
  165. #else
  166. #define MEM_LIBC_STATSHELPER_SIZE 0
  167. #endif
  168. /**
  169. * Allocate a block of memory with a minimum of 'size' bytes.
  170. *
  171. * @param size is the minimum size of the requested block in bytes.
  172. * @return pointer to allocated memory or NULL if no free memory was found.
  173. *
  174. * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
  175. */
  176. void *
  177. mem_malloc(mem_size_t size)
  178. {
  179. void *ret = MEM_CUSTOM_MALLOC(size + MEM_LIBC_STATSHELPER_SIZE);
  180. if (ret == NULL) {
  181. MEM_STATS_INC_LOCKED(err);
  182. } else {
  183. LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
  184. #if LWIP_STATS && MEM_STATS
  185. *(mem_size_t *)ret = size;
  186. ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
  187. MEM_STATS_INC_USED_LOCKED(used, size);
  188. #endif
  189. }
  190. return ret;
  191. }
  192. /** Put memory back on the heap
  193. *
  194. * @param rmem is the pointer as returned by a previous call to mem_malloc()
  195. */
  196. void
  197. mem_free(void *rmem)
  198. {
  199. LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  200. LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
  201. #if LWIP_STATS && MEM_STATS
  202. rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
  203. MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
  204. #endif
  205. MEM_CUSTOM_FREE(rmem);
  206. }
  207. #elif MEM_USE_POOLS
  208. /* lwIP heap implemented with different sized pools */
/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  /* the pool element must also hold the hidden helper header placed
     in front of the user payload */
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  /* walk the malloc pools from smallest to largest */
  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper *)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /** Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC_LOCKED(err);
        return NULL;
      }
      break;
    }
  }
  /* loop ran past MEMP_POOL_LAST: even the largest pool is too small */
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC_LOCKED(err);
    return NULL;
  }
  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED_LOCKED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size)
     so mem_free() can verify it was never written to */
  memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
  /* account for the user size that mem_malloc() recorded in the helper */
  MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
                hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size):
       mem_malloc() filled it with 0xcd, any other value means an overflow */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t *)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */
  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}
  297. #else /* MEM_USE_POOLS */
  298. /* lwIP replacement for your libc malloc() */
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 *
 * 'next' and 'prev' are byte offsets into the 'ram' heap array, not
 * pointers (see ptr_to_mem()/mem_to_ptr() for the conversion).
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
#if MEM_OVERFLOW_CHECK
  /** this keeps track of the user allocation size for guard checks */
  mem_size_t user_size;
#endif
};
  316. /** All allocated blocks will be MIN_SIZE bytes big, at least!
  317. * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
  318. * larger values could prevent too small blocks to fragment the RAM too much. */
  319. #ifndef MIN_SIZE
  320. #define MIN_SIZE 12
  321. #endif /* MIN_SIZE */
  322. /* some alignment macros: we define them here for better source code layout */
  323. #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
  324. #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
  325. #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
  326. /** If you want to relocate the heap to external memory, simply define
  327. * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
  328. * If so, make sure the memory at that location is big enough (see below on
  329. * how that space is calculated). */
  330. #ifndef LWIP_RAM_HEAP_POINTER
  331. /** the heap. we need one struct mem at the end and some room for alignment */
  332. LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
  333. #define LWIP_RAM_HEAP_POINTER ram_heap
  334. #endif /* LWIP_RAM_HEAP_POINTER */
  335. /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
  336. static u8_t *ram;
  337. /** the last entry, always unused! */
  338. static struct mem *ram_end;
  339. /** concurrent access protection */
  340. #if !NO_SYS
  341. static sys_mutex_t mem_mutex;
  342. #endif
  343. #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  344. static volatile u8_t mem_free_count;
  345. /* Allow mem_free from other (e.g. interrupt) context */
  346. #define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
  347. #define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
  348. #define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
  349. #define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
  350. #define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
  351. #define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)
  352. #define LWIP_MEM_LFREE_VOLATILE volatile
  353. #else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  354. /* Protect the heap only by using a mutex */
  355. #define LWIP_MEM_FREE_DECL_PROTECT()
  356. #define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
  357. #define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
  358. /* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
  359. #define LWIP_MEM_ALLOC_DECL_PROTECT()
  360. #define LWIP_MEM_ALLOC_PROTECT()
  361. #define LWIP_MEM_ALLOC_UNPROTECT()
  362. #define LWIP_MEM_LFREE_VOLATILE
  363. #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  364. /** pointer to the lowest free block, this is used for faster search */
  365. static struct mem * LWIP_MEM_LFREE_VOLATILE lfree;
  366. #if MEM_SANITY_CHECK
  367. static void mem_sanity(void);
  368. #define MEM_SANITY() mem_sanity()
  369. #else
  370. #define MEM_SANITY()
  371. #endif
  372. #if MEM_OVERFLOW_CHECK
  373. static void
  374. mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
  375. {
  376. void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  377. mem->user_size = user_size;
  378. mem_overflow_init_raw(p, user_size);
  379. }
  380. static void
  381. mem_overflow_check_element(struct mem *mem)
  382. {
  383. void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  384. mem_overflow_check_raw(p, mem->user_size, "heap", "");
  385. }
  386. #else /* MEM_OVERFLOW_CHECK */
  387. #define mem_overflow_init_element(mem, size)
  388. #define mem_overflow_check_element(mem)
  389. #endif /* MEM_OVERFLOW_CHECK */
  390. static struct mem *
  391. ptr_to_mem(mem_size_t ptr)
  392. {
  393. return (struct mem *)(void *)&ram[ptr];
  394. }
  395. static mem_size_t
  396. mem_to_ptr(void *mem)
  397. {
  398. return (mem_size_t)((u8_t *)mem - ram);
  399. }
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;
  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
  nmem = ptr_to_mem(mem->next);
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the absorbed element was the lowest free one; 'mem' is lower */
      lfree = mem;
    }
    /* unlink nmem: mem now spans both regions */
    mem->next = nmem->next;
    if (nmem->next != MEM_SIZE_ALIGNED) {
      /* fix the back-link of the element after the absorbed one */
      ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
    }
  }
  /* plug hole backward */
  pmem = ptr_to_mem(mem->prev);
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      /* 'mem' gets absorbed into pmem, which is lower still */
      lfree = pmem;
    }
    /* unlink mem: pmem now spans both regions */
    pmem->next = mem->next;
    if (mem->next != MEM_SIZE_ALIGNED) {
      /* fix the back-link of the element after 'mem' */
      ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
    }
  }
}
/**
 * Initialize the heap: set up one big free element spanning the whole
 * heap, the always-used end marker, and the lowest-free pointer.
 * (Note: the heap contents are NOT zeroed here.)
 */
void
mem_init(void)
{
  struct mem *mem;
  LWIP_ASSERT("Sanity check alignment",
              (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);
  /* align the heap — 'ram' must be set before any ptr_to_mem() call below */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap: one free element covering everything */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap: a permanently-used sentinel element */
  ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;
  MEM_SANITY();
  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;
  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}
  474. /* Check if a struct mem is correctly linked.
  475. * If not, double-free is a possible reason.
  476. */
  477. static int
  478. mem_link_valid(struct mem *mem)
  479. {
  480. struct mem *nmem, *pmem;
  481. mem_size_t rmem_idx;
  482. rmem_idx = mem_to_ptr(mem);
  483. nmem = ptr_to_mem(mem->next);
  484. pmem = ptr_to_mem(mem->prev);
  485. if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
  486. ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
  487. ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
  488. return 0;
  489. }
  490. return 1;
  491. }
  492. #if MEM_SANITY_CHECK
  493. static void
  494. mem_sanity(void)
  495. {
  496. struct mem *mem;
  497. u8_t last_used;
  498. /* begin with first element here */
  499. mem = (struct mem *)ram;
  500. LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1));
  501. last_used = mem->used;
  502. LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0);
  503. LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
  504. LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next)));
  505. /* check all elements before the end of the heap */
  506. for (mem = ptr_to_mem(mem->next);
  507. ((u8_t *)mem > ram) && (mem < ram_end);
  508. mem = ptr_to_mem(mem->next)) {
  509. LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem);
  510. LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED);
  511. LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
  512. LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev)));
  513. LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next)));
  514. if (last_used == 0) {
  515. /* 2 unused elements in a row? */
  516. LWIP_ASSERT("heap element unused?", mem->used == 1);
  517. } else {
  518. LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1));
  519. }
  520. LWIP_ASSERT("heap element link valid", mem_link_valid(mem));
  521. /* used/unused altering */
  522. last_used = mem->used;
  523. }
  524. LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED));
  525. LWIP_ASSERT("heap element used valid", mem->used == 1);
  526. LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED);
  527. LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED);
  528. }
  529. #endif /* MEM_SANITY_CHECK */
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 * call to mem_malloc()
 *
 * Validation is performed in stages: NULL check and alignment check before
 * taking the heap lock, used-flag and link checks under the lock (so the
 * illegal-free diagnostics can fire without corrupting the heap).
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();
  if (rmem == NULL) {
    /* freeing NULL is tolerated (logged only) */
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
    /* pointer not aligned: cannot have come from mem_malloc() */
    LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
  /* Get the corresponding struct mem: */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
  if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
    /* pointer outside the heap bounds */
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* mem has to be in a used state */
  if (!mem->used) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
  if (!mem_link_valid(mem)) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
  /* mem is now unused. */
  mem->used = 0;
  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }
  /* block size = distance from this element to the next one, minus nothing:
     mem->next is a byte offset, so (next - own offset) covers header + data */
  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));
  /* finally, see if prev or next are free also */
  plug_holes(mem);
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal a concurrent mem_malloc() that the heap changed under it */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param new_size required size after shrinking (needs to be smaller than or
 * equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 * or NULL if newsize is > old size, in which case rmem is NOT touched
 * or freed!
 */
void *
mem_trim(void *rmem, mem_size_t new_size)
{
  mem_size_t size, newsize;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();
  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  /* (newsize < new_size) detects unsigned wrap-around from the additions above */
  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
    return NULL;
  }
  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
              (u8_t *)rmem < (u8_t *)ram_end);
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* ... and its offset pointer */
  ptr = mem_to_ptr(mem);
  /* current usable size of the block = element span minus header/guard overhead */
  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  mem2 = ptr_to_mem(mem->next);
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little:
       grow the free neighbour backwards to absorb the trimmed-off space */
    mem_size_t next;
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    if (lfree == mem2) {
      /* the free element is being relocated; keep lfree tracking it */
      lfree = ptr_to_mem(ptr2);
    }
    mem2 = ptr_to_mem(ptr2);
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     * region that couldn't hold data, but when mem->next gets freed,
     * the 2 regions would be combined, resulting in more free memory */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    mem2 = ptr_to_mem(ptr2);
    if (mem2 < lfree) {
      /* the newly created free element is now the lowest */
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if MEM_OVERFLOW_CHECK
  /* re-paint the guard regions for the (smaller) user size */
  mem_overflow_init_element(mem, new_size);
#endif
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal a concurrent mem_malloc() that the heap changed under it */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
  726. /**
  727. * Allocate a block of memory with a minimum of 'size' bytes.
  728. *
  729. * @param size_in is the minimum size of the requested block in bytes.
  730. * @return pointer to allocated memory or NULL if no free memory was found.
  731. *
  732. * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
  733. */
void *
mem_malloc(mem_size_t size_in)
{
  mem_size_t ptr, ptr2, size;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* set when a concurrent mem_free()/mem_trim() interrupted us and we must
   * restart the whole heap scan */
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();
  /* zero-byte requests are rejected rather than returning a minimal block */
  if (size_in == 0) {
    return NULL;
  }
  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  /* reserve room for the sanity regions placed before and after user data */
  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  /* 'size < size_in' detects mem_size_t wrap-around caused by the
   * alignment/sanity additions above */
  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
    return NULL;
  }
  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block (first fit starting at 'lfree').
     */
    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ptr_to_mem(ptr)->next) {
      mem = ptr_to_mem(ptr);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           * region that couldn't hold data, but when mem->next gets freed,
           * the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
          LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED);
          /* create mem2 struct */
          mem2 = ptr_to_mem(ptr2);
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;
          /* fix back-link of the following block, unless it is the heap end */
          if (mem2->next != MEM_SIZE_ALIGNED) {
            ptr_to_mem(mem2->next)->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * take care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = ptr_to_mem(cur->next);
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
                    (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);
#if MEM_OVERFLOW_CHECK
        /* fill the sanity regions with known patterns for later overflow checks */
        mem_overflow_init_element(mem, size_in);
#endif
        MEM_SANITY();
        /* user data starts right behind the block header (plus sanity region,
         * if MEM_OVERFLOW_CHECK is enabled) */
        return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  /* no fitting block found: record the failure and give up */
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  return NULL;
}
  873. #endif /* MEM_USE_POOLS */
  874. #if MEM_CUSTOM_ALLOCATOR && (!LWIP_STATS || !MEM_STATS)
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each, forwarding to the user-configured custom allocator.
 *
 * NOTE(review): MEM_CUSTOM_CALLOC is assumed to have ISO calloc semantics
 * (zero-filled memory, NULL on failure) — configured by the port; verify
 * against the lwipopts.h definition.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return MEM_CUSTOM_CALLOC(count, size);
}
  880. #else /* MEM_CUSTOM_ALLOCATOR && (!LWIP_STATS || !MEM_STATS) */
  881. /**
  882. * Contiguously allocates enough space for count objects that are size bytes
  883. * of memory each and returns a pointer to the allocated memory.
  884. *
  885. * The allocated memory is filled with bytes of value zero.
  886. *
  887. * @param count number of objects to allocate
  888. * @param size size of the objects to allocate
  889. * @return pointer to allocated memory / NULL pointer if there is an error
  890. */
  891. void *
  892. mem_calloc(mem_size_t count, mem_size_t size)
  893. {
  894. void *p;
  895. size_t alloc_size = (size_t)count * (size_t)size;
  896. if ((size_t)(mem_size_t)alloc_size != alloc_size) {
  897. LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
  898. return NULL;
  899. }
  900. /* allocate 'count' objects of size 'size' */
  901. p = mem_malloc((mem_size_t)alloc_size);
  902. if (p) {
  903. /* zero the memory */
  904. memset(p, 0, alloc_size);
  905. }
  906. return p;
  907. }
  908. #endif /* MEM_CUSTOM_ALLOCATOR && (!LWIP_STATS || !MEM_STATS) */