/* rtt_mem.c — RT-Thread small-memory heap allocator (luat_rt_* interface) */
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2008-7-12 Bernard the first version
  9. * 2010-06-09 Bernard fix the end stub of heap
  10. * fix memory check in rt_realloc function
  11. * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
  12. * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
  13. * 2017-07-14 armink fix rt_realloc issue when new size is 0
  14. * 2018-10-02 Bernard Add 64bit support
  15. */
  16. /*
  17. * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
  18. * All rights reserved.
  19. *
  20. * Redistribution and use in source and binary forms, with or without modification,
  21. * are permitted provided that the following conditions are met:
  22. *
  23. * 1. Redistributions of source code must retain the above copyright notice,
  24. * this list of conditions and the following disclaimer.
  25. * 2. Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials provided with the distribution.
  28. * 3. The name of the author may not be used to endorse or promote products
  29. * derived from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  33. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  34. * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  35. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  36. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  37. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  38. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  39. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
  40. * OF SUCH DAMAGE.
  41. *
  42. * This file is part of the lwIP TCP/IP stack.
  43. *
  44. * Author: Adam Dunkels <adam@sics.se>
  45. * Simon Goldschmidt
  46. *
  47. */
  48. #include <rthw.h>
  49. #include <rtthread.h>
  50. #undef RT_USING_HOOK
  51. #undef RT_MEM_STATS
  52. #undef rt_malloc
  53. #undef rt_free
  54. #undef rt_relloc
  55. #undef rt_calloc
  56. #undef RT_USING_MEMTRACE
  57. #ifndef RT_USING_MEMHEAP_AS_HEAP
  58. /* #define RT_MEM_DEBUG */
  59. #define RT_MEM_STATS
  60. #if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
#ifdef RT_USING_HOOK
/* NOTE(review): RT_USING_HOOK is #undef'ed at the top of this file, so this
 * whole section is currently compiled out; it is kept for completeness. */
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}

/**@}*/
#endif
#define HEAP_MAGIC 0x1ea0

/* Header that precedes every block (free or used) in the heap.  Blocks are
 * chained by byte OFFSETS from heap_ptr (next/prev), not by pointers. */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;   /* always HEAP_MAGIC; corruption guard */
    rt_uint16_t used;    /* 1 = allocated, 0 = free */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;    /* padding so next/prev stay naturally aligned */
#endif
    rt_size_t next, prev;  /* offsets (from heap_ptr) of neighbouring headers */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4]; /* thread name */
#endif
#endif
};

/** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
static rt_uint8_t *heap_ptr;
/** the last entry, always unused! */
static struct heap_mem *heap_end;

/* Smallest payload handed out; sized so a split remainder can still hold a
 * header's worth of bookkeeping. */
#ifdef ARCH_CPU_64BIT
#define MIN_SIZE 24
#else
#define MIN_SIZE 12
#endif

#define MIN_SIZE_ALIGNED RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)

static struct heap_mem *lfree; /* pointer to the lowest free block */

/* Binary semaphore serializing all heap operations. */
static struct rt_semaphore heap_sem;

/* Usable heap bytes, excluding the two bookkeeping headers reserved at init. */
static rt_size_t mem_size_aligned;

#ifdef RT_MEM_STATS
/* Current and high-water usage; both include the SIZEOF_STRUCT_MEM headers. */
static rt_size_t used_mem, max_mem;
#endif
  125. #ifdef RT_USING_MEMTRACE
  126. rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
  127. {
  128. int index;
  129. for (index = 0; index < sizeof(mem->thread); index ++)
  130. {
  131. if (name[index] == '\0') break;
  132. mem->thread[index] = name[index];
  133. }
  134. for (; index < sizeof(mem->thread); index ++)
  135. {
  136. mem->thread[index] = ' ';
  137. }
  138. }
  139. #endif
/* Forward declarations of the public luat_rt_* heap interface. */
void luat_rt_system_heap_init(void *begin_addr, void *end_addr);
void luat_rt_memory_info(rt_uint32_t *total,
                         rt_uint32_t *used,
                         rt_uint32_t *max_used);
void *luat_rt_malloc(rt_size_t size);
void *luat_rt_realloc(void *rmem, rt_size_t newsize);
void *luat_rt_calloc(rt_size_t count, rt_size_t size);
void luat_rt_free(void *rmem);
/*
 * Coalesce the free block 'mem' with its free neighbours, if any.
 *
 * Caller must hold heap_sem and 'mem' must already be marked free.
 * Merging rewrites the next/prev offset links and keeps 'lfree' pointing
 * at the lowest free header.
 */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        mem->next = nmem->next;
        /* nmem's successor must now point back at mem */
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        pmem->next = mem->next;
        /* mem's successor must now point back at pmem */
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
/**
 * @ingroup SystemInit
 *
 * This function will initialize system heap memory.
 *
 * The region is aligned inwards to RT_ALIGN_SIZE and two headers are
 * reserved from it: a leading header describing one big free block, and a
 * trailing 'heap_end' sentinel that is permanently marked used so it can
 * never be merged or allocated.
 *
 * @param begin_addr the beginning address of system heap memory.
 * @param end_addr the end address of system heap memory.
 */
void luat_rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
    rt_ubase_t end_align = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr: the region must be able to hold the two headers */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("T:mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:mem init, heap begin address 0x%x, size %d\n",
                                (rt_ubase_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap: one free block spanning everything */
    mem = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev = 0;
    mem->used = 0;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "INIT");
#endif

    /* initialize the end of the heap: always-used sentinel */
    heap_end = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used = 1;
    heap_end->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev = mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(heap_end, "INIT");
#endif

    /* binary semaphore protecting all heap operations */
    rt_sem_init(&heap_sem, "Lheap", 1, RT_IPC_FLAG_FIFO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
/**
 * @addtogroup MM
 */

/**@{*/

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit search starting at 'lfree' (the lowest free header).  A block
 * with room for another header plus MIN_SIZE_ALIGNED bytes of payload is
 * split; otherwise it is handed out whole.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *luat_rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit walk over the offset-linked header list, starting at the
     * lowest known free header */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 * region that couldn't hold data, but when mem->next gets freed,
                 * the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct */
                mem2 = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used = 0;
                mem2->next = mem->next;
                mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, " ");
#endif

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                /* fix the back-link of the successor (unless it is heap_end) */
                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            else
            {
                /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * take care of this).
                 * -> near fit or excact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("T:allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            //RT_OBJECT_HOOK_CALL(rt_malloc_hook,
            //                    (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    rt_sem_release(&heap_sem);

    return RT_NULL;
}
//RTM_EXPORT(rt_malloc);
/**
 * This function will change the previously allocated memory block.
 *
 * Shrinking by more than a header + MIN_SIZE splits the block in place;
 * growing falls back to malloc + copy + free.
 *
 * @param rmem pointer to memory allocated by rt_malloc
 * @param newsize the required new size
 *
 * @return the changed memory block address
 */
void *luat_rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        /* realloc to zero frees the block (C realloc convention) */
        luat_rt_free(rmem);

        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return luat_rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory: pointer is outside this heap, return it untouched */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    /* current payload size of the block, derived from the offset links */
    ptr = (rt_uint8_t *)mem - heap_ptr;
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* split memory block: the tail becomes a new free block that is
         * immediately coalesced with any following free neighbour */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, " ");
#endif
        mem->next = ptr2;
        /* fix the back-link of the successor (unless it is heap_end) */
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    rt_sem_release(&heap_sem);

    /* expand memory: allocate a fresh block, move the data, free the old */
    nmem = luat_rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        luat_rt_free(rmem);
    }

    return nmem;
}
//RTM_EXPORT(rt_realloc);
  448. /**
  449. * This function will contiguously allocate enough space for count objects
  450. * that are size bytes of memory each and returns a pointer to the allocated
  451. * memory.
  452. *
  453. * The allocated memory is filled with bytes of value zero.
  454. *
  455. * @param count number of objects to allocate
  456. * @param size size of the objects to allocate
  457. *
  458. * @return pointer to allocated memory / NULL pointer if there is an error
  459. */
  460. void *luat_rt_calloc(rt_size_t count, rt_size_t size)
  461. {
  462. void *p;
  463. /* allocate 'count' objects of size 'size' */
  464. p = luat_rt_malloc(count * size);
  465. /* zero the memory */
  466. if (p)
  467. rt_memset(p, 0, count * size);
  468. return p;
  469. }
  470. //RTM_EXPORT(rt_calloc);
/**
 * This function will release the previously allocated memory block by
 * rt_malloc. The released memory block is taken back to system heap.
 *
 * @param rmem the address of memory which will be released
 */
void luat_rt_free(void *rmem)
{
    struct heap_mem *mem;

    if (rmem == RT_NULL)
        return;

    RT_DEBUG_NOT_IN_INTERRUPT;

    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
    RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
              (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);

    //RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("T:illegal memory\n"));

        return;
    }

    /* Get the corresponding struct heap_mem ... */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    /* NOTE(review): this log reads mem->next before heap_sem is taken;
     * affects the log message only, but worth confirming under contention. */
    RT_DEBUG_LOG(RT_DEBUG_MEM,
                 ("T:release memory 0x%x, size: %d\n",
                  (rt_ubase_t)rmem,
                  (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

    /* protect the heap from concurrent access */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* ... which has to be in a used state ... */
    if (!mem->used || mem->magic != HEAP_MAGIC)
    {
        /* double free or corrupted header: report, then assert below */
        rt_kprintf("T:to free a bad data block:\n");
        rt_kprintf("T:mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
    }
    RT_ASSERT(mem->used);
    RT_ASSERT(mem->magic == HEAP_MAGIC);
    /* ... and is now unused. */
    mem->used = 0;
    mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, " ");
#endif

    if (mem < lfree)
    {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

#ifdef RT_MEM_STATS
    used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
#endif

    /* finally, see if prev or next are free also */
    plug_holes(mem);

    rt_sem_release(&heap_sem);
}
//RTM_EXPORT(rt_free);
  528. #ifdef RT_MEM_STATS
  529. void luat_rt_memory_info(rt_uint32_t *total,
  530. rt_uint32_t *used,
  531. rt_uint32_t *max_used)
  532. {
  533. if (total != RT_NULL)
  534. *total = mem_size_aligned;
  535. if (used != RT_NULL)
  536. *used = used_mem;
  537. if (max_used != RT_NULL)
  538. *max_used = max_mem;
  539. }
#ifdef RT_USING_FINSH
#include <finsh.h>

/* Shell command: print heap totals (named luat_free to avoid clashing with
 * the kernel's own "free" command). */
void luat_free(void)
{
    rt_kprintf("total memory: %d\n", mem_size_aligned);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
MSH_CMD_EXPORT(luat_free, list memory usage information);
  549. #ifdef RT_USING_MEMTRACE
  550. int memcheck(void)
  551. {
  552. int position;
  553. rt_ubase_t level;
  554. struct heap_mem *mem;
  555. level = rt_hw_interrupt_disable();
  556. for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
  557. {
  558. position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
  559. if (position < 0) goto __exit;
  560. if (position > (int)mem_size_aligned) goto __exit;
  561. if (mem->magic != HEAP_MAGIC) goto __exit;
  562. if (mem->used != 0 && mem->used != 1) goto __exit;
  563. }
  564. rt_hw_interrupt_enable(level);
  565. return 0;
  566. __exit:
  567. rt_kprintf("Memory block wrong:\n");
  568. rt_kprintf("address: 0x%08x\n", mem);
  569. rt_kprintf(" magic: 0x%04x\n", mem->magic);
  570. rt_kprintf(" used: %d\n", mem->used);
  571. rt_kprintf(" size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
  572. rt_hw_interrupt_enable(level);
  573. return 0;
  574. }
  575. MSH_CMD_EXPORT(memcheck, check memory data);
/* Shell command: dump every heap block (address, payload size, owner tag). */
int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    /* NOTE(review): list_mem() is not declared in this file — presumably a
     * finsh/msh builtin; confirm it exists in this build configuration. */
    list_mem();

    /* show memory anchors */
    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        /* payload size = distance to next header minus this header */
        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        /* NOTE(review): only 4 owner-tag chars are printed even though
         * thread[] is 8 bytes on ARCH_CPU_64BIT targets */
        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
  606. #endif /* end of RT_USING_MEMTRACE */
  607. #endif /* end of RT_USING_FINSH */
  608. #endif
  609. /**@}*/
  610. #endif /* end of RT_USING_HEAP */
  611. #endif /* end of RT_USING_MEMHEAP_AS_HEAP */