L4Re Operating System Framework
Interface and Usage Documentation
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
list_alloc
1// vim:set ft=cpp: -*- Mode: C++ -*-
2/*
3 * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Torsten Frenzel <frenzel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
6 *
7 * License: see LICENSE.spdx (in this directory or the directories above)
8 */
9
10#pragma once
11
12#include <l4/cxx/arith>
13#include <l4/cxx/minmax>
14#include <l4/sys/consts.h>
15
16namespace cxx {
17
/**
 * Standard list-based allocator.
 *
 * Maintains a singly linked, address-ordered list of free regions. The
 * bookkeeping header (`Mem_block`) is stored in-place at the start of
 * every free region, so the allocator needs no memory of its own.
 */
class List_alloc
{
private:
  friend class List_alloc_sanity_guard;

  /// In-place header of a free region.
  struct Mem_block
  {
    Mem_block *next;    ///< Next free region (ascending address order).
    unsigned long size; ///< Size of this free region in bytes.
  };

  /// Head of the address-ordered free list (0 if empty).
  Mem_block *_first;

  inline void check_overlap(void *, unsigned long );
  inline void sanity_check_list(char const *, char const *);
  inline void merge();

public:

  /**
   * Initializes an empty list allocator.
   */
  List_alloc() : _first(0) {}

  /**
   * Return a free memory block to the allocator.
   *
   * \param block         Start address of the block to free.
   * \param size          Size of the block in bytes.
   * \param initial_free  Pass true for fresh, possibly unaligned memory
   *                      given to the allocator for the first time; the
   *                      block is then shrunk to Mem_block alignment.
   */
  inline void free(void *block, unsigned long size, bool initial_free = false);

  /**
   * Allocate a memory block.
   *
   * \param size   Requested size in bytes.
   * \param align  Minimum alignment of the resulting block.
   * \param lower  Lower bound of the allowed address region.
   * \param upper  Upper bound (inclusive) of the allowed address region.
   *
   * \return Pointer to the allocated block, or 0 on failure.
   */
  inline void *alloc(unsigned long size, unsigned long align,
                     unsigned long lower = 0, unsigned long upper = ~0UL);

  /**
   * Allocate a memory block of min <= size <= max.
   *
   * \param      min          Minimum acceptable size in bytes.
   * \param[in,out] max       In: maximum acceptable size; out: size
   *                          actually allocated.
   * \param      align        Minimum alignment of the resulting block.
   * \param      granularity  Size granularity (power of two).
   * \param      lower        Lower bound of the allowed address region.
   * \param      upper        Upper bound (inclusive) of the region.
   *
   * \return Pointer to the allocated block, or 0 on failure.
   */
  inline void *alloc_max(unsigned long min, unsigned long *max,
                         unsigned long align, unsigned granularity,
                         unsigned long lower = 0, unsigned long upper = ~0UL);

  /**
   * Get the amount of available memory.
   */
  inline unsigned long avail();

  /// Dump the free list via `out.printf()`.
  template <typename DBG>
  void dump_free_list(DBG &out);
};
113
114#if !defined (CXX_LIST_ALLOC_SANITY)
/**
 * No-op stand-in for the sanity guard, used when CXX_LIST_ALLOC_SANITY
 * is not defined; the empty inline constructor compiles away entirely.
 */
class List_alloc_sanity_guard
{
public:
  List_alloc_sanity_guard(List_alloc *, char const *)
  {}

};
122
123
// No-op when sanity checking (CXX_LIST_ALLOC_SANITY) is disabled.
void
List_alloc::check_overlap(void *, unsigned long )
{}
127
// No-op when sanity checking (CXX_LIST_ALLOC_SANITY) is disabled.
void
List_alloc::sanity_check_list(char const *, char const *)
{}
131
132#else
133
/**
 * Scope guard that validates the free list of an allocator: the
 * constructor runs sanity_check_list() on entry to the guarded
 * function, the destructor runs it again on exit.
 */
class List_alloc_sanity_guard
{
private:
  List_alloc *a;     ///< Allocator whose free list is checked.
  char const *func;  ///< Name of the guarded function (for diagnostics).

public:
  /// Check the list on entry of \a func.
  List_alloc_sanity_guard(List_alloc *a, char const *func)
  : a(a), func(func)
  { a->sanity_check_list(func, "entry"); }

  /// Check the list again when the guarded function is left.
  ~List_alloc_sanity_guard()
  { a->sanity_check_list(func, "exit"); }
};
148
/**
 * Sanity check for free(): report (via L4::cerr) a misaligned block
 * address or any overlap of [b, b+s) with a block already on the free
 * list (i.e. a double free). Errors are only printed; the operation is
 * not aborted.
 *
 * NOTE(review): relies on L4::cerr, but no iostream header is visible
 * in this file — presumably users enabling CXX_LIST_ALLOC_SANITY must
 * include <l4/cxx/iostream> themselves; verify against callers.
 */
void
List_alloc::check_overlap(void *b, unsigned long s)
{
  // Mem_block headers are stored in-place, so freed memory must be
  // aligned to the power-of-two rounded size of a Mem_block.
  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;
  if ((unsigned long)b & mb_align)
    {
      L4::cerr << "List_alloc(FATAL): trying to free unaligned memory: "
               << b << " align=" << arith::Ld<sizeof(Mem_block)>::value << "\n";
    }

  Mem_block *c = _first;
  for (;c ; c = c->next)
    {
      // [x_s, x_e): block being freed; [b_s, b_e): current free block.
      unsigned long x_s = (unsigned long)b;
      unsigned long x_e = x_s + s;
      unsigned long b_s = (unsigned long)c;
      unsigned long b_e = b_s + c->size;

      // Overlap if either interval has an endpoint inside the other.
      if ((x_s >= b_s && x_s < b_e)
          || (x_e > b_s && x_e <= b_e)
          || (b_s >= x_s && b_s < x_e)
          || (b_e > x_s && b_e <= x_e))
        {
          L4::cerr << "List_alloc(FATAL): trying to free memory that "
                      "is already free: \n  ["
                   << (void*)x_s << '-' << (void*)x_e << ") overlaps ["
                   << (void*)b_s << '-' << (void*)b_e << ")\n";
        }
    }
}
179
180void
181List_alloc::sanity_check_list(char const *func, char const *info)
182{
183 Mem_block *c = _first;
184 for (;c ; c = c->next)
185 {
186 if (c->next)
187 {
188 if (c >= c->next)
189 {
190 L4::cerr << "List_alloc(FATAL): " << func << '(' << info
191 << "): list order violation\n";
192 }
193
194 if (((unsigned long)c) + c->size > (unsigned long)c->next)
195 {
196 L4::cerr << "List_alloc(FATAL): " << func << '(' << info
197 << "): list order violation\n";
198 }
199 }
200 }
201}
202
203#endif
204
205void
206List_alloc::merge()
207{
208 List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
209 Mem_block *c = _first;
210 while (c && c->next)
211 {
212 unsigned long f_start = reinterpret_cast<unsigned long>(c);
213 unsigned long f_end = f_start + c->size;
214 unsigned long n_start = reinterpret_cast<unsigned long>(c->next);
215
216 if (f_end == n_start)
217 {
218 c->size += c->next->size;
219 c->next = c->next->next;
220 continue;
221 }
222
223 c = c->next;
224 }
225}
226
/**
 * Return a free memory block to the allocator.
 *
 * \param block         Start address of the block.
 * \param size          Size of the block in bytes.
 * \param initial_free  True when fresh, possibly unaligned memory is
 *                      handed in for the first time: the start is
 *                      rounded up and the size rounded down to the
 *                      Mem_block alignment. Otherwise (memory obtained
 *                      from alloc()) the size is only rounded up.
 *
 * The block is inserted into the address-ordered free list and merged
 * with adjacent free blocks.
 */
void
List_alloc::free(void *block, unsigned long size, bool initial_free)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  // Alignment mask derived from the power-of-two rounded Mem_block size.
  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;

  if (initial_free)
    {
      // enforce alignment constraint on initial memory
      unsigned long nblock = (reinterpret_cast<unsigned long>(block) + mb_align)
                             & ~mb_align;
      size = (size - (nblock - reinterpret_cast<unsigned long>(block)))
             & ~mb_align;
      block = reinterpret_cast<void*>(nblock);
    }
  else
    // blow up size to the minimum aligned size
    size = (size + mb_align) & ~mb_align;

  // Double-free diagnostic; a no-op unless sanity checking is enabled.
  check_overlap(block, size);

  // Find the insertion point that keeps the list sorted by address.
  Mem_block **c = &_first;
  Mem_block *next = 0;

  if (*c)
    {
      while (*c && *c < block)
        c = &(*c)->next;

      next = *c;
    }

  // Store the Mem_block header in-place at the start of the freed memory.
  *c = reinterpret_cast<Mem_block*>(block);

  (*c)->next = next;
  (*c)->size = size;

  merge();
}
267
/**
 * Allocate a memory block of min <= size <= max.
 *
 * Searches the free list for the block that yields the largest usable
 * size (capped at *max) subject to alignment, granularity, and the
 * [lower, upper] address region; allocates from the best fit found.
 *
 * \param      min          Minimum acceptable size in bytes.
 * \param[in,out] max       In: maximum acceptable size; out: the size
 *                          actually allocated.
 * \param      align        Minimum alignment (power of two) of the result.
 * \param      granularity  Size granularity (power of two).
 * \param      lower        Lower bound of the allowed address region.
 * \param      upper        Upper bound (inclusive) of the region.
 *
 * \return Pointer to the allocated block, or 0 if no fit exists.
 */
void *
List_alloc::alloc_max(unsigned long min, unsigned long *max, unsigned long align,
                      unsigned granularity, unsigned long lower,
                      unsigned long upper)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  unsigned char const mb_bits = arith::Ld<sizeof(Mem_block)>::value;
  unsigned long const mb_align = (1UL << mb_bits) - 1;

  // blow minimum up to at least the minimum aligned size of a Mem_block
  min = l4_round_size(min, mb_bits);
  // truncate maximum to at least the size of a Mem_block
  *max = l4_trunc_size(*max, mb_bits);
  // truncate maximum size according to granularity
  *max = *max & ~(granularity - 1UL);

  if (min > *max)
    return 0;

  unsigned long almask = align ? (align - 1UL) : 0;

  // minimum alignment is given by the size of a Mem_block
  if (almask < mb_align)
    almask = mb_align;

  Mem_block **c = &_first;
  Mem_block **fit = 0;          // best candidate found so far
  unsigned long max_fit = 0;    // usable size of that candidate
  unsigned long a_lower = (lower + almask) & ~almask;

  for (; *c; c = &(*c)->next)
    {
      // address of free memory block
      unsigned long n_start = reinterpret_cast<unsigned long>(*c);

      // block too small, next
      // XXX: maybe we can skip this and just do the test below
      if ((*c)->size < min)
        continue;

      // block outside region, next
      if (upper < n_start || a_lower > n_start + (*c)->size)
        continue;

      // aligned start address within the free block
      unsigned long a_start = (n_start + almask) & ~almask;

      // check if aligned start address is behind the block, next
      if (a_start - n_start >= (*c)->size)
        continue;

      a_start = a_start < a_lower ? a_lower : a_start;

      // end address would overflow, next
      if (min > ~0UL - a_start)
        continue;

      // block outside region, next
      if (a_start + min - 1UL > upper)
        continue;

      // remaining size after subtracting the padding for the alignment
      unsigned long r_size = (*c)->size - a_start + n_start;

      // upper limit can limit maximum size
      if (a_start + r_size - 1UL > upper)
        r_size = upper - a_start + 1UL;

      // round down according to granularity
      r_size &= ~(granularity - 1UL);

      // block too small
      if (r_size < min)
        continue;

      // a block satisfying *max fully: take it immediately
      if (r_size >= *max)
        {
          fit = c;
          max_fit = *max;
          break;
        }

      // otherwise remember the largest usable block seen so far
      if (r_size > max_fit)
        {
          max_fit = r_size;
          fit = c;
        }
    }

  if (fit)
    {
      // Recompute the aligned start and remaining size for the chosen
      // block (the loop variables are out of scope here).
      unsigned long n_start = reinterpret_cast<unsigned long>(*fit);
      unsigned long a_lower = (lower + almask) & ~almask;
      unsigned long a_start = (n_start + almask) & ~almask;
      a_start = a_start < a_lower ? a_lower : a_start;
      unsigned long r_size = (*fit)->size - a_start + n_start;

      if (a_start > n_start)
        {
          // keep the leading padding on the free list; continue with
          // the next pointer of the shrunken block
          (*fit)->size -= r_size;
          fit = &(*fit)->next;
        }
      else
        // allocation starts at the block header: unlink the block
        *fit = (*fit)->next;

      *max = max_fit;
      if (r_size == max_fit)
        return reinterpret_cast<void *>(a_start);

      // return the unused tail of the block to the free list
      Mem_block *m = reinterpret_cast<Mem_block*>(a_start + max_fit);
      m->next = *fit;
      m->size = r_size - max_fit;
      *fit = m;
      return reinterpret_cast<void *>(a_start);
    }

  return 0;
}
387
/**
 * Allocate a memory block.
 *
 * First-fit search over the address-ordered free list for a block that
 * can hold `size` bytes at alignment `align` within [lower, upper].
 *
 * \param size   Requested size in bytes (rounded up to Mem_block
 *               alignment).
 * \param align  Minimum alignment (power of two) of the result.
 * \param lower  Lower bound of the allowed address region.
 * \param upper  Upper bound (inclusive) of the allowed address region.
 *
 * \return Pointer to the allocated block, or 0 if no fit exists.
 */
void *
List_alloc::alloc(unsigned long size, unsigned long align, unsigned long lower,
                  unsigned long upper)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  unsigned long const mb_align
    = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;

  // blow up size to the minimum aligned size
  size = (size + mb_align) & ~mb_align;

  unsigned long almask = align ? (align - 1UL) : 0;

  // minimum alignment is given by the size of a Mem_block
  if (almask < mb_align)
    almask = mb_align;

  Mem_block **c = &_first;
  unsigned long a_lower = (lower + almask) & ~almask;

  for (; *c; c=&(*c)->next)
    {
      // address of free memory block
      unsigned long n_start = reinterpret_cast<unsigned long>(*c);

      // block too small, next
      // XXX: maybe we can skip this and just do the test below
      if ((*c)->size < size)
        continue;

      // block outside region, next
      if (upper < n_start || a_lower > n_start + (*c)->size)
        continue;

      // aligned start address within the free block
      unsigned long a_start = (n_start + almask) & ~almask;

      // block too small after alignment, next
      if (a_start - n_start >= (*c)->size)
        continue;

      a_start = a_start < a_lower ? a_lower : a_start;

      // end address would overflow, next
      if (size > ~0UL - a_start)
        continue;

      // block outside region, next
      if (a_start + size - 1UL > upper)
        continue;

      // remaining size after subtracting the padding
      // for the alignment
      unsigned long r_size = (*c)->size - a_start + n_start;

      // block too small
      if (r_size < size)
        continue;

      if (a_start > n_start)
        {
          // have free space before the allocated block
          // shrink the block and set c to the next pointer of that
          // block
          (*c)->size -= r_size;
          c = &(*c)->next;
        }
      else
        // drop the block, c remains the next pointer of the
        // previous block
        *c = (*c)->next;

      // allocated the whole remaining space
      if (r_size == size)
        return reinterpret_cast<void*>(a_start);

      // add a new free block behind the allocated block
      Mem_block *m = reinterpret_cast<Mem_block*>(a_start + size);
      m->next = *c;
      m->size = r_size - size;
      *c = m;
      return reinterpret_cast<void *>(a_start);
    }

  return 0;
}
475
476unsigned long
478{
479 List_alloc_sanity_guard __attribute__((unused)) guard(this, __FUNCTION__);
480 Mem_block *c = _first;
481 unsigned long a = 0;
482 while (c)
483 {
484 a += c->size;
485 c = c->next;
486 }
487
488 return a;
489}
490
491template <typename DBG>
492void
493List_alloc::dump_free_list(DBG &out)
494{
495 Mem_block *c = _first;
496 while (c)
497 {
498 unsigned sz;
499 const char *unit;
500
501 if (c->size < 1024)
502 {
503 sz = c->size;
504 unit = "Byte";
505 }
506 else if (c->size < 1 << 20)
507 {
508 sz = c->size >> 10;
509 unit = "kB";
510 }
511 else
512 {
513 sz = c->size >> 20;
514 unit = "MB";
515 }
516
517 out.printf("%12p - %12p (%u %s)\n", c,
518 reinterpret_cast<char *>(c) + c->size - 1, sz, unit);
519
520 c = c->next;
521 }
522}
523
524}
Standard list-based allocator.
Definition list_alloc:22
unsigned long avail()
Get the amount of available memory.
Definition list_alloc:477
void * alloc_max(unsigned long min, unsigned long *max, unsigned long align, unsigned granularity, unsigned long lower=0, unsigned long upper=~0UL)
Allocate a memory block of min <= size <= max.
Definition list_alloc:269
List_alloc()
Initializes an empty list allocator.
Definition list_alloc:46
void * alloc(unsigned long size, unsigned long align, unsigned long lower=0, unsigned long upper=~0UL)
Allocate a memory block.
Definition list_alloc:389
void free(void *block, unsigned long size, bool initial_free=false)
Return a free memory block to the allocator.
Definition list_alloc:228
l4_addr_t l4_trunc_size(l4_addr_t address, unsigned char bits) L4_NOTHROW
Round an address down to the next lower flexpage with size bits.
Definition consts.h:463
l4_addr_t l4_round_size(l4_addr_t value, unsigned char bits) L4_NOTHROW
Round value up to the next alignment with bits size.
Definition consts.h:488
BasicOStream cerr
Standard error stream.
Our C++ library.
Definition arith:11