L4Re Operating System Framework
Interface and Usage Documentation
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
region_mapping
Go to the documentation of this file.
1// -*- Mode: C++ -*-
2// vim:ft=cpp
7/*
8 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
9 * Alexander Warg <warg@os.inf.tu-dresden.de>,
10 * Björn Döbel <doebel@os.inf.tu-dresden.de>
11 * economic rights: Technische Universität Dresden (Germany)
12 *
13 * License: see LICENSE.spdx (in this directory or the directories above)
14 */
15
16#pragma once
17
18#include <l4/cxx/avl_map>
19#include <l4/sys/types.h>
20#include <l4/re/rm>
21
22
23namespace L4Re { namespace Util {
24class Region
25{
26private:
27 l4_addr_t _start, _end;
28#ifdef CONFIG_L4RE_REGION_INFO
29 char _dbg_name[40]; // Not a 0-terminating string
30 unsigned char _dbg_name_len = 0;
31 static_assert(sizeof(_dbg_name) < 256);
32 Rm::Offset _dbg_backing_offset = 0;
33#endif
34
35public:
36 Region() noexcept : _start(~0UL), _end(~0UL) {}
37 Region(l4_addr_t addr) noexcept : _start(addr), _end(addr) {}
38 Region(l4_addr_t start, l4_addr_t end) noexcept
39 : _start(start), _end(end) {}
40 Region(l4_addr_t start, l4_addr_t end,
41 char const *name, unsigned name_len,
42 Rm::Offset backing_offset) noexcept
43 : _start(start), _end(end)
44 {
45#ifdef CONFIG_L4RE_REGION_INFO
46 _dbg_name_len = name_len > sizeof(_dbg_name)
47 ? sizeof(_dbg_name) : name_len;
48 for (unsigned i = 0; i < _dbg_name_len; ++i)
49 _dbg_name[i] = name[i];
50
51 _dbg_backing_offset = backing_offset;
52#else
53 (void)name;
54 (void)name_len;
55 (void)backing_offset;
56#endif
57 }
58 l4_addr_t start() const noexcept { return _start; }
59 l4_addr_t end() const noexcept { return _end; }
60 unsigned long size() const noexcept { return end() - start() + 1; }
61 bool invalid() const noexcept { return _start == ~0UL && _end == ~0UL; }
62 bool operator < (Region const &o) const noexcept
63 { return end() < o.start(); }
64 bool contains(Region const &o) const noexcept
65 { return o.start() >= start() && o.end() <= end(); }
66 bool operator == (Region const &o) const noexcept
67 { return o.start() == start() && o.end() == end(); }
68 ~Region() noexcept {}
69
70#ifdef CONFIG_L4RE_REGION_INFO
71 char const *name() const { return _dbg_name; }
72 unsigned char name_len() const { return _dbg_name_len; }
73 Rm::Offset backing_offset() const { return _dbg_backing_offset; }
74#else
75 char const *name() const { return "N/A"; }
76 unsigned char name_len() const { return 3; }
77 Rm::Offset backing_offset() const { return 0; }
78#endif
79};
80
81template< typename DS, typename OPS >
82class Region_handler
83{
84private:
85 L4Re::Rm::Offset _offs;
86 DS _mem;
87 l4_cap_idx_t _client_cap = L4_INVALID_CAP;
89
90public:
91 typedef DS Dataspace;
92 typedef OPS Ops;
93 typedef typename OPS::Map_result Map_result;
94
95 Region_handler() noexcept : _offs(0), _mem(), _flags() {}
96 Region_handler(Dataspace const &mem, l4_cap_idx_t client_cap,
97 L4Re::Rm::Offset offset = 0,
99 : _offs(offset), _mem(mem), _client_cap(client_cap), _flags(flags)
100 {}
101
102 Dataspace const &memory() const noexcept
103 {
104 return _mem;
105 }
106
107 l4_cap_idx_t client_cap_idx() const noexcept
108 {
109 return _client_cap;
110 }
111
112 L4Re::Rm::Offset offset() const noexcept
113 {
114 return _offs;
115 }
116
117 constexpr bool is_ro() const noexcept
118 {
119 return !(_flags & L4Re::Rm::F::W);
120 }
121
122 L4Re::Rm::Region_flags caching() const noexcept
123 {
124 return _flags & L4Re::Rm::F::Caching_mask;
125 }
126
127 L4Re::Rm::Region_flags flags() const noexcept
128 {
129 return _flags;
130 }
131
132 Region_handler operator + (l4_int64_t offset) const noexcept
133 {
134 Region_handler n = *this; n._offs += offset; return n;
135 }
136
137 void free(l4_addr_t start, unsigned long size) const noexcept
138 {
139 Ops::free(this, start, size);
140 }
141
142 int map(l4_addr_t addr, Region const &r, bool writable,
143 Map_result *result) const
144 {
145 return Ops::map(this, addr, r, writable, result);
146 }
147
148 int map_info(l4_addr_t *start_addr, l4_addr_t *end_addr) const
149 {
150 return Ops::map_info(this, start_addr, end_addr);
151 }
152
153};
154
155
156template< typename Hdlr, template<typename T> class Alloc >
157class Region_map
158{
159protected:
161 Tree _rm;
162 Tree _am;
163
164private:
165 l4_addr_t _start;
166 l4_addr_t _end;
167
168protected:
169 void set_limits(l4_addr_t start, l4_addr_t end) noexcept
170 {
171 _start = start;
172 _end = end;
173 }
174
175public:
176 typedef typename Tree::Item_type Item;
177 typedef typename Tree::Node Node;
178 typedef typename Tree::Key_type Key_type;
179 typedef Hdlr Region_handler;
180
181 typedef typename Tree::Iterator Iterator;
182 typedef typename Tree::Const_iterator Const_iterator;
183 typedef typename Tree::Rev_iterator Rev_iterator;
184 typedef typename Tree::Const_rev_iterator Const_rev_iterator;
185
186 Iterator begin() noexcept { return _rm.begin(); }
187 Const_iterator begin() const noexcept { return _rm.begin(); }
188 Iterator end() noexcept { return _rm.end(); }
189 Const_iterator end() const noexcept { return _rm.end(); }
190
191 Iterator area_begin() noexcept { return _am.begin(); }
192 Const_iterator area_begin() const noexcept { return _am.begin(); }
193 Iterator area_end() noexcept { return _am.end(); }
194 Const_iterator area_end() const noexcept { return _am.end(); }
195 Node area_find(Key_type const &c) const noexcept { return _am.find_node(c); }
196
197 l4_addr_t min_addr() const noexcept { return _start; }
198 l4_addr_t max_addr() const noexcept { return _end; }
199
200
201 Region_map(l4_addr_t start, l4_addr_t end) noexcept : _start(start), _end(end) {}
202
203 Node find(Key_type const &key) const noexcept
204 {
205 Node n = _rm.find_node(key);
206 if (!n)
207 return Node();
208
209 // 'find' should find any region overlapping with the searched one, the
210 // caller should check for further requirements
211 if (0)
212 if (!n->first.contains(key))
213 return Node();
214
215 return n;
216 }
217
218 Node lower_bound(Key_type const &key) const noexcept
219 {
220 Node n = _rm.lower_bound_node(key);
221 return n;
222 }
223
224 Node lower_bound_area(Key_type const &key) const noexcept
225 {
226 Node n = _am.lower_bound_node(key);
227 return n;
228 }
229
230 l4_addr_t attach_area(l4_addr_t addr, unsigned long size,
231 L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
232 unsigned char align = L4_PAGESHIFT) noexcept
233 {
234 if (size < 2)
235 return L4_INVALID_ADDR;
236
237
238 Region c;
239
240 if (!(flags & L4Re::Rm::F::Search_addr))
241 {
242 c = Region(addr, addr + size - 1);
243 Node r = _am.find_node(c);
244 if (r)
245 return L4_INVALID_ADDR;
246 }
247
248 while (flags & L4Re::Rm::F::Search_addr)
249 {
250 if (addr < min_addr() || (addr + size - 1) > max_addr())
251 addr = min_addr();
252 addr = find_free(addr, max_addr(), size, align, flags);
253 if (addr == L4_INVALID_ADDR)
254 return L4_INVALID_ADDR;
255
256 c = Region(addr, addr + size - 1);
257 Node r = _am.find_node(c);
258 if (!r)
259 break;
260
261 if (r->first.end() >= max_addr())
262 return L4_INVALID_ADDR;
263
264 addr = r->first.end() + 1;
265 }
266
267 if (_am.insert(c, Hdlr(typename Hdlr::Dataspace(), 0, 0, flags.region_flags())).second == 0)
268 return addr;
269
270 return L4_INVALID_ADDR;
271 }
272
273 bool detach_area(l4_addr_t addr) noexcept
274 {
275 if (_am.remove(addr))
276 return false;
277
278 return true;
279 }
280
281 void *attach(void *addr, unsigned long size, Hdlr const &hdlr,
282 L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
283 unsigned char align = L4_PAGESHIFT,
284 char const *name = nullptr, unsigned name_len = 0,
285 L4Re::Rm::Offset backing_offset = 0) noexcept
286 {
287 if (size < 2)
288 return L4_INVALID_PTR;
289
290 l4_addr_t beg, end;
291 int err = hdlr.map_info(&beg, &end);
292 if (err > 0)
293 {
294 // Mapping address determined by underlying dataspace. Make sure we
295 // prevent any additional alignment. We already know the place!
296 beg += hdlr.offset();
297 end = beg + size - 1U;
298 align = L4_PAGESHIFT;
299
300 // In case of exact mappings, the supplied address must match because
301 // we cannot remap.
302 if (!(flags & L4Re::Rm::F::Search_addr)
303 && reinterpret_cast<l4_addr_t>(addr) != beg)
304 return L4_INVALID_PTR;
305
306 // When searching for a suitable address, the start must cover the
307 // dataspace beginning to "find" the right spot.
308 if ((flags & L4Re::Rm::F::Search_addr)
309 && reinterpret_cast<l4_addr_t>(addr) > beg)
310 return L4_INVALID_PTR;
311 }
312 else if (err == 0)
313 {
314 beg = reinterpret_cast<l4_addr_t>(addr);
315 end = max_addr();
316 }
317 else if (err < 0)
318 return L4_INVALID_PTR;
319
320 if (flags & L4Re::Rm::F::In_area)
321 {
322 Node r = _am.find_node(Region(beg, beg + size - 1));
323 if (!r || (r->second.flags() & L4Re::Rm::F::Reserved))
324 return L4_INVALID_PTR;
325
326 end = r->first.end();
327 }
328
329 if (flags & L4Re::Rm::F::Search_addr)
330 {
331 beg = find_free(beg, end, size, align, flags);
332 if (beg == L4_INVALID_ADDR)
333 return L4_INVALID_PTR;
334 }
335
337 && _am.find_node(Region(beg, beg + size - 1)))
338 return L4_INVALID_PTR;
339
340 if (beg < min_addr() || beg + size - 1 > end)
341 return L4_INVALID_PTR;
342
343 if (_rm.insert(Region(beg, beg + size - 1,
344 name, name_len, backing_offset), hdlr).second
345 == 0)
346 return reinterpret_cast<void*>(beg);
347
348 return L4_INVALID_PTR;
349 }
350
351 int detach(void *addr, unsigned long sz, unsigned flags,
352 Region *reg, Hdlr *hdlr) noexcept
353 {
354 l4_addr_t a = reinterpret_cast<l4_addr_t>(addr);
355 Region dr(a, a + sz - 1);
356 Region res(~0UL, 0);
357
358 Node r = find(dr);
359 if (!r)
360 return -L4_ENOENT;
361
362 Region g = r->first;
363 Hdlr const &h = r->second;
364
365 if (flags & L4Re::Rm::Detach_overlap || dr.contains(g))
366 {
367 // successful removal of the AVL tree item also frees the node
368 Hdlr h_copy = h;
369
370 if (_rm.remove(g))
371 return -L4_ENOENT;
372
373 if (!(flags & L4Re::Rm::Detach_keep) && (h_copy.flags() & L4Re::Rm::F::Detach_free))
374 h_copy.free(0, g.size());
375
376 if (hdlr)
377 *hdlr = h_copy;
378 if (reg)
379 *reg = g;
380
381 if (find(dr))
383 else
384 return Rm::Detached_ds;
385 }
386 else if (dr.start() <= g.start())
387 {
388 // move the start of a region
389
390 if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
391 h.free(0, dr.end() + 1 - g.start());
392
393 unsigned long sz = dr.end() + 1 - g.start();
394 Item &cn = const_cast<Item &>(*r);
395 cn.first = Region(dr.end() + 1, g.end());
396 cn.second = cn.second + sz;
397 if (hdlr)
398 *hdlr = Hdlr();
399 if (reg)
400 *reg = Region(g.start(), dr.end());
401 if (find(dr))
403 else
404 return Rm::Kept_ds;
405 }
406 else if (dr.end() >= g.end())
407 {
408 // move the end of a region
409
410 if (!(flags & L4Re::Rm::Detach_keep)
411 && (h.flags() & L4Re::Rm::F::Detach_free))
412 h.free(dr.start() - g.start(), g.end() + 1 - dr.start());
413
414 Item &cn = const_cast<Item &>(*r);
415 cn.first = Region(g.start(), dr.start() - 1);
416 if (hdlr)
417 *hdlr = Hdlr();
418 if (reg)
419 *reg = Region(dr.start(), g.end());
420
421 if (find(dr))
423 else
424 return Rm::Kept_ds;
425 }
426 else if (g.contains(dr))
427 {
428 // split a single region that contains the new region
429
430 if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
431 h.free(dr.start() - g.start(), dr.size());
432
433 // first move the end off the existing region before the new one
434 Item &cn = const_cast<Item &>(*r);
435 cn.first = Region(g.start(), dr.start()-1);
436
437 int err;
438
439 // insert a second region for the remaining tail of
440 // the old existing region
441 err = _rm.insert(Region(dr.end() + 1, g.end()),
442 h + (dr.end() + 1 - g.start())).second;
443
444 if (err)
445 return err;
446
447 if (hdlr)
448 *hdlr = h;
449 if (reg)
450 *reg = dr;
451 return Rm::Split_ds;
452 }
453 return -L4_ENOENT;
454 }
455
456 l4_addr_t find_free(l4_addr_t start, l4_addr_t end, l4_addr_t size,
457 unsigned char align, L4Re::Rm::Flags flags) const noexcept;
458
459};
460
461
462template< typename Hdlr, template<typename T> class Alloc >
464Region_map<Hdlr, Alloc>::find_free(l4_addr_t start, l4_addr_t end,
465 unsigned long size, unsigned char align, L4Re::Rm::Flags flags) const noexcept
466{
467 l4_addr_t addr = start;
468
469 if (addr == ~0UL || addr < min_addr() || addr >= end)
470 addr = min_addr();
471
472 addr = l4_round_size(addr, align);
473 Node r;
474
475 for(;;)
476 {
477 if (addr > 0 && addr - 1 > end - size)
478 return L4_INVALID_ADDR;
479
480 Region c(addr, addr + size - 1);
481 r = _rm.find_node(c);
482
483 if (!r)
484 {
485 if (!(flags & L4Re::Rm::F::In_area) && (r = _am.find_node(c)))
486 {
487 if (r->first.end() > end - size)
488 return L4_INVALID_ADDR;
489
490 addr = l4_round_size(r->first.end() + 1, align);
491 continue;
492 }
493 break;
494 }
495 else if (r->first.end() > end - size)
496 return L4_INVALID_ADDR;
497
498 addr = l4_round_size(r->first.end() + 1, align);
499 }
500
501 if (!r)
502 return addr;
503
504 return L4_INVALID_ADDR;
505}
506
507}}
AVL map.
@ Detached_ds
Detached data space.
Definition rm:91
@ Detach_again
Detached data space, more to do.
Definition rm:96
@ Split_ds
Split data space, and done.
Definition rm:93
@ Kept_ds
Kept data space.
Definition rm:92
@ Detach_overlap
Do an unmap of all overlapping regions.
Definition rm:240
@ Detach_keep
Do not free the detached data space, ignore the F::Detach_free.
Definition rm:249
Region Key_type
Type of the key values.
Definition avl_map:59
Base_type::Node Node
Return type for find.
Definition avl_map:63
ITEM_TYPE Item_type
Type for the items stored in the set.
Definition avl_set:141
unsigned long l4_addr_t
Address type.
Definition l4int.h:34
signed long long l4_int64_t
Signed 64bit value.
Definition l4int.h:30
unsigned long l4_cap_idx_t
Capability selector type.
Definition types.h:335
@ L4_INVALID_CAP
Invalid capability selector.
Definition consts.h:153
@ L4_ENOENT
No such entity.
Definition err.h:34
#define L4_INVALID_PTR
Invalid address as pointer type.
Definition consts.h:512
#define L4_PAGESHIFT
Size of a page, log2-based.
Definition consts.h:26
l4_addr_t l4_round_size(l4_addr_t value, unsigned char bits) L4_NOTHROW
Round value up to the next alignment with bits size.
Definition consts.h:484
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:505
Common L4 ABI Data Types.
L4Re C++ Interfaces.
Definition cmd_control:14
Region mapper interface.
Region_flags
Region flags (permissions, cacheability, special).
Definition rm:129
@ Reserved
Region is reserved (blocked)
Definition rm:150
@ Detach_free
Free the portion of the data space after detach.
Definition rm:146
@ W
Writable region.
Definition rm:135
@ Caching_mask
Mask of all Rm cache bits.
Definition rm:154
@ Search_addr
Search for a suitable address range.
Definition rm:114
@ In_area
Search only in area, or map into area.
Definition rm:116