#include "vcon_stream.h"
#include <l4/bid_config.h>
#include <l4/cxx/hlist>
#include <l4/cxx/std_alloc>
#include <l4/l4re_vfs/backend>

// Two alternative definitions of DEBUG_LOG: the first one executes the given
// debug statements when 'level' is non-zero, the second one compiles them
// away. The preprocessor conditional selecting between them is not part of
// this excerpt.
static int debug_mmap = 1;
#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)

#define DEBUG_LOG(level, dbg...) do { } while (0)

// Use one big dataspace as a pool for small anonymous mappings.
#define USE_BIG_ANON_DS
class Fd_store : public L4Re::Core::Fd_store
{
public:
  Fd_store() noexcept;
};

// The standard descriptors (0, 1, 2) are backed by one Vcon-based stream.
class Std_stream : public L4Re::Core::Vcon_stream
{
  // ...
};

Fd_store::Fd_store() noexcept
{
  // The shared stdio stream lives in a statically allocated, suitably
  // aligned buffer so it is never deleted. The placement-new that constructs
  // the Std_stream 's' inside 'm' is elided in this excerpt.
  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
  // ...
  set(0, cxx::ref_ptr(s)); // stdin
  set(1, cxx::ref_ptr(s)); // stdout
  set(2, cxx::ref_ptr(s)); // stderr
}
class Root_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}
};
// Vfs implements the L4Re::Vfs::Ops interface of this process; its
// constructor wires up the root mount tree and the initial cwd.
Vfs()
: _early_oom(true), _root_mount(), _root(L4Re::Env::env())
{
  _root_mount.add_ref();
  // ...
  _root_mount.mount(cxx::ref_ptr(&_root));
  _cwd = cxx::ref_ptr(&_root);

  Ref_ptr<L4Re::Vfs::File> rom;
  _root.openat("rom", 0, 0, &rom);

  _root_mount.create_tree("lib/foo", rom);

  _root.openat("lib", 0, 0, &_cwd);
}
  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
  cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
  set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
    override;

  int mmap2(void *start, size_t len, int prot, int flags, int fd,
            off_t offset, void **ptr) noexcept override;

  int munmap(void *start, size_t len) noexcept override;
  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
             void **new_addr) noexcept override;
  int mprotect(const void *a, size_t sz, int prot) noexcept override;
  int msync(void *addr, size_t len, int flags) noexcept override;
  int madvise(void *addr, size_t len, int advice) noexcept override;

  int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
  int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
  L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;
  L4Re::Vfs::File_system_list file_system_list() noexcept override;

  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;

  void operator delete (void *) {}

  void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
  void free(void *m) noexcept override { Vfs_config::free(m); }

private:
  Root_mount_tree _root_mount;
  L4Re::Core::Env_dir _root;
  Ref_ptr<L4Re::Vfs::File> _cwd;
  // Further members (among them the fd table 'fds', the file-system registry
  // '_fs_registry', the file-factory list '_file_factories' and the
  // anonymous-memory pool state '_anon_ds'/'_anon_offset') are used below
  // but not shown in this excerpt.
  File_factory_item() = default;
  File_factory_item(File_factory_item const &) = delete;
  File_factory_item &operator = (File_factory_item const &) = delete;
  void align_mmap_start_and_length(void **start, size_t *length);
  int munmap_regions(void *start, size_t len);
// Compare two C strings for equality.
static inline bool strequal(char const *a, char const *b)
{
  for (; *a && *a == *b; ++a, ++b)
    ;
  return *a == *b;
}
// Vfs::register_file_system (excerpt): refuse duplicate file-system types,
// then link the new file system at the head of the registry list.
  for (File_system *c = _fs_registry; c; c = c->next())
    if (strequal(c->type(), f->type()))
      return -EEXIST;

  f->next(_fs_registry);

// Vfs::unregister_file_system (excerpt): walk the registry through a pointer
// to the link field so the matching entry can be unlinked in place.
  File_system **p = &_fs_registry;

  for (; *p; p = &(*p)->next())
    // ...
L4Re::Vfs::File_system *
Vfs::find_fs_from_type(char const *fstype) noexcept
{
  L4Re::Vfs::File_system_list fsl(_fs_registry);
  for (L4Re::Vfs::File_system_list::Iterator c = fsl.begin();
       c != fsl.end(); ++c)
    if (strequal(c->type(), fstype))
      return *c;
  return 0;
}

L4Re::Vfs::File_system_list
Vfs::file_system_list() noexcept
{
  return L4Re::Vfs::File_system_list(_fs_registry);
}

L4Re::Vfs::File_system *
Vfs::get_file_system(char const *fstype) noexcept
{
  L4Re::Vfs::File_system *fs;
  if ((fs = find_fs_from_type(fstype)))
    return fs;

  // Not registered yet: try to load a matching file-system module and look
  // the type up again.
  int res = Vfs_config::load_module(fstype);
  if (res < 0)
    return 0;

  return find_fs_from_type(fstype);
}
// Vfs::register_file_factory (excerpt): the registry item is allocated via
// the Ops-provided allocator, constructed in place and pushed onto the
// intrusive factory list.
  void *x = this->malloc(sizeof(File_factory_item));
  // ...
  _file_factories.push_front(ff);

// Vfs::unregister_file_factory (excerpt): find the matching item, unlink it
// and destroy it manually, mirroring the manual construction above.
  for (auto p: _file_factories)
    if (p->f == f)
      {
        _file_factories.remove(p);
        p->~File_factory_item();
        // ...
      }
Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(int proto) noexcept
{
  for (auto p: _file_factories)
    if (p->f->proto() == proto)
      return p->f;

  return Ref_ptr<L4Re::Vfs::File_factory>();
}

Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(char const *proto_name) noexcept
{
  for (auto p: _file_factories)
    {
      auto n = p->f->proto_name();
      // Inline string comparison of the factory's protocol name against
      // 'proto_name' ('a' iterates over 'n').
      char const *a = n;
      char const *b = proto_name;
      for (; *a && *b && *a == *b; ++a, ++b)
        ;

      if ((*a == 0) && (*b == 0))
        return p->f;
    }

  return Ref_ptr<L4Re::Vfs::File_factory>();
}
int
Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
{
  int fd = fds.alloc();
  // On success, bind 'f' (if given) to the new descriptor; a negative
  // return value means the descriptor table is exhausted.
  // ...
  return fd;
}

Ref_ptr<L4Re::Vfs::File>
Vfs::free_fd(int fd) noexcept
{
  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);

  if (!f)
    return Ref_ptr<>::Nil;

  // ...
  return f;
}
Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() noexcept
{
  return cxx::ref_ptr(&_root);
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() noexcept
{
  return _cwd;
}

void
Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  if (dir)
    _cwd = dir;
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) noexcept
{
  return fds.get(fd);
}

cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
{
  if (!fds.check_fd(fd))
    return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);

  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
  fds.set(fd, f);
  return cxx::pair(old, 0);
}
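
// Illustrative sketch (not part of vfs.cc): how the fd-table operations
// above fit together. 'vfs' stands for the process's Ops instance and
// 'file' for any open L4Re::Vfs::File; both names and the helper function
// itself are made up for this example.
static int example_fd_lifecycle(Vfs &vfs, Ref_ptr<L4Re::Vfs::File> const &file)
{
  int fd = vfs.alloc_fd(file);            // reserve a descriptor, bind 'file'
  if (fd < 0)
    return fd;                            // negative errno: table exhausted

  Ref_ptr<L4Re::Vfs::File> f = vfs.get_file(fd); // later lookups go by number

  vfs.free_fd(fd);                        // close: unbind and recycle the fd
  return f ? 0 : -1;
}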
// Fetch the file behind 'fd' into 'fi'; if the descriptor is not open,
// return '-err' from the enclosing function (the _DBG variant additionally
// emits a debug message).
#define GET_FILE_DBG(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    { /* ... debug output ... */ return -err; }

#define GET_FILE(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    return -err;
void
Vfs::align_mmap_start_and_length(void **start, size_t *length)
{
  // Truncate *start down to its page boundary and round *length up so the
  // resulting range covers whole pages (body elided in this excerpt).
  // ...
}

int
Vfs::munmap_regions(void *start, size_t len)
{
  using namespace L4Re;
  // ...
  align_mmap_start_and_length(&start, &len);

  // Detach everything in [start, start+len) from the region map. One detach
  // call may only remove part of the range, so loop until the region map no
  // longer asks for another round (Rm::Detach_again).
  while (1)
    {
      DEBUG_LOG(debug_mmap, {
          // ...
        });

      err = r->detach(l4_addr_t(start), len, &ds, This_task);
      // ...

      // Keep the dataspace reference counts consistent: a split mapping
      // gains a reference, a fully detached dataspace loses one.
      switch (err & Rm::Detach_result_mask)
        {
        case Rm::Split_ds:
          L4Re::virt_cap_alloc->take(ds);
          break;
        case Rm::Detached_ds:
          L4Re::virt_cap_alloc->release(ds);
          break;
        }

      if (!(err & Rm::Detach_again))
        break;
    }
  // ...
}
int
Vfs::munmap(void *start, size_t len) L4_NOTHROW
{
  using namespace L4Re;
  // ...
  bool matches_area = false;

  // Ask the region map for reserved areas beginning at 'start' and check
  // whether the unmap request covers exactly the first returned area.
  area_cnt = r->get_areas((l4_addr_t) start, &area_array);
  if (area_cnt > 0)
    {
      size_t area_size = area_array[0].end - area_array[0].start + 1;
      if (area_array[0].start == (l4_addr_t) start && area_size == len)
        matches_area = true;
    }

  // Detach all regions in the range. A plain -ENOENT from munmap_regions is
  // not necessarily an error when the request matched a reserved area.
  err = munmap_regions(start, len);
  if (err == -ENOENT && matches_area)
    {
      // ...
    }
  // ...
}
// alloc_ds (excerpt): allocate a fresh dataspace capability slot and back it
// with 'size' bytes from the configured memory allocator.
  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
  // ...
  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
    return err;

  DEBUG_LOG(debug_mmap, {
      // ...
    });
// Sizing of the pool used for anonymous memory, depending on the platform
// and on USE_BIG_ANON_DS (see above).
#if !defined(CONFIG_MMU)
enum
{
  ANON_MEM_DS_POOL_SIZE = 256UL << 10, // 256 KiB pool on MMU-less systems
  ANON_MEM_MAX_SIZE     = 32UL << 10,  // requests below 32 KiB come from the pool
};
#elif defined(USE_BIG_ANON_DS)
enum
{
  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // 256 MiB pool
  ANON_MEM_MAX_SIZE     = 32UL << 20,  // requests below 32 MiB come from the pool
};
#else
enum
{
  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // 256 MiB pool
  ANON_MEM_MAX_SIZE     = 0UL << 20,   // pool disabled: every request gets its own dataspace
};
#endif

// Vfs::alloc_anon_mem (excerpt): back an anonymous mapping either with a
// dedicated dataspace (large requests) or with a slice of the shared pool
// dataspace (small requests), returning the dataspace and the offset of the
// slice within it.
  if (size >= ANON_MEM_MAX_SIZE)
    {
      if ((err = alloc_ds(size, ds)) < 0)
        return err;
      // ...
      return (*ds)->allocate(0, size);
    }

  // Small request: start a new pool dataspace if there is none yet or the
  // current one cannot hold 'size' more bytes, then bump-allocate from it.
  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    {
      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
        return err;
      // ...
    }

  if (int err = (*ds)->allocate(_anon_offset, size))
    return err;

  *offset = _anon_offset;
  _anon_offset += size;
  return 0;
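
// Illustrative sketch (not part of vfs.cc): the pool logic above reduced to
// plain bookkeeping. Requests of at least ANON_MEM_MAX_SIZE get a dedicated
// dataspace; smaller ones are bump-allocated from a shared pool dataspace of
// ANON_MEM_DS_POOL_SIZE bytes, which is replaced once it cannot satisfy a
// request. All names below are local to this sketch.
struct Anon_pool_model
{
  unsigned long pool_size;   // e.g. 256UL << 20 with USE_BIG_ANON_DS
  unsigned long max_chunk;   // e.g. 32UL << 20 with USE_BIG_ANON_DS
  unsigned long offset = 0;  // next free byte in the current pool
  bool have_pool = false;

  // Returns true if 'size' is served from the shared pool and stores the
  // offset inside it in *off; returns false if a dedicated dataspace would
  // be allocated instead.
  bool alloc(unsigned long size, unsigned long *off)
  {
    if (size >= max_chunk)
      return false;                       // dedicated-dataspace path

    if (!have_pool || offset + size >= pool_size)
      {                                   // start a fresh pool dataspace
        have_pool = true;
        offset = 0;
      }

    *off = offset;
    offset += size;
    return true;
  }
};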
int
Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd,
           off_t page4k_offset, void **resptr) L4_NOTHROW
{
  DEBUG_LOG(debug_mmap, {
      // ...
    });

  using namespace L4Re;
  // ...

  // MAP_FIXED requires a page-aligned start address; afterwards the request
  // is normalized to whole pages.
  if (flags & MAP_FIXED)
    if (l4_addr_t(start) % L4_PAGESIZE)
      return -EINVAL;

  align_mmap_start_and_length(&start, &len);

  // Special case: merely reserve a range of the virtual address space (also
  // used for PROT_NONE mappings); no memory is attached.
  if ((flags & 0x1000000) || (prot == PROT_NONE))
    {
      // ...
      *resptr = reinterpret_cast<void *>(area);

      DEBUG_LOG(debug_mmap, {
          // ...
        });

      return 0;
    }

  L4Re::Rm::Flags rm_flags(0);

  // Anonymous and private mappings are backed by anonymous memory.
  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
    {
      int err = alloc_anon_mem(len, &ds, &anon_offset);
      if (err)
        return err;

      DEBUG_LOG(debug_mmap, {
          // ...
        });
    }

  if (!(flags & MAP_ANONYMOUS))
    {
      // File-backed mapping: resolve the descriptor and use the file's
      // dataspace (the local 'fds' below denotes that dataspace).
      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
      // ...

      if (flags & MAP_PRIVATE)
        {
          // Private file mapping: copy the file contents into the anonymous
          // memory allocated above, either via the dataspace's copy_in or,
          // as a fallback, by temporarily attaching both sides and copying
          // by hand.
          DEBUG_LOG(debug_mmap, outstring("COW\n"););
          int err = ds->copy_in(anon_offset, fds, offset, len);
          if (err == -L4_EINVAL)
            {
              // ...
              err = r->attach(&src, len,
                              /* ... */);
              err = r->attach(&dst, len,
                              /* ... */
                              ds.get(), anon_offset);
              // ...
              memcpy(dst.get(), src.get(), len);
            }

          offset = anon_offset;
        }
      else
        {
          // Shared file mapping: take a reference on the file's dataspace
          // and map it directly.
          L4Re::virt_cap_alloc->take(fds);
          // ...
        }
    }
  else
    offset = anon_offset;

  // Never map at address 0 unless MAP_FIXED explicitly asks for it.
  if (!(flags & MAP_FIXED) && start == 0)
    start = reinterpret_cast<void *>(L4_PAGESIZE);

  char *data = static_cast<char *>(start);
  // ...

  if (flags & MAP_FIXED)
    {
      // Reserve the target range as an area so concurrent attaches cannot
      // grab it, then remove any old mappings in the range (a missing
      // mapping, -ENOENT, is fine).
      err = r->reserve_area(&overmap_area, len);
      // ...
      rm_flags |= Rm::F::In_area;

      err = munmap_regions(start, len);
      if (err && err != -ENOENT)
        return err;
    }

  // Translate the POSIX placement and protection flags into region-map flags.
  if (!(flags & MAP_FIXED))
    rm_flags |= Rm::F::Search_addr;
  if (prot & PROT_READ)
    rm_flags |= Rm::F::R;
  if (prot & PROT_WRITE)
    rm_flags |= Rm::F::W;
  if (prot & PROT_EXEC)
    rm_flags |= Rm::F::X;

  err = r->attach(&data, len, rm_flags,
                  /* ... dataspace capability and offset ... */);

  DEBUG_LOG(debug_mmap, {
      // ...
    });

  // Release the temporary area reservation again and report the result.
  if (overmap_area != L4_INVALID_ADDR)
    r->free_area(overmap_area);
  // ...
}
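
// Illustrative sketch (not part of vfs.cc): typical POSIX calls that end up
// in Vfs::mmap2 once libc forwards them to this backend. The mapping of
// flags to the branches above follows the code; the forwarding path itself
// is assumed, not shown here.
#include <sys/mman.h>

static void *example_mappings()
{
  // MAP_ANONYMOUS | MAP_PRIVATE: served from alloc_anon_mem() above.
  void *anon = mmap(nullptr, 1 << 20, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  // PROT_NONE: only reserves an area in the region map (see the
  // reserve_area branch above); no memory is attached.
  void *reserved = mmap(nullptr, 1 << 20, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (reserved != MAP_FAILED)
    munmap(reserved, 1 << 20);

  return anon;
}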
// Auto_area (excerpt): small RAII helper used by mremap below; it reserves
// an area in the region map and frees the reservation again when it goes
// out of scope.
    int e = r->reserve_area(&a, sz, flags);
    // ...
    ~Auto_area() { free(); }
int
Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
            void **new_addr) L4_NOTHROW
{
  using namespace L4Re;

  DEBUG_LOG(debug_mmap, {
      // ...
    });

  // MREMAP_FIXED only makes sense together with MREMAP_MAYMOVE, and the old
  // address must be page aligned ('oa' is its page-truncated value).
  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
    return -EINVAL;

  if (oa != reinterpret_cast<l4_addr_t>(old_addr))
    return -EINVAL;

  bool const fixed = flags & MREMAP_FIXED;
  bool const maymove = flags & MREMAP_MAYMOVE;
  // ...

  // Shrinking or keeping the size needs no relocation at all.
  if (new_size < old_size)
    {
      *new_addr = old_addr;
      return munmap(reinterpret_cast<void *>(oa + new_size),
                    old_size - new_size);
    }

  if (new_size == old_size)
    {
      *new_addr = old_addr;
      return 0;
    }

  // Reserve the old range as an area so its mappings cannot change while
  // they are being moved.
  Auto_area old_area(r);
  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
  // ...

  Auto_area new_area(r);

  if (fixed)
    {
      // The caller dictates the new address ('na'); it must be page aligned
      // and the target range must be reservable.
      if (na != reinterpret_cast<l4_addr_t>(*new_addr))
        return -EINVAL;

      int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
      // ...
    }
  else
    {
      // Check whether the mapping can simply grow in place: try to reserve
      // the range directly behind it and inspect what is mapped there.
      unsigned long ts = new_size - old_size;
      long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
      // ...

      L4Re::Rm::Offset toffs;
      L4Re::Rm::Flags tflags;
      // ...
      err = r->find(&ta, &ts, &toffs, &tflags, &tds);

      if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
        {
          // Nothing (or only our own reservation) behind the mapping: grow
          // in place and pad at the end of the old range.
          pad_addr = oa + old_size;
          *new_addr = old_addr;
        }
      else
        {
          // Otherwise search for a completely new range; only allowed when
          // MREMAP_MAYMOVE is set.
          err = new_area.reserve(0, new_size, Rm::F::Search_addr);
          // ...
          pad_addr = new_area.a + old_size;
          *new_addr = reinterpret_cast<void *>(new_area.a);
        }
    }

  if (old_area.is_valid())
    {
      // Move the existing mappings: walk every region inside the old area,
      // attach it at the corresponding address in the new area, then detach
      // it from the old one, keeping the dataspace references consistent.
      unsigned long size = old_size;
      // ...
      while (r->find(&a, &s, &o, &f, &ds) >= 0 && !(f & Rm::F::In_area))
        {
          // Clamp the found region to the old range.
          auto d = old_area.a - a;
          // ...
          if (a + s > old_area.a + old_size)
            s = old_area.a + old_size - a;

          l4_addr_t x = a - old_area.a + new_area.a;
          int err = r->attach(&x, s, Rm::F::In_area | f,
                              /* ... */);
          // ...
          L4Re::virt_cap_alloc->take(ds);

          err = r->detach(a, s, &ds, This_task,
                          Rm::Detach_exact | Rm::Detach_keep);
          // ...
          switch (err & Rm::Detach_result_mask)
            {
            case Rm::Split_ds:
              L4Re::virt_cap_alloc->take(ds);
              break;
            case Rm::Detached_ds:
              L4Re::virt_cap_alloc->release(ds);
              break;
            }
          // ...
        }
      // ...
    }

  // Pad the grown part with fresh anonymous memory.
  if (old_size < new_size)
    {
      l4_addr_t const pad_sz = new_size - old_size;
      // ...
      int err = alloc_anon_mem(pad_sz, &tds, &toffs);
      // ...
      err = r->attach(&pad_addr, pad_sz,
                      Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
                      /* ... */);
      // ...
    }
  // ...
}
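
// Illustrative sketch (not part of vfs.cc): growing a mapping through the
// mremap() call this backend implements (a non-standard extension declared
// in <sys/mman.h>). Without MREMAP_MAYMOVE the call only succeeds if the
// range behind the mapping is free (the r->find() check above); with
// MREMAP_MAYMOVE the backend may relocate the mapping and returns the new
// address.
#include <sys/mman.h>

static void *example_grow(void *old_addr, size_t old_size)
{
  // Try to grow in place first.
  void *p = mremap(old_addr, old_size, 2 * old_size, 0);
  if (p != MAP_FAILED)
    return p;

  // Otherwise allow the mapping to move.
  return mremap(old_addr, old_size, 2 * old_size, MREMAP_MAYMOVE);
}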
int
Vfs::mprotect(const void *, size_t, int prot) L4_NOTHROW
{
  // Changing the rights of an existing mapping is not supported; only
  // requests that do not ask for write access are reported as success.
  return (prot & PROT_WRITE) ? -1 : 0;
}
extern void *l4re_env_posix_vfs_ops
  __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
// Real_mount_tree: mount-tree node type for user-created mount points; it
// allocates and frees its memory through the exported VFS operations table.
class Real_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  explicit Real_mount_tree(char *n) : Mount_tree(n) {}

  void *operator new (size_t size)
  { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

  void operator delete (void *mem)
  { __rtld_l4re_env_posix_vfs_ops->free(mem); }
};

// Vfs::mount (excerpt): resolve 'path' as far as possible in the existing
// mount tree, create the missing intermediate nodes, and mount 'dir' at the
// final node.
  using L4Re::Vfs::Mount_tree;
  using L4Re::Vfs::Path;
  // ...
  Path p = root->lookup(Path(path), &base);

  while (!p.empty())
    {
      Path f = p.strip_first();
      // ...
      char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
      // ...
      auto nt = cxx::make_ref_obj<Real_mount_tree>(name);
      if (!nt)
        {
          __rtld_l4re_env_posix_vfs_ops->free(name);
          return -ENOMEM;
        }

      base->add_child_node(nt);
      base = nt;
      // ...
    }
  // ...
}
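
// Illustrative sketch (not part of vfs.cc): using the Ops::mount() entry
// implemented above. The directory object is obtained with openat() on the
// root, mirroring the calls in the Vfs constructor; the source "rom", the
// mount point "/media" and the assumption that openat() returns a negative
// errno on failure are made up for this example.
static int example_mount(L4Re::Vfs::Ops *ops)
{
  Ref_ptr<L4Re::Vfs::File> dir;
  int err = ops->get_root()->openat("rom", 0, 0, &dir);
  if (err < 0)
    return err;

  // Creates the missing Real_mount_tree nodes along the path and mounts
  // 'dir' at the leaf.
  return ops->mount("/media", dir);
}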