#include <l4/cxx/unique_ptr>
#include <l4/l4virtio/virtio.h>
#include <l4/l4virtio/virtio_block.h>
#include <l4/l4virtio/server/l4virtio>
#include <l4/sys/cxx/ipc_epiface>
template <typename Ds_data>
class Block_dev_base;

// A request to read or write data.
template<typename Ds_data>
class Block_request
{
  // One contiguous block of data within a request.
  struct Data_block
  {
    Data_block() = default;
  // Block_request::data_size() (excerpt): walk all descriptors and sum up
  // the payload, excluding the request header and the trailing status byte.
  rp.start(_mem_list, _request, &data);

  unsigned total = data.len;
  ...
      rp.next(_mem_list, &data);
  ...
  if (total < Header_size + 1)
    ...

  return total - Header_size - 1;
  // Block_request::has_more() (excerpt): skip over empty blocks; the last
  // byte of the request is reserved for the status field.
  while (_data.len == 0 && _rp.has_more())
    _rp.next(_mem_list, &_data);

  return (_data.len > 1 || _rp.has_more());
121 "No more data blocks in virtio request");
123 if (_todo_blocks == 0)
127 _rp.
next(_mem_list, &_data);
130 if (_data.len > _max_block_size)
139 _data.addr =
static_cast<char *
>(_data.addr) + out.len;
  // Block_request constructor (excerpt): start processing the descriptor
  // chain and strip the block request header from the first data block.
  : _mem_list(mem_list),
    _todo_blocks(max_blocks),
    _max_block_size(max_block_size)
  {
    _rp.start(mem_list, _request, &_data);

    if (_data.len < Header_size)
      ...

    _data.addr = static_cast<char *>(_data.addr) + Header_size;
    _data.len -= Header_size;
  // Block_request::release_request() (excerpt): write the status byte into
  // the last byte of the request, then mark the head descriptor as consumed.
  if (!_rp.has_more() && _data.len == 0)
    ...

  while (_rp.next(_mem_list, &_data) && _todo_blocks > 0)
    ...

  if (_todo_blocks > 0 && _data.len > 0)
    *(static_cast<l4_uint8_t *>(_data.addr) + _data.len - 1) = status;
  ...
  else if (_data.len > 0)
    *(static_cast<l4_uint8_t *>(_data.addr)) = status;
  ...
  queue->consumed(_request, sz);
  Driver_mem_list_t<Ds_data> *_mem_list;
  Request_processor _rp;
  Virtqueue::Request _request;
  unsigned _todo_blocks;
// Feature bits of a virtio block device.
struct Block_features : public Dev_config::Features
{
  Block_features() = default;

  CXX_BITFIELD_MEMBER( 1, 1, size_max, raw);
  CXX_BITFIELD_MEMBER( 2, 2, seg_max, raw);
  CXX_BITFIELD_MEMBER( 4, 4, geometry, raw);
  CXX_BITFIELD_MEMBER( 5, 5, ro, raw);
  CXX_BITFIELD_MEMBER( 6, 6, blk_size, raw);
  CXX_BITFIELD_MEMBER( 9, 9, flush, raw);
  CXX_BITFIELD_MEMBER(10, 10, topology, raw);
  CXX_BITFIELD_MEMBER(11, 11, config_wce, raw);
  CXX_BITFIELD_MEMBER(13, 13, discard, raw);
  CXX_BITFIELD_MEMBER(14, 14, write_zeroes, raw);
// Base class for virtio block devices.
template <typename Ds_data>
class Block_dev_base
{
  ...
  Dev_config_t<l4virtio_block_config_t> _dev_config;
  ...

  Block_features negotiated_features() const
  { return _dev_config.negotiated_features(0); }

  Block_features device_features() const
  { return _dev_config.host_features(0); }

  void set_device_features(Block_features df)
  { _dev_config.host_features(0) = df.raw; }
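Reading features follows the same pattern as setting them. For example, a device implementation might only honour the guest-visible writeback toggle if the corresponding feature was actually negotiated. A minimal sketch; use_writeback_cache() is a hypothetical name, while negotiated_features() is shown above and get_writeback()/set_config_wce() appear further down in this listing:

  // Sketch only: use_writeback_cache() is not part of this header.
  bool use_writeback_cache()
  {
    Block_features nf = negotiated_features();
    if (!nf.config_wce())        // writeback toggle was not negotiated
      return false;
    return get_writeback() == 1; // 1 = writeback, 0 = writethrough
  }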
  // set_size_max(sz): maximum size of any single segment.
  _dev_config.priv_config()->size_max = sz;
  Block_features df = device_features();
  df.size_max() = true;
  set_device_features(df);

  _max_block_size = sz;

  // set_seg_max(sz): maximum number of segments per request.
  _dev_config.priv_config()->seg_max = sz;
  Block_features df = device_features();
  df.seg_max() = true;
  set_device_features(df);

  // set_geometry(cylinders, heads, sectors): disk geometry reported to the
  // client (pc is the private config space, i.e. _dev_config.priv_config()).
  pc->geometry.cylinders = cylinders;
  pc->geometry.heads = heads;
  pc->geometry.sectors = sectors;
  Block_features df = device_features();
  df.geometry() = true;
  set_device_features(df);

  // set_blk_size(sz): disk block size reported to the client.
  _dev_config.priv_config()->blk_size = sz;
  Block_features df = device_features();
  df.blk_size() = true;
  set_device_features(df);

  // set_topology(...): I/O alignment information.
  pc->topology.physical_block_exp = physical_block_exp;
  pc->topology.alignment_offset = alignment_offset;
  pc->topology.min_io_size = min_io_size;
  pc->topology.opt_io_size = opt_io_size;
  Block_features df = device_features();
  df.topology() = true;
  set_device_features(df);

  // set_flush(): enable the flush command.
  Block_features df = device_features();
  df.flush() = true;
  set_device_features(df);

  // set_config_wce(writeback): enable the writeback toggle.
  pc->writeback = writeback;
  Block_features df = device_features();
  df.config_wce() = true;
  set_device_features(df);

  // get_writeback(): read the writeback field from the config space.
  return pc->writeback;

  // set_discard(...): constraints for the discard command.
  pc->max_discard_sectors = max_discard_sectors;
  pc->max_discard_seg = max_discard_seg;
  pc->discard_sector_alignment = discard_sector_alignment;
  Block_features df = device_features();
  df.discard() = true;
  set_device_features(df);

  // set_write_zeroes(...): constraints for the write-zeroes command.
  pc->max_write_zeroes_sectors = max_write_zeroes_sectors;
  pc->max_write_zeroes_seg = max_write_zeroes_seg;
  pc->write_zeroes_may_unmap = write_zeroes_may_unmap;
  Block_features df = device_features();
  df.write_zeroes() = true;
  set_device_features(df);
  // Block_dev_base constructor (excerpt): advertise indirect descriptor
  // support and the disk capacity.
  Block_features df(0);
  df.ring_indirect_desc() = true;
  set_device_features(df);

  _dev_config.priv_config()->capacity = capacity;
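All configuration setters above follow the same pattern: write the value into the device-private configuration space and advertise the matching feature bit. A minimal sketch of how a concrete device might configure itself during setup; the function name setup_disk() and all numeric values are illustrative only:

  // Sketch: typical one-time configuration from within a subclass.
  void setup_disk()
  {
    set_size_max(0x400000);      // at most 4 MiB per segment
    set_seg_max(64);             // at most 64 segments per request
    set_blk_size(4096);          // report a 4 KiB block size
    set_geometry(1024, 16, 63);  // cylinders, heads, sectors
    set_flush();                 // offer the flush command
  }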
  // finalize_request() (excerpt): write back the status, release the
  // request's descriptors and notify the client.
  if (_dev_config.status().fail_state() || !_queue.ready())
    ...
  if (req->release_request(&_queue, status, sz) < 0)
    ...
  _kick_guest_irq->trigger();

  // reconfig_queue() (excerpt): callback for a client queue-config request.
  if (idx == 0 && this->setup_queue(&_queue, 0, _vq_max))
    ...

  // reset() (excerpt): reset queue and device configuration.
  _dev_config.reset_queue(0, _vq_max);
  _dev_config.reset_hdr();

  bool check_for_new_requests()
  {
    ...
    if (_dev_config.status().fail_state())
      ...
  }

  // get_request() (excerpt): return one request if available, wrapped into
  // a Block_request.
  cxx::unique_ptr<Request> req;
  ...
  if (_dev_config.status().fail_state())
    ...

  cxx::unique_ptr<Request> cur{
    new Request(r, &(this->_mem_info), _vq_max, _max_block_size)};
  ...
  req = cxx::move(cur);
  void register_single_driver_irq() override
  {
    ...
      L4Re::chkcap(this->server_iface()->template rcv_cap<L4::Irq>(0)));
  }

  void trigger_driver_config_irq() override
  {
    ...
    _kick_guest_irq->trigger();
  }

  bool check_queues() override
// Block_dev: Block_dev_base combined with an IPC server interface
// (second base class elided in this excerpt).
template <typename Ds_data>
class Block_dev
: public Block_dev_base<Ds_data>,
  ...
{
  // Epiface for the notification (kick) IRQ sent by the driver.
  ...
    Irq_object(Block_dev<Ds_data> *parent) : _parent(parent) {}
    ...
    Block_dev<Ds_data> *_parent;

  Irq_object _irq_handler;
  ...
  { return &_irq_handler; }

  // Block_dev constructor (excerpt): forward the device parameters to the
  // base class.
  : Block_dev_base<Ds_data>(vendor, queue_size, capacity, read_only),
    ...

  // Registration helper (excerpt): optionally register the device under a
  // name in the application's environment.
  ...
    char const *service = 0)

  typedef Block_request<Ds_data> Request;

  // Callback a concrete device must implement to process a single request.
  virtual bool process_request(cxx::unique_ptr<Request> &&req) = 0;
  ...
  if (!this->process_request(cxx::move(req)))
    ...

  // Notification IRQ handed out to the driver (excerpt).
  return L4::cap_cast<L4::Irq>(_irq_handler.obj_cap());
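A concrete device derives from Block_dev, forwards its identification parameters to the constructor and implements the pure virtual callbacks (process_request(), queue_stopped(), reset_device(); see the reference below). A minimal sketch, assuming the classes live in the L4virtio::Svr namespace; the class name, the Ds_data placeholder and all numeric values are illustrative:

  struct My_ds_data {};                   // placeholder for per-dataspace data

  class My_block_dev : public L4virtio::Svr::Block_dev<My_ds_data>
  {
  public:
    My_block_dev()
    // vendor ID 0x44, queue size 256, capacity in 512-byte sectors, read-only
    : L4virtio::Svr::Block_dev<My_ds_data>(0x44, 256, 0x100000, true)
    {}

    bool process_request(cxx::unique_ptr<Request> &&req) override
    {
      // Handle the request (synchronously here) and report success.
      finalize_request(cxx::move(req), 0);
      return true;
    }

    void reset_device() override {}                 // nothing to reset in the sketch
    bool queue_stopped() override { return false; } // never stop processing
  };

The device is then made visible to clients by registering it at an object registry (cf. register_obj() in the reference below), after which the driver side connects via the usual L4virtio transport.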
Referenced types and functions (cross-reference for the listing above):

L4::Cap
    C++ interface for capabilities.
Server_iface
    Interface for server-loop related functions.
L4::Registry_iface
    Abstract interface for object registries.
virtual L4::Cap<L4::Irq> register_irq_obj(L4::Epiface *o) = 0
    Register o as server-side object for asynchronous IRQs.
virtual L4::Cap<void> register_obj(L4::Epiface *o, char const *service) = 0
    Register an L4::Epiface for an IPC gate available in the application's
    environment under the name service.
L4::Runtime_error
    Exception for an abstract runtime error.

Block_dev_base
    Base class for virtio block devices.
Block_dev_base(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity, bool read_only)
    Create a new virtio block device.
void set_size_max(l4_uint32_t sz)
    Sets the maximum size of any single segment reported to the client.
void set_seg_max(l4_uint32_t sz)
    Sets the maximum number of segments in a request that is reported to the client.
void set_geometry(l4_uint16_t cylinders, l4_uint8_t heads, l4_uint8_t sectors)
    Set the disk geometry that is reported to the client.
void set_blk_size(l4_uint32_t sz)
    Sets the disk block size to be reported to the client.
void set_topology(l4_uint8_t physical_block_exp, l4_uint8_t alignment_offset, l4_uint32_t min_io_size, l4_uint32_t opt_io_size)
    Sets the I/O alignment information reported back to the client.
void set_flush()
    Enables the flush command.
void set_config_wce(l4_uint8_t writeback)
    Sets the cache mode and enables the writeback toggle.
l4_uint8_t get_writeback()
    Get the writeback field from the configuration space.
void set_discard(l4_uint32_t max_discard_sectors, l4_uint32_t max_discard_seg, l4_uint32_t discard_sector_alignment)
    Sets constraints for and enables the discard command.
void set_write_zeroes(l4_uint32_t max_write_zeroes_sectors, l4_uint32_t max_write_zeroes_seg, l4_uint8_t write_zeroes_may_unmap)
    Sets constraints for and enables the write zeroes command.
virtual bool queue_stopped() = 0
    Return true if the queues should not be processed further.
virtual void reset_device() = 0
    Reset the actual hardware device.
void reset() override
    Reset callback, called to perform a device reset.
int reconfig_queue(unsigned idx) override
    Callback for a client queue-config request.
cxx::unique_ptr<Request> get_request()
    Return one request if available.
void finalize_request(cxx::unique_ptr<Request> req, unsigned sz, l4_uint8_t status = L4VIRTIO_BLOCK_S_OK)
    Releases resources related to a request and notifies the client.

Block_request
    A request to read or write data.
l4virtio_block_header_t const &header() const
    Return the block request header.
unsigned data_size() const
    Compute the total size of the data in the request.
bool has_more()
    Check if the request contains more data blocks.
Data_block next_block()
    Return the next block in the scatter-gather list.
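Together these four members form the per-request API a device sees inside process_request(). Continuing the My_block_dev sketch from above, a fuller process_request() for a read-only device might drain the request like this; copy_out() is a hypothetical backend helper, the sector arithmetic assumes the virtio convention of 512-byte sectors, and the sketch does not inspect the request type:

  bool process_request(cxx::unique_ptr<Request> &&req) override
  {
    l4virtio_block_header_t const &hdr = req->header();
    l4_uint64_t sector = hdr.sector;        // position in 512-byte sectors
    unsigned const sz = req->data_size();   // payload size for finalize_request

    while (req->has_more())
      {
        Request::Data_block b = req->next_block();
        copy_out(sector, b.addr, b.len);    // hypothetical: disk -> guest buffer
        sector += b.len / 512;
      }

    finalize_request(cxx::move(req), sz);   // status defaults to L4VIRTIO_BLOCK_S_OK
    return true;
  }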
L4virtio::Svr::Device
    Server-side L4-VIRTIO device stub.
void device_error()
    Transition the device into the DEVICE_NEEDS_RESET state.
Mem_list _mem_info
    Memory region list.
bool setup_queue(Virtqueue *q, unsigned qn, unsigned num_max)
    Enable/disable the specified queue.
virtual L4::Cap<L4::Irq> device_notify_irq(unsigned idx)
    Callback to gather the device notification IRQ (multi IRQ).
void reset_queue_config(unsigned idx, unsigned num_max, bool inc_generation = false)
    Trigger a reset of the configuration space for queue idx.

Driver_mem_list_t
    List of driver memory regions assigned to a single L4-VIRTIO transport instance.
Driver_mem_region_t
    Region of driver memory that shall be managed locally.
T *local(Ptr<T> p) const
    Get the local address for driver address p.

Request_processor
    Encapsulate the state for processing a VIRTIO request.
void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
    Start processing a new request.
bool next(DESC_MAN *dm, ARGS... args)
    Switch to the next descriptor in a descriptor chain.
bool has_more() const
    Are there more chained descriptors?
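Block_request drives this API internally (see the data_size() and has_more() excerpts above). Written out, the typical processing loop looks like the following sketch; total_payload() is a hypothetical name and the member variables are those of Block_request:

  // Sketch modelled on the data_size() excerpt: walk the descriptor chain
  // and accumulate the length of every data block.
  unsigned total_payload() const
  {
    Request_processor rp;
    Data_block data;

    rp.start(_mem_list, _request, &data);   // load the first descriptor
    unsigned total = data.len;

    while (rp.has_more())                   // follow the chain to its end
      {
        rp.next(_mem_list, &data);
        total += data.len;
      }

    return total;   // still includes the request header and status byte
  }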
Virtqueue
    Virtqueue implementation for the device.
bool ready() const
    Test if this queue is in working state.
bool desc_avail() const
    Test for available descriptors.
Request next_avail()
    Get the next available descriptor from the available ring.
bool no_notify_guest() const
    Get the no-IRQ flag of this queue.
void disable()
    Completely disable the queue.
Virtqueue::Desc
    Descriptor in the descriptor table.
Ptr<void> addr
    Address stored in the descriptor.
l4_uint32_t len
    Length of the described buffer.
Bad_descriptor
    Exception used by Queue to indicate descriptor errors; Bad_size flags an
    invalid size of a memory block.

l4_uint8_t (unsigned char)
    Unsigned 8-bit value.
l4_uint16_t (unsigned short int)
    Unsigned 16-bit value.
l4_uint32_t (unsigned int)
    Unsigned 32-bit value.
l4_uint64_t (unsigned long long)
    Unsigned 64-bit value.
L4_EEXIST
    Already exists.
L4_EINVAL
    Invalid argument.
L4VIRTIO_BLOCK_S_OK
    Request finished successfully.
L4VIRTIO_FEATURE_VERSION_1
    Virtio protocol version 1 supported. Must be 1 for L4virtio.
L4VIRTIO_ID_BLOCK
    General block device.
L4VIRTIO_IRQ_STATUS_VRING
    VRING IRQ pending flag.
L4VIRTIO_IRQ_STATUS_CONFIG
    CONFIG IRQ pending flag.
l4_uint32_t raw
    The raw value of the features bitmap.
l4virtio_block_config_t
    Device configuration for block devices.

L4::Detail::Unique_cap_impl<T, Smart_cap_auto<L4_FP_ALL_SPACES>> Unique_cap
    Unique capability that implements automatic free and unmap of the
    capability selector (Unique_cap / Unique_del_cap).
long chksys(long err, char const *extra = "", long ret = 0)
    Generate a C++ exception on error.
T chkcap(T &&cap, char const *extra = "", long err = -L4_ENOMEM)
    Check for a valid capability or raise a C++ exception.
L4::Epiface
    Base class for interface implementations.
L4::Epiface_t
    Epiface implementation for Kobject-based interface implementations.
L4::Irqep_t
    Epiface implementation for interrupt handlers.
Server_iface *server_iface() const
    Get a pointer to the server interface at which the object is currently registered.
L4-VIRTIO Transport C++ API.