  CXX_BITFIELD_MEMBER( 0, 0, need_csum, raw);
  CXX_BITFIELD_MEMBER( 1, 1, data_valid, raw);

  // ...
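
  // Feature bits defined by the virtio network device (VIRTIO_NET_F_*),
  // exposed as named single-bit accessors over the raw feature word: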
  CXX_BITFIELD_MEMBER( 0, 0, csum, raw);
  CXX_BITFIELD_MEMBER( 1, 1, guest_csum, raw);
  CXX_BITFIELD_MEMBER( 5, 5, mac, raw);
  CXX_BITFIELD_MEMBER( 6, 6, gso, raw);
  CXX_BITFIELD_MEMBER( 7, 7, guest_tso4, raw);
  CXX_BITFIELD_MEMBER( 8, 8, guest_tso6, raw);
  CXX_BITFIELD_MEMBER( 9, 9, guest_ecn, raw);
  CXX_BITFIELD_MEMBER(10, 10, guest_ufo, raw);
  CXX_BITFIELD_MEMBER(11, 11, host_tso4, raw);
  CXX_BITFIELD_MEMBER(12, 12, host_tso6, raw);
  CXX_BITFIELD_MEMBER(13, 13, host_ecn, raw);
  CXX_BITFIELD_MEMBER(14, 14, host_ufo, raw);
  CXX_BITFIELD_MEMBER(15, 15, mrg_rxbuf, raw);
  CXX_BITFIELD_MEMBER(16, 16, status, raw);
  CXX_BITFIELD_MEMBER(17, 17, ctrl_vq, raw);
  CXX_BITFIELD_MEMBER(18, 18, ctrl_rx, raw);
  CXX_BITFIELD_MEMBER(19, 19, ctrl_vlan, raw);
  CXX_BITFIELD_MEMBER(20, 20, ctrl_rx_extra, raw);
  CXX_BITFIELD_MEMBER(21, 21, guest_announce, raw);
  CXX_BITFIELD_MEMBER(22, 22, mq, raw);
  CXX_BITFIELD_MEMBER(23, 23, ctrl_mac_addr, raw);

  // ...
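
  // Device-specific configuration space of the network device; the layout
  // corresponds to struct virtio_net_config from the virtio specification.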
  struct Net_config_space
  {
    // ...
  };

  L4virtio::Svr::Dev_config_t<Net_config_space> _dev_config;

  // ...
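
    // Assemble the feature set offered to the driver; hf.raw is written to
    // host feature word 0 below.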
    hf.ring_indirect_desc() = true;
    hf.mrg_rxbuf() = true;
    // ...
    hf.host_tso4() = true;
    hf.host_tso6() = true;
    hf.host_ufo() = true;
    hf.host_ecn() = true;

    hf.guest_csum() = true;
    hf.guest_tso4() = true;
    hf.guest_tso6() = true;
    hf.guest_ufo() = true;
    hf.guest_ecn() = true;

    // ...
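
    // Publish the assembled feature bits as host feature word 0 and
    // initialize the virtio config header.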
    _dev_config.host_features(0) = hf.raw;

    _dev_config.reset_hdr();

  // ...

    _dev_config.reset_hdr();

  // ...
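
  // Helper yielding the number of elements of a C array; used to
  // bounds-check queue indices against _q.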
  template<typename T, unsigned N>
  static unsigned array_length(T (&)[N]) { return N; }

  // ...
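
  // Queue (re)configuration requested by the driver: log the current device
  // status and validate the queue index.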
    Dbg(Dbg::Virtio, Dbg::Info, "Virtio")
      .printf("(%p): Reconfigure queue %d (%p): Status: %02x\n",
              this, index, _q + index, _dev_config.status().raw);

    if (index >= array_length(_q))
      // ...
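
  // Print eight 32-bit feature words in hex; used for the host and guest
  // feature maps in the status dump below.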
  void dump_features(Dbg const &dbg, const volatile l4_uint32_t *p)
  {
    dbg.cprintf("%08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
                p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
  }

  // ...
    Dbg info(Dbg::Virtio, Dbg::Info, "Virtio");
    if (!info.is_active())
      // ...

    auto *hdr = _dev_config.hdr();

    info.printf("Device %p running (%02x)\n\thost features: ",
                this, _dev_config.status().raw);
    dump_features(info, hdr->dev_features_map);
    info.printf("\tguest features: ");
    dump_features(info, hdr->driver_features_map);

  // ...
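
    // Cache feature word 0 as negotiated between device and driver.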
    _negotiated_features = _dev_config.negotiated_features(0);

  // ...
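
  // Reflects the DEVICE_NEEDS_RESET bit of the device status register.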
  bool device_needs_reset() const
  { return _dev_config.status().device_needs_reset(); }

  // ...
    Err().printf("failed to start queues\n");

  // ...

      L4Re::chkcap(server_iface()->template rcv_cap<L4::Irq>(0)));

  // ...

    _kick_guest_irq->trigger();

  // ...
    Virtqueue *q = static_cast<Virtqueue*>(queue);

  // ...

    _kick_guest_irq->trigger();

  // ...
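
  // Re-enable guest notifications on all queues and trigger the kick IRQ
  // if a kick became pending while notifications were disabled.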
  void kick_emit_and_enable()
  {
    bool kick_pending = false;
    // ...
    kick_pending |= q.kick_enable_get_pending();
    // ...
    _kick_guest_irq->trigger();
    // ...
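
  // Disable guest notifications on all queues, remembering kicks that
  // arrive in the meantime.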
  void kick_disable_and_remember()
  {
    // ...
    q.kick_disable_and_remember();
    // ...
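
  // Feature word 0 as agreed on with the driver during negotiation.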
  Features negotiated_features() const
  { return _negotiated_features; }

  // ...
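
  // Accessors for the transmit and receive virtqueues (_q[Tx], _q[Rx]).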
  Virtqueue *tx_q() { return &_q[Tx]; }

  Virtqueue *rx_q() { return &_q[Rx]; }

  Virtqueue const *tx_q() const { return &_q[Tx]; }

  Virtqueue const *rx_q() const { return &_q[Rx]; }

  // ...
  Features _negotiated_features;