diff --git a/sys/dev/virtio/balloon/virtio_balloon.c b/sys/dev/virtio/balloon/virtio_balloon.c
index 0973528887c5..848dd4e9a7f5 100644
--- a/sys/dev/virtio/balloon/virtio_balloon.c
+++ b/sys/dev/virtio/balloon/virtio_balloon.c
@@ -1,581 +1,598 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO memory balloon devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/balloon/virtio_balloon.h>
#include "virtio_if.h"
struct vtballoon_softc {
device_t vtballoon_dev;
struct mtx vtballoon_mtx;
uint64_t vtballoon_features;
uint32_t vtballoon_flags;
#define VTBALLOON_FLAG_DETACH 0x01
struct virtqueue *vtballoon_inflate_vq;
struct virtqueue *vtballoon_deflate_vq;
uint32_t vtballoon_desired_npages;
uint32_t vtballoon_current_npages;
TAILQ_HEAD(,vm_page) vtballoon_pages;
struct thread *vtballoon_td;
uint32_t *vtballoon_page_frames;
int vtballoon_timeout;
};
static struct virtio_feature_desc vtballoon_feature_desc[] = {
{ VIRTIO_BALLOON_F_MUST_TELL_HOST, "MustTellHost" },
{ VIRTIO_BALLOON_F_STATS_VQ, "StatsVq" },
{ VIRTIO_BALLOON_F_DEFLATE_ON_OOM, "DeflateOnOOM" },
{ 0, NULL }
};
static int vtballoon_probe(device_t);
static int vtballoon_attach(device_t);
static int vtballoon_detach(device_t);
static int vtballoon_config_change(device_t);
-static void vtballoon_negotiate_features(struct vtballoon_softc *);
+static int vtballoon_negotiate_features(struct vtballoon_softc *);
+static int vtballoon_setup_features(struct vtballoon_softc *);
static int vtballoon_alloc_virtqueues(struct vtballoon_softc *);
static void vtballoon_vq_intr(void *);
static void vtballoon_inflate(struct vtballoon_softc *, int);
static void vtballoon_deflate(struct vtballoon_softc *, int);
static void vtballoon_send_page_frames(struct vtballoon_softc *,
struct virtqueue *, int);
static void vtballoon_pop(struct vtballoon_softc *);
static void vtballoon_stop(struct vtballoon_softc *);
static vm_page_t
vtballoon_alloc_page(struct vtballoon_softc *);
static void vtballoon_free_page(struct vtballoon_softc *, vm_page_t);
static int vtballoon_sleep(struct vtballoon_softc *);
static void vtballoon_thread(void *);
-static void vtballoon_add_sysctl(struct vtballoon_softc *);
+static void vtballoon_setup_sysctl(struct vtballoon_softc *);
#define vtballoon_modern(_sc) \
(((_sc)->vtballoon_features & VIRTIO_F_VERSION_1) != 0)
/* Features desired/implemented by this driver. */
#define VTBALLOON_FEATURES VIRTIO_BALLOON_F_MUST_TELL_HOST
/* Timeout between retries when the balloon needs inflating. */
#define VTBALLOON_LOWMEM_TIMEOUT hz
/*
* Maximum number of pages we'll request to inflate or deflate
* the balloon in one virtqueue request. Both Linux and NetBSD
* have settled on 256, doing up to 1MB at a time.
*/
#define VTBALLOON_PAGES_PER_REQUEST 256
/* Must be able to fit all page frames in one page (segment). */
CTASSERT(VTBALLOON_PAGES_PER_REQUEST * sizeof(uint32_t) <= PAGE_SIZE);
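/*
 * Worked example, assuming 4 KB pages: one request carries up to 256
 * page-frame numbers, 256 * sizeof(uint32_t) = 1 KB of PFNs, which the
 * assertion above guarantees fits in a single page (one sglist segment),
 * while the pages those PFNs describe cover 256 * 4 KB = 1 MB of guest
 * memory, matching the "up to 1MB at a time" note above.
 */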
#define VTBALLOON_MTX(_sc) &(_sc)->vtballoon_mtx
#define VTBALLOON_LOCK_INIT(_sc, _name) mtx_init(VTBALLOON_MTX((_sc)), _name, \
"VirtIO Balloon Lock", MTX_DEF)
#define VTBALLOON_LOCK(_sc) mtx_lock(VTBALLOON_MTX((_sc)))
#define VTBALLOON_UNLOCK(_sc) mtx_unlock(VTBALLOON_MTX((_sc)))
#define VTBALLOON_LOCK_DESTROY(_sc) mtx_destroy(VTBALLOON_MTX((_sc)))
static device_method_t vtballoon_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtballoon_probe),
DEVMETHOD(device_attach, vtballoon_attach),
DEVMETHOD(device_detach, vtballoon_detach),
/* VirtIO methods. */
DEVMETHOD(virtio_config_change, vtballoon_config_change),
DEVMETHOD_END
};
static driver_t vtballoon_driver = {
"vtballoon",
vtballoon_methods,
sizeof(struct vtballoon_softc)
};
static devclass_t vtballoon_devclass;
DRIVER_MODULE(virtio_balloon, virtio_mmio, vtballoon_driver,
vtballoon_devclass, 0, 0);
DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
vtballoon_devclass, 0, 0);
MODULE_VERSION(virtio_balloon, 1);
MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
VIRTIO_SIMPLE_PNPTABLE(virtio_balloon, VIRTIO_ID_BALLOON,
"VirtIO Balloon Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_balloon);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_balloon);
static int
vtballoon_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_balloon));
}
static int
vtballoon_attach(device_t dev)
{
struct vtballoon_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtballoon_dev = dev;
+ virtio_set_feature_desc(dev, vtballoon_feature_desc);
VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
TAILQ_INIT(&sc->vtballoon_pages);
- vtballoon_add_sysctl(sc);
+ vtballoon_setup_sysctl(sc);
- virtio_set_feature_desc(dev, vtballoon_feature_desc);
- vtballoon_negotiate_features(sc);
+ error = vtballoon_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtballoon_page_frames == NULL) {
error = ENOMEM;
device_printf(dev,
"cannot allocate page frame request array\n");
goto fail;
}
error = vtballoon_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
error = virtio_setup_intr(dev, INTR_TYPE_MISC);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupts\n");
goto fail;
}
error = kthread_add(vtballoon_thread, sc, NULL, &sc->vtballoon_td,
0, 0, "virtio_balloon");
if (error) {
device_printf(dev, "cannot create balloon kthread\n");
goto fail;
}
virtqueue_enable_intr(sc->vtballoon_inflate_vq);
virtqueue_enable_intr(sc->vtballoon_deflate_vq);
fail:
if (error)
vtballoon_detach(dev);
return (error);
}
static int
vtballoon_detach(device_t dev)
{
struct vtballoon_softc *sc;
sc = device_get_softc(dev);
if (sc->vtballoon_td != NULL) {
VTBALLOON_LOCK(sc);
sc->vtballoon_flags |= VTBALLOON_FLAG_DETACH;
wakeup_one(sc);
msleep(sc->vtballoon_td, VTBALLOON_MTX(sc), 0, "vtbdth", 0);
VTBALLOON_UNLOCK(sc);
sc->vtballoon_td = NULL;
}
if (device_is_attached(dev)) {
vtballoon_pop(sc);
vtballoon_stop(sc);
}
if (sc->vtballoon_page_frames != NULL) {
free(sc->vtballoon_page_frames, M_DEVBUF);
sc->vtballoon_page_frames = NULL;
}
VTBALLOON_LOCK_DESTROY(sc);
return (0);
}
static int
vtballoon_config_change(device_t dev)
{
struct vtballoon_softc *sc;
sc = device_get_softc(dev);
VTBALLOON_LOCK(sc);
wakeup_one(sc);
VTBALLOON_UNLOCK(sc);
return (1);
}
-static void
+static int
vtballoon_negotiate_features(struct vtballoon_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtballoon_dev;
features = VTBALLOON_FEATURES;
sc->vtballoon_features = virtio_negotiate_features(dev, features);
- virtio_finalize_features(dev);
+ return (virtio_finalize_features(dev));
+}
+
+static int
+vtballoon_setup_features(struct vtballoon_softc *sc)
+{
+ int error;
+
+ error = vtballoon_negotiate_features(sc);
+ if (error)
+ return (error);
+
+ return (0);
}
static int
vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info[2];
int nvqs;
dev = sc->vtballoon_dev;
nvqs = 2;
VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtballoon_vq_intr, sc,
&sc->vtballoon_inflate_vq, "%s inflate", device_get_nameunit(dev));
VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtballoon_vq_intr, sc,
&sc->vtballoon_deflate_vq, "%s deflate", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
static void
vtballoon_vq_intr(void *xsc)
{
struct vtballoon_softc *sc;
sc = xsc;
VTBALLOON_LOCK(sc);
wakeup_one(sc);
VTBALLOON_UNLOCK(sc);
}
static void
vtballoon_inflate(struct vtballoon_softc *sc, int npages)
{
struct virtqueue *vq;
vm_page_t m;
int i;
vq = sc->vtballoon_inflate_vq;
if (npages > VTBALLOON_PAGES_PER_REQUEST)
npages = VTBALLOON_PAGES_PER_REQUEST;
for (i = 0; i < npages; i++) {
if ((m = vtballoon_alloc_page(sc)) == NULL) {
sc->vtballoon_timeout = VTBALLOON_LOWMEM_TIMEOUT;
break;
}
sc->vtballoon_page_frames[i] =
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
KASSERT(m->a.queue == PQ_NONE,
("%s: allocated page %p on queue", __func__, m));
TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
}
if (i > 0)
vtballoon_send_page_frames(sc, vq, i);
}
static void
vtballoon_deflate(struct vtballoon_softc *sc, int npages)
{
TAILQ_HEAD(, vm_page) free_pages;
struct virtqueue *vq;
vm_page_t m;
int i;
vq = sc->vtballoon_deflate_vq;
TAILQ_INIT(&free_pages);
if (npages > VTBALLOON_PAGES_PER_REQUEST)
npages = VTBALLOON_PAGES_PER_REQUEST;
for (i = 0; i < npages; i++) {
m = TAILQ_FIRST(&sc->vtballoon_pages);
KASSERT(m != NULL, ("%s: no more pages to deflate", __func__));
sc->vtballoon_page_frames[i] =
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
TAILQ_REMOVE(&sc->vtballoon_pages, m, plinks.q);
TAILQ_INSERT_TAIL(&free_pages, m, plinks.q);
}
if (i > 0) {
/* Always tell host first before freeing the pages. */
vtballoon_send_page_frames(sc, vq, i);
while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
TAILQ_REMOVE(&free_pages, m, plinks.q);
vtballoon_free_page(sc, m);
}
}
KASSERT((TAILQ_EMPTY(&sc->vtballoon_pages) &&
sc->vtballoon_current_npages == 0) ||
(!TAILQ_EMPTY(&sc->vtballoon_pages) &&
sc->vtballoon_current_npages != 0),
("%s: bogus page count %d", __func__,
sc->vtballoon_current_npages));
}
static void
vtballoon_send_page_frames(struct vtballoon_softc *sc, struct virtqueue *vq,
int npages)
{
struct sglist sg;
struct sglist_seg segs[1];
void *c;
int error;
sglist_init(&sg, 1, segs);
error = sglist_append(&sg, sc->vtballoon_page_frames,
npages * sizeof(uint32_t));
KASSERT(error == 0, ("error adding page frames to sglist"));
error = virtqueue_enqueue(vq, vq, &sg, 1, 0);
KASSERT(error == 0, ("error enqueuing page frames to virtqueue"));
virtqueue_notify(vq);
/*
* Inflate and deflate operations are done synchronously. The
* interrupt handler will wake us up.
*/
VTBALLOON_LOCK(sc);
while ((c = virtqueue_dequeue(vq, NULL)) == NULL)
msleep(sc, VTBALLOON_MTX(sc), 0, "vtbspf", 0);
VTBALLOON_UNLOCK(sc);
KASSERT(c == vq, ("unexpected balloon operation response"));
}
static void
vtballoon_pop(struct vtballoon_softc *sc)
{
while (!TAILQ_EMPTY(&sc->vtballoon_pages))
vtballoon_deflate(sc, sc->vtballoon_current_npages);
}
static void
vtballoon_stop(struct vtballoon_softc *sc)
{
virtqueue_disable_intr(sc->vtballoon_inflate_vq);
virtqueue_disable_intr(sc->vtballoon_deflate_vq);
virtio_stop(sc->vtballoon_dev);
}
static vm_page_t
vtballoon_alloc_page(struct vtballoon_softc *sc)
{
vm_page_t m;
m = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
if (m != NULL)
sc->vtballoon_current_npages++;
return (m);
}
static void
vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m)
{
vm_page_free(m);
sc->vtballoon_current_npages--;
}
static uint32_t
vtballoon_desired_size(struct vtballoon_softc *sc)
{
uint32_t desired;
desired = virtio_read_dev_config_4(sc->vtballoon_dev,
offsetof(struct virtio_balloon_config, num_pages));
if (vtballoon_modern(sc))
return (desired);
else
return (le32toh(desired));
}
static void
vtballoon_update_size(struct vtballoon_softc *sc)
{
uint32_t npages;
npages = sc->vtballoon_current_npages;
if (!vtballoon_modern(sc))
npages = htole32(npages);
virtio_write_dev_config_4(sc->vtballoon_dev,
offsetof(struct virtio_balloon_config, actual), npages);
}
static int
vtballoon_sleep(struct vtballoon_softc *sc)
{
int rc, timeout;
uint32_t current, desired;
rc = 0;
current = sc->vtballoon_current_npages;
VTBALLOON_LOCK(sc);
for (;;) {
if (sc->vtballoon_flags & VTBALLOON_FLAG_DETACH) {
rc = 1;
break;
}
desired = vtballoon_desired_size(sc);
sc->vtballoon_desired_npages = desired;
/*
* If given, use non-zero timeout on the first time through
* the loop. On subsequent times, timeout will be zero so
* we will reevaluate the desired size of the balloon and
* break out to retry if needed.
*/
timeout = sc->vtballoon_timeout;
sc->vtballoon_timeout = 0;
if (current > desired)
break;
if (current < desired && timeout == 0)
break;
msleep(sc, VTBALLOON_MTX(sc), 0, "vtbslp", timeout);
}
VTBALLOON_UNLOCK(sc);
return (rc);
}
static void
vtballoon_thread(void *xsc)
{
struct vtballoon_softc *sc;
uint32_t current, desired;
sc = xsc;
for (;;) {
if (vtballoon_sleep(sc) != 0)
break;
current = sc->vtballoon_current_npages;
desired = sc->vtballoon_desired_npages;
if (desired != current) {
if (desired > current)
vtballoon_inflate(sc, desired - current);
else
vtballoon_deflate(sc, current - desired);
vtballoon_update_size(sc);
}
}
kthread_exit();
}
static void
-vtballoon_add_sysctl(struct vtballoon_softc *sc)
+vtballoon_setup_sysctl(struct vtballoon_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vtballoon_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "desired",
CTLFLAG_RD, &sc->vtballoon_desired_npages, sizeof(uint32_t),
"Desired balloon size in pages");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "current",
CTLFLAG_RD, &sc->vtballoon_current_npages, sizeof(uint32_t),
"Current balloon size in pages");
}
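The change above is the pattern repeated for each driver in this diff: feature negotiation now returns the result of virtio_finalize_features() instead of discarding it, and a setup step propagates any error so attach can fail cleanly. A minimal sketch of the pattern, with a hypothetical vtfoo driver (the names and the placeholder feature mask are illustrations, not part of this diff):

#define VTFOO_FEATURES VIRTIO_F_VERSION_1	/* placeholder feature mask */

static int
vtfoo_negotiate_features(struct vtfoo_softc *sc)
{
	device_t dev;

	dev = sc->vtfoo_dev;
	sc->vtfoo_features = virtio_negotiate_features(dev, VTFOO_FEATURES);
	/* Finalization can fail; return the error instead of ignoring it. */
	return (virtio_finalize_features(dev));
}

static int
vtfoo_setup_features(struct vtfoo_softc *sc)
{
	int error;

	error = vtfoo_negotiate_features(sc);
	if (error)
		return (error);
	/* Per-driver flags derived from negotiated features go here. */
	return (0);
}

Attach then checks the result and bails out through its fail path, as each driver below does with "cannot setup features".
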
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
index 08df77d6de5b..6056771e3735 100644
--- a/sys/dev/virtio/block/virtio_blk.c
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -1,1468 +1,1478 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO block devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>
#include "virtio_if.h"
struct vtblk_request {
struct virtio_blk_outhdr vbr_hdr;
struct bio *vbr_bp;
uint8_t vbr_ack;
TAILQ_ENTRY(vtblk_request) vbr_link;
};
enum vtblk_cache_mode {
VTBLK_CACHE_WRITETHROUGH,
VTBLK_CACHE_WRITEBACK,
VTBLK_CACHE_MAX
};
struct vtblk_softc {
device_t vtblk_dev;
struct mtx vtblk_mtx;
uint64_t vtblk_features;
uint32_t vtblk_flags;
#define VTBLK_FLAG_INDIRECT 0x0001
#define VTBLK_FLAG_DETACH 0x0002
#define VTBLK_FLAG_SUSPEND 0x0004
#define VTBLK_FLAG_BARRIER 0x0008
#define VTBLK_FLAG_WCE_CONFIG 0x0010
struct virtqueue *vtblk_vq;
struct sglist *vtblk_sglist;
struct disk *vtblk_disk;
struct bio_queue_head vtblk_bioq;
TAILQ_HEAD(, vtblk_request)
vtblk_req_free;
TAILQ_HEAD(, vtblk_request)
vtblk_req_ready;
struct vtblk_request *vtblk_req_ordered;
int vtblk_max_nsegs;
int vtblk_request_count;
enum vtblk_cache_mode vtblk_write_cache;
struct bio_queue vtblk_dump_queue;
struct vtblk_request vtblk_dump_request;
};
static struct virtio_feature_desc vtblk_feature_desc[] = {
{ VIRTIO_BLK_F_BARRIER, "HostBarrier" },
{ VIRTIO_BLK_F_SIZE_MAX, "MaxSegSize" },
{ VIRTIO_BLK_F_SEG_MAX, "MaxNumSegs" },
{ VIRTIO_BLK_F_GEOMETRY, "DiskGeometry" },
{ VIRTIO_BLK_F_RO, "ReadOnly" },
{ VIRTIO_BLK_F_BLK_SIZE, "BlockSize" },
{ VIRTIO_BLK_F_SCSI, "SCSICmds" },
{ VIRTIO_BLK_F_FLUSH, "FlushCmd" },
{ VIRTIO_BLK_F_TOPOLOGY, "Topology" },
{ VIRTIO_BLK_F_CONFIG_WCE, "ConfigWCE" },
{ VIRTIO_BLK_F_MQ, "Multiqueue" },
{ VIRTIO_BLK_F_DISCARD, "Discard" },
{ VIRTIO_BLK_F_WRITE_ZEROES, "WriteZeros" },
{ 0, NULL }
};
static int vtblk_modevent(module_t, int, void *);
static int vtblk_probe(device_t);
static int vtblk_attach(device_t);
static int vtblk_detach(device_t);
static int vtblk_suspend(device_t);
static int vtblk_resume(device_t);
static int vtblk_shutdown(device_t);
static int vtblk_config_change(device_t);
static int vtblk_open(struct disk *);
static int vtblk_close(struct disk *);
static int vtblk_ioctl(struct disk *, u_long, void *, int,
struct thread *);
static int vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void vtblk_strategy(struct bio *);
-static void vtblk_negotiate_features(struct vtblk_softc *);
-static void vtblk_setup_features(struct vtblk_softc *);
+static int vtblk_negotiate_features(struct vtblk_softc *);
+static int vtblk_setup_features(struct vtblk_softc *);
static int vtblk_maximum_segments(struct vtblk_softc *,
struct virtio_blk_config *);
static int vtblk_alloc_virtqueue(struct vtblk_softc *);
static void vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void vtblk_alloc_disk(struct vtblk_softc *,
struct virtio_blk_config *);
static void vtblk_create_disk(struct vtblk_softc *);
static int vtblk_request_prealloc(struct vtblk_softc *);
static void vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *);
static void vtblk_request_enqueue(struct vtblk_softc *,
struct vtblk_request *);
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *);
static void vtblk_request_requeue_ready(struct vtblk_softc *,
struct vtblk_request *);
static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *);
static int vtblk_request_execute(struct vtblk_softc *,
struct vtblk_request *);
static int vtblk_request_error(struct vtblk_request *);
static void vtblk_queue_completed(struct vtblk_softc *,
struct bio_queue *);
static void vtblk_done_completed(struct vtblk_softc *,
struct bio_queue *);
static void vtblk_drain_vq(struct vtblk_softc *);
static void vtblk_drain(struct vtblk_softc *);
static void vtblk_startio(struct vtblk_softc *);
static void vtblk_bio_done(struct vtblk_softc *, struct bio *, int);
static void vtblk_read_config(struct vtblk_softc *,
struct virtio_blk_config *);
static void vtblk_ident(struct vtblk_softc *);
static int vtblk_poll_request(struct vtblk_softc *,
struct vtblk_request *);
static int vtblk_quiesce(struct vtblk_softc *);
static void vtblk_vq_intr(void *);
static void vtblk_stop(struct vtblk_softc *);
static void vtblk_dump_quiesce(struct vtblk_softc *);
static int vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int vtblk_dump_flush(struct vtblk_softc *);
static void vtblk_dump_complete(struct vtblk_softc *);
static void vtblk_set_write_cache(struct vtblk_softc *, int);
static int vtblk_write_cache_enabled(struct vtblk_softc *sc,
struct virtio_blk_config *);
static int vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void vtblk_setup_sysctl(struct vtblk_softc *);
static int vtblk_tunable_int(struct vtblk_softc *, const char *, int);
#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
#define vtblk_htog16(_sc, _val) virtio_htog16(vtblk_modern(_sc), _val)
#define vtblk_htog32(_sc, _val) virtio_htog32(vtblk_modern(_sc), _val)
#define vtblk_htog64(_sc, _val) virtio_htog64(vtblk_modern(_sc), _val)
#define vtblk_gtoh16(_sc, _val) virtio_gtoh16(vtblk_modern(_sc), _val)
#define vtblk_gtoh32(_sc, _val) virtio_gtoh32(vtblk_modern(_sc), _val)
#define vtblk_gtoh64(_sc, _val) virtio_gtoh64(vtblk_modern(_sc), _val)
/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
#define VTBLK_COMMON_FEATURES \
(VIRTIO_BLK_F_SIZE_MAX | \
VIRTIO_BLK_F_SEG_MAX | \
VIRTIO_BLK_F_GEOMETRY | \
VIRTIO_BLK_F_RO | \
VIRTIO_BLK_F_BLK_SIZE | \
VIRTIO_BLK_F_FLUSH | \
VIRTIO_BLK_F_TOPOLOGY | \
VIRTIO_BLK_F_CONFIG_WCE | \
VIRTIO_BLK_F_DISCARD | \
VIRTIO_RING_F_INDIRECT_DESC)
#define VTBLK_MODERN_FEATURES (VTBLK_COMMON_FEATURES)
#define VTBLK_LEGACY_FEATURES (VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
#define VTBLK_MTX(_sc) &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
mtx_init(VTBLK_MTX((_sc)), (_name), \
"VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc) mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc) mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc) mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
#define VTBLK_DISK_NAME "vtbd"
#define VTBLK_QUIESCE_TIMEOUT (30 * hz)
#define VTBLK_BSIZE 512
/*
* Each block request uses at least two segments - one for the header
* and one for the status.
*/
#define VTBLK_MIN_SEGMENTS 2
static device_method_t vtblk_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtblk_probe),
DEVMETHOD(device_attach, vtblk_attach),
DEVMETHOD(device_detach, vtblk_detach),
DEVMETHOD(device_suspend, vtblk_suspend),
DEVMETHOD(device_resume, vtblk_resume),
DEVMETHOD(device_shutdown, vtblk_shutdown),
/* VirtIO methods. */
DEVMETHOD(virtio_config_change, vtblk_config_change),
DEVMETHOD_END
};
static driver_t vtblk_driver = {
"vtblk",
vtblk_methods,
sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;
DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);
static int
vtblk_modevent(module_t mod, int type, void *unused)
{
int error;
error = 0;
switch (type) {
case MOD_LOAD:
case MOD_QUIESCE:
case MOD_UNLOAD:
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static int
vtblk_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}
static int
vtblk_attach(device_t dev)
{
struct vtblk_softc *sc;
struct virtio_blk_config blkcfg;
int error;
- virtio_set_feature_desc(dev, vtblk_feature_desc);
-
sc = device_get_softc(dev);
sc->vtblk_dev = dev;
+ virtio_set_feature_desc(dev, vtblk_feature_desc);
+
VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
bioq_init(&sc->vtblk_bioq);
TAILQ_INIT(&sc->vtblk_dump_queue);
TAILQ_INIT(&sc->vtblk_req_free);
TAILQ_INIT(&sc->vtblk_req_ready);
vtblk_setup_sysctl(sc);
- vtblk_setup_features(sc);
+
+ error = vtblk_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
vtblk_read_config(sc, &blkcfg);
/*
* With the current sglist(9) implementation, it is not easy
* for us to support a maximum segment size as adjacent
* segments are coalesced. For now, just make sure it's larger
* than the maximum supported transfer size.
*/
if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
if (blkcfg.size_max < maxphys) {
error = ENOTSUP;
device_printf(dev, "host requires unsupported "
"maximum segment size feature\n");
goto fail;
}
}
sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
error = EINVAL;
device_printf(dev, "fewer than minimum number of segments "
"allowed: %d\n", sc->vtblk_max_nsegs);
goto fail;
}
sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
if (sc->vtblk_sglist == NULL) {
error = ENOMEM;
device_printf(dev, "cannot allocate sglist\n");
goto fail;
}
error = vtblk_alloc_virtqueue(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueue\n");
goto fail;
}
error = vtblk_request_prealloc(sc);
if (error) {
device_printf(dev, "cannot preallocate requests\n");
goto fail;
}
vtblk_alloc_disk(sc, &blkcfg);
error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupt\n");
goto fail;
}
vtblk_create_disk(sc);
virtqueue_enable_intr(sc->vtblk_vq);
fail:
if (error)
vtblk_detach(dev);
return (error);
}
static int
vtblk_detach(device_t dev)
{
struct vtblk_softc *sc;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
sc->vtblk_flags |= VTBLK_FLAG_DETACH;
if (device_is_attached(dev))
vtblk_stop(sc);
VTBLK_UNLOCK(sc);
vtblk_drain(sc);
if (sc->vtblk_disk != NULL) {
disk_destroy(sc->vtblk_disk);
sc->vtblk_disk = NULL;
}
if (sc->vtblk_sglist != NULL) {
sglist_free(sc->vtblk_sglist);
sc->vtblk_sglist = NULL;
}
VTBLK_LOCK_DESTROY(sc);
return (0);
}
static int
vtblk_suspend(device_t dev)
{
struct vtblk_softc *sc;
int error;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
/* XXX BMV: virtio_stop(), etc needed here? */
error = vtblk_quiesce(sc);
if (error)
sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
VTBLK_UNLOCK(sc);
return (error);
}
static int
vtblk_resume(device_t dev)
{
struct vtblk_softc *sc;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
/* XXX BMV: virtio_reinit(), etc needed here? */
sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
vtblk_startio(sc);
VTBLK_UNLOCK(sc);
return (0);
}
static int
vtblk_shutdown(device_t dev)
{
return (0);
}
static int
vtblk_config_change(device_t dev)
{
struct vtblk_softc *sc;
struct virtio_blk_config blkcfg;
uint64_t capacity;
sc = device_get_softc(dev);
vtblk_read_config(sc, &blkcfg);
/* Capacity is always in 512-byte units. */
capacity = blkcfg.capacity * VTBLK_BSIZE;
if (sc->vtblk_disk->d_mediasize != capacity)
vtblk_resize_disk(sc, capacity);
return (0);
}
static int
vtblk_open(struct disk *dp)
{
struct vtblk_softc *sc;
if ((sc = dp->d_drv1) == NULL)
return (ENXIO);
return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}
static int
vtblk_close(struct disk *dp)
{
struct vtblk_softc *sc;
if ((sc = dp->d_drv1) == NULL)
return (ENXIO);
return (0);
}
static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
struct thread *td)
{
struct vtblk_softc *sc;
if ((sc = dp->d_drv1) == NULL)
return (ENXIO);
return (ENOTTY);
}
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
size_t length)
{
struct disk *dp;
struct vtblk_softc *sc;
int error;
dp = arg;
error = 0;
if ((sc = dp->d_drv1) == NULL)
return (ENXIO);
VTBLK_LOCK(sc);
vtblk_dump_quiesce(sc);
if (length > 0)
error = vtblk_dump_write(sc, virtual, offset, length);
if (error || (virtual == NULL && offset == 0))
vtblk_dump_complete(sc);
VTBLK_UNLOCK(sc);
return (error);
}
static void
vtblk_strategy(struct bio *bp)
{
struct vtblk_softc *sc;
if ((sc = bp->bio_disk->d_drv1) == NULL) {
vtblk_bio_done(NULL, bp, EINVAL);
return;
}
if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
(bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
vtblk_bio_done(sc, bp, EOPNOTSUPP);
return;
}
VTBLK_LOCK(sc);
if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
VTBLK_UNLOCK(sc);
vtblk_bio_done(sc, bp, ENXIO);
return;
}
bioq_insert_tail(&sc->vtblk_bioq, bp);
vtblk_startio(sc);
VTBLK_UNLOCK(sc);
}
-static void
+static int
vtblk_negotiate_features(struct vtblk_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtblk_dev;
features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
VTBLK_LEGACY_FEATURES;
sc->vtblk_features = virtio_negotiate_features(dev, features);
- virtio_finalize_features(dev);
+ return (virtio_finalize_features(dev));
}
-static void
+static int
vtblk_setup_features(struct vtblk_softc *sc)
{
device_t dev;
+ int error;
dev = sc->vtblk_dev;
- vtblk_negotiate_features(sc);
+ error = vtblk_negotiate_features(sc);
+ if (error)
+ return (error);
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
/* Legacy. */
if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
+
+ return (0);
}
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
struct virtio_blk_config *blkcfg)
{
device_t dev;
int nsegs;
dev = sc->vtblk_dev;
nsegs = VTBLK_MIN_SEGMENTS;
if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
nsegs += MIN(blkcfg->seg_max, maxphys / PAGE_SIZE + 1);
if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
} else
nsegs += 1;
return (nsegs);
}
static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info;
dev = sc->vtblk_dev;
VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
vtblk_vq_intr, sc, &sc->vtblk_vq,
"%s request", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}
static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
device_t dev;
struct disk *dp;
int error;
dev = sc->vtblk_dev;
dp = sc->vtblk_disk;
dp->d_mediasize = new_capacity;
if (bootverbose) {
device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
(uintmax_t) dp->d_mediasize >> 20,
(uintmax_t) dp->d_mediasize / dp->d_sectorsize,
dp->d_sectorsize);
}
error = disk_resize(dp, M_NOWAIT);
if (error) {
device_printf(dev,
"disk_resize(9) failed, error: %d\n", error);
}
}
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
device_t dev;
struct disk *dp;
dev = sc->vtblk_dev;
sc->vtblk_disk = dp = disk_alloc();
dp->d_open = vtblk_open;
dp->d_close = vtblk_close;
dp->d_ioctl = vtblk_ioctl;
dp->d_strategy = vtblk_strategy;
dp->d_name = VTBLK_DISK_NAME;
dp->d_unit = device_get_unit(dev);
dp->d_drv1 = sc;
dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
dp->d_hba_vendor = virtio_get_vendor(dev);
dp->d_hba_device = virtio_get_device(dev);
dp->d_hba_subvendor = virtio_get_subvendor(dev);
dp->d_hba_subdevice = virtio_get_subdevice(dev);
if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
dp->d_flags |= DISKFLAG_WRITE_PROTECT;
else {
if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
dp->d_dump = vtblk_dump;
}
/* Capacity is always in 512-byte units. */
dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;
if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
dp->d_sectorsize = blkcfg->blk_size;
else
dp->d_sectorsize = VTBLK_BSIZE;
/*
* The VirtIO maximum I/O size is given in terms of segments.
* However, FreeBSD limits I/O size by logical buffer size, not
* by physically contiguous pages. Therefore, we have to assume
* no pages are contiguous. This may impose an artificially low
* maximum I/O size. But in practice, since QEMU advertises 128
* segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
* which is typically greater than maxphys. Eventually we should
* just advertise maxphys and split buffers that are too big.
*
* Note we must subtract one additional segment in case of non
* page aligned buffers.
*/
dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
PAGE_SIZE;
if (dp->d_maxsize < PAGE_SIZE)
dp->d_maxsize = PAGE_SIZE; /* XXX */
if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
dp->d_fwsectors = blkcfg->geometry.sectors;
dp->d_fwheads = blkcfg->geometry.heads;
}
if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
blkcfg->topology.physical_block_exp > 0) {
dp->d_stripesize = dp->d_sectorsize *
(1 << blkcfg->topology.physical_block_exp);
dp->d_stripeoffset = (dp->d_stripesize -
blkcfg->topology.alignment_offset * dp->d_sectorsize) %
dp->d_stripesize;
}
if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
dp->d_flags |= DISKFLAG_CANDELETE;
dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
}
if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
else
sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}
static void
vtblk_create_disk(struct vtblk_softc *sc)
{
struct disk *dp;
dp = sc->vtblk_disk;
vtblk_ident(sc);
device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
(uintmax_t) dp->d_mediasize >> 20,
(uintmax_t) dp->d_mediasize / dp->d_sectorsize,
dp->d_sectorsize);
disk_create(dp, DISK_VERSION);
}
static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
struct vtblk_request *req;
int i, nreqs;
nreqs = virtqueue_size(sc->vtblk_vq);
/*
* Preallocate sufficient requests to keep the virtqueue full. Each
* request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
* the number allocated when indirect descriptors are not available.
*/
if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
nreqs /= VTBLK_MIN_SEGMENTS;
for (i = 0; i < nreqs; i++) {
req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
if (req == NULL)
return (ENOMEM);
MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);
sc->vtblk_request_count++;
vtblk_request_enqueue(sc, req);
}
return (0);
}
static void
vtblk_request_free(struct vtblk_softc *sc)
{
struct vtblk_request *req;
MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));
while ((req = vtblk_request_dequeue(sc)) != NULL) {
sc->vtblk_request_count--;
free(req, M_DEVBUF);
}
KASSERT(sc->vtblk_request_count == 0,
("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}
static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
struct vtblk_request *req;
req = TAILQ_FIRST(&sc->vtblk_req_free);
if (req != NULL) {
TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
bzero(req, sizeof(struct vtblk_request));
}
return (req);
}
static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{
TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
struct vtblk_request *req;
req = TAILQ_FIRST(&sc->vtblk_req_ready);
if (req != NULL)
TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);
return (req);
}
static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{
/* NOTE: Currently, there will be at most one request in the queue. */
TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}
static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
struct vtblk_request *req;
req = vtblk_request_next_ready(sc);
if (req != NULL)
return (req);
return (vtblk_request_bio(sc));
}
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
struct bio_queue_head *bioq;
struct vtblk_request *req;
struct bio *bp;
bioq = &sc->vtblk_bioq;
if (bioq_first(bioq) == NULL)
return (NULL);
req = vtblk_request_dequeue(sc);
if (req == NULL)
return (NULL);
bp = bioq_takefirst(bioq);
req->vbr_bp = bp;
req->vbr_ack = -1;
req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
switch (bp->bio_cmd) {
case BIO_FLUSH:
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
req->vbr_hdr.sector = 0;
break;
case BIO_READ:
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
break;
case BIO_WRITE:
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
break;
case BIO_DELETE:
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_DISCARD);
req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
break;
default:
panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
}
if (bp->bio_flags & BIO_ORDERED)
req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
return (req);
}
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
struct virtqueue *vq;
struct sglist *sg;
struct bio *bp;
int ordered, readable, writable, error;
vq = sc->vtblk_vq;
sg = sc->vtblk_sglist;
bp = req->vbr_bp;
ordered = 0;
writable = 0;
/*
* Some hosts (such as bhyve) do not implement the barrier feature,
* so we emulate it in the driver by allowing the barrier request
* to be the only one in flight.
*/
if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
if (sc->vtblk_req_ordered != NULL)
return (EBUSY);
if (bp->bio_flags & BIO_ORDERED) {
if (!virtqueue_empty(vq))
return (EBUSY);
ordered = 1;
req->vbr_hdr.type &= vtblk_gtoh32(sc,
~VIRTIO_BLK_T_BARRIER);
}
}
sglist_reset(sg);
sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));
if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
error = sglist_append_bio(sg, bp);
if (error || sg->sg_nseg == sg->sg_maxseg) {
panic("%s: bio %p data buffer too big %d",
__func__, bp, error);
}
/* BIO_READ means the host writes into our buffer. */
if (bp->bio_cmd == BIO_READ)
writable = sg->sg_nseg - 1;
} else if (bp->bio_cmd == BIO_DELETE) {
struct virtio_blk_discard_write_zeroes *discard;
discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
if (discard == NULL)
return (ENOMEM);
bp->bio_driver1 = discard;
discard->sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
discard->num_sectors = vtblk_gtoh32(sc, bp->bio_bcount / VTBLK_BSIZE);
error = sglist_append(sg, discard, sizeof(*discard));
if (error || sg->sg_nseg == sg->sg_maxseg) {
panic("%s: bio %p data buffer too big %d",
__func__, bp, error);
}
}
writable++;
sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
readable = sg->sg_nseg - writable;
error = virtqueue_enqueue(vq, req, sg, readable, writable);
if (error == 0 && ordered)
sc->vtblk_req_ordered = req;
return (error);
}
static int
vtblk_request_error(struct vtblk_request *req)
{
int error;
switch (req->vbr_ack) {
case VIRTIO_BLK_S_OK:
error = 0;
break;
case VIRTIO_BLK_S_UNSUPP:
error = ENOTSUP;
break;
default:
error = EIO;
break;
}
return (error);
}
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
struct vtblk_request *req;
struct bio *bp;
while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
if (sc->vtblk_req_ordered != NULL) {
MPASS(sc->vtblk_req_ordered == req);
sc->vtblk_req_ordered = NULL;
}
bp = req->vbr_bp;
bp->bio_error = vtblk_request_error(req);
TAILQ_INSERT_TAIL(queue, bp, bio_queue);
vtblk_request_enqueue(sc, req);
}
}
static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
struct bio *bp, *tmp;
TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
if (bp->bio_error != 0)
disk_err(bp, "hard error", -1, 1);
vtblk_bio_done(sc, bp, bp->bio_error);
}
}
static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
struct virtqueue *vq;
struct vtblk_request *req;
int last;
vq = sc->vtblk_vq;
last = 0;
while ((req = virtqueue_drain(vq, &last)) != NULL) {
vtblk_bio_done(sc, req->vbr_bp, ENXIO);
vtblk_request_enqueue(sc, req);
}
sc->vtblk_req_ordered = NULL;
KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
static void
vtblk_drain(struct vtblk_softc *sc)
{
struct bio_queue_head *bioq;
struct vtblk_request *req;
struct bio *bp;
bioq = &sc->vtblk_bioq;
if (sc->vtblk_vq != NULL) {
struct bio_queue queue;
TAILQ_INIT(&queue);
vtblk_queue_completed(sc, &queue);
vtblk_done_completed(sc, &queue);
vtblk_drain_vq(sc);
}
while ((req = vtblk_request_next_ready(sc)) != NULL) {
vtblk_bio_done(sc, req->vbr_bp, ENXIO);
vtblk_request_enqueue(sc, req);
}
while (bioq_first(bioq) != NULL) {
bp = bioq_takefirst(bioq);
vtblk_bio_done(sc, bp, ENXIO);
}
vtblk_request_free(sc);
}
static void
vtblk_startio(struct vtblk_softc *sc)
{
struct virtqueue *vq;
struct vtblk_request *req;
int enq;
VTBLK_LOCK_ASSERT(sc);
vq = sc->vtblk_vq;
enq = 0;
if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
return;
while (!virtqueue_full(vq)) {
req = vtblk_request_next(sc);
if (req == NULL)
break;
if (vtblk_request_execute(sc, req) != 0) {
vtblk_request_requeue_ready(sc, req);
break;
}
enq++;
}
if (enq > 0)
virtqueue_notify(vq);
}
static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{
/* Because of GEOM direct dispatch, we cannot hold any locks. */
if (sc != NULL)
VTBLK_LOCK_ASSERT_NOTOWNED(sc);
if (error) {
bp->bio_resid = bp->bio_bcount;
bp->bio_error = error;
bp->bio_flags |= BIO_ERROR;
}
if (bp->bio_driver1 != NULL) {
free(bp->bio_driver1, M_DEVBUF);
bp->bio_driver1 = NULL;
}
biodone(bp);
}
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg) \
if (virtio_with_feature(_dev, _feature)) { \
virtio_read_device_config(_dev, \
offsetof(struct virtio_blk_config, _field), \
&(_cfg)->_field, sizeof((_cfg)->_field)); \
}
static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
device_t dev;
dev = sc->vtblk_dev;
bzero(blkcfg, sizeof(struct virtio_blk_config));
/* The capacity is always available. */
virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));
/* Read the configuration if the feature was negotiated. */
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
geometry.cylinders, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
geometry.heads, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
geometry.sectors, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
topology.physical_block_exp, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
topology.alignment_offset, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
topology.min_io_size, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
topology.opt_io_size, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
blkcfg);
}
#undef VTBLK_GET_CONFIG
static void
vtblk_ident(struct vtblk_softc *sc)
{
struct bio buf;
struct disk *dp;
struct vtblk_request *req;
int len, error;
dp = sc->vtblk_disk;
len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);
if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
return;
req = vtblk_request_dequeue(sc);
if (req == NULL)
return;
req->vbr_ack = -1;
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
req->vbr_hdr.sector = 0;
req->vbr_bp = &buf;
g_reset_bio(&buf);
buf.bio_cmd = BIO_READ;
buf.bio_data = dp->d_ident;
buf.bio_bcount = len;
VTBLK_LOCK(sc);
error = vtblk_poll_request(sc, req);
VTBLK_UNLOCK(sc);
vtblk_request_enqueue(sc, req);
if (error) {
device_printf(sc->vtblk_dev,
"error getting device identifier: %d\n", error);
}
}
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
struct virtqueue *vq;
int error;
vq = sc->vtblk_vq;
if (!virtqueue_empty(vq))
return (EBUSY);
error = vtblk_request_execute(sc, req);
if (error)
return (error);
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
error = vtblk_request_error(req);
if (error && bootverbose) {
device_printf(sc->vtblk_dev,
"%s: IO error: %d\n", __func__, error);
}
return (error);
}
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
int error;
VTBLK_LOCK_ASSERT(sc);
error = 0;
while (!virtqueue_empty(sc->vtblk_vq)) {
if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
error = EBUSY;
break;
}
}
return (error);
}
static void
vtblk_vq_intr(void *xsc)
{
struct vtblk_softc *sc;
struct virtqueue *vq;
struct bio_queue queue;
sc = xsc;
vq = sc->vtblk_vq;
TAILQ_INIT(&queue);
VTBLK_LOCK(sc);
again:
if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
goto out;
vtblk_queue_completed(sc, &queue);
vtblk_startio(sc);
if (virtqueue_enable_intr(vq) != 0) {
virtqueue_disable_intr(vq);
goto again;
}
if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
wakeup(&sc->vtblk_vq);
out:
VTBLK_UNLOCK(sc);
vtblk_done_completed(sc, &queue);
}
static void
vtblk_stop(struct vtblk_softc *sc)
{
virtqueue_disable_intr(sc->vtblk_vq);
virtio_stop(sc->vtblk_dev);
}
static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{
/*
* Spin here until all the requests in-flight at the time of the
* dump are completed and queued. The queued requests will be
* biodone'd once the dump is finished.
*/
while (!virtqueue_empty(sc->vtblk_vq))
vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}
static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
size_t length)
{
struct bio buf;
struct vtblk_request *req;
req = &sc->vtblk_dump_request;
req->vbr_ack = -1;
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / VTBLK_BSIZE);
req->vbr_bp = &buf;
g_reset_bio(&buf);
buf.bio_cmd = BIO_WRITE;
buf.bio_data = virtual;
buf.bio_bcount = length;
return (vtblk_poll_request(sc, req));
}
static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
struct bio buf;
struct vtblk_request *req;
req = &sc->vtblk_dump_request;
req->vbr_ack = -1;
req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
req->vbr_hdr.sector = 0;
req->vbr_bp = &buf;
g_reset_bio(&buf);
buf.bio_cmd = BIO_FLUSH;
return (vtblk_poll_request(sc, req));
}
static void
vtblk_dump_complete(struct vtblk_softc *sc)
{
vtblk_dump_flush(sc);
VTBLK_UNLOCK(sc);
vtblk_done_completed(sc, &sc->vtblk_dump_queue);
VTBLK_LOCK(sc);
}
static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{
/* Set either writeback (1) or writethrough (0) mode. */
virtio_write_dev_config_1(sc->vtblk_dev,
offsetof(struct virtio_blk_config, wce), wc);
}
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
struct virtio_blk_config *blkcfg)
{
int wc;
if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
wc = vtblk_tunable_int(sc, "writecache_mode",
vtblk_writecache_mode);
if (wc >= 0 && wc < VTBLK_CACHE_MAX)
vtblk_set_write_cache(sc, wc);
else
wc = blkcfg->wce;
} else
wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
return (wc);
}
static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
struct vtblk_softc *sc;
int wc, error;
sc = oidp->oid_arg1;
wc = sc->vtblk_write_cache;
error = sysctl_handle_int(oidp, &wc, 0, req);
if (error || req->newptr == NULL)
return (error);
if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
return (EPERM);
if (wc < 0 || wc >= VTBLK_CACHE_MAX)
return (EINVAL);
VTBLK_LOCK(sc);
sc->vtblk_write_cache = wc;
vtblk_set_write_cache(sc, sc->vtblk_write_cache);
VTBLK_UNLOCK(sc);
return (0);
}
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vtblk_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
vtblk_write_cache_sysctl, "I",
"Write cache mode (writethrough (0) or writeback (1))");
}
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
char path[64];
snprintf(path, sizeof(path),
"hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
TUNABLE_INT_FETCH(path, &def);
return (def);
}
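The d_maxsize set in vtblk_alloc_disk() above follows from the segment arithmetic described in its comment. A worked sketch, assuming 4 KB pages and a negotiated vtblk_max_nsegs of 128 (the QEMU-like case the comment cites; the helper name is hypothetical):

/* Sketch of the vtblk_alloc_disk() maximum I/O size arithmetic. */
static off_t
vtblk_sketch_maxsize(int max_nsegs)
{
	/*
	 * VTBLK_MIN_SEGMENTS (2) segments carry the request header and
	 * the status byte; one more is reserved in case the data buffer
	 * is not page aligned.
	 */
	return ((off_t)(max_nsegs - VTBLK_MIN_SEGMENTS - 1) * PAGE_SIZE);
}

With max_nsegs = 128 this yields 125 * 4096 = 512000 bytes, the 125 * PAGE_SIZE figure in the comment.
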
diff --git a/sys/dev/virtio/console/virtio_console.c b/sys/dev/virtio/console/virtio_console.c
index 0bd7c982e3f5..315eb59716b4 100644
--- a/sys/dev/virtio/console/virtio_console.c
+++ b/sys/dev/virtio/console/virtio_console.c
@@ -1,1513 +1,1522 @@
/*-
* Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO console devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kdb.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/tty.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/console/virtio_console.h>
#include "virtio_if.h"
#define VTCON_MAX_PORTS 32
#define VTCON_TTY_PREFIX "V"
#define VTCON_TTY_ALIAS_PREFIX "vtcon"
#define VTCON_BULK_BUFSZ 128
#define VTCON_CTRL_BUFSZ 128
/*
* The buffers cannot cross more than one page boundary due to the
* size of the sglist segment array used.
*/
CTASSERT(VTCON_BULK_BUFSZ <= PAGE_SIZE);
CTASSERT(VTCON_CTRL_BUFSZ <= PAGE_SIZE);
CTASSERT(sizeof(struct virtio_console_config) <= VTCON_CTRL_BUFSZ);
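/*
 * Worked example: since each buffer is at most PAGE_SIZE bytes (asserted
 * above), it can cross at most one page boundary, so a two-segment sglist
 * suffices to map any bulk or control buffer.
 */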
struct vtcon_softc;
struct vtcon_softc_port;
struct vtcon_port {
struct mtx vtcport_mtx;
struct vtcon_softc *vtcport_sc;
struct vtcon_softc_port *vtcport_scport;
struct tty *vtcport_tty;
struct virtqueue *vtcport_invq;
struct virtqueue *vtcport_outvq;
int vtcport_id;
int vtcport_flags;
#define VTCON_PORT_FLAG_GONE 0x01
#define VTCON_PORT_FLAG_CONSOLE 0x02
#define VTCON_PORT_FLAG_ALIAS 0x04
#if defined(KDB)
int vtcport_alt_break_state;
#endif
};
#define VTCON_PORT_LOCK(_port) mtx_lock(&(_port)->vtcport_mtx)
#define VTCON_PORT_UNLOCK(_port) mtx_unlock(&(_port)->vtcport_mtx)
struct vtcon_softc_port {
struct vtcon_softc *vcsp_sc;
struct vtcon_port *vcsp_port;
struct virtqueue *vcsp_invq;
struct virtqueue *vcsp_outvq;
};
struct vtcon_softc {
device_t vtcon_dev;
struct mtx vtcon_mtx;
uint64_t vtcon_features;
uint32_t vtcon_max_ports;
uint32_t vtcon_flags;
#define VTCON_FLAG_DETACHED 0x01
#define VTCON_FLAG_SIZE 0x02
#define VTCON_FLAG_MULTIPORT 0x04
/*
* Ports can be added and removed during runtime, but we have
* to allocate all the virtqueues during attach. This array is
* indexed by the port ID.
*/
struct vtcon_softc_port *vtcon_ports;
struct task vtcon_ctrl_task;
struct virtqueue *vtcon_ctrl_rxvq;
struct virtqueue *vtcon_ctrl_txvq;
struct mtx vtcon_ctrl_tx_mtx;
};
#define VTCON_LOCK(_sc) mtx_lock(&(_sc)->vtcon_mtx)
#define VTCON_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_mtx)
#define VTCON_LOCK_ASSERT(_sc) \
mtx_assert(&(_sc)->vtcon_mtx, MA_OWNED)
#define VTCON_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(&(_sc)->vtcon_mtx, MA_NOTOWNED)
#define VTCON_CTRL_TX_LOCK(_sc) mtx_lock(&(_sc)->vtcon_ctrl_tx_mtx)
#define VTCON_CTRL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_ctrl_tx_mtx)
#define VTCON_ASSERT_VALID_PORTID(_sc, _id) \
KASSERT((_id) >= 0 && (_id) < (_sc)->vtcon_max_ports, \
("%s: port ID %d out of range", __func__, _id))
#define VTCON_FEATURES VIRTIO_CONSOLE_F_MULTIPORT
static struct virtio_feature_desc vtcon_feature_desc[] = {
{ VIRTIO_CONSOLE_F_SIZE, "ConsoleSize" },
{ VIRTIO_CONSOLE_F_MULTIPORT, "MultiplePorts" },
{ VIRTIO_CONSOLE_F_EMERG_WRITE, "EmergencyWrite" },
{ 0, NULL }
};
static int vtcon_modevent(module_t, int, void *);
static void vtcon_drain_all(void);
static int vtcon_probe(device_t);
static int vtcon_attach(device_t);
static int vtcon_detach(device_t);
static int vtcon_config_change(device_t);
-static void vtcon_setup_features(struct vtcon_softc *);
-static void vtcon_negotiate_features(struct vtcon_softc *);
+static int vtcon_setup_features(struct vtcon_softc *);
+static int vtcon_negotiate_features(struct vtcon_softc *);
static int vtcon_alloc_scports(struct vtcon_softc *);
static int vtcon_alloc_virtqueues(struct vtcon_softc *);
static void vtcon_read_config(struct vtcon_softc *,
struct virtio_console_config *);
static void vtcon_determine_max_ports(struct vtcon_softc *,
struct virtio_console_config *);
static void vtcon_destroy_ports(struct vtcon_softc *);
static void vtcon_stop(struct vtcon_softc *);
static int vtcon_ctrl_event_enqueue(struct vtcon_softc *,
struct virtio_console_control *);
static int vtcon_ctrl_event_create(struct vtcon_softc *);
static void vtcon_ctrl_event_requeue(struct vtcon_softc *,
struct virtio_console_control *);
static int vtcon_ctrl_event_populate(struct vtcon_softc *);
static void vtcon_ctrl_event_drain(struct vtcon_softc *);
static int vtcon_ctrl_init(struct vtcon_softc *);
static void vtcon_ctrl_deinit(struct vtcon_softc *);
static void vtcon_ctrl_port_add_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_remove_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_console_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_open_event(struct vtcon_softc *, int);
static void vtcon_ctrl_port_name_event(struct vtcon_softc *, int,
const char *, size_t);
static void vtcon_ctrl_process_event(struct vtcon_softc *,
struct virtio_console_control *, void *, size_t);
static void vtcon_ctrl_task_cb(void *, int);
static void vtcon_ctrl_event_intr(void *);
static void vtcon_ctrl_poll(struct vtcon_softc *,
struct virtio_console_control *control);
static void vtcon_ctrl_send_control(struct vtcon_softc *, uint32_t,
uint16_t, uint16_t);
static int vtcon_port_enqueue_buf(struct vtcon_port *, void *, size_t);
static int vtcon_port_create_buf(struct vtcon_port *);
static void vtcon_port_requeue_buf(struct vtcon_port *, void *);
static int vtcon_port_populate(struct vtcon_port *);
static void vtcon_port_destroy(struct vtcon_port *);
static int vtcon_port_create(struct vtcon_softc *, int);
static void vtcon_port_dev_alias(struct vtcon_port *, const char *,
size_t);
static void vtcon_port_drain_bufs(struct virtqueue *);
static void vtcon_port_drain(struct vtcon_port *);
static void vtcon_port_teardown(struct vtcon_port *);
static void vtcon_port_change_size(struct vtcon_port *, uint16_t,
uint16_t);
static void vtcon_port_update_console_size(struct vtcon_softc *);
static void vtcon_port_enable_intr(struct vtcon_port *);
static void vtcon_port_disable_intr(struct vtcon_port *);
static void vtcon_port_in(struct vtcon_port *);
static void vtcon_port_intr(void *);
static void vtcon_port_out(struct vtcon_port *, void *, int);
static void vtcon_port_submit_event(struct vtcon_port *, uint16_t,
uint16_t);
static int vtcon_tty_open(struct tty *);
static void vtcon_tty_close(struct tty *);
static void vtcon_tty_outwakeup(struct tty *);
static void vtcon_tty_free(void *);
static void vtcon_get_console_size(struct vtcon_softc *, uint16_t *,
uint16_t *);
static void vtcon_enable_interrupts(struct vtcon_softc *);
static void vtcon_disable_interrupts(struct vtcon_softc *);
#define vtcon_modern(_sc) (((_sc)->vtcon_features & VIRTIO_F_VERSION_1) != 0)
#define vtcon_htog16(_sc, _val) virtio_htog16(vtcon_modern(_sc), _val)
#define vtcon_htog32(_sc, _val) virtio_htog32(vtcon_modern(_sc), _val)
#define vtcon_htog64(_sc, _val) virtio_htog64(vtcon_modern(_sc), _val)
#define vtcon_gtoh16(_sc, _val) virtio_gtoh16(vtcon_modern(_sc), _val)
#define vtcon_gtoh32(_sc, _val) virtio_gtoh32(vtcon_modern(_sc), _val)
#define vtcon_gtoh64(_sc, _val) virtio_gtoh64(vtcon_modern(_sc), _val)
static int vtcon_pending_free;
static struct ttydevsw vtcon_tty_class = {
.tsw_flags = 0,
.tsw_open = vtcon_tty_open,
.tsw_close = vtcon_tty_close,
.tsw_outwakeup = vtcon_tty_outwakeup,
.tsw_free = vtcon_tty_free,
};
static device_method_t vtcon_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtcon_probe),
DEVMETHOD(device_attach, vtcon_attach),
DEVMETHOD(device_detach, vtcon_detach),
/* VirtIO methods. */
DEVMETHOD(virtio_config_change, vtcon_config_change),
DEVMETHOD_END
};
static driver_t vtcon_driver = {
"vtcon",
vtcon_methods,
sizeof(struct vtcon_softc)
};
static devclass_t vtcon_devclass;
DRIVER_MODULE(virtio_console, virtio_mmio, vtcon_driver, vtcon_devclass,
vtcon_modevent, 0);
DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
vtcon_modevent, 0);
MODULE_VERSION(virtio_console, 1);
MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
VIRTIO_SIMPLE_PNPTABLE(virtio_console, VIRTIO_ID_CONSOLE,
"VirtIO Console Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_console);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_console);
static int
vtcon_modevent(module_t mod, int type, void *unused)
{
int error;
switch (type) {
case MOD_LOAD:
error = 0;
break;
case MOD_QUIESCE:
error = 0;
break;
case MOD_UNLOAD:
vtcon_drain_all();
error = 0;
break;
case MOD_SHUTDOWN:
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
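/*
 * Block module unload until every TTY belonging to a detached device has
 * been closed and freed; vtcon_tty_free() drops vtcon_pending_free.
 */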
static void
vtcon_drain_all(void)
{
int first;
for (first = 1; vtcon_pending_free != 0; first = 0) {
if (first != 0) {
printf("virtio_console: Waiting for all detached TTY "
"devices to have open fds closed.\n");
}
pause("vtcondra", hz);
}
}
static int
vtcon_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_console));
}
static int
vtcon_attach(device_t dev)
{
struct vtcon_softc *sc;
struct virtio_console_config concfg;
int error;
sc = device_get_softc(dev);
sc->vtcon_dev = dev;
+ virtio_set_feature_desc(dev, vtcon_feature_desc);
mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
- virtio_set_feature_desc(dev, vtcon_feature_desc);
- vtcon_setup_features(sc);
+ error = vtcon_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
vtcon_read_config(sc, &concfg);
vtcon_determine_max_ports(sc, &concfg);
error = vtcon_alloc_scports(sc);
if (error) {
device_printf(dev, "cannot allocate softc port structures\n");
goto fail;
}
error = vtcon_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
TASK_INIT(&sc->vtcon_ctrl_task, 0, vtcon_ctrl_task_cb, sc);
error = vtcon_ctrl_init(sc);
if (error)
goto fail;
} else {
error = vtcon_port_create(sc, 0);
if (error)
goto fail;
if (sc->vtcon_flags & VTCON_FLAG_SIZE)
vtcon_port_update_console_size(sc);
}
error = virtio_setup_intr(dev, INTR_TYPE_TTY);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupts\n");
goto fail;
}
vtcon_enable_interrupts(sc);
vtcon_ctrl_send_control(sc, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 1);
fail:
if (error)
vtcon_detach(dev);
return (error);
}
static int
vtcon_detach(device_t dev)
{
struct vtcon_softc *sc;
sc = device_get_softc(dev);
VTCON_LOCK(sc);
sc->vtcon_flags |= VTCON_FLAG_DETACHED;
if (device_is_attached(dev))
vtcon_stop(sc);
VTCON_UNLOCK(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
taskqueue_drain(taskqueue_thread, &sc->vtcon_ctrl_task);
vtcon_ctrl_deinit(sc);
}
vtcon_destroy_ports(sc);
mtx_destroy(&sc->vtcon_mtx);
mtx_destroy(&sc->vtcon_ctrl_tx_mtx);
return (0);
}
static int
vtcon_config_change(device_t dev)
{
struct vtcon_softc *sc;
sc = device_get_softc(dev);
/*
* When the multiport feature is negotiated, all configuration
* changes are done through control virtqueue events.
*/
if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) {
if (sc->vtcon_flags & VTCON_FLAG_SIZE)
vtcon_port_update_console_size(sc);
}
return (0);
}
-static void
+static int
vtcon_negotiate_features(struct vtcon_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtcon_dev;
features = VTCON_FEATURES;
sc->vtcon_features = virtio_negotiate_features(dev, features);
- virtio_finalize_features(dev);
+ return (virtio_finalize_features(dev));
}
-static void
+static int
vtcon_setup_features(struct vtcon_softc *sc)
{
device_t dev;
+ int error;
dev = sc->vtcon_dev;
- vtcon_negotiate_features(sc);
+ error = vtcon_negotiate_features(sc);
+ if (error)
+ return (error);
if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
sc->vtcon_flags |= VTCON_FLAG_SIZE;
if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
+
+ return (0);
}
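/* Read a device config field only if the feature guarding it was negotiated. */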
#define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg) \
if (virtio_with_feature(_dev, _feature)) { \
virtio_read_device_config(_dev, \
offsetof(struct virtio_console_config, _field), \
&(_cfg)->_field, sizeof((_cfg)->_field)); \
}
static void
vtcon_read_config(struct vtcon_softc *sc, struct virtio_console_config *concfg)
{
device_t dev;
dev = sc->vtcon_dev;
bzero(concfg, sizeof(struct virtio_console_config));
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, cols, concfg);
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, rows, concfg);
VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_MULTIPORT, max_nr_ports, concfg);
}
#undef VTCON_GET_CONFIG
static int
vtcon_alloc_scports(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
int max, i;
max = sc->vtcon_max_ports;
sc->vtcon_ports = malloc(sizeof(struct vtcon_softc_port) * max,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtcon_ports == NULL)
return (ENOMEM);
for (i = 0; i < max; i++) {
scport = &sc->vtcon_ports[i];
scport->vcsp_sc = sc;
}
return (0);
}
static int
vtcon_alloc_virtqueues(struct vtcon_softc *sc)
{
device_t dev;
struct vq_alloc_info *info;
struct vtcon_softc_port *scport;
int i, idx, portidx, nvqs, error;
dev = sc->vtcon_dev;
nvqs = sc->vtcon_max_ports * 2;
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
nvqs += 2;
info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
if (info == NULL)
return (ENOMEM);
for (i = 0, idx = 0, portidx = 0; i < nvqs / 2; i++, idx += 2) {
if (i == 1) {
/* The control virtqueues are after the first port. */
VQ_ALLOC_INFO_INIT(&info[idx], 0,
vtcon_ctrl_event_intr, sc, &sc->vtcon_ctrl_rxvq,
"%s-control rx", device_get_nameunit(dev));
VQ_ALLOC_INFO_INIT(&info[idx+1], 0,
NULL, sc, &sc->vtcon_ctrl_txvq,
"%s-control tx", device_get_nameunit(dev));
continue;
}
scport = &sc->vtcon_ports[portidx];
VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_port_intr,
scport, &scport->vcsp_invq, "%s-port%d in",
device_get_nameunit(dev), i);
VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL,
NULL, &scport->vcsp_outvq, "%s-port%d out",
device_get_nameunit(dev), i);
portidx++;
}
error = virtio_alloc_virtqueues(dev, 0, nvqs, info);
free(info, M_TEMP);
return (error);
}
static void
vtcon_determine_max_ports(struct vtcon_softc *sc,
struct virtio_console_config *concfg)
{
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) {
sc->vtcon_max_ports =
min(concfg->max_nr_ports, VTCON_MAX_PORTS);
if (sc->vtcon_max_ports == 0)
sc->vtcon_max_ports = 1;
} else
sc->vtcon_max_ports = 1;
}
static void
vtcon_destroy_ports(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
struct virtqueue *vq;
int i;
if (sc->vtcon_ports == NULL)
return;
VTCON_LOCK(sc);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port != NULL) {
scport->vcsp_port = NULL;
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_teardown(port);
VTCON_LOCK(sc);
}
vq = scport->vcsp_invq;
if (vq != NULL)
vtcon_port_drain_bufs(vq);
}
VTCON_UNLOCK(sc);
free(sc->vtcon_ports, M_DEVBUF);
sc->vtcon_ports = NULL;
}
static void
vtcon_stop(struct vtcon_softc *sc)
{
vtcon_disable_interrupts(sc);
virtio_stop(sc->vtcon_dev);
}
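/*
 * Give a control buffer to the control receive virtqueue, where the host
 * fills it in with the next port event.
 */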
static int
vtcon_ctrl_event_enqueue(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = sc->vtcon_ctrl_rxvq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, control, VTCON_CTRL_BUFSZ);
KASSERT(error == 0, ("%s: error %d adding control to sglist",
__func__, error));
return (virtqueue_enqueue(vq, control, &sg, 0, sg.sg_nseg));
}
static int
vtcon_ctrl_event_create(struct vtcon_softc *sc)
{
struct virtio_console_control *control;
int error;
control = malloc(VTCON_CTRL_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT);
if (control == NULL)
return (ENOMEM);
error = vtcon_ctrl_event_enqueue(sc, control);
if (error)
free(control, M_DEVBUF);
return (error);
}
static void
vtcon_ctrl_event_requeue(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
int error;
bzero(control, VTCON_CTRL_BUFSZ);
error = vtcon_ctrl_event_enqueue(sc, control);
KASSERT(error == 0,
("%s: cannot requeue control buffer %d", __func__, error));
}
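/*
 * Fill the control receive virtqueue with event buffers, notifying the
 * host once any have been enqueued.
 */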
static int
vtcon_ctrl_event_populate(struct vtcon_softc *sc)
{
struct virtqueue *vq;
int nbufs, error;
vq = sc->vtcon_ctrl_rxvq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtcon_ctrl_event_create(sc);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
error = 0;
}
return (error);
}
static void
vtcon_ctrl_event_drain(struct vtcon_softc *sc)
{
struct virtio_console_control *control;
struct virtqueue *vq;
int last;
vq = sc->vtcon_ctrl_rxvq;
last = 0;
if (vq == NULL)
return;
VTCON_LOCK(sc);
while ((control = virtqueue_drain(vq, &last)) != NULL)
free(control, M_DEVBUF);
VTCON_UNLOCK(sc);
}
static int
vtcon_ctrl_init(struct vtcon_softc *sc)
{
int error;
error = vtcon_ctrl_event_populate(sc);
return (error);
}
static void
vtcon_ctrl_deinit(struct vtcon_softc *sc)
{
vtcon_ctrl_event_drain(sc);
}
static void
vtcon_ctrl_port_add_event(struct vtcon_softc *sc, int id)
{
device_t dev;
int error;
dev = sc->vtcon_dev;
/* Ports are only created here, from the single control-event thread. */
if (sc->vtcon_ports[id].vcsp_port != NULL) {
device_printf(dev, "%s: adding port %d, but already exists\n",
__func__, id);
return;
}
error = vtcon_port_create(sc, id);
if (error) {
device_printf(dev, "%s: cannot create port %d: %d\n",
__func__, id, error);
vtcon_ctrl_send_control(sc, id, VIRTIO_CONSOLE_PORT_READY, 0);
return;
}
}
static void
vtcon_ctrl_port_remove_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: remove port %d, but does not exist\n",
__func__, id);
return;
}
scport->vcsp_port = NULL;
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_teardown(port);
}
static void
vtcon_ctrl_port_console_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: console port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
port->vtcport_flags |= VTCON_PORT_FLAG_CONSOLE;
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_port_open_event(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: open port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_enable_intr(port);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_port_name_event(struct vtcon_softc *sc, int id, const char *name,
size_t len)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
/*
* The VirtIO specification says the NUL terminator is not included in
* the length, but QEMU includes it. Adjust the length if needed.
*/
if (name == NULL || len == 0)
return;
if (name[len - 1] == '\0') {
len--;
if (len == 0)
return;
}
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
device_printf(dev, "%s: name port %d, but does not exist\n",
__func__, id);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_dev_alias(port, name, len);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_ctrl_process_event(struct vtcon_softc *sc,
struct virtio_console_control *control, void *data, size_t data_len)
{
device_t dev;
uint32_t id;
uint16_t event;
dev = sc->vtcon_dev;
id = vtcon_htog32(sc, control->id);
event = vtcon_htog16(sc, control->event);
if (id >= sc->vtcon_max_ports) {
device_printf(dev, "%s: event %d invalid port ID %d\n",
__func__, event, id);
return;
}
switch (event) {
case VIRTIO_CONSOLE_PORT_ADD:
vtcon_ctrl_port_add_event(sc, id);
break;
case VIRTIO_CONSOLE_PORT_REMOVE:
vtcon_ctrl_port_remove_event(sc, id);
break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
vtcon_ctrl_port_console_event(sc, id);
break;
case VIRTIO_CONSOLE_RESIZE:
break;
case VIRTIO_CONSOLE_PORT_OPEN:
vtcon_ctrl_port_open_event(sc, id);
break;
case VIRTIO_CONSOLE_PORT_NAME:
vtcon_ctrl_port_name_event(sc, id, (const char *)data, data_len);
break;
}
}
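/*
 * Taskqueue callback: dequeue control events, process each one with the
 * softc lock dropped, and requeue the buffer afterwards.
 */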
static void
vtcon_ctrl_task_cb(void *xsc, int pending)
{
struct vtcon_softc *sc;
struct virtqueue *vq;
struct virtio_console_control *control;
void *data;
size_t data_len;
int detached;
uint32_t len;
sc = xsc;
vq = sc->vtcon_ctrl_rxvq;
VTCON_LOCK(sc);
while ((detached = (sc->vtcon_flags & VTCON_FLAG_DETACHED)) == 0) {
control = virtqueue_dequeue(vq, &len);
if (control == NULL)
break;
if (len > sizeof(struct virtio_console_control)) {
data = (void *) &control[1];
data_len = len - sizeof(struct virtio_console_control);
} else {
data = NULL;
data_len = 0;
}
VTCON_UNLOCK(sc);
vtcon_ctrl_process_event(sc, control, data, data_len);
VTCON_LOCK(sc);
vtcon_ctrl_event_requeue(sc, control);
}
if (!detached) {
virtqueue_notify(vq);
if (virtqueue_enable_intr(vq) != 0)
taskqueue_enqueue(taskqueue_thread,
&sc->vtcon_ctrl_task);
}
VTCON_UNLOCK(sc);
}
static void
vtcon_ctrl_event_intr(void *xsc)
{
struct vtcon_softc *sc;
sc = xsc;
/*
* Only some events require us to potentially block, but it is
* easier to just defer all event handling to the taskqueue.
*/
taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task);
}
static void
vtcon_ctrl_poll(struct vtcon_softc *sc,
struct virtio_console_control *control)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = sc->vtcon_ctrl_txvq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, control,
sizeof(struct virtio_console_control));
KASSERT(error == 0, ("%s: error %d adding control to sglist",
__func__, error));
/*
* We cannot use the softc lock to serialize access to this
* virtqueue since this is called from the tty layer with the
* port lock held. Acquiring the softc lock here would violate
* our lock ordering.
*/
VTCON_CTRL_TX_LOCK(sc);
KASSERT(virtqueue_empty(vq),
("%s: virtqueue is not emtpy", __func__));
error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
VTCON_CTRL_TX_UNLOCK(sc);
}
static void
vtcon_ctrl_send_control(struct vtcon_softc *sc, uint32_t portid,
uint16_t event, uint16_t value)
{
struct virtio_console_control control;
if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
return;
control.id = vtcon_gtoh32(sc, portid);
control.event = vtcon_gtoh16(sc, event);
control.value = vtcon_gtoh16(sc, value);
vtcon_ctrl_poll(sc, &control);
}
static int
vtcon_port_enqueue_buf(struct vtcon_port *port, void *buf, size_t len)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = port->vtcport_invq;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, len);
KASSERT(error == 0,
("%s: error %d adding buffer to sglist", __func__, error));
error = virtqueue_enqueue(vq, buf, &sg, 0, sg.sg_nseg);
return (error);
}
static int
vtcon_port_create_buf(struct vtcon_port *port)
{
void *buf;
int error;
buf = malloc(VTCON_BULK_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT);
if (buf == NULL)
return (ENOMEM);
error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ);
if (error)
free(buf, M_DEVBUF);
return (error);
}
static void
vtcon_port_requeue_buf(struct vtcon_port *port, void *buf)
{
int error;
error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ);
KASSERT(error == 0,
("%s: cannot requeue input buffer %d", __func__, error));
}
static int
vtcon_port_populate(struct vtcon_port *port)
{
struct virtqueue *vq;
int nbufs, error;
vq = port->vtcport_invq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtcon_port_create_buf(port);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
error = 0;
}
return (error);
}
static void
vtcon_port_destroy(struct vtcon_port *port)
{
port->vtcport_sc = NULL;
port->vtcport_scport = NULL;
port->vtcport_invq = NULL;
port->vtcport_outvq = NULL;
port->vtcport_id = -1;
mtx_destroy(&port->vtcport_mtx);
free(port, M_DEVBUF);
}
static int
vtcon_port_init_vqs(struct vtcon_port *port)
{
struct vtcon_softc_port *scport;
int error;
scport = port->vtcport_scport;
port->vtcport_invq = scport->vcsp_invq;
port->vtcport_outvq = scport->vcsp_outvq;
/*
* Free any data left over from when this virtqueue was in use by a
* prior port. We have not yet notified the host that the port is
* ready, so assume nothing in the virtqueue can be for us.
*/
vtcon_port_drain(port);
KASSERT(virtqueue_empty(port->vtcport_invq),
("%s: in virtqueue is not empty", __func__));
KASSERT(virtqueue_empty(port->vtcport_outvq),
("%s: out virtqueue is not empty", __func__));
error = vtcon_port_populate(port);
if (error)
return (error);
return (0);
}
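/*
 * Allocate a port, attach it to its virtqueue pair and a TTY, and report
 * PORT_READY to the host before creating the /dev node.
 */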
static int
vtcon_port_create(struct vtcon_softc *sc, int id)
{
device_t dev;
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int error;
dev = sc->vtcon_dev;
scport = &sc->vtcon_ports[id];
VTCON_ASSERT_VALID_PORTID(sc, id);
MPASS(scport->vcsp_port == NULL);
port = malloc(sizeof(struct vtcon_port), M_DEVBUF, M_NOWAIT | M_ZERO);
if (port == NULL)
return (ENOMEM);
port->vtcport_sc = sc;
port->vtcport_scport = scport;
port->vtcport_id = id;
mtx_init(&port->vtcport_mtx, "vtcpmtx", NULL, MTX_DEF);
port->vtcport_tty = tty_alloc_mutex(&vtcon_tty_class, port,
&port->vtcport_mtx);
error = vtcon_port_init_vqs(port);
if (error) {
VTCON_PORT_LOCK(port);
vtcon_port_teardown(port);
return (error);
}
VTCON_LOCK(sc);
VTCON_PORT_LOCK(port);
scport->vcsp_port = port;
vtcon_port_enable_intr(port);
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_READY, 1);
VTCON_PORT_UNLOCK(port);
VTCON_UNLOCK(sc);
tty_makedev(port->vtcport_tty, NULL, "%s%r.%r", VTCON_TTY_PREFIX,
device_get_unit(dev), id);
return (0);
}
static void
vtcon_port_dev_alias(struct vtcon_port *port, const char *name, size_t len)
{
struct vtcon_softc *sc;
struct cdev *pdev;
struct tty *tp;
int i, error;
sc = port->vtcport_sc;
tp = port->vtcport_tty;
if (port->vtcport_flags & VTCON_PORT_FLAG_ALIAS)
return;
/* Port name is UTF-8, but we can only handle ASCII. */
for (i = 0; i < len; i++) {
if (!isascii(name[i]))
return;
}
/*
* The port name may not conform to devfs naming requirements, so the
* MAKEDEV_CHECKNAME flag must be specified. tty_makealias() cannot pass
* that flag, so use make_dev_alias_p() directly.
*/
error = make_dev_alias_p(MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME, &pdev,
tp->t_dev, "%s/%*s", VTCON_TTY_ALIAS_PREFIX, (int)len, name);
if (error) {
device_printf(sc->vtcon_dev,
"%s: cannot make dev alias (%s/%*s) error %d\n", __func__,
VTCON_TTY_ALIAS_PREFIX, (int)len, name, error);
} else
port->vtcport_flags |= VTCON_PORT_FLAG_ALIAS;
}
static void
vtcon_port_drain_bufs(struct virtqueue *vq)
{
void *buf;
int last;
last = 0;
while ((buf = virtqueue_drain(vq, &last)) != NULL)
free(buf, M_DEVBUF);
}
static void
vtcon_port_drain(struct vtcon_port *port)
{
vtcon_port_drain_bufs(port->vtcport_invq);
}
static void
vtcon_port_teardown(struct vtcon_port *port)
{
struct tty *tp;
tp = port->vtcport_tty;
port->vtcport_flags |= VTCON_PORT_FLAG_GONE;
if (tp != NULL) {
atomic_add_int(&vtcon_pending_free, 1);
tty_rel_gone(tp);
} else
vtcon_port_destroy(port);
}
static void
vtcon_port_change_size(struct vtcon_port *port, uint16_t cols, uint16_t rows)
{
struct tty *tp;
struct winsize sz;
tp = port->vtcport_tty;
if (tp == NULL)
return;
bzero(&sz, sizeof(struct winsize));
sz.ws_col = cols;
sz.ws_row = rows;
tty_set_winsize(tp, &sz);
}
static void
vtcon_port_update_console_size(struct vtcon_softc *sc)
{
struct vtcon_port *port;
struct vtcon_softc_port *scport;
uint16_t cols, rows;
vtcon_get_console_size(sc, &cols, &rows);
/*
* For now, assume the first (only) port is the console. Note
* QEMU does not implement this feature yet.
*/
scport = &sc->vtcon_ports[0];
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port != NULL) {
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
vtcon_port_change_size(port, cols, rows);
VTCON_PORT_UNLOCK(port);
} else
VTCON_UNLOCK(sc);
}
static void
vtcon_port_enable_intr(struct vtcon_port *port)
{
/*
* NOTE: The out virtqueue is always polled, so its interrupt is
* kept disabled.
*/
virtqueue_enable_intr(port->vtcport_invq);
}
static void
vtcon_port_disable_intr(struct vtcon_port *port)
{
if (port->vtcport_invq != NULL)
virtqueue_disable_intr(port->vtcport_invq);
if (port->vtcport_outvq != NULL)
virtqueue_disable_intr(port->vtcport_outvq);
}
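/*
 * Feed received characters to the TTY layer, checking console ports for
 * the KDB alternate break sequence, and requeue each drained buffer.
 */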
static void
vtcon_port_in(struct vtcon_port *port)
{
struct virtqueue *vq;
struct tty *tp;
char *buf;
uint32_t len;
int i, deq;
tp = port->vtcport_tty;
vq = port->vtcport_invq;
again:
deq = 0;
while ((buf = virtqueue_dequeue(vq, &len)) != NULL) {
for (i = 0; i < len; i++) {
#if defined(KDB)
if (port->vtcport_flags & VTCON_PORT_FLAG_CONSOLE)
kdb_alt_break(buf[i],
&port->vtcport_alt_break_state);
#endif
ttydisc_rint(tp, buf[i], 0);
}
vtcon_port_requeue_buf(port, buf);
deq++;
}
ttydisc_rint_done(tp);
if (deq > 0)
virtqueue_notify(vq);
if (virtqueue_enable_intr(vq) != 0)
goto again;
}
static void
vtcon_port_intr(void *scportx)
{
struct vtcon_softc_port *scport;
struct vtcon_softc *sc;
struct vtcon_port *port;
scport = scportx;
sc = scport->vcsp_sc;
VTCON_LOCK(sc);
port = scport->vcsp_port;
if (port == NULL) {
VTCON_UNLOCK(sc);
return;
}
VTCON_PORT_LOCK(port);
VTCON_UNLOCK(sc);
if ((port->vtcport_flags & VTCON_PORT_FLAG_GONE) == 0)
vtcon_port_in(port);
VTCON_PORT_UNLOCK(port);
}
static void
vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize)
{
struct sglist_seg segs[2];
struct sglist sg;
struct virtqueue *vq;
int error;
vq = port->vtcport_outvq;
KASSERT(virtqueue_empty(vq),
("%s: port %p out virtqueue not emtpy", __func__, port));
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, bufsize);
KASSERT(error == 0, ("%s: error %d adding buffer to sglist",
__func__, error));
error = virtqueue_enqueue(vq, buf, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
}
static void
vtcon_port_submit_event(struct vtcon_port *port, uint16_t event,
uint16_t value)
{
struct vtcon_softc *sc;
sc = port->vtcport_sc;
vtcon_ctrl_send_control(sc, port->vtcport_id, event, value);
}
static int
vtcon_tty_open(struct tty *tp)
{
struct vtcon_port *port;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return (ENXIO);
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
return (0);
}
static void
vtcon_tty_close(struct tty *tp)
{
struct vtcon_port *port;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return;
vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
}
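/*
 * Pull pending output from the TTY and transmit it synchronously through
 * the polled out virtqueue.
 */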
static void
vtcon_tty_outwakeup(struct tty *tp)
{
struct vtcon_port *port;
char buf[VTCON_BULK_BUFSZ];
int len;
port = tty_softc(tp);
if (port->vtcport_flags & VTCON_PORT_FLAG_GONE)
return;
while ((len = ttydisc_getc(tp, buf, sizeof(buf))) != 0)
vtcon_port_out(port, buf, len);
}
static void
vtcon_tty_free(void *xport)
{
struct vtcon_port *port;
port = xport;
vtcon_port_destroy(port);
atomic_subtract_int(&vtcon_pending_free, 1);
}
static void
vtcon_get_console_size(struct vtcon_softc *sc, uint16_t *cols, uint16_t *rows)
{
struct virtio_console_config concfg;
KASSERT(sc->vtcon_flags & VTCON_FLAG_SIZE,
("%s: size feature not negotiated", __func__));
vtcon_read_config(sc, &concfg);
*cols = concfg.cols;
*rows = concfg.rows;
}
static void
vtcon_enable_interrupts(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int i;
VTCON_LOCK(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
virtqueue_enable_intr(sc->vtcon_ctrl_rxvq);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port == NULL)
continue;
VTCON_PORT_LOCK(port);
vtcon_port_enable_intr(port);
VTCON_PORT_UNLOCK(port);
}
VTCON_UNLOCK(sc);
}
static void
vtcon_disable_interrupts(struct vtcon_softc *sc)
{
struct vtcon_softc_port *scport;
struct vtcon_port *port;
int i;
VTCON_LOCK_ASSERT(sc);
if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT)
virtqueue_disable_intr(sc->vtcon_ctrl_rxvq);
for (i = 0; i < sc->vtcon_max_ports; i++) {
scport = &sc->vtcon_ports[i];
port = scport->vcsp_port;
if (port == NULL)
continue;
VTCON_PORT_LOCK(port);
vtcon_port_disable_intr(port);
VTCON_PORT_UNLOCK(port);
}
}
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index ed3065b61283..8d0770f5ac2d 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -1,4421 +1,4430 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO network devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include <vm/uma.h>
#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
static int vtnet_modevent(module_t, int, void *);
static int vtnet_probe(device_t);
static int vtnet_attach(device_t);
static int vtnet_detach(device_t);
static int vtnet_suspend(device_t);
static int vtnet_resume(device_t);
static int vtnet_shutdown(device_t);
static int vtnet_attach_completed(device_t);
static int vtnet_config_change(device_t);
-static void vtnet_negotiate_features(struct vtnet_softc *);
-static void vtnet_setup_features(struct vtnet_softc *);
+static int vtnet_negotiate_features(struct vtnet_softc *);
+static int vtnet_setup_features(struct vtnet_softc *);
static int vtnet_init_rxq(struct vtnet_softc *, int);
static int vtnet_init_txq(struct vtnet_softc *, int);
static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void vtnet_free_rxtx_queues(struct vtnet_softc *);
static int vtnet_alloc_rx_filters(struct vtnet_softc *);
static void vtnet_free_rx_filters(struct vtnet_softc *);
static int vtnet_alloc_virtqueues(struct vtnet_softc *);
static int vtnet_alloc_interface(struct vtnet_softc *);
static int vtnet_setup_interface(struct vtnet_softc *);
static int vtnet_ioctl_mtu(struct vtnet_softc *, int);
static int vtnet_ioctl_ifflags(struct vtnet_softc *);
static int vtnet_ioctl_multi(struct vtnet_softc *);
static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t vtnet_get_counter(struct ifnet *, ift_counter);
static int vtnet_rxq_populate(struct vtnet_rxq *);
static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
struct mbuf *, int);
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
uint16_t, int, struct virtio_net_hdr *);
static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
uint16_t, int, struct virtio_net_hdr *);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_rxq_eof(struct vtnet_rxq *);
static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
static void vtnet_rx_vq_intr(void *);
static void vtnet_rxq_tq_intr(void *, int);
static int vtnet_txq_intr_threshold(struct vtnet_txq *);
static int vtnet_txq_below_threshold(struct vtnet_txq *);
static int vtnet_txq_notify(struct vtnet_txq *);
static void vtnet_txq_free_mbufs(struct vtnet_txq *);
static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
int *, int *, int *);
static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
int, struct virtio_net_hdr *);
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
struct vtnet_tx_header *);
static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void vtnet_start(struct ifnet *);
#else
static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void vtnet_txq_tq_deferred(void *, int);
#endif
static void vtnet_txq_start(struct vtnet_txq *);
static void vtnet_txq_tq_intr(void *, int);
static int vtnet_txq_eof(struct vtnet_txq *);
static void vtnet_tx_vq_intr(void *);
static void vtnet_tx_start_all(struct vtnet_softc *);
#ifndef VTNET_LEGACY_TX
static void vtnet_qflush(struct ifnet *);
#endif
static int vtnet_watchdog(struct vtnet_txq *);
static void vtnet_accum_stats(struct vtnet_softc *,
struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void vtnet_tick(void *);
static void vtnet_start_taskqueues(struct vtnet_softc *);
static void vtnet_free_taskqueues(struct vtnet_softc *);
static void vtnet_drain_taskqueues(struct vtnet_softc *);
static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void vtnet_stop_rendezvous(struct vtnet_softc *);
static void vtnet_stop(struct vtnet_softc *);
static int vtnet_virtio_reinit(struct vtnet_softc *);
static void vtnet_init_rx_filters(struct vtnet_softc *);
static int vtnet_init_rx_queues(struct vtnet_softc *);
static int vtnet_init_tx_queues(struct vtnet_softc *);
static int vtnet_init_rxtx_queues(struct vtnet_softc *);
static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
static void vtnet_update_rx_offloads(struct vtnet_softc *);
static int vtnet_reinit(struct vtnet_softc *);
static void vtnet_init_locked(struct vtnet_softc *, int);
static void vtnet_init(void *);
static void vtnet_free_ctrl_vq(struct vtnet_softc *);
static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
struct sglist *, int, int);
static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
static int vtnet_set_promisc(struct vtnet_softc *, int);
static int vtnet_set_allmulti(struct vtnet_softc *, int);
static void vtnet_rx_filter(struct vtnet_softc *);
static void vtnet_rx_filter_mac(struct vtnet_softc *);
static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_rx_filter_vlan(struct vtnet_softc *);
static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
static void vtnet_update_speed_duplex(struct vtnet_softc *);
static int vtnet_is_link_up(struct vtnet_softc *);
static void vtnet_update_link_status(struct vtnet_softc *);
static int vtnet_ifmedia_upd(struct ifnet *);
static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void vtnet_get_macaddr(struct vtnet_softc *);
static void vtnet_set_macaddr(struct vtnet_softc *);
static void vtnet_attached_set_macaddr(struct vtnet_softc *);
static void vtnet_vlan_tag_remove(struct mbuf *);
static void vtnet_set_rx_process_limit(struct vtnet_softc *);
static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_rxq *);
static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_txq *);
static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void vtnet_load_tunables(struct vtnet_softc *);
static void vtnet_setup_sysctl(struct vtnet_softc *);
static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int vtnet_txq_enable_intr(struct vtnet_txq *);
static void vtnet_txq_disable_intr(struct vtnet_txq *);
static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void vtnet_enable_interrupts(struct vtnet_softc *);
static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void vtnet_disable_interrupts(struct vtnet_softc *);
static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
DEBUGNET_DEFINE(vtnet);
#define vtnet_htog16(_sc, _val) virtio_htog16(vtnet_modern(_sc), _val)
#define vtnet_htog32(_sc, _val) virtio_htog32(vtnet_modern(_sc), _val)
#define vtnet_htog64(_sc, _val) virtio_htog64(vtnet_modern(_sc), _val)
#define vtnet_gtoh16(_sc, _val) virtio_gtoh16(vtnet_modern(_sc), _val)
#define vtnet_gtoh32(_sc, _val) virtio_gtoh32(vtnet_modern(_sc), _val)
#define vtnet_gtoh64(_sc, _val) virtio_gtoh64(vtnet_modern(_sc), _val)
/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"VirtIO Net driver parameters");
static int vtnet_csum_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
&vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_fixup_needs_csum = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
&vtnet_fixup_needs_csum, 0,
"Calculate valid checksum for NEEDS_CSUM packets");
static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
&vtnet_tso_disable, 0, "Disables TSO");
static int vtnet_lro_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
&vtnet_lro_disable, 0, "Disables hardware LRO");
static int vtnet_mq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
&vtnet_mq_disable, 0, "Disables multiqueue support");
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
&vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
static int vtnet_tso_maxlen = IP_MAXPACKET;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
&vtnet_tso_maxlen, 0, "TSO burst limit");
static int vtnet_rx_process_limit = 1024;
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&vtnet_rx_process_limit, 0,
"Number of RX segments processed in one pass");
static int vtnet_lro_entry_count = 128;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
&vtnet_lro_entry_count, 0, "Software LRO entry count");
/* A non-zero value enables sorted LRO and sets the depth of its mbuf queue. */
static int vtnet_lro_mbufq_depth = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
&vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
{ VIRTIO_NET_F_CSUM, "TxChecksum" },
{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" },
{ VIRTIO_NET_F_MAC, "MAC" },
{ VIRTIO_NET_F_GSO, "TxGSO" },
{ VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" },
{ VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" },
{ VIRTIO_NET_F_GUEST_ECN, "RxLROECN" },
{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
{ VIRTIO_NET_F_STATUS, "Status" },
{ VIRTIO_NET_F_CTRL_VQ, "CtrlVq" },
{ VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" },
{ VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" },
{ VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" },
{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
{ VIRTIO_NET_F_MQ, "Multiqueue" },
{ VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" },
{ VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" },
{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtnet_probe),
DEVMETHOD(device_attach, vtnet_attach),
DEVMETHOD(device_detach, vtnet_detach),
DEVMETHOD(device_suspend, vtnet_suspend),
DEVMETHOD(device_resume, vtnet_resume),
DEVMETHOD(device_shutdown, vtnet_shutdown),
/* VirtIO methods. */
DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
DEVMETHOD(virtio_config_change, vtnet_config_change),
DEVMETHOD_END
};
#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif
static driver_t vtnet_driver = {
.name = "vtnet",
.methods = vtnet_methods,
.size = sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;
DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
vtnet_modevent, 0);
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif
VIRTIO_SIMPLE_PNPTABLE(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, vtnet);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, vtnet);
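/*
 * The TX header zone is shared by all vtnet instances, so create it on
 * the first load and destroy it only on the final unload.
 */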
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
int error = 0;
static int loaded = 0;
switch (type) {
case MOD_LOAD:
if (loaded++ == 0) {
vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
sizeof(struct vtnet_tx_header),
NULL, NULL, NULL, NULL, 0, 0);
#ifdef DEBUGNET
/*
* We allocate from this zone in the transmit path, so ensure that at
* least one item per in-flight header is available.
* XXX: Add a separate zone like we do for mbufs? Otherwise we may
* allocate buckets.
*/
uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
#endif
}
break;
case MOD_QUIESCE:
if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
error = EBUSY;
break;
case MOD_UNLOAD:
if (--loaded == 0) {
uma_zdestroy(vtnet_tx_header_zone);
vtnet_tx_header_zone = NULL;
}
break;
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static int
vtnet_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
}
static int
vtnet_attach(device_t dev)
{
struct vtnet_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtnet_dev = dev;
-
virtio_set_feature_desc(dev, vtnet_feature_desc);
VTNET_CORE_LOCK_INIT(sc);
callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
vtnet_load_tunables(sc);
error = vtnet_alloc_interface(sc);
if (error) {
device_printf(dev, "cannot allocate interface\n");
goto fail;
}
vtnet_setup_sysctl(sc);
- vtnet_setup_features(sc);
+
+ error = vtnet_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
error = vtnet_alloc_rx_filters(sc);
if (error) {
device_printf(dev, "cannot allocate Rx filters\n");
goto fail;
}
error = vtnet_alloc_rxtx_queues(sc);
if (error) {
device_printf(dev, "cannot allocate queues\n");
goto fail;
}
error = vtnet_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
error = vtnet_setup_interface(sc);
if (error) {
device_printf(dev, "cannot setup interface\n");
goto fail;
}
error = virtio_setup_intr(dev, INTR_TYPE_NET);
if (error) {
device_printf(dev, "cannot setup interrupts\n");
ether_ifdetach(sc->vtnet_ifp);
goto fail;
}
#ifdef DEV_NETMAP
vtnet_netmap_attach(sc);
#endif
vtnet_start_taskqueues(sc);
fail:
if (error)
vtnet_detach(dev);
return (error);
}
static int
vtnet_detach(device_t dev)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->vtnet_ifp;
if (device_is_attached(dev)) {
VTNET_CORE_LOCK(sc);
vtnet_stop(sc);
VTNET_CORE_UNLOCK(sc);
callout_drain(&sc->vtnet_tick_ch);
vtnet_drain_taskqueues(sc);
ether_ifdetach(ifp);
}
#ifdef DEV_NETMAP
netmap_detach(ifp);
#endif
vtnet_free_taskqueues(sc);
if (sc->vtnet_vlan_attach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
sc->vtnet_vlan_attach = NULL;
}
if (sc->vtnet_vlan_detach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
sc->vtnet_vlan_detach = NULL;
}
ifmedia_removeall(&sc->vtnet_media);
if (ifp != NULL) {
if_free(ifp);
sc->vtnet_ifp = NULL;
}
vtnet_free_rxtx_queues(sc);
vtnet_free_rx_filters(sc);
if (sc->vtnet_ctrl_vq != NULL)
vtnet_free_ctrl_vq(sc);
VTNET_CORE_LOCK_DESTROY(sc);
return (0);
}
static int
vtnet_suspend(device_t dev)
{
struct vtnet_softc *sc;
sc = device_get_softc(dev);
VTNET_CORE_LOCK(sc);
vtnet_stop(sc);
sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
VTNET_CORE_UNLOCK(sc);
return (0);
}
static int
vtnet_resume(device_t dev)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK(sc);
if (ifp->if_flags & IFF_UP)
vtnet_init_locked(sc, 0);
sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
VTNET_CORE_UNLOCK(sc);
return (0);
}
static int
vtnet_shutdown(device_t dev)
{
/*
* Suspend already does all of what we need to
* do here; we just never expect to be resumed.
*/
return (vtnet_suspend(dev));
}
static int
vtnet_attach_completed(device_t dev)
{
struct vtnet_softc *sc;
sc = device_get_softc(dev);
VTNET_CORE_LOCK(sc);
vtnet_attached_set_macaddr(sc);
VTNET_CORE_UNLOCK(sc);
return (0);
}
static int
vtnet_config_change(device_t dev)
{
struct vtnet_softc *sc;
sc = device_get_softc(dev);
VTNET_CORE_LOCK(sc);
vtnet_update_link_status(sc);
if (sc->vtnet_link_active != 0)
vtnet_tx_start_all(sc);
VTNET_CORE_UNLOCK(sc);
return (0);
}
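/*
 * Negotiate features with the host, renegotiating a reduced set whenever
 * a reported config value (MTU, virtqueue pairs) is invalid or LRO lacks
 * both mergeable buffers and indirect descriptors.
 */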
-static void
+static int
vtnet_negotiate_features(struct vtnet_softc *sc)
{
device_t dev;
uint64_t features, negotiated_features;
int no_csum;
dev = sc->vtnet_dev;
features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
VTNET_LEGACY_FEATURES;
/*
* TSO and LRO are only available when their corresponding checksum
* offload feature is also negotiated.
*/
no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
if (no_csum)
features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
features &= ~VTNET_TSO_FEATURES;
if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
features &= ~VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
features &= ~VIRTIO_NET_F_MQ;
#else
features &= ~VIRTIO_NET_F_MQ;
#endif
negotiated_features = virtio_negotiate_features(dev, features);
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
uint16_t mtu;
mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
device_printf(dev, "Invalid MTU value: %d. "
"MTU feature disabled.\n", mtu);
features &= ~VIRTIO_NET_F_MTU;
negotiated_features =
virtio_negotiate_features(dev, features);
}
}
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
uint16_t npairs;
npairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, max_virtqueue_pairs));
if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
device_printf(dev, "Invalid max_virtqueue_pairs value: "
"%d. Multiqueue feature disabled.\n", npairs);
features &= ~VIRTIO_NET_F_MQ;
negotiated_features =
virtio_negotiate_features(dev, features);
}
}
if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
/*
* LRO without mergeable buffers requires special care. This
* is not ideal because every receive buffer must be large
* enough to hold the maximum TCP packet, the Ethernet header,
* and the virtio-net header. This requires up to 34 descriptors with
* MCLBYTES clusters. If we do not have indirect descriptors,
* LRO is disabled since the virtqueue will not contain very
* many receive buffers.
*/
if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
device_printf(dev,
"Host LRO disabled since both mergeable buffers "
"and indirect descriptors were not negotiated\n");
features &= ~VTNET_LRO_FEATURES;
negotiated_features =
virtio_negotiate_features(dev, features);
} else
sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
}
sc->vtnet_features = negotiated_features;
sc->vtnet_negotiated_features = negotiated_features;
- virtio_finalize_features(dev);
+ return (virtio_finalize_features(dev));
}
-static void
+static int
vtnet_setup_features(struct vtnet_softc *sc)
{
device_t dev;
+ int error;
dev = sc->vtnet_dev;
- vtnet_negotiate_features(sc);
+ error = vtnet_negotiate_features(sc);
+ if (error)
+ return (error);
if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
sc->vtnet_flags |= VTNET_FLAG_MODERN;
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
/* This feature should always be negotiated. */
sc->vtnet_flags |= VTNET_FLAG_MAC;
}
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
} else
sc->vtnet_max_mtu = VTNET_MAX_MTU;
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else if (vtnet_modern(sc)) {
/* This is identical to the mergeable header. */
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
} else
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
else
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
/*
* Favor "hardware" LRO if negotiated, but support software LRO as
* a fallback; there is usually little benefit, and sometimes harm,
* in enabling both.
*/
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
else
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
sc->vtnet_req_vq_pairs = 1;
sc->vtnet_max_vq_pairs = 1;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config,
max_virtqueue_pairs));
}
}
if (sc->vtnet_max_vq_pairs > 1) {
int req;
/*
* Limit the maximum number of requested queue pairs to the
* number of CPUs and the configured maximum.
*/
req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
if (req < 0)
req = 1;
if (req == 0)
req = mp_ncpus;
if (req > sc->vtnet_max_vq_pairs)
req = sc->vtnet_max_vq_pairs;
if (req > mp_ncpus)
req = mp_ncpus;
if (req > 1) {
sc->vtnet_req_vq_pairs = req;
sc->vtnet_flags |= VTNET_FLAG_MQ;
}
}
+
+ return (0);
}
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
struct vtnet_rxq *rxq;
rxq = &sc->vtnet_rxqs[id];
snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
rxq->vtnrx_sc = sc;
rxq->vtnrx_id = id;
rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
if (rxq->vtnrx_sg == NULL)
return (ENOMEM);
#if defined(INET) || defined(INET6)
if (vtnet_software_lro(sc)) {
if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
return (ENOMEM);
}
#endif
NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
taskqueue_thread_enqueue, &rxq->vtnrx_tq);
return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
struct vtnet_txq *txq;
txq = &sc->vtnet_txqs[id];
snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
txq->vtntx_sc = sc;
txq->vtntx_id = id;
txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
if (txq->vtntx_sg == NULL)
return (ENOMEM);
#ifndef VTNET_LEGACY_TX
txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &txq->vtntx_mtx);
if (txq->vtntx_br == NULL)
return (ENOMEM);
TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
taskqueue_thread_enqueue, &txq->vtntx_tq);
if (txq->vtntx_tq == NULL)
return (ENOMEM);
return (0);
}
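/*
 * Allocate and initialize queue state for the maximum number of queue
 * pairs the device supports, even if fewer pairs are ultimately used.
 */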
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
int i, npairs, error;
npairs = sc->vtnet_max_vq_pairs;
sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
return (ENOMEM);
for (i = 0; i < npairs; i++) {
error = vtnet_init_rxq(sc, i);
if (error)
return (error);
error = vtnet_init_txq(sc, i);
if (error)
return (error);
}
vtnet_set_rx_process_limit(sc);
vtnet_setup_queue_sysctl(sc);
return (0);
}
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{
rxq->vtnrx_sc = NULL;
rxq->vtnrx_id = -1;
#if defined(INET) || defined(INET6)
tcp_lro_free(&rxq->vtnrx_lro);
#endif
if (rxq->vtnrx_sg != NULL) {
sglist_free(rxq->vtnrx_sg);
rxq->vtnrx_sg = NULL;
}
if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
mtx_destroy(&rxq->vtnrx_mtx);
}
static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{
txq->vtntx_sc = NULL;
txq->vtntx_id = -1;
if (txq->vtntx_sg != NULL) {
sglist_free(txq->vtntx_sg);
txq->vtntx_sg = NULL;
}
#ifndef VTNET_LEGACY_TX
if (txq->vtntx_br != NULL) {
buf_ring_free(txq->vtntx_br, M_DEVBUF);
txq->vtntx_br = NULL;
}
#endif
if (mtx_initialized(&txq->vtntx_mtx) != 0)
mtx_destroy(&txq->vtntx_mtx);
}
static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
int i;
if (sc->vtnet_rxqs != NULL) {
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
free(sc->vtnet_rxqs, M_DEVBUF);
sc->vtnet_rxqs = NULL;
}
if (sc->vtnet_txqs != NULL) {
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_destroy_txq(&sc->vtnet_txqs[i]);
free(sc->vtnet_txqs, M_DEVBUF);
sc->vtnet_txqs = NULL;
}
}
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtnet_mac_filter == NULL)
return (ENOMEM);
}
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtnet_vlan_filter == NULL)
return (ENOMEM);
}
return (0);
}
static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{
if (sc->vtnet_mac_filter != NULL) {
free(sc->vtnet_mac_filter, M_DEVBUF);
sc->vtnet_mac_filter = NULL;
}
if (sc->vtnet_vlan_filter != NULL) {
free(sc->vtnet_vlan_filter, M_DEVBUF);
sc->vtnet_vlan_filter = NULL;
}
}
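/*
 * The virtqueues are laid out as interleaved (rx, tx) pairs, one pair per
 * queue pair, with the control virtqueue last when it was negotiated.
 */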
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
device_t dev;
struct vq_alloc_info *info;
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i, idx, flags, nvqs, error;
dev = sc->vtnet_dev;
flags = 0;
nvqs = sc->vtnet_max_vq_pairs * 2;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
nvqs++;
info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
if (info == NULL)
return (ENOMEM);
for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
/* These queues will not be used, so allocate only minimal resources. */
for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
&sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
}
/*
* TODO: Enable interrupt binding if this is multiqueue. This will
* only matter when per-virtqueue MSIX is available.
*/
if (sc->vtnet_flags & VTNET_FLAG_MQ)
flags |= 0;
error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
free(info, M_TEMP);
return (error);
}
static int
vtnet_alloc_interface(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL)
return (ENOMEM);
sc->vtnet_ifp = ifp;
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
return (0);
}
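/*
* Derive the ifnet capabilities from the negotiated features and attach
* the interface to the network stack.
*/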
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
device_t dev;
struct pfil_head_args pa;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
IFF_KNOWSEPOCH;
ifp->if_baudrate = IF_Gbps(10);
ifp->if_init = vtnet_init;
ifp->if_ioctl = vtnet_ioctl;
ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
ifp->if_transmit = vtnet_txq_mq_start;
ifp->if_qflush = vtnet_qflush;
#else
struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
ifp->if_start = vtnet_start;
IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
IFQ_SET_READY(&ifp->if_snd);
#endif
vtnet_get_macaddr(sc);
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
ifp->if_capabilities |= IFCAP_LINKSTATE;
ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
int gso;
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
ifp->if_capabilities |= IFCAP_TSO4;
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
ifp->if_capabilities |= IFCAP_TSO6;
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
int tso_maxlen;
ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
vtnet_tso_maxlen);
ifp->if_hw_tsomax = tso_maxlen -
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
}
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
ifp->if_capabilities |= IFCAP_RXCSUM;
#ifdef notyet
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
#endif
if (vtnet_tunable_int(sc, "fixup_needs_csum",
vtnet_fixup_needs_csum) != 0)
sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
/* Support either "hardware" or software LRO. */
ifp->if_capabilities |= IFCAP_LRO;
}
if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
/*
* VirtIO does not support VLAN tagging, but we can fake
* it by inserting and removing the 802.1Q header during
* transmit and receive. We are then able to do checksum
* offloading of VLAN frames.
*/
ifp->if_capabilities |=
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
}
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
ifp->if_capabilities |= IFCAP_VLAN_MTU;
/*
* Capabilities after here are not enabled by default.
*/
ifp->if_capenable = ifp->if_capabilities;
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
}
ether_ifattach(ifp, sc->vtnet_hwaddr);
/* Tell the upper layer(s) we support long frames. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
DEBUGNET_SET(ifp, vtnet);
pa.pa_version = PFIL_VERSION;
pa.pa_flags = PFIL_IN;
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = ifp->if_xname;
sc->vtnet_pfil = pfil_head_register(&pa);
return (0);
}
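/*
* Select the receive mbuf cluster size. Mergeable buffers always use
* page-sized clusters and LRO without mergeable buffers chains standard
* clusters; otherwise the size is scaled from the MTU.
*/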
static int
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
{
int framesz;
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
return (MJUMPAGESIZE);
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
return (MCLBYTES);
/*
* Try to scale the receive mbuf cluster size from the MTU. We
* could also use the VQ size to influence the selected size,
* but that would only matter for very small queues.
*/
if (vtnet_modern(sc)) {
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
framesz = sizeof(struct virtio_net_hdr_v1);
} else
framesz = sizeof(struct vtnet_rx_header);
framesz += sizeof(struct ether_vlan_header) + mtu;
if (framesz <= MCLBYTES)
return (MCLBYTES);
else if (framesz <= MJUMPAGESIZE)
return (MJUMPAGESIZE);
else if (framesz <= MJUM9BYTES)
return (MJUM9BYTES);
/* Sane default; avoid 16KB clusters. */
return (MCLBYTES);
}
static int
vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
{
struct ifnet *ifp;
int clustersz;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (ifp->if_mtu == mtu)
return (0);
else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
return (EINVAL);
ifp->if_mtu = mtu;
clustersz = vtnet_rx_cluster_size(sc, mtu);
if (clustersz != sc->vtnet_rx_clustersz &&
ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc, 0);
}
return (0);
}
static int
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
{
struct ifnet *ifp;
int drv_running;
ifp = sc->vtnet_ifp;
drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
VTNET_CORE_LOCK_ASSERT(sc);
if ((ifp->if_flags & IFF_UP) == 0) {
if (drv_running)
vtnet_stop(sc);
goto out;
}
if (!drv_running) {
vtnet_init_locked(sc, 0);
goto out;
}
if ((ifp->if_flags ^ sc->vtnet_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
return (ENOTSUP);
vtnet_rx_filter(sc);
}
out:
sc->vtnet_if_flags = ifp->if_flags;
return (0);
}
static int
vtnet_ioctl_multi(struct vtnet_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
ifp->if_drv_flags & IFF_DRV_RUNNING)
vtnet_rx_filter_mac(sc);
return (0);
}
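/*
* Handle SIOCSIFCAP: toggle the requested capabilities, renegotiating
* with the host or reinitializing the interface when a change affects
* the negotiated receive features.
*/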
static int
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
{
struct ifnet *ifp;
int mask, reinit, update;
ifp = sc->vtnet_ifp;
mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
reinit = update = 0;
VTNET_CORE_LOCK_ASSERT(sc);
if (mask & IFCAP_TXCSUM)
ifp->if_capenable ^= IFCAP_TXCSUM;
if (mask & IFCAP_TXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
if (mask & IFCAP_TSO4)
ifp->if_capenable ^= IFCAP_TSO4;
if (mask & IFCAP_TSO6)
ifp->if_capenable ^= IFCAP_TSO6;
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
/*
* These Rx features require the negotiated features to
* be updated. Avoid a full reinit if possible.
*/
if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
update = 1;
else
reinit = 1;
/* BMV: Avoid needless renegotiation for just software LRO. */
if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
IFCAP_LRO && vtnet_software_lro(sc))
reinit = update = 0;
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
/*
* VirtIO does not distinguish between IPv4 and IPv6 checksums
* so treat them as a pair. Guest TSO (LRO) requires receive
* checksums.
*/
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
ifp->if_capenable |= IFCAP_RXCSUM;
#ifdef notyet
ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
#endif
} else
ifp->if_capenable &=
~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
}
if (mask & IFCAP_VLAN_HWFILTER) {
/* This Rx feature requires renegotiation. */
reinit = 1;
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
}
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (reinit) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc, 0);
} else if (update)
vtnet_update_rx_offloads(sc);
}
return (0);
}
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct vtnet_softc *sc;
struct ifreq *ifr;
int error;
sc = ifp->if_softc;
ifr = (struct ifreq *) data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFFLAGS:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_ifflags(sc);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_multi(sc);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
break;
case SIOCSIFCAP:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_ifcap(sc, ifr);
VTNET_CORE_UNLOCK(sc);
VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
return (error);
}
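/*
* Fill the Rx virtqueue with as many buffers as it can hold.
*/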
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
int nbufs, error;
#ifdef DEV_NETMAP
error = vtnet_netmap_rxq_populate(rxq);
if (error >= 0)
return (error);
#endif /* DEV_NETMAP */
vq = rxq->vtnrx_vq;
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtnet_rxq_new_buf(rxq);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
/*
* EMSGSIZE signifies the virtqueue did not have enough
* entries available to hold the last mbuf. This is not
* an error.
*/
if (error == EMSGSIZE)
error = 0;
}
return (error);
}
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
struct mbuf *m;
int last;
#ifdef DEV_NETMAP
struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp),
rxq->vtnrx_id, NR_RX);
#else /* !DEV_NETMAP */
void *kring = NULL;
#endif /* !DEV_NETMAP */
vq = rxq->vtnrx_vq;
last = 0;
while ((m = virtqueue_drain(vq, &last)) != NULL) {
if (kring == NULL)
m_freem(m);
}
KASSERT(virtqueue_empty(vq),
("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
struct mbuf *m_head, *m_tail, *m;
int i, size;
m_head = NULL;
size = sc->vtnet_rx_clustersz;
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
for (i = 0; i < nbufs; i++) {
m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
if (m == NULL) {
sc->vtnet_stats.mbuf_alloc_failed++;
m_freem(m_head);
return (NULL);
}
m->m_len = size;
if (m_head != NULL) {
m_tail->m_next = m;
m_tail = m;
} else
m_head = m_tail = m;
}
if (m_tailp != NULL)
*m_tailp = m_tail;
return (m_head);
}
/*
* Slow path for when LRO without mergeable buffers is negotiated.
*/
static int
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
int len0)
{
struct vtnet_softc *sc;
struct mbuf *m, *m_prev, *m_new, *m_tail;
int len, clustersz, nreplace, error;
sc = rxq->vtnrx_sc;
clustersz = sc->vtnet_rx_clustersz;
m_prev = NULL;
m_tail = NULL;
nreplace = 0;
m = m0;
len = len0;
/*
* Since these mbuf chains are so large, avoid allocating a complete
* replacement when the received frame did not consume the entire
* chain. Unused mbufs are moved to the tail of the replacement mbuf.
*/
while (len > 0) {
if (m == NULL) {
sc->vtnet_stats.rx_frame_too_large++;
return (EMSGSIZE);
}
/*
* Every mbuf should have the expected cluster size since that
* is also used to allocate the replacements.
*/
KASSERT(m->m_len == clustersz,
("%s: mbuf size %d not expected cluster size %d", __func__,
m->m_len, clustersz));
m->m_len = MIN(m->m_len, len);
len -= m->m_len;
m_prev = m;
m = m->m_next;
nreplace++;
}
KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
("%s: invalid replacement mbuf count %d max %d", __func__,
nreplace, sc->vtnet_rx_nmbufs));
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
if (m_new == NULL) {
m_prev->m_len = clustersz;
return (ENOBUFS);
}
/*
* Move any unused mbufs from the received mbuf chain onto the
* end of the replacement chain.
*/
if (m_prev->m_next != NULL) {
m_tail->m_next = m_prev->m_next;
m_prev->m_next = NULL;
}
error = vtnet_rxq_enqueue_buf(rxq, m_new);
if (error) {
/*
* The replacement is supposed to be a copy of the one
* dequeued, so this is a very unexpected error.
*
* Restore the m0 chain to the original state if it was
* modified so we can then discard it.
*/
if (m_tail->m_next != NULL) {
m_prev->m_next = m_tail->m_next;
m_tail->m_next = NULL;
}
m_prev->m_len = clustersz;
sc->vtnet_stats.rx_enq_replacement_failed++;
m_freem(m_new);
}
return (error);
}
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
struct vtnet_softc *sc;
struct mbuf *m_new;
int error;
sc = rxq->vtnrx_sc;
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
MPASS(m->m_next == NULL);
if (m->m_len < len)
return (EMSGSIZE);
m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
if (m_new == NULL)
return (ENOBUFS);
error = vtnet_rxq_enqueue_buf(rxq, m_new);
if (error) {
sc->vtnet_stats.rx_enq_replacement_failed++;
m_freem(m_new);
} else
m->m_len = len;
return (error);
}
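/*
* Add a receive mbuf (chain) to the Rx virtqueue. On legacy devices
* without mergeable buffers, the virtio header and the frame data are
* appended as separate segments.
*/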
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
struct vtnet_softc *sc;
struct sglist *sg;
int header_inlined, error;
sc = rxq->vtnrx_sc;
sg = rxq->vtnrx_sg;
KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
("%s: mbuf chain without LRO_NOMRG", __func__));
VTNET_RXQ_LOCK_ASSERT(rxq);
sglist_reset(sg);
header_inlined = vtnet_modern(sc) ||
(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
if (header_inlined)
error = sglist_append_mbuf(sg, m);
else {
struct vtnet_rx_header *rxhdr =
mtod(m, struct vtnet_rx_header *);
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
/* Append the header and remaining mbuf data. */
error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
if (error)
return (error);
error = sglist_append(sg, &rxhdr[1],
m->m_len - sizeof(struct vtnet_rx_header));
if (error)
return (error);
if (m->m_next != NULL)
error = sglist_append_mbuf(sg, m->m_next);
}
if (error)
return (error);
return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
struct vtnet_softc *sc;
struct mbuf *m;
int error;
sc = rxq->vtnrx_sc;
m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
if (m == NULL)
return (ENOBUFS);
error = vtnet_rxq_enqueue_buf(rxq, m);
if (error)
m_freem(m);
return (error);
}
static int
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
int hoff, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
int error;
sc = rxq->vtnrx_sc;
/*
* NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
* not have an analogous CSUM flag. The checksum has been validated,
* but is incomplete (TCP/UDP pseudo header).
*
* The packet is likely from another VM on the same host that itself
* performed checksum offloading so Tx/Rx is basically a memcpy and
* the checksum has little value.
*
* Default to receiving the packet as-is for performance reasons, but
* this can cause issues if the packet is to be forwarded because it
* does not contain a valid checksum. This patch may be helpful:
* https://reviews.freebsd.org/D6611. In the meantime, have the driver
* compute the checksum if requested.
*
* BMV: Need to add a CSUM_PARTIAL flag?
*/
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
return (error);
}
/*
* Compute the checksum in the driver so the packet will contain a
* valid checksum. The checksum is at csum_offset from csum_start.
*/
switch (etype) {
#if defined(INET) || defined(INET6)
case ETHERTYPE_IP:
case ETHERTYPE_IPV6: {
int csum_off, csum_end;
uint16_t csum;
csum_off = hdr->csum_start + hdr->csum_offset;
csum_end = csum_off + sizeof(uint16_t);
/* Assume the checksum will be in the first mbuf. */
if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
return (1);
/*
* Like in_delayed_cksum()/in6_delayed_cksum(), compute the
* checksum and write it at the specified offset. We could
* try to verify the packet: csum_start should probably
* correspond to the start of the TCP/UDP header.
*
* BMV: Need to properly handle UDP with zero checksum. Is
* the IPv4 header checksum implicitly validated?
*/
csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
*(uint16_t *)(mtodo(m, csum_off)) = csum;
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
}
#endif
default:
sc->vtnet_stats.rx_csum_bad_ethtype++;
return (1);
}
return (0);
}
static int
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
int protocol;
sc = rxq->vtnrx_sc;
switch (etype) {
#if defined(INET)
case ETHERTYPE_IP:
if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
protocol = IPPROTO_DONE;
else {
struct ip *ip = (struct ip *)(m->m_data + hoff);
protocol = ip->ip_p;
}
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
|| ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
protocol = IPPROTO_DONE;
break;
#endif
default:
protocol = IPPROTO_DONE;
break;
}
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xFFFF;
break;
default:
/*
* FreeBSD does not support checksum offloading of this
* protocol. Let the stack re-verify the checksum later
* if the protocol is supported.
*/
#if 0
if_printf(sc->vtnet_ifp,
"%s: checksum offload of unsupported protocol "
"etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
__func__, etype, protocol, hdr->csum_start,
hdr->csum_offset);
#endif
break;
}
return (0);
}
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
const struct ether_header *eh;
int hoff;
uint16_t etype;
eh = mtod(m, const struct ether_header *);
etype = ntohs(eh->ether_type);
if (etype == ETHERTYPE_VLAN) {
/* TODO BMV: Handle QinQ. */
const struct ether_vlan_header *evh =
mtod(m, const struct ether_vlan_header *);
etype = ntohs(evh->evl_proto);
hoff = sizeof(struct ether_vlan_header);
} else
hoff = sizeof(struct ether_header);
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
else /* VIRTIO_NET_HDR_F_DATA_VALID */
return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
}
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
struct mbuf *m;
while (--nbufs > 0) {
m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
if (m == NULL)
break;
vtnet_rxq_discard_buf(rxq, m);
}
}
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
int error;
/*
* Requeue the discarded mbuf. This should always be successful
* since it was just dequeued.
*/
error = vtnet_rxq_enqueue_buf(rxq, m);
KASSERT(error == 0,
("%s: cannot requeue discarded mbuf %d", __func__, error));
}
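/*
* Dequeue the remaining buffers of a mergeable receive frame, linking
* them onto the tail of m_head.
*/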
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct mbuf *m_tail;
sc = rxq->vtnrx_sc;
vq = rxq->vtnrx_vq;
m_tail = m_head;
while (--nbufs > 0) {
struct mbuf *m;
int len;
m = virtqueue_dequeue(vq, &len);
if (m == NULL) {
rxq->vtnrx_stats.vrxs_ierrors++;
goto fail;
}
if (vtnet_rxq_new_buf(rxq) != 0) {
rxq->vtnrx_stats.vrxs_iqdrops++;
vtnet_rxq_discard_buf(rxq, m);
if (nbufs > 1)
vtnet_rxq_discard_merged_bufs(rxq, nbufs);
goto fail;
}
if (m->m_len < len)
len = m->m_len;
m->m_len = len;
m->m_flags &= ~M_PKTHDR;
m_head->m_pkthdr.len += len;
m_tail->m_next = m;
m_tail = m;
}
return (0);
fail:
sc->vtnet_stats.rx_mergeable_failed++;
m_freem(m_head);
return (1);
}
#if defined(INET) || defined(INET6)
static int
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
{
struct lro_ctrl *lro;
lro = &rxq->vtnrx_lro;
if (lro->lro_mbuf_max != 0) {
tcp_lro_queue_mbuf(lro, m);
return (0);
}
return (tcp_lro_rx(lro, m, 0));
}
#endif
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
struct ether_header *eh = mtod(m, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
vtnet_vlan_tag_remove(m);
/*
* With the 802.1Q header removed, update the
* checksum starting location accordingly.
*/
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
}
}
m->m_pkthdr.flowid = rxq->vtnrx_id;
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
if (hdr->flags &
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
}
if (hdr->gso_size != 0) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
m->m_pkthdr.lro_nsegs =
howmany(m->m_pkthdr.len, hdr->gso_size);
rxq->vtnrx_stats.vrxs_host_lro++;
break;
}
}
rxq->vtnrx_stats.vrxs_ipackets++;
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
#if defined(INET) || defined(INET6)
if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) {
if (vtnet_lro_rx(rxq, m) == 0)
return;
}
#endif
(*ifp->if_input)(ifp, m);
}
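/*
* Process completed receive descriptors, up to the Rx process limit:
* replace each dequeued buffer, strip the virtio header, and hand the
* frame to the stack.
*/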
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
struct virtio_net_hdr lhdr, *hdr;
struct vtnet_softc *sc;
struct ifnet *ifp;
struct virtqueue *vq;
int deq, count;
sc = rxq->vtnrx_sc;
vq = rxq->vtnrx_vq;
ifp = sc->vtnet_ifp;
deq = 0;
count = sc->vtnet_rx_process_limit;
VTNET_RXQ_LOCK_ASSERT(rxq);
while (count-- > 0) {
struct mbuf *m;
int len, nbufs, adjsz;
m = virtqueue_dequeue(vq, &len);
if (m == NULL)
break;
deq++;
if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
rxq->vtnrx_stats.vrxs_ierrors++;
vtnet_rxq_discard_buf(rxq, m);
continue;
}
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
struct virtio_net_hdr_mrg_rxbuf *mhdr =
mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
nbufs = vtnet_htog16(sc, mhdr->num_buffers);
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else if (vtnet_modern(sc)) {
nbufs = 1; /* num_buffers is always 1 */
adjsz = sizeof(struct virtio_net_hdr_v1);
} else {
nbufs = 1;
adjsz = sizeof(struct vtnet_rx_header);
/*
* Account for our gap between the header and start of
* data to keep the segments separated.
*/
len += VTNET_RX_HEADER_PAD;
}
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
rxq->vtnrx_stats.vrxs_iqdrops++;
vtnet_rxq_discard_buf(rxq, m);
if (nbufs > 1)
vtnet_rxq_discard_merged_bufs(rxq, nbufs);
continue;
}
m->m_pkthdr.len = len;
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.csum_flags = 0;
if (nbufs > 1) {
/* Dequeue the rest of chain. */
if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
continue;
}
/*
* Save an endian-swapped version of the header prior to it
* being stripped. The header is always at the start of the
* mbuf data. num_buffers was already saved (and not needed)
* so use the standard header.
*/
hdr = mtod(m, struct virtio_net_hdr *);
lhdr.flags = hdr->flags;
lhdr.gso_type = hdr->gso_type;
lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
m_adj(m, adjsz);
if (PFIL_HOOKED_IN(sc->vtnet_pfil)) {
pfil_return_t pfil;
pfil = pfil_run_hooks(sc->vtnet_pfil, &m, ifp, PFIL_IN,
NULL);
switch (pfil) {
case PFIL_REALLOCED:
m = pfil_mem2mbuf(m->m_data);
break;
case PFIL_DROPPED:
case PFIL_CONSUMED:
continue;
default:
KASSERT(pfil == PFIL_PASS,
("Filter returned %d!", pfil));
}
}
vtnet_rxq_input(rxq, m, &lhdr);
}
if (deq > 0) {
#if defined(INET) || defined(INET6)
tcp_lro_flush_all(&rxq->vtnrx_lro);
#endif
virtqueue_notify(vq);
}
return (count > 0 ? 0 : EAGAIN);
}
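/*
* Common Rx interrupt and taskqueue handler. The tries argument bounds
* how many times to retry the enable-interrupt race before deferring
* to the taskqueue.
*/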
static void
vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
int more;
#ifdef DEV_NETMAP
int nmirq;
#endif /* DEV_NETMAP */
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
/*
* Ignore this interrupt. Either this is a spurious interrupt
* or multiqueue without per-VQ MSIX so every queue needs to
* be polled (a brain-dead configuration we could try harder
* to avoid).
*/
vtnet_rxq_disable_intr(rxq);
return;
}
VTNET_RXQ_LOCK(rxq);
#ifdef DEV_NETMAP
/*
* We call netmap_rx_irq() under lock to prevent concurrent calls.
* This is not necessary to serialize the access to the RX vq, but
* rather to avoid races that may happen if this interface is
* attached to a VALE switch, which would cause received packets
* to stall in the RX queue (nm_kr_tryget() could find the kring
* busy when called from netmap_bwrap_intr_notify()).
*/
nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more);
if (nmirq != NM_IRQ_PASS) {
VTNET_RXQ_UNLOCK(rxq);
if (nmirq == NM_IRQ_RESCHED) {
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
}
return;
}
#endif /* DEV_NETMAP */
again:
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_RXQ_UNLOCK(rxq);
return;
}
more = vtnet_rxq_eof(rxq);
if (more || vtnet_rxq_enable_intr(rxq) != 0) {
if (!more)
vtnet_rxq_disable_intr(rxq);
/*
* This is an occasional condition or race (when !more),
* so retry a few times before scheduling the taskqueue.
*/
if (tries-- > 0)
goto again;
rxq->vtnrx_stats.vrxs_rescheduled++;
VTNET_RXQ_UNLOCK(rxq);
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
} else
VTNET_RXQ_UNLOCK(rxq);
}
static void
vtnet_rx_vq_intr(void *xrxq)
{
struct vtnet_rxq *rxq;
rxq = xrxq;
vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES);
}
static void
vtnet_rxq_tq_intr(void *xrxq, int pending)
{
struct vtnet_rxq *rxq;
rxq = xrxq;
vtnet_rx_vq_process(rxq, 0);
}
static int
vtnet_txq_intr_threshold(struct vtnet_txq *txq)
{
struct vtnet_softc *sc;
int threshold;
sc = txq->vtntx_sc;
/*
* The Tx interrupt is disabled until the queue free count falls
* below our threshold. Completed frames are drained from the Tx
* virtqueue before transmitting new frames and in the watchdog
* callout, so the frequency of Tx interrupts is greatly reduced,
* at the cost of not freeing mbufs as quickly as they otherwise
* would be.
*/
threshold = virtqueue_size(txq->vtntx_vq) / 4;
/*
* Without indirect descriptors, leave enough room for the most
* segments we handle.
*/
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
threshold < sc->vtnet_tx_nsegs)
threshold = sc->vtnet_tx_nsegs;
return (threshold);
}
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{
struct virtqueue *vq;
vq = txq->vtntx_vq;
return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
}
static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
struct virtqueue *vq;
vq = txq->vtntx_vq;
txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
virtqueue_notify(vq);
if (vtnet_txq_enable_intr(txq) == 0)
return (0);
/*
* Drain frames that were completed since last checked. If this
* causes the queue to go above the threshold, the caller should
* continue transmitting.
*/
if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
virtqueue_disable_intr(vq);
return (1);
}
return (0);
}
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
struct virtqueue *vq;
struct vtnet_tx_header *txhdr;
int last;
#ifdef DEV_NETMAP
struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp),
txq->vtntx_id, NR_TX);
#else /* !DEV_NETMAP */
void *kring = NULL;
#endif /* !DEV_NETMAP */
vq = txq->vtntx_vq;
last = 0;
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
if (kring == NULL) {
m_freem(txhdr->vth_mbuf);
uma_zfree(vtnet_tx_header_zone, txhdr);
}
}
KASSERT(virtqueue_empty(vq),
("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
* BMV: This can go away once we finally have offsets in the mbuf header.
*/
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
int *proto, int *start)
{
struct vtnet_softc *sc;
struct ether_vlan_header *evh;
int offset;
sc = txq->vtntx_sc;
evh = mtod(m, struct ether_vlan_header *);
if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
/* BMV: We should handle nested VLAN tags too. */
*etype = ntohs(evh->evl_proto);
offset = sizeof(struct ether_vlan_header);
} else {
*etype = ntohs(evh->evl_encap_proto);
offset = sizeof(struct ether_header);
}
switch (*etype) {
#if defined(INET)
case ETHERTYPE_IP: {
struct ip *ip, iphdr;
if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
m_copydata(m, offset, sizeof(struct ip),
(caddr_t) &iphdr);
ip = &iphdr;
} else
ip = (struct ip *)(m->m_data + offset);
*proto = ip->ip_p;
*start = offset + (ip->ip_hl << 2);
break;
}
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
*proto = -1;
*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
/* Assert the network stack sent us a valid packet. */
KASSERT(*start > offset,
("%s: mbuf %p start %d offset %d proto %d", __func__, m,
*start, offset, *proto));
break;
#endif
default:
sc->vtnet_stats.tx_csum_unknown_ethtype++;
return (EINVAL);
}
return (0);
}
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags,
int offset, struct virtio_net_hdr *hdr)
{
static struct timeval lastecn;
static int curecn;
struct vtnet_softc *sc;
struct tcphdr *tcp, tcphdr;
sc = txq->vtntx_sc;
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
tcp = &tcphdr;
} else
tcp = (struct tcphdr *)(m->m_data + offset);
hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
hdr->gso_type = (flags & CSUM_IP_TSO) ?
VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
if (__predict_false(tcp->th_flags & TH_CWR)) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
* FreeBSD, ECN support is not on a per-interface basis,
* but globally via the net.inet.tcp.ecn.enable sysctl
* knob. The default is off.
*/
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
if (ppsratecheck(&lastecn, &curecn, 1))
if_printf(sc->vtnet_ifp,
"TSO with ECN not negotiated with host\n");
return (ENOTSUP);
}
hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
}
txq->vtntx_stats.vtxs_tso++;
return (0);
}
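/*
* Fill in the virtio header's checksum and TSO fields from the mbuf's
* offload flags. The mbuf is freed and NULL returned if the request is
* inconsistent with what was parsed from the frame.
*/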
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
int flags, etype, csum_start, proto, error;
sc = txq->vtntx_sc;
flags = m->m_pkthdr.csum_flags;
error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
if (error)
goto drop;
if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
/* Sanity check the parsed mbuf matches the offload flags. */
if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
&& etype != ETHERTYPE_IPV6))) {
sc->vtnet_stats.tx_csum_proto_mismatch++;
goto drop;
}
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
/*
* Sanity check that the parsed mbuf's IP protocol is TCP;
* VirtIO TSO requires the checksum offloading above.
*/
if (__predict_false(proto != IPPROTO_TCP)) {
sc->vtnet_stats.tx_tso_not_tcp++;
goto drop;
} else if (__predict_false((hdr->flags &
VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
sc->vtnet_stats.tx_tso_without_csum++;
goto drop;
}
error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr);
if (error)
goto drop;
}
return (m);
drop:
m_freem(m);
return (NULL);
}
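/*
* Append the header and mbuf chain to the transmit sglist, defragmenting
* the chain once if it has too many segments, and enqueue the frame.
*/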
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
struct vtnet_tx_header *txhdr)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct sglist *sg;
struct mbuf *m;
int error;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
sg = txq->vtntx_sg;
m = *m_head;
sglist_reset(sg);
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
if (error != 0 || sg->sg_nseg != 1) {
KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
__func__, error, sg->sg_nseg));
goto fail;
}
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
if (m == NULL)
goto fail;
*m_head = m;
sc->vtnet_stats.tx_defragged++;
error = sglist_append_mbuf(sg, m);
if (error)
goto fail;
}
txhdr->vth_mbuf = m;
error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
return (error);
fail:
sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
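/*
* Prepare a frame for transmit: allocate the tx header, software-insert
* any VLAN tag, apply the requested offloads, and enqueue the result.
*/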
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
struct vtnet_tx_header *txhdr;
struct virtio_net_hdr *hdr;
struct mbuf *m;
int error;
m = *m_head;
M_ASSERTPKTHDR(m);
txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
if (txhdr == NULL) {
m_freem(m);
*m_head = NULL;
return (ENOMEM);
}
/*
* Always use the non-mergeable header, regardless of whether mergeable
* headers were negotiated, because for transmit num_buffers is always zero.
* The vtnet_hdr_size is used to enqueue the right header size segment.
*/
hdr = &txhdr->vth_uhdr.hdr;
if (m->m_flags & M_VLANTAG) {
m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
goto fail;
}
m->m_flags &= ~M_VLANTAG;
}
if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
goto fail;
}
}
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
fail:
if (error)
uma_zfree(vtnet_tx_header_zone, txhdr);
return (error);
}
#ifdef VTNET_LEGACY_TX
static void
vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct mbuf *m0;
int tries, enq;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
tries = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vtnet_link_active == 0)
return;
vtnet_txq_eof(txq);
again:
enq = 0;
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if (virtqueue_full(vq))
break;
IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
if (m0 == NULL)
break;
if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
if (m0 != NULL)
IFQ_DRV_PREPEND(&ifp->if_snd, m0);
break;
}
enq++;
ETHER_BPF_MTAP(ifp, m0);
}
if (enq > 0 && vtnet_txq_notify(txq) != 0) {
if (tries++ < VTNET_NOTIFY_RETRIES)
goto again;
txq->vtntx_stats.vtxs_rescheduled++;
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
}
}
static void
vtnet_start(struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
sc = ifp->if_softc;
txq = &sc->vtnet_txqs[0];
VTNET_TXQ_LOCK(txq);
vtnet_start_locked(txq, ifp);
VTNET_TXQ_UNLOCK(txq);
}
#else /* !VTNET_LEGACY_TX */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
struct vtnet_softc *sc;
struct virtqueue *vq;
struct buf_ring *br;
struct ifnet *ifp;
int enq, tries, error;
sc = txq->vtntx_sc;
vq = txq->vtntx_vq;
br = txq->vtntx_br;
ifp = sc->vtnet_ifp;
tries = 0;
error = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->vtnet_link_active == 0) {
if (m != NULL)
error = drbr_enqueue(ifp, br, m);
return (error);
}
if (m != NULL) {
error = drbr_enqueue(ifp, br, m);
if (error)
return (error);
}
vtnet_txq_eof(txq);
again:
enq = 0;
while ((m = drbr_peek(ifp, br)) != NULL) {
if (virtqueue_full(vq)) {
drbr_putback(ifp, br, m);
break;
}
if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
if (m != NULL)
drbr_putback(ifp, br, m);
else
drbr_advance(ifp, br);
break;
}
drbr_advance(ifp, br);
enq++;
ETHER_BPF_MTAP(ifp, m);
}
if (enq > 0 && vtnet_txq_notify(txq) != 0) {
if (tries++ < VTNET_NOTIFY_RETRIES)
goto again;
txq->vtntx_stats.vtxs_rescheduled++;
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
}
return (0);
}
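/*
* Multiqueue transmit entry point. The Tx queue is selected from the
* mbuf's flowid when available, otherwise from the current CPU.
*/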
static int
vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
int i, npairs, error;
sc = ifp->if_softc;
npairs = sc->vtnet_act_vq_pairs;
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
i = m->m_pkthdr.flowid % npairs;
else
i = curcpu % npairs;
txq = &sc->vtnet_txqs[i];
if (VTNET_TXQ_TRYLOCK(txq) != 0) {
error = vtnet_txq_mq_start_locked(txq, m);
VTNET_TXQ_UNLOCK(txq);
} else {
error = drbr_enqueue(ifp, txq->vtntx_br, m);
taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
}
return (error);
}
static void
vtnet_txq_tq_deferred(void *xtxq, int pending)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
txq = xtxq;
sc = txq->vtntx_sc;
VTNET_TXQ_LOCK(txq);
if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
vtnet_txq_mq_start_locked(txq, NULL);
VTNET_TXQ_UNLOCK(txq);
}
#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
#ifdef VTNET_LEGACY_TX
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
vtnet_start_locked(txq, ifp);
#else
if (!drbr_empty(ifp, txq->vtntx_br))
vtnet_txq_mq_start_locked(txq, NULL);
#endif
}
static void
vtnet_txq_tq_intr(void *xtxq, int pending)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct ifnet *ifp;
txq = xtxq;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
VTNET_TXQ_LOCK(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_TXQ_UNLOCK(txq);
return;
}
vtnet_txq_eof(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
static int
vtnet_txq_eof(struct vtnet_txq *txq)
{
struct virtqueue *vq;
struct vtnet_tx_header *txhdr;
struct mbuf *m;
int deq;
vq = txq->vtntx_vq;
deq = 0;
VTNET_TXQ_LOCK_ASSERT(txq);
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
m = txhdr->vth_mbuf;
deq++;
txq->vtntx_stats.vtxs_opackets++;
txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
if (m->m_flags & M_MCAST)
txq->vtntx_stats.vtxs_omcasts++;
m_freem(m);
uma_zfree(vtnet_tx_header_zone, txhdr);
}
if (virtqueue_empty(vq))
txq->vtntx_watchdog = 0;
return (deq);
}
static void
vtnet_tx_vq_intr(void *xtxq)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct ifnet *ifp;
txq = xtxq;
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
/*
* Ignore this interrupt. Either this is a spurious interrupt
* or multiqueue without per-VQ MSIX so every queue needs to
* be polled (a brain-dead configuration we could try harder
* to avoid).
*/
vtnet_txq_disable_intr(txq);
return;
}
#ifdef DEV_NETMAP
if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
return;
#endif /* DEV_NETMAP */
VTNET_TXQ_LOCK(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VTNET_TXQ_UNLOCK(txq);
return;
}
vtnet_txq_eof(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
static void
vtnet_tx_start_all(struct vtnet_softc *sc)
{
struct vtnet_txq *txq;
int i;
VTNET_CORE_LOCK_ASSERT(sc);
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
vtnet_txq_start(txq);
VTNET_TXQ_UNLOCK(txq);
}
}
#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(struct ifnet *ifp)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
struct mbuf *m;
int i;
sc = ifp->if_softc;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
m_freem(m);
VTNET_TXQ_UNLOCK(txq);
}
if_qflush(ifp);
}
#endif
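/*
* Per-queue watchdog run from the tick callout. Returns nonzero when
* the queue has timed out and the interface needs to be reinitialized.
*/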
static int
vtnet_watchdog(struct vtnet_txq *txq)
{
struct ifnet *ifp;
ifp = txq->vtntx_sc->vtnet_ifp;
VTNET_TXQ_LOCK(txq);
if (txq->vtntx_watchdog == 1) {
/*
* Only drain completed frames if the watchdog is about to
* expire. If any frames were drained, there may be enough
* free descriptors now available to transmit queued frames.
* In that case, the timer will immediately be decremented
* below, but the timeout is generous enough that this should not
* be a problem.
*/
if (vtnet_txq_eof(txq) != 0)
vtnet_txq_start(txq);
}
if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
VTNET_TXQ_UNLOCK(txq);
return (0);
}
VTNET_TXQ_UNLOCK(txq);
if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
return (1);
}
static void
vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
struct vtnet_txq_stats *txacc)
{
bzero(rxacc, sizeof(struct vtnet_rxq_stats));
bzero(txacc, sizeof(struct vtnet_txq_stats));
for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
struct vtnet_rxq_stats *rxst;
struct vtnet_txq_stats *txst;
rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
rxacc->vrxs_csum += rxst->vrxs_csum;
rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
txst = &sc->vtnet_txqs[i].vtntx_stats;
txacc->vtxs_opackets += txst->vtxs_opackets;
txacc->vtxs_obytes += txst->vtxs_obytes;
txacc->vtxs_csum += txst->vtxs_csum;
txacc->vtxs_tso += txst->vtxs_tso;
txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
}
}
static uint64_t
vtnet_get_counter(if_t ifp, ift_counter cnt)
{
struct vtnet_softc *sc;
struct vtnet_rxq_stats rxaccum;
struct vtnet_txq_stats txaccum;
sc = if_getsoftc(ifp);
vtnet_accum_stats(sc, &rxaccum, &txaccum);
switch (cnt) {
case IFCOUNTER_IPACKETS:
return (rxaccum.vrxs_ipackets);
case IFCOUNTER_IQDROPS:
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
#ifndef VTNET_LEGACY_TX
case IFCOUNTER_OBYTES:
return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
return (txaccum.vtxs_omcasts);
#endif
default:
return (if_get_counter_default(ifp, cnt));
}
}
static void
vtnet_tick(void *xsc)
{
struct vtnet_softc *sc;
struct ifnet *ifp;
int i, timedout;
sc = xsc;
ifp = sc->vtnet_ifp;
timedout = 0;
VTNET_CORE_LOCK_ASSERT(sc);
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
if (timedout != 0) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc, 0);
} else
callout_schedule(&sc->vtnet_tick_ch, hz);
}
static void
vtnet_start_taskqueues(struct vtnet_softc *sc)
{
device_t dev;
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i, error;
dev = sc->vtnet_dev;
/*
* Errors here are very difficult to recover from - we cannot
* easily fail because, if this is during boot, we will hang
* when freeing any successfully started taskqueues because
* the scheduler isn't up yet.
*
* Most drivers just ignore the return value - it only fails
* with ENOMEM so an error is not likely.
*/
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
if (error) {
device_printf(dev, "failed to start rx taskq %d\n",
rxq->vtnrx_id);
}
txq = &sc->vtnet_txqs[i];
error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
"%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
if (error) {
device_printf(dev, "failed to start tx taskq %d\n",
txq->vtntx_id);
}
}
}
static void
vtnet_free_taskqueues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
if (rxq->vtnrx_tq != NULL) {
taskqueue_free(rxq->vtnrx_tq);
rxq->vtnrx_tq = NULL;
}
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_free(txq->vtntx_tq);
txq->vtntx_tq = NULL;
}
}
}
static void
vtnet_drain_taskqueues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
if (rxq->vtnrx_tq != NULL)
taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
#ifndef VTNET_LEGACY_TX
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
#endif
}
}
}
static void
vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
vtnet_rxq_free_mbufs(rxq);
txq = &sc->vtnet_txqs[i];
vtnet_txq_free_mbufs(txq);
}
}
static void
vtnet_stop_rendezvous(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i;
VTNET_CORE_LOCK_ASSERT(sc);
/*
* Lock and unlock the per-queue mutex so we know the stop
* state is visible. Doing only the active queues should be
* sufficient, but it does not cost much extra to do all the
* queues.
*/
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
VTNET_RXQ_LOCK(rxq);
VTNET_RXQ_UNLOCK(rxq);
txq = &sc->vtnet_txqs[i];
VTNET_TXQ_LOCK(txq);
VTNET_TXQ_UNLOCK(txq);
}
}
static void
vtnet_stop(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
sc->vtnet_link_active = 0;
callout_stop(&sc->vtnet_tick_ch);
/* Only advisory. */
vtnet_disable_interrupts(sc);
#ifdef DEV_NETMAP
/* Stop any pending txsync/rxsync and disable them. */
netmap_disable_all_rings(ifp);
#endif /* DEV_NETMAP */
/*
* Stop the host adapter. This resets it to the pre-initialized
* state. It will not generate any interrupts until after it is
* reinitialized.
*/
virtio_stop(dev);
vtnet_stop_rendezvous(sc);
vtnet_drain_rxtx_queues(sc);
sc->vtnet_act_vq_pairs = 1;
}
static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
uint64_t features;
int error;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
features = sc->vtnet_negotiated_features;
/*
* Re-negotiate with the host, removing any disabled receive
* features. Transmit features are disabled only on our side
* via if_capenable and if_hwassist.
*/
if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
if ((ifp->if_capenable & IFCAP_LRO) == 0)
features &= ~VTNET_LRO_FEATURES;
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
features &= ~VIRTIO_NET_F_CTRL_VLAN;
error = virtio_reinit(dev, features);
if (error) {
device_printf(dev, "virtio reinit error %d\n", error);
return (error);
}
sc->vtnet_features = features;
virtio_reinit_complete(dev);
return (0);
}
static void
vtnet_init_rx_filters(struct vtnet_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vtnet_ifp;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
vtnet_rx_filter(sc);
vtnet_rx_filter_mac(sc);
}
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
vtnet_rx_filter_vlan(sc);
}
static int
vtnet_init_rx_queues(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
struct vtnet_rxq *rxq;
int i, clustersz, error;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
sc->vtnet_rx_clustersz = clustersz;
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
VTNET_MAX_RX_SIZE, clustersz);
KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
("%s: too many rx mbufs %d for %d segments", __func__,
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
} else
sc->vtnet_rx_nmbufs = 1;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
/* Hold the lock to satisfy asserts. */
VTNET_RXQ_LOCK(rxq);
error = vtnet_rxq_populate(rxq);
VTNET_RXQ_UNLOCK(rxq);
if (error) {
device_printf(dev, "cannot populate Rx queue %d\n", i);
return (error);
}
}
return (0);
}
static int
vtnet_init_tx_queues(struct vtnet_softc *sc)
{
struct vtnet_txq *txq;
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
txq = &sc->vtnet_txqs[i];
txq->vtntx_watchdog = 0;
txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
#ifdef DEV_NETMAP
netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
#endif /* DEV_NETMAP */
}
return (0);
}
static int
vtnet_init_rxtx_queues(struct vtnet_softc *sc)
{
int error;
error = vtnet_init_rx_queues(sc);
if (error)
return (error);
error = vtnet_init_tx_queues(sc);
if (error)
return (error);
return (0);
}
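/*
* Tell the host how many queue pairs to use, falling back to a single
* pair if the multiqueue command fails.
*/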
static void
vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
{
device_t dev;
int npairs;
dev = sc->vtnet_dev;
if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
sc->vtnet_act_vq_pairs = 1;
return;
}
npairs = sc->vtnet_req_vq_pairs;
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
device_printf(dev, "cannot set active queue pairs to %d, "
"falling back to 1 queue pair\n", npairs);
npairs = 1;
}
sc->vtnet_act_vq_pairs = npairs;
}
static void
vtnet_update_rx_offloads(struct vtnet_softc *sc)
{
struct ifnet *ifp;
uint64_t features;
int error;
ifp = sc->vtnet_ifp;
features = sc->vtnet_features;
VTNET_CORE_LOCK_ASSERT(sc);
if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
features |= VIRTIO_NET_F_GUEST_CSUM;
else
features &= ~VIRTIO_NET_F_GUEST_CSUM;
}
if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) {
if (ifp->if_capenable & IFCAP_LRO)
features |= VTNET_LRO_FEATURES;
else
features &= ~VTNET_LRO_FEATURES;
}
error = vtnet_ctrl_guest_offloads(sc,
features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN |
VIRTIO_NET_F_GUEST_UFO));
if (error) {
device_printf(sc->vtnet_dev,
"%s: cannot update Rx features\n", __func__);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
vtnet_init_locked(sc, 0);
}
} else
sc->vtnet_features = features;
}
static int
vtnet_reinit(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
int error;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
error = vtnet_virtio_reinit(sc);
if (error)
return (error);
vtnet_set_macaddr(sc);
vtnet_set_active_vq_pairs(sc);
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
vtnet_init_rx_filters(sc);
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_IP_TSO;
if (ifp->if_capenable & IFCAP_TSO6)
ifp->if_hwassist |= CSUM_IP6_TSO;
error = vtnet_init_rxtx_queues(sc);
if (error)
return (error);
return (0);
}
static void
vtnet_init_locked(struct vtnet_softc *sc, int init_mode)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
vtnet_stop(sc);
#ifdef DEV_NETMAP
/* Once stopped we can update the netmap flags, if necessary. */
switch (init_mode) {
case VTNET_INIT_NETMAP_ENTER:
nm_set_native_flags(NA(ifp));
break;
case VTNET_INIT_NETMAP_EXIT:
nm_clear_native_flags(NA(ifp));
break;
}
#endif /* DEV_NETMAP */
if (vtnet_reinit(sc) != 0) {
vtnet_stop(sc);
return;
}
ifp->if_drv_flags |= IFF_DRV_RUNNING;
vtnet_update_link_status(sc);
vtnet_enable_interrupts(sc);
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
#ifdef DEV_NETMAP
/* Re-enable txsync/rxsync. */
netmap_enable_all_rings(ifp);
#endif /* DEV_NETMAP */
}
static void
vtnet_init(void *xsc)
{
struct vtnet_softc *sc;
sc = xsc;
VTNET_CORE_LOCK(sc);
vtnet_init_locked(sc, 0);
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
/*
* The control virtqueue is only polled and therefore it should
* already be empty.
*/
KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
}
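/*
* Execute a command on the control virtqueue and poll until the host
* completes it.
*/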
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
struct sglist *sg, int readable, int writable)
{
struct virtqueue *vq;
vq = sc->vtnet_ctrl_vq;
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
VTNET_CORE_LOCK_ASSERT(sc);
if (!virtqueue_empty(vq))
return;
/*
* Poll for the response, but the command is likely completed before
* returning from the notify.
*/
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) {
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
}
}
static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr __aligned(2);
uint8_t pad1;
uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
uint8_t pad2;
uint8_t ack;
} s;
int error;
error = 0;
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
s.hdr.class = VIRTIO_NET_CTRL_MAC;
s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr __aligned(2);
uint8_t pad1;
uint64_t offloads __aligned(8);
uint8_t pad2;
uint8_t ack;
} s;
int error;
error = 0;
MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
s.offloads = vtnet_gtoh64(sc, offloads);
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr __aligned(2);
uint8_t pad1;
struct virtio_net_ctrl_mq mq __aligned(2);
uint8_t pad2;
uint8_t ack;
} s;
int error;
error = 0;
MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
s.hdr.class = VIRTIO_NET_CTRL_MQ;
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr __aligned(2);
uint8_t pad1;
uint8_t onoff;
uint8_t pad2;
uint8_t ack;
} s;
int error;
error = 0;
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
s.hdr.class = VIRTIO_NET_CTRL_RX;
s.hdr.cmd = cmd;
s.onoff = !!on;
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}
static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
device_t dev;
struct ifnet *ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
device_printf(dev, "cannot %s promiscuous mode\n",
ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
}
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
device_printf(dev, "cannot %s all-multicast mode\n",
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}
}
static u_int
vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
{
struct vtnet_softc *sc = arg;
if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
return (0);
if (ucnt < VTNET_MAX_MAC_ENTRIES)
bcopy(LLADDR(sdl),
&sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
ETHER_ADDR_LEN);
return (1);
}
static u_int
vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
struct vtnet_mac_filter *filter = arg;
if (mcnt < VTNET_MAX_MAC_ENTRIES)
bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt],
ETHER_ADDR_LEN);
return (1);
}
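/*
* Program the host's unicast and multicast MAC filter tables, falling
* back to promiscuous or all-multicast mode if either table would
* overflow.
*/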
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
struct virtio_net_ctrl_hdr hdr __aligned(2);
struct vtnet_mac_filter *filter;
struct sglist_seg segs[4];
struct sglist sg;
struct ifnet *ifp;
bool promisc, allmulti;
u_int ucnt, mcnt;
int error;
uint8_t ack;
ifp = sc->vtnet_ifp;
filter = sc->vtnet_mac_filter;
error = 0;
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
VTNET_CORE_LOCK_ASSERT(sc);
/* Unicast MAC addresses: */
ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);
if (promisc) {
ucnt = 0;
if_printf(ifp, "more than %d MAC addresses assigned, "
"falling back to promiscuous mode\n",
VTNET_MAX_MAC_ENTRIES);
}
/* Multicast MAC addresses: */
mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);
if (allmulti) {
mcnt = 0;
if_printf(ifp, "more than %d multicast MAC addresses "
"assigned, falling back to all-multicast mode\n",
VTNET_MAX_MAC_ENTRIES);
}
if (promisc && allmulti)
goto out;
filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
hdr.class = VIRTIO_NET_CTRL_MAC;
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
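/* Each filter table is a 32-bit entry count followed by the packed MACs. */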
error |= sglist_append(&sg, &filter->vmf_unicast,
sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
error |= sglist_append(&sg, &filter->vmf_multicast,
sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
error |= sglist_append(&sg, &ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
if (ack != VIRTIO_NET_OK)
if_printf(ifp, "error setting host MAC filter table\n");
out:
if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
struct sglist_seg segs[3];
struct sglist sg;
struct {
struct virtio_net_ctrl_hdr hdr __aligned(2);
uint8_t pad1;
uint16_t tag __aligned(2);
uint8_t pad2;
uint8_t ack;
} s;
int error;
error = 0;
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
s.hdr.class = VIRTIO_NET_CTRL_VLAN;
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
s.tag = vtnet_gtoh16(sc, tag);
s.ack = VIRTIO_NET_ERR;
sglist_init(&sg, nitems(segs), segs);
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
MPASS(error == 0 && sg.sg_nseg == nitems(segs));
if (error == 0)
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
int i, bit;
uint32_t w;
uint16_t tag;
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
VTNET_CORE_LOCK_ASSERT(sc);
/* Enable the filter for each configured VLAN. */
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
w = sc->vtnet_vlan_filter[i];
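/*
* ffs() returns the 1-based index of the lowest set bit, so each
* iteration clears one bit and rebuilds its VLAN tag from the word
* index and the bit position.
*/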
while ((bit = ffs(w) - 1) != -1) {
w &= ~(1 << bit);
tag = sizeof(w) * CHAR_BIT * i + bit;
if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
device_printf(sc->vtnet_dev,
"cannot enable VLAN %d filter\n", tag);
}
}
}
}
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
struct ifnet *ifp;
int idx, bit;
ifp = sc->vtnet_ifp;
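/*
* The shadow VLAN table is a 4096-bit bitmap stored as 128 32-bit
* words: the word index is tag / 32 and the bit index is tag % 32.
*/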
idx = (tag >> 5) & 0x7F;
bit = tag & 0x1F;
if (tag == 0 || tag > 4095)
return;
VTNET_CORE_LOCK(sc);
if (add)
sc->vtnet_vlan_filter[idx] |= (1 << bit);
else
sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
ifp->if_drv_flags & IFF_DRV_RUNNING &&
vtnet_exec_vlan_filter(sc, add, tag) != 0) {
device_printf(sc->vtnet_dev,
"cannot %s VLAN %d %s the host filter table\n",
add ? "add" : "remove", tag, add ? "to" : "from");
}
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc != arg)
return;
vtnet_update_vlan_filter(arg, 1, tag);
}
static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
if (ifp->if_softc != arg)
return;
vtnet_update_vlan_filter(arg, 0, tag);
}
static void
vtnet_update_speed_duplex(struct vtnet_softc *sc)
{
struct ifnet *ifp;
uint32_t speed;
ifp = sc->vtnet_ifp;
if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
return;
/* BMV: Ignore duplex. */
speed = virtio_read_dev_config_4(sc->vtnet_dev,
offsetof(struct virtio_net_config, speed));
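/* A speed of -1 (UINT32_MAX) indicates the speed is unknown. */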
if (speed != -1)
ifp->if_baudrate = IF_Mbps(speed);
}
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
uint16_t status;
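/* Without VIRTIO_NET_F_STATUS, the link is assumed to always be up. */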
if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
return (1);
status = virtio_read_dev_config_2(sc->vtnet_dev,
offsetof(struct virtio_net_config, status));
return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
struct ifnet *ifp;
int link;
ifp = sc->vtnet_ifp;
VTNET_CORE_LOCK_ASSERT(sc);
link = vtnet_is_link_up(sc);
/* Notify if the link status has changed. */
if (link != 0 && sc->vtnet_link_active == 0) {
vtnet_update_speed_duplex(sc);
sc->vtnet_link_active = 1;
if_link_state_change(ifp, LINK_STATE_UP);
} else if (link == 0 && sc->vtnet_link_active != 0) {
sc->vtnet_link_active = 0;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
return (EOPNOTSUPP);
}
static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct vtnet_softc *sc;
sc = ifp->if_softc;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
VTNET_CORE_LOCK(sc);
if (vtnet_is_link_up(sc) != 0) {
ifmr->ifm_status |= IFM_ACTIVE;
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
} else
ifmr->ifm_active |= IFM_NONE;
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_get_macaddr(struct vtnet_softc *sc)
{
if (sc->vtnet_flags & VTNET_FLAG_MAC) {
virtio_read_device_config_array(sc->vtnet_dev,
offsetof(struct virtio_net_config, mac),
&sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
} else {
/* Generate a random locally administered unicast address. */
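/* 0xB2 sets the locally administered bit and clears the multicast bit. */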
sc->vtnet_hwaddr[0] = 0xB2;
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
}
}
static void
vtnet_set_macaddr(struct vtnet_softc *sc)
{
device_t dev;
int error;
dev = sc->vtnet_dev;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
if (error)
device_printf(dev, "unable to set MAC address\n");
return;
}
/* MAC in config is read-only in modern VirtIO. */
if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
for (int i = 0; i < ETHER_ADDR_LEN; i++) {
virtio_write_dev_config_1(dev,
offsetof(struct virtio_net_config, mac) + i,
sc->vtnet_hwaddr[i]);
}
}
}
static void
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
{
/* Assign MAC address if it was generated. */
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
vtnet_set_macaddr(sc);
}
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
struct ether_vlan_header *evh;
evh = mtod(m, struct ether_vlan_header *);
m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
m->m_flags |= M_VLANTAG;
/* Strip the 802.1Q header. */
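/* Slide the destination and source addresses forward over the 4-byte tag. */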
bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
static void
vtnet_set_rx_process_limit(struct vtnet_softc *sc)
{
int limit;
limit = vtnet_tunable_int(sc, "rx_process_limit",
vtnet_rx_process_limit);
if (limit < 0)
limit = INT_MAX;
sc->vtnet_rx_process_limit = limit;
}
static void
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
{
struct sysctl_oid *node;
struct sysctl_oid_list *list;
struct vtnet_rxq_stats *stats;
char namebuf[16];
snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
list = SYSCTL_CHILDREN(node);
stats = &rxq->vtnrx_stats;
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
&stats->vrxs_ipackets, "Receive packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
&stats->vrxs_ibytes, "Receive bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
&stats->vrxs_iqdrops, "Receive drops");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
&stats->vrxs_ierrors, "Receive errors");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
&stats->vrxs_csum, "Receive checksum offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
&stats->vrxs_host_lro, "Receive host segmentation offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
static void
vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_txq *txq)
{
struct sysctl_oid *node;
struct sysctl_oid_list *list;
struct vtnet_txq_stats *stats;
char namebuf[16];
snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
list = SYSCTL_CHILDREN(node);
stats = &txq->vtntx_stats;
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
&stats->vtxs_opackets, "Transmit packets");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
&stats->vtxs_obytes, "Transmit bytes");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
&stats->vtxs_omcasts, "Transmit multicasts");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
&stats->vtxs_csum, "Transmit checksum offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
&stats->vtxs_tso, "Transmit TCP segmentation offloaded");
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
int i;
dev = sc->vtnet_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
}
}
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
struct vtnet_statistics *stats;
struct vtnet_rxq_stats rxaccum;
struct vtnet_txq_stats txaccum;
vtnet_accum_stats(sc, &rxaccum, &txaccum);
stats = &sc->vtnet_stats;
stats->rx_csum_offloaded = rxaccum.vrxs_csum;
stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
stats->tx_csum_offloaded = txaccum.vtxs_csum;
stats->tx_tso_offloaded = txaccum.vtxs_tso;
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
CTLFLAG_RD, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
CTLFLAG_RD, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
CTLFLAG_RD, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
CTLFLAG_RD, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
CTLFLAG_RD, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
CTLFLAG_RD, &stats->rx_csum_bad_proto,
"Received checksum offloaded buffer with incorrect protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
CTLFLAG_RD, &stats->rx_csum_failed,
"Received buffer checksum offload failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
CTLFLAG_RD, &stats->rx_csum_offloaded,
"Received buffer checksum offload succeeded");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
CTLFLAG_RD, &stats->rx_task_rescheduled,
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
"Aborted transmit of checksum offloaded buffer because mismatched "
"protocols");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
CTLFLAG_RD, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
CTLFLAG_RD, &stats->tx_tso_without_csum,
"Aborted transmit of TSO buffer without TCP checksum offload");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
CTLFLAG_RD, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
CTLFLAG_RD, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
CTLFLAG_RD, &stats->tx_csum_offloaded,
"Offloaded checksum of transmitted buffer");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
CTLFLAG_RD, &stats->tx_tso_offloaded,
"Segmentation offload of transmitted buffer");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
CTLFLAG_RD, &stats->tx_task_rescheduled,
"Times the transmit interrupt task rescheduled itself");
}
static void
vtnet_setup_sysctl(struct vtnet_softc *sc)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vtnet_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
"Number of maximum supported virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
"Number of requested virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
"Number of active virtqueue pairs");
vtnet_setup_stat_sysctl(ctx, child, sc);
}
static void
vtnet_load_tunables(struct vtnet_softc *sc)
{
sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
"lro_entry_count", vtnet_lro_entry_count);
if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
"lro_mbufq_depth", vtnet_lro_mbufq_depth);
}
static int
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
{
return (virtqueue_enable_intr(rxq->vtnrx_vq));
}
static void
vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
{
virtqueue_disable_intr(rxq->vtnrx_vq);
}
static int
vtnet_txq_enable_intr(struct vtnet_txq *txq)
{
struct virtqueue *vq;
vq = txq->vtntx_vq;
if (vtnet_txq_below_threshold(txq) != 0)
return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
/*
* The free count is above our threshold. Keep the Tx interrupt
* disabled until the queue fills further.
*/
return (0);
}
static void
vtnet_txq_disable_intr(struct vtnet_txq *txq)
{
virtqueue_disable_intr(txq->vtntx_vq);
}
static void
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
{
struct vtnet_rxq *rxq;
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
rxq = &sc->vtnet_rxqs[i];
if (vtnet_rxq_enable_intr(rxq) != 0)
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
}
}
static void
vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
}
static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{
vtnet_enable_rx_interrupts(sc);
vtnet_enable_tx_interrupts(sc);
}
static void
vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
}
static void
vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
{
int i;
for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
}
static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{
vtnet_disable_rx_interrupts(sc);
vtnet_disable_tx_interrupts(sc);
}
static int
vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
{
char path[64];
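/* A per-device hw.vtnet.<unit>.<knob> tunable overrides the default. */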
snprintf(path, sizeof(path),
"hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
TUNABLE_INT_FETCH(path, &def);
return (def);
}
#ifdef DEBUGNET
static void
vtnet_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
struct vtnet_softc *sc;
sc = if_getsoftc(ifp);
VTNET_CORE_LOCK(sc);
*nrxr = sc->vtnet_req_vq_pairs;
*ncl = DEBUGNET_MAX_IN_FLIGHT;
*clsize = sc->vtnet_rx_clustersz;
VTNET_CORE_UNLOCK(sc);
}
static void
vtnet_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
{
}
static int
vtnet_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct vtnet_softc *sc;
struct vtnet_txq *txq;
int error;
sc = if_getsoftc(ifp);
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return (EBUSY);
txq = &sc->vtnet_txqs[0];
error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
if (error == 0)
(void)vtnet_txq_notify(txq);
return (error);
}
static int
vtnet_debugnet_poll(struct ifnet *ifp, int count)
{
struct vtnet_softc *sc;
int i;
sc = if_getsoftc(ifp);
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return (EBUSY);
(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c
index 09ac0a1232e7..7029d2ff76ce 100644
--- a/sys/dev/virtio/pci/virtio_pci_modern.c
+++ b/sys/dev/virtio/pci/virtio_pci_modern.c
@@ -1,1448 +1,1446 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for the modern VirtIO PCI interface. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/pci/virtio_pci.h>
#include <dev/virtio/pci/virtio_pci_modern_var.h>
#include "virtio_bus_if.h"
#include "virtio_pci_if.h"
#include "virtio_if.h"
struct vtpci_modern_resource_map {
struct resource_map vtrm_map;
int vtrm_cap_offset;
int vtrm_bar;
int vtrm_offset;
int vtrm_length;
int vtrm_type; /* SYS_RES_{MEMORY, IOPORT} */
};
struct vtpci_modern_bar_resource {
struct resource *vtbr_res;
int vtbr_type;
};
struct vtpci_modern_softc {
device_t vtpci_dev;
struct vtpci_common vtpci_common;
uint32_t vtpci_notify_offset_multiplier;
uint16_t vtpci_devid;
int vtpci_msix_bar;
struct resource *vtpci_msix_res;
struct vtpci_modern_resource_map vtpci_common_res_map;
struct vtpci_modern_resource_map vtpci_notify_res_map;
struct vtpci_modern_resource_map vtpci_isr_res_map;
struct vtpci_modern_resource_map vtpci_device_res_map;
#define VTPCI_MODERN_MAX_BARS 6
struct vtpci_modern_bar_resource vtpci_bar_res[VTPCI_MODERN_MAX_BARS];
};
static int vtpci_modern_probe(device_t);
static int vtpci_modern_attach(device_t);
static int vtpci_modern_detach(device_t);
static int vtpci_modern_suspend(device_t);
static int vtpci_modern_resume(device_t);
static int vtpci_modern_shutdown(device_t);
static void vtpci_modern_driver_added(device_t, driver_t *);
static void vtpci_modern_child_detached(device_t, device_t);
static int vtpci_modern_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtpci_modern_write_ivar(device_t, device_t, int, uintptr_t);
static uint8_t vtpci_modern_read_isr(device_t);
static uint16_t vtpci_modern_get_vq_size(device_t, int);
static bus_size_t vtpci_modern_get_vq_notify_off(device_t, int);
static void vtpci_modern_set_vq(device_t, struct virtqueue *);
static void vtpci_modern_disable_vq(device_t, int);
static int vtpci_modern_register_msix(struct vtpci_modern_softc *, int,
struct vtpci_interrupt *);
static int vtpci_modern_register_cfg_msix(device_t,
struct vtpci_interrupt *);
static int vtpci_modern_register_vq_msix(device_t, int idx,
struct vtpci_interrupt *);
static uint64_t vtpci_modern_negotiate_features(device_t, uint64_t);
static int vtpci_modern_finalize_features(device_t);
static int vtpci_modern_with_feature(device_t, uint64_t);
static int vtpci_modern_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtpci_modern_setup_interrupts(device_t, enum intr_type);
static void vtpci_modern_stop(device_t);
static int vtpci_modern_reinit(device_t, uint64_t);
static void vtpci_modern_reinit_complete(device_t);
static void vtpci_modern_notify_vq(device_t, uint16_t, bus_size_t);
static int vtpci_modern_config_generation(device_t);
static void vtpci_modern_read_dev_config(device_t, bus_size_t, void *, int);
static void vtpci_modern_write_dev_config(device_t, bus_size_t, void *, int);
static int vtpci_modern_probe_configs(device_t);
static int vtpci_modern_find_cap(device_t, uint8_t, int *);
static int vtpci_modern_map_configs(struct vtpci_modern_softc *);
static void vtpci_modern_unmap_configs(struct vtpci_modern_softc *);
static int vtpci_modern_find_cap_resource(struct vtpci_modern_softc *,
uint8_t, int, int, struct vtpci_modern_resource_map *);
static int vtpci_modern_bar_type(struct vtpci_modern_softc *, int);
static struct resource *vtpci_modern_get_bar_resource(
struct vtpci_modern_softc *, int, int);
static struct resource *vtpci_modern_alloc_bar_resource(
struct vtpci_modern_softc *, int, int);
static void vtpci_modern_free_bar_resources(struct vtpci_modern_softc *);
static int vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *,
struct vtpci_modern_resource_map *);
static void vtpci_modern_free_resource_map(struct vtpci_modern_softc *,
struct vtpci_modern_resource_map *);
static void vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *);
static void vtpci_modern_free_msix_resource(struct vtpci_modern_softc *);
static void vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *);
static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *);
static void vtpci_modern_write_features(struct vtpci_modern_softc *,
uint64_t);
static void vtpci_modern_select_virtqueue(struct vtpci_modern_softc *, int);
static uint8_t vtpci_modern_get_status(struct vtpci_modern_softc *);
static void vtpci_modern_set_status(struct vtpci_modern_softc *, uint8_t);
static void vtpci_modern_reset(struct vtpci_modern_softc *);
static void vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *);
static uint8_t vtpci_modern_read_common_1(struct vtpci_modern_softc *,
bus_size_t);
static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *,
bus_size_t);
static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *,
bus_size_t);
static void vtpci_modern_write_common_1(struct vtpci_modern_softc *,
bus_size_t, uint8_t);
static void vtpci_modern_write_common_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static void vtpci_modern_write_common_4(struct vtpci_modern_softc *,
bus_size_t, uint32_t);
static void vtpci_modern_write_common_8(struct vtpci_modern_softc *,
bus_size_t, uint64_t);
static void vtpci_modern_write_notify_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static uint8_t vtpci_modern_read_isr_1(struct vtpci_modern_softc *,
bus_size_t);
static uint8_t vtpci_modern_read_device_1(struct vtpci_modern_softc *,
bus_size_t);
static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *,
bus_size_t);
static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *,
bus_size_t);
static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *,
bus_size_t);
static void vtpci_modern_write_device_1(struct vtpci_modern_softc *,
bus_size_t, uint8_t);
static void vtpci_modern_write_device_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static void vtpci_modern_write_device_4(struct vtpci_modern_softc *,
bus_size_t, uint32_t);
static void vtpci_modern_write_device_8(struct vtpci_modern_softc *,
bus_size_t, uint64_t);
/* Tunables. */
static int vtpci_modern_transitional = 0;
TUNABLE_INT("hw.virtio.pci.transitional", &vtpci_modern_transitional);
static device_method_t vtpci_modern_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtpci_modern_probe),
DEVMETHOD(device_attach, vtpci_modern_attach),
DEVMETHOD(device_detach, vtpci_modern_detach),
DEVMETHOD(device_suspend, vtpci_modern_suspend),
DEVMETHOD(device_resume, vtpci_modern_resume),
DEVMETHOD(device_shutdown, vtpci_modern_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtpci_modern_driver_added),
DEVMETHOD(bus_child_detached, vtpci_modern_child_detached),
DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str),
DEVMETHOD(bus_read_ivar, vtpci_modern_read_ivar),
DEVMETHOD(bus_write_ivar, vtpci_modern_write_ivar),
/* VirtIO PCI interface. */
DEVMETHOD(virtio_pci_read_isr, vtpci_modern_read_isr),
DEVMETHOD(virtio_pci_get_vq_size, vtpci_modern_get_vq_size),
DEVMETHOD(virtio_pci_get_vq_notify_off, vtpci_modern_get_vq_notify_off),
DEVMETHOD(virtio_pci_set_vq, vtpci_modern_set_vq),
DEVMETHOD(virtio_pci_disable_vq, vtpci_modern_disable_vq),
DEVMETHOD(virtio_pci_register_cfg_msix, vtpci_modern_register_cfg_msix),
DEVMETHOD(virtio_pci_register_vq_msix, vtpci_modern_register_vq_msix),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtpci_modern_negotiate_features),
DEVMETHOD(virtio_bus_finalize_features, vtpci_modern_finalize_features),
DEVMETHOD(virtio_bus_with_feature, vtpci_modern_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_modern_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtpci_modern_setup_interrupts),
DEVMETHOD(virtio_bus_stop, vtpci_modern_stop),
DEVMETHOD(virtio_bus_reinit, vtpci_modern_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtpci_modern_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtpci_modern_notify_vq),
DEVMETHOD(virtio_bus_config_generation, vtpci_modern_config_generation),
DEVMETHOD(virtio_bus_read_device_config, vtpci_modern_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtpci_modern_write_dev_config),
DEVMETHOD_END
};
static driver_t vtpci_modern_driver = {
.name = "virtio_pci",
.methods = vtpci_modern_methods,
.size = sizeof(struct vtpci_modern_softc)
};
devclass_t vtpci_modern_devclass;
DRIVER_MODULE(virtio_pci_modern, pci, vtpci_modern_driver,
vtpci_modern_devclass, 0, 0);
static int
vtpci_modern_probe(device_t dev)
{
char desc[64];
const char *name;
uint16_t devid;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
return (ENXIO);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MODERN_MAX)
return (ENXIO);
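/*
* Transitional devices use the legacy ID range and carry the VirtIO
* device type in the PCI subdevice ID; modern IDs encode the type as
* an offset from the modern device ID base.
*/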
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) {
if (!vtpci_modern_transitional)
return (ENXIO);
devid = pci_get_subdevice(dev);
} else
devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN;
if (vtpci_modern_probe_configs(dev) != 0)
return (ENXIO);
name = virtio_device_name(devid);
if (name == NULL)
name = "Unknown";
snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
device_set_desc_copy(dev, desc);
return (BUS_PROBE_DEFAULT);
}
static int
vtpci_modern_attach(device_t dev)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtpci_dev = dev;
vtpci_init(&sc->vtpci_common, dev, true);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN)
sc->vtpci_devid = pci_get_subdevice(dev);
else
sc->vtpci_devid = pci_get_device(dev) -
VIRTIO_PCI_DEVICEID_MODERN_MIN;
error = vtpci_modern_map_configs(sc);
if (error) {
device_printf(dev, "cannot map configs\n");
vtpci_modern_unmap_configs(sc);
return (error);
}
vtpci_modern_reset(sc);
/* Tell the host we've noticed this device. */
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
error = vtpci_add_child(&sc->vtpci_common);
if (error)
goto fail;
vtpci_modern_probe_and_attach_child(sc);
return (0);
fail:
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
vtpci_modern_detach(dev);
return (error);
}
static int
vtpci_modern_detach(device_t dev)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_delete_child(&sc->vtpci_common);
if (error)
return (error);
vtpci_modern_reset(sc);
vtpci_modern_unmap_configs(sc);
return (0);
}
static int
vtpci_modern_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtpci_modern_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtpci_modern_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtpci_modern_stop(dev);
return (0);
}
static void
vtpci_modern_driver_added(device_t dev, driver_t *driver)
{
vtpci_modern_probe_and_attach_child(device_get_softc(dev));
}
static void
vtpci_modern_child_detached(device_t dev, device_t child)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_reset(sc);
vtpci_child_detached(&sc->vtpci_common);
/* After the reset, retell the host we've noticed this device. */
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
}
static int
vtpci_modern_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
*result = sc->vtpci_devid;
break;
default:
return (vtpci_read_ivar(cn, index, result));
}
return (0);
}
static int
vtpci_modern_write_ivar(device_t dev, device_t child, int index,
uintptr_t value)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
default:
return (vtpci_write_ivar(cn, index, value));
}
return (0);
}
static uint64_t
vtpci_modern_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtpci_modern_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtpci_modern_read_features(sc);
/*
* Since the driver was added as a child of the modern PCI bus,
* always add the V1 flag.
*/
child_features |= VIRTIO_F_VERSION_1;
features = vtpci_negotiate_features(&sc->vtpci_common,
child_features, host_features);
vtpci_modern_write_features(sc, features);
return (features);
}
static int
vtpci_modern_finalize_features(device_t dev)
{
struct vtpci_modern_softc *sc;
uint8_t status;
sc = device_get_softc(dev);
/*
* Must re-read the status after setting it to verify the negotiated
* features were accepted by the device.
- *
- * BMV: TODO Drivers need to handle possible failure of this method!
*/
vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK);
status = vtpci_modern_get_status(sc);
if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
device_printf(dev, "desired features were not accepted\n");
return (ENOTSUP);
}
return (0);
}
static int
vtpci_modern_with_feature(device_t dev, uint64_t feature)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
return (vtpci_with_feature(&sc->vtpci_common, feature));
}
static uint64_t
vtpci_modern_read_features(struct vtpci_modern_softc *sc)
{
uint32_t features0, features1;
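/*
* The 64-bit device feature set is exposed through a 32-bit window:
* select a word with DFSELECT, then read it from DF.
*/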
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 0);
features0 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 1);
features1 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
return (((uint64_t) features1 << 32) | features0);
}
static void
vtpci_modern_write_features(struct vtpci_modern_softc *sc, uint64_t features)
{
uint32_t features0, features1;
features0 = features;
features1 = features >> 32;
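/* Guest features use the same select-then-write window (GFSELECT/GF). */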
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 0);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features0);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 1);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features1);
}
static int
vtpci_modern_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
uint16_t max_nvqs;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
max_nvqs = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_NUMQ);
if (nvqs > max_nvqs) {
device_printf(sc->vtpci_dev, "requested virtqueue count %d "
"exceeds max %d\n", nvqs, max_nvqs);
return (E2BIG);
}
return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
}
static int
vtpci_modern_setup_interrupts(device_t dev, enum intr_type type)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_setup_interrupts(&sc->vtpci_common, type);
if (error == 0)
vtpci_modern_enable_virtqueues(sc);
return (error);
}
static void
vtpci_modern_stop(device_t dev)
{
vtpci_modern_reset(device_get_softc(dev));
}
static int
vtpci_modern_reinit(device_t dev, uint64_t features)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
int error;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
/*
* Redrive the device initialization. This is a bit of an abuse of
* the specification, but VirtualBox, QEMU/KVM, and bhyve seem to
* play nice.
*
* We do not allow the host device to change from what was originally
* negotiated beyond what the guest driver changed. MSIX state should
* not change, number of virtqueues and their size remain the same, etc.
* This will need to be rethought when we want to support migration.
*/
if (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
vtpci_modern_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device does
* not become usable again until DRIVER_OK in reinit complete.
*/
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
/*
* TODO: Check that no features are added beyond what was
* originally negotiated.
*/
vtpci_modern_negotiate_features(dev, features);
error = vtpci_modern_finalize_features(dev);
if (error) {
device_printf(dev, "cannot finalize features during reinit\n");
return (error);
}
error = vtpci_reinit(cn);
if (error)
return (error);
return (0);
}
static void
vtpci_modern_reinit_complete(device_t dev)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_enable_virtqueues(sc);
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_write_notify_2(sc, offset, queue);
}
static uint8_t
vtpci_modern_get_status(struct vtpci_modern_softc *sc)
{
return (vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_STATUS));
}
static void
vtpci_modern_set_status(struct vtpci_modern_softc *sc, uint8_t status)
{
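/*
* The status register accumulates state bits, so preserve whatever is
* already set unless this write is the reset (zero).
*/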
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtpci_modern_get_status(sc);
vtpci_modern_write_common_1(sc, VIRTIO_PCI_COMMON_STATUS, status);
}
static int
vtpci_modern_config_generation(device_t dev)
{
struct vtpci_modern_softc *sc;
uint8_t gen;
sc = device_get_softc(dev);
gen = vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_CFGGENERATION);
return (gen);
}
static void
vtpci_modern_read_dev_config(device_t dev, bus_size_t offset, void *dst,
int length)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
panic("%s: attempt to read dev config but not present",
__func__);
}
switch (length) {
case 1:
*(uint8_t *) dst = vtpci_modern_read_device_1(sc, offset);
break;
case 2:
*(uint16_t *) dst = virtio_htog16(true,
vtpci_modern_read_device_2(sc, offset));
break;
case 4:
*(uint32_t *) dst = virtio_htog32(true,
vtpci_modern_read_device_4(sc, offset));
break;
case 8:
*(uint64_t *) dst = virtio_htog64(true,
vtpci_modern_read_device_8(sc, offset));
break;
default:
panic("%s: device %s invalid device read length %d offset %d",
__func__, device_get_nameunit(dev), length, (int) offset);
}
}
static void
vtpci_modern_write_dev_config(device_t dev, bus_size_t offset, void *src,
int length)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
panic("%s: attempt to write dev config but not present",
__func__);
}
switch (length) {
case 1:
vtpci_modern_write_device_1(sc, offset, *(uint8_t *) src);
break;
case 2: {
uint16_t val = virtio_gtoh16(true, *(uint16_t *) src);
vtpci_modern_write_device_2(sc, offset, val);
break;
}
case 4: {
uint32_t val = virtio_gtoh32(true, *(uint32_t *) src);
vtpci_modern_write_device_4(sc, offset, val);
break;
}
case 8: {
uint64_t val = virtio_gtoh64(true, *(uint64_t *) src);
vtpci_modern_write_device_8(sc, offset, val);
break;
}
default:
panic("%s: device %s invalid device write length %d offset %d",
__func__, device_get_nameunit(dev), length, (int) offset);
}
}
static int
vtpci_modern_probe_configs(device_t dev)
{
int error;
/*
* These config capabilities must be present. The DEVICE_CFG
* capability is only present if the device requires it.
*/
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_COMMON_CFG, NULL);
if (error) {
device_printf(dev, "cannot find COMMON_CFG capability\n");
return (error);
}
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_NOTIFY_CFG, NULL);
if (error) {
device_printf(dev, "cannot find NOTIFY_CFG capability\n");
return (error);
}
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_ISR_CFG, NULL);
if (error) {
device_printf(dev, "cannot find ISR_CFG capability\n");
return (error);
}
return (0);
}
static int
vtpci_modern_find_cap(device_t dev, uint8_t cfg_type, int *cap_offset)
{
uint32_t type, bar;
int capreg, error;
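/*
* Modern VirtIO advertises each config structure through a PCI
* vendor-specific capability; walk them looking for the wanted type.
*/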
for (error = pci_find_cap(dev, PCIY_VENDOR, &capreg);
error == 0;
error = pci_find_next_cap(dev, PCIY_VENDOR, capreg, &capreg)) {
type = pci_read_config(dev, capreg +
offsetof(struct virtio_pci_cap, cfg_type), 1);
bar = pci_read_config(dev, capreg +
offsetof(struct virtio_pci_cap, bar), 1);
/* Must ignore reserved BARs. */
if (bar >= VTPCI_MODERN_MAX_BARS)
continue;
if (type == cfg_type) {
if (cap_offset != NULL)
*cap_offset = capreg;
break;
}
}
return (error);
}
static int
vtpci_modern_map_common_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_COMMON_CFG,
sizeof(struct virtio_pci_common_cfg), 4, &sc->vtpci_common_res_map);
if (error) {
device_printf(dev, "cannot find cap COMMON_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_common_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for COMMON_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_notify_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int cap_offset, error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_NOTIFY_CFG,
-1, 2, &sc->vtpci_notify_res_map);
if (error) {
device_printf(dev, "cannot find cap NOTIFY_CFG resource\n");
return (error);
}
cap_offset = sc->vtpci_notify_res_map.vtrm_cap_offset;
sc->vtpci_notify_offset_multiplier = pci_read_config(dev, cap_offset +
offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), 4);
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_notify_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for NOTIFY_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_isr_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_ISR_CFG,
sizeof(uint8_t), 1, &sc->vtpci_isr_res_map);
if (error) {
device_printf(dev, "cannot find cap ISR_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_isr_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for ISR_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_device_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_DEVICE_CFG,
-1, 4, &sc->vtpci_device_res_map);
if (error == ENOENT) {
/* Device configuration is optional depending on device. */
return (0);
} else if (error) {
device_printf(dev, "cannot find cap DEVICE_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_device_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for DEVICE_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_configs(struct vtpci_modern_softc *sc)
{
int error;
error = vtpci_modern_map_common_config(sc);
if (error)
return (error);
error = vtpci_modern_map_notify_config(sc);
if (error)
return (error);
error = vtpci_modern_map_isr_config(sc);
if (error)
return (error);
error = vtpci_modern_map_device_config(sc);
if (error)
return (error);
vtpci_modern_alloc_msix_resource(sc);
return (0);
}
static void
vtpci_modern_unmap_configs(struct vtpci_modern_softc *sc)
{
vtpci_modern_free_resource_map(sc, &sc->vtpci_common_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_notify_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_isr_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_device_res_map);
vtpci_modern_free_bar_resources(sc);
vtpci_modern_free_msix_resource(sc);
sc->vtpci_notify_offset_multiplier = 0;
}
static int
vtpci_modern_find_cap_resource(struct vtpci_modern_softc *sc, uint8_t cfg_type,
int min_size, int alignment, struct vtpci_modern_resource_map *res)
{
device_t dev;
int cap_offset, offset, length, error;
uint8_t bar, cap_length;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap(dev, cfg_type, &cap_offset);
if (error)
return (error);
cap_length = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, cap_len), 1);
if (cap_length < sizeof(struct virtio_pci_cap)) {
device_printf(dev, "cap %u length %d less than expected\n",
cfg_type, cap_length);
return (ENXIO);
}
bar = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, bar), 1);
offset = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, offset), 4);
length = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, length), 4);
if (min_size != -1 && length < min_size) {
device_printf(dev, "cap %u struct length %d less than min %d\n",
cfg_type, length, min_size);
return (ENXIO);
}
if (offset % alignment) {
device_printf(dev, "cap %u struct offset %d not aligned to %d\n",
cfg_type, offset, alignment);
return (ENXIO);
}
/* BMV: TODO Can we determine the size of the BAR here? */
res->vtrm_cap_offset = cap_offset;
res->vtrm_bar = bar;
res->vtrm_offset = offset;
res->vtrm_length = length;
res->vtrm_type = vtpci_modern_bar_type(sc, bar);
return (0);
}
static int
vtpci_modern_bar_type(struct vtpci_modern_softc *sc, int bar)
{
uint32_t val;
/*
* The BAR described by a config capability may be either an IOPORT or
* MEM, but we must know the type when calling bus_alloc_resource().
*/
val = pci_read_config(sc->vtpci_dev, PCIR_BAR(bar), 4);
if (PCI_BAR_IO(val))
return (SYS_RES_IOPORT);
else
return (SYS_RES_MEMORY);
}
static struct resource *
vtpci_modern_get_bar_resource(struct vtpci_modern_softc *sc, int bar, int type)
{
struct resource *res;
MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
res = sc->vtpci_bar_res[bar].vtbr_res;
MPASS(res == NULL || sc->vtpci_bar_res[bar].vtbr_type == type);
return (res);
}
static struct resource *
vtpci_modern_alloc_bar_resource(struct vtpci_modern_softc *sc, int bar,
int type)
{
struct resource *res;
int rid;
MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
MPASS(type == SYS_RES_MEMORY || type == SYS_RES_IOPORT);
res = sc->vtpci_bar_res[bar].vtbr_res;
if (res != NULL) {
MPASS(sc->vtpci_bar_res[bar].vtbr_type == type);
return (res);
}
rid = PCIR_BAR(bar);
res = bus_alloc_resource_any(sc->vtpci_dev, type, &rid,
RF_ACTIVE | RF_UNMAPPED);
if (res != NULL) {
sc->vtpci_bar_res[bar].vtbr_res = res;
sc->vtpci_bar_res[bar].vtbr_type = type;
}
return (res);
}
static void
vtpci_modern_free_bar_resources(struct vtpci_modern_softc *sc)
{
device_t dev;
struct resource *res;
int bar, rid, type;
dev = sc->vtpci_dev;
for (bar = 0; bar < VTPCI_MODERN_MAX_BARS; bar++) {
res = sc->vtpci_bar_res[bar].vtbr_res;
type = sc->vtpci_bar_res[bar].vtbr_type;
if (res != NULL) {
rid = PCIR_BAR(bar);
bus_release_resource(dev, type, rid, res);
sc->vtpci_bar_res[bar].vtbr_res = NULL;
sc->vtpci_bar_res[bar].vtbr_type = 0;
}
}
}
static int
vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *sc,
struct vtpci_modern_resource_map *map)
{
struct resource_map_request req;
struct resource *res;
int type;
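/*
* Multiple config structures may live in one BAR; the BAR is
* allocated unmapped once and each structure maps its own window.
*/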
type = map->vtrm_type;
res = vtpci_modern_alloc_bar_resource(sc, map->vtrm_bar, type);
if (res == NULL)
return (ENXIO);
resource_init_map_request(&req);
req.offset = map->vtrm_offset;
req.length = map->vtrm_length;
return (bus_map_resource(sc->vtpci_dev, type, res, &req,
&map->vtrm_map));
}
static void
vtpci_modern_free_resource_map(struct vtpci_modern_softc *sc,
struct vtpci_modern_resource_map *map)
{
struct resource *res;
int type;
type = map->vtrm_type;
res = vtpci_modern_get_bar_resource(sc, map->vtrm_bar, type);
if (res != NULL && map->vtrm_map.r_size != 0) {
bus_unmap_resource(sc->vtpci_dev, type, res, &map->vtrm_map);
bzero(map, sizeof(struct vtpci_modern_resource_map));
}
}
static void
vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *sc)
{
device_t dev;
int bar;
dev = sc->vtpci_dev;
if (!vtpci_is_msix_available(&sc->vtpci_common) ||
(bar = pci_msix_table_bar(dev)) == -1)
return;
/* TODO: Can this BAR be in the 0-5 range? */
sc->vtpci_msix_bar = bar;
if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&bar, RF_ACTIVE)) == NULL)
device_printf(dev, "Unable to map MSIX table\n");
}
static void
vtpci_modern_free_msix_resource(struct vtpci_modern_softc *sc)
{
device_t dev;
dev = sc->vtpci_dev;
if (sc->vtpci_msix_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, sc->vtpci_msix_bar,
sc->vtpci_msix_res);
sc->vtpci_msix_bar = 0;
sc->vtpci_msix_res = NULL;
}
}
static void
vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *sc)
{
device_t dev, child;
dev = sc->vtpci_dev;
child = vtpci_child_device(&sc->vtpci_common);
if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
return;
if (device_probe(child) != 0)
return;
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
if (device_attach(child) != 0) {
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
/* Reset state for later attempt. */
vtpci_modern_child_detached(dev, child);
} else {
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
static int
vtpci_modern_register_msix(struct vtpci_modern_softc *sc, int offset,
struct vtpci_interrupt *intr)
{
uint16_t vector;
if (intr != NULL) {
/* Map from guest rid to host vector. */
vector = intr->vti_rid - 1;
} else
vector = VIRTIO_MSI_NO_VECTOR;
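/*
* Re-read to verify the device accepted the vector; it reports
* NO_VECTOR if the allocation failed.
*/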
vtpci_modern_write_common_2(sc, offset, vector);
return (vtpci_modern_read_common_2(sc, offset) == vector ? 0 : ENODEV);
}
static int
vtpci_modern_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_MSIX, intr);
if (error) {
device_printf(dev,
"unable to register config MSIX interrupt\n");
return (error);
}
return (0);
}
static int
vtpci_modern_register_vq_msix(device_t dev, int idx,
struct vtpci_interrupt *intr)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
vtpci_modern_select_virtqueue(sc, idx);
error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_Q_MSIX, intr);
if (error) {
device_printf(dev,
"unable to register virtqueue MSIX interrupt\n");
return (error);
}
return (0);
}
static void
vtpci_modern_reset(struct vtpci_modern_softc *sc)
{
/*
* Setting the status to RESET sets the host device to the
* original, uninitialized state. Must poll the status until
* the reset is complete.
*/
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);
while (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
cpu_spinwait();
}
static void
vtpci_modern_select_virtqueue(struct vtpci_modern_softc *sc, int idx)
{
vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SELECT, idx);
}
static uint8_t
vtpci_modern_read_isr(device_t dev)
{
return (vtpci_modern_read_isr_1(device_get_softc(dev), 0));
}
static uint16_t
vtpci_modern_get_vq_size(device_t dev, int idx)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_select_virtqueue(sc, idx);
return (vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE));
}
static bus_size_t
vtpci_modern_get_vq_notify_off(device_t dev, int idx)
{
struct vtpci_modern_softc *sc;
uint16_t q_notify_off;
sc = device_get_softc(dev);
vtpci_modern_select_virtqueue(sc, idx);
q_notify_off = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_NOFF);
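/*
* The queue's notification address is its notify offset scaled by the
* notify_off_multiplier from the NOTIFY_CFG capability.
*/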
return (q_notify_off * sc->vtpci_notify_offset_multiplier);
}
static void
vtpci_modern_set_vq(device_t dev, struct virtqueue *vq)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_select_virtqueue(sc, virtqueue_index(vq));
/* BMV: Currently we never adjust the device's proposed VQ size. */
vtpci_modern_write_common_2(sc,
VIRTIO_PCI_COMMON_Q_SIZE, virtqueue_size(vq));
vtpci_modern_write_common_8(sc,
VIRTIO_PCI_COMMON_Q_DESCLO, virtqueue_desc_paddr(vq));
vtpci_modern_write_common_8(sc,
VIRTIO_PCI_COMMON_Q_AVAILLO, virtqueue_avail_paddr(vq));
vtpci_modern_write_common_8(sc,
VIRTIO_PCI_COMMON_Q_USEDLO, virtqueue_used_paddr(vq));
}
static void
vtpci_modern_disable_vq(device_t dev, int idx)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_select_virtqueue(sc, idx);
vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_DESCLO, 0ULL);
vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_AVAILLO, 0ULL);
vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_USEDLO, 0ULL);
}
static void
vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *sc)
{
int idx;
for (idx = 0; idx < sc->vtpci_common.vtpci_nvqs; idx++) {
vtpci_modern_select_virtqueue(sc, idx);
vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 1);
}
}
static uint8_t
vtpci_modern_read_common_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_1(&sc->vtpci_common_res_map.vtrm_map, off));
}
static uint16_t
vtpci_modern_read_common_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_2(&sc->vtpci_common_res_map.vtrm_map, off));
}
static uint32_t
vtpci_modern_read_common_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_4(&sc->vtpci_common_res_map.vtrm_map, off));
}
static void
vtpci_modern_write_common_1(struct vtpci_modern_softc *sc, bus_size_t off,
uint8_t val)
{
bus_write_1(&sc->vtpci_common_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_common_2(struct vtpci_modern_softc *sc, bus_size_t off,
uint16_t val)
{
bus_write_2(&sc->vtpci_common_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_common_4(struct vtpci_modern_softc *sc, bus_size_t off,
uint32_t val)
{
bus_write_4(&sc->vtpci_common_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_common_8(struct vtpci_modern_softc *sc, bus_size_t off,
uint64_t val)
{
uint32_t val0, val1;
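/* Write the 64-bit value as two 32-bit halves, low word first. */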
val0 = (uint32_t) val;
val1 = val >> 32;
vtpci_modern_write_common_4(sc, off, val0);
vtpci_modern_write_common_4(sc, off + 4, val1);
}
static void
vtpci_modern_write_notify_2(struct vtpci_modern_softc *sc, bus_size_t off,
uint16_t val)
{
bus_write_2(&sc->vtpci_notify_res_map.vtrm_map, off, val);
}
static uint8_t
vtpci_modern_read_isr_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_1(&sc->vtpci_isr_res_map.vtrm_map, off));
}
static uint8_t
vtpci_modern_read_device_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_1(&sc->vtpci_device_res_map.vtrm_map, off));
}
static uint16_t
vtpci_modern_read_device_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_2(&sc->vtpci_device_res_map.vtrm_map, off));
}
static uint32_t
vtpci_modern_read_device_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
return (bus_read_4(&sc->vtpci_device_res_map.vtrm_map, off));
}
static uint64_t
vtpci_modern_read_device_8(struct vtpci_modern_softc *sc, bus_size_t off)
{
device_t dev;
int gen;
uint32_t val0, val1;
dev = sc->vtpci_dev;
/*
* Treat the 64-bit field as two 32-bit fields. Use the generation
* to ensure a consistent read.
*/
do {
gen = vtpci_modern_config_generation(dev);
val0 = vtpci_modern_read_device_4(sc, off);
val1 = vtpci_modern_read_device_4(sc, off + 4);
} while (gen != vtpci_modern_config_generation(dev));
return (((uint64_t) val1 << 32) | val0);
}
static void
vtpci_modern_write_device_1(struct vtpci_modern_softc *sc, bus_size_t off,
uint8_t val)
{
bus_write_1(&sc->vtpci_device_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_device_2(struct vtpci_modern_softc *sc, bus_size_t off,
uint16_t val)
{
bus_write_2(&sc->vtpci_device_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_device_4(struct vtpci_modern_softc *sc, bus_size_t off,
uint32_t val)
{
bus_write_4(&sc->vtpci_device_res_map.vtrm_map, off, val);
}
static void
vtpci_modern_write_device_8(struct vtpci_modern_softc *sc, bus_size_t off,
uint64_t val)
{
uint32_t val0, val1;
val0 = (uint32_t) val;
val1 = val >> 32;
vtpci_modern_write_device_4(sc, off, val0);
vtpci_modern_write_device_4(sc, off + 4, val1);
}
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index 8c01b1cf6ae3..ee3a24bb5513 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -1,269 +1,290 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO entropy device. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
-#include <sys/callout.h>
#include <sys/random.h>
#include <sys/stdatomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
struct vtrnd_softc {
+ device_t vtrnd_dev;
uint64_t vtrnd_features;
struct virtqueue *vtrnd_vq;
};
static int vtrnd_modevent(module_t, int, void *);
static int vtrnd_probe(device_t);
static int vtrnd_attach(device_t);
static int vtrnd_detach(device_t);
-static void vtrnd_negotiate_features(device_t);
-static int vtrnd_alloc_virtqueue(device_t);
+static int vtrnd_negotiate_features(struct vtrnd_softc *);
+static int vtrnd_setup_features(struct vtrnd_softc *);
+static int vtrnd_alloc_virtqueue(struct vtrnd_softc *);
static int vtrnd_harvest(struct vtrnd_softc *, void *, size_t *);
static unsigned vtrnd_read(void *, unsigned);
#define VTRND_FEATURES 0
static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
static struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
};
/* Kludge for API limitations of random(4). */
static _Atomic(struct vtrnd_softc *) g_vtrnd_softc;
static device_method_t vtrnd_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtrnd_probe),
DEVMETHOD(device_attach, vtrnd_attach),
DEVMETHOD(device_detach, vtrnd_detach),
DEVMETHOD_END
};
static driver_t vtrnd_driver = {
"vtrnd",
vtrnd_methods,
sizeof(struct vtrnd_softc)
};
static devclass_t vtrnd_devclass;
DRIVER_MODULE(virtio_random, virtio_mmio, vtrnd_driver, vtrnd_devclass,
vtrnd_modevent, 0);
DRIVER_MODULE(virtio_random, virtio_pci, vtrnd_driver, vtrnd_devclass,
vtrnd_modevent, 0);
MODULE_VERSION(virtio_random, 1);
MODULE_DEPEND(virtio_random, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_random, random_device, 1, 1, 1);
VIRTIO_SIMPLE_PNPTABLE(virtio_random, VIRTIO_ID_ENTROPY,
"VirtIO Entropy Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_random);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_random);
static int
vtrnd_modevent(module_t mod, int type, void *unused)
{
int error;
switch (type) {
case MOD_LOAD:
case MOD_QUIESCE:
case MOD_UNLOAD:
case MOD_SHUTDOWN:
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static int
vtrnd_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_random));
}
static int
vtrnd_attach(device_t dev)
{
struct vtrnd_softc *sc, *exp;
int error;
sc = device_get_softc(dev);
-
+ sc->vtrnd_dev = dev;
virtio_set_feature_desc(dev, vtrnd_feature_desc);
- vtrnd_negotiate_features(dev);
- error = vtrnd_alloc_virtqueue(dev);
+ error = vtrnd_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
+
+ error = vtrnd_alloc_virtqueue(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueue\n");
goto fail;
}
exp = NULL;
if (!atomic_compare_exchange_strong_explicit(&g_vtrnd_softc, &exp, sc,
memory_order_release, memory_order_acquire)) {
error = EEXIST;
goto fail;
}
random_source_register(&random_vtrnd);
fail:
if (error)
vtrnd_detach(dev);
return (error);
}
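/*
 * A sketch of the publication pattern used above: attach publishes the
 * softc with a single compare-and-swap so that at most one device
 * instance backs random(4) at a time:
 *
 *	expected = NULL;
 *	if (!CAS(&g_vtrnd_softc, &expected, sc))	// already set?
 *		fail with EEXIST;
 *	random_source_register(&random_vtrnd);		// now visible
 *
 * The release ordering on the successful CAS pairs with the (at least
 * acquire) load in vtrnd_read(), so a reader that observes the pointer
 * also observes the fully initialized softc.
 */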
static int
vtrnd_detach(device_t dev)
{
struct vtrnd_softc *sc;
sc = device_get_softc(dev);
KASSERT(
atomic_load_explicit(&g_vtrnd_softc, memory_order_acquire) == sc,
("only one global instance at a time"));
random_source_deregister(&random_vtrnd);
atomic_store_explicit(&g_vtrnd_softc, NULL, memory_order_release);
return (0);
}
-static void
-vtrnd_negotiate_features(device_t dev)
+static int
+vtrnd_negotiate_features(struct vtrnd_softc *sc)
{
- struct vtrnd_softc *sc;
+ device_t dev;
+ uint64_t features;
- sc = device_get_softc(dev);
- sc->vtrnd_features = virtio_negotiate_features(dev, VTRND_FEATURES);
- virtio_finalize_features(dev);
+ dev = sc->vtrnd_dev;
+ features = VTRND_FEATURES;
+
+ sc->vtrnd_features = virtio_negotiate_features(dev, features);
+ return (virtio_finalize_features(dev));
}
static int
-vtrnd_alloc_virtqueue(device_t dev)
+vtrnd_setup_features(struct vtrnd_softc *sc)
{
- struct vtrnd_softc *sc;
+ int error;
+
+ error = vtrnd_negotiate_features(sc);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+static int
+vtrnd_alloc_virtqueue(struct vtrnd_softc *sc)
+{
+ device_t dev;
struct vq_alloc_info vq_info;
- sc = device_get_softc(dev);
+ dev = sc->vtrnd_dev;
VQ_ALLOC_INFO_INIT(&vq_info, 0, NULL, sc, &sc->vtrnd_vq,
"%s request", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}
static int
vtrnd_harvest(struct vtrnd_softc *sc, void *buf, size_t *sz)
{
struct sglist_seg segs[1];
struct sglist sg;
struct virtqueue *vq;
uint32_t value[HARVESTSIZE] __aligned(sizeof(uint32_t) * HARVESTSIZE);
uint32_t rdlen;
int error;
_Static_assert(sizeof(value) < PAGE_SIZE, "sglist assumption");
sglist_init(&sg, 1, segs);
error = sglist_append(&sg, value, *sz);
if (error != 0)
panic("%s: sglist_append error=%d", __func__, error);
vq = sc->vtrnd_vq;
KASSERT(virtqueue_empty(vq), ("%s: non-empty queue", __func__));
error = virtqueue_enqueue(vq, buf, &sg, 0, 1);
if (error != 0)
return (error);
/*
* Poll for the response, but the command is likely already
* done when we return from the notify.
*/
virtqueue_notify(vq);
virtqueue_poll(vq, &rdlen);
if (rdlen > *sz)
panic("%s: random device wrote %zu bytes beyond end of provided"
" buffer %p:%zu", __func__, (size_t)rdlen - *sz,
(void *)value, *sz);
else if (rdlen == 0)
return (EAGAIN);
*sz = MIN(rdlen, *sz);
memcpy(buf, value, *sz);
explicit_bzero(value, *sz);
return (0);
}
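/*
 * Note the asymmetry above: 'buf' is only the virtqueue cookie; the
 * device writes into the local 'value' array that backs the sglist.
 * The harvested bytes are then copied out and scrubbed:
 *
 *	virtqueue_enqueue(vq, buf, &sg, 0, 1);	// sg points at value[]
 *	...
 *	memcpy(buf, value, *sz);		// hand bytes to caller
 *	explicit_bzero(value, *sz);		// don't leak entropy
 */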
static unsigned
vtrnd_read(void *buf, unsigned usz)
{
struct vtrnd_softc *sc;
size_t sz;
int error;
sc = g_vtrnd_softc;
if (sc == NULL)
return (0);
sz = usz;
error = vtrnd_harvest(sc, buf, &sz);
if (error != 0)
return (0);
return (sz);
}
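/*
 * Once registered, the random(4) harvesting framework pulls entropy
 * through the rs_read hook.  A minimal consumer-side sketch (buffer
 * size hypothetical) of what the framework effectively does:
 *
 *	uint8_t buf[32];
 *	unsigned n = random_vtrnd.rs_read(buf, sizeof(buf));
 *	// n == 0 on harvest failure or when no device is attached
 */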
diff --git a/sys/dev/virtio/scsi/virtio_scsi.c b/sys/dev/virtio/scsi/virtio_scsi.c
index f4c716af3725..737b6d0a7a42 100644
--- a/sys/dev/virtio/scsi/virtio_scsi.c
+++ b/sys/dev/virtio/scsi/virtio_scsi.c
@@ -1,2355 +1,2375 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO SCSI devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <machine/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/scsi/virtio_scsi.h>
#include <dev/virtio/scsi/virtio_scsivar.h>
#include "virtio_if.h"
static int vtscsi_modevent(module_t, int, void *);
static int vtscsi_probe(device_t);
static int vtscsi_attach(device_t);
static int vtscsi_detach(device_t);
static int vtscsi_suspend(device_t);
static int vtscsi_resume(device_t);
-static void vtscsi_negotiate_features(struct vtscsi_softc *);
+static int vtscsi_negotiate_features(struct vtscsi_softc *);
+static int vtscsi_setup_features(struct vtscsi_softc *);
static void vtscsi_read_config(struct vtscsi_softc *,
struct virtio_scsi_config *);
static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
static void vtscsi_check_sizes(struct vtscsi_softc *);
static void vtscsi_write_device_config(struct vtscsi_softc *);
static int vtscsi_reinit(struct vtscsi_softc *);
static int vtscsi_alloc_cam(struct vtscsi_softc *);
static int vtscsi_register_cam(struct vtscsi_softc *);
static void vtscsi_free_cam(struct vtscsi_softc *);
static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
static int vtscsi_register_async(struct vtscsi_softc *);
static void vtscsi_deregister_async(struct vtscsi_softc *);
static void vtscsi_cam_action(struct cam_sim *, union ccb *);
static void vtscsi_cam_poll(struct cam_sim *);
static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
union ccb *);
static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
union ccb *);
static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
struct cam_sim *, union ccb *);
static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
struct sglist *, struct ccb_scsiio *);
static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
struct vtscsi_request *, int *, int *);
static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static void vtscsi_timedout_scsi_cmd(void *);
static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
struct vtscsi_request *, struct sglist *, int, int, int);
static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
struct vtscsi_request *);
static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
static void vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
static void vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
static int vtscsi_thaw_simq(struct vtscsi_softc *, int);
static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
lun_id_t);
static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
lun_id_t);
static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);
static void vtscsi_handle_event(struct vtscsi_softc *,
struct virtio_scsi_event *);
static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
struct virtio_scsi_event *);
static int vtscsi_init_event_vq(struct vtscsi_softc *);
static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
static void vtscsi_drain_event_vq(struct vtscsi_softc *);
static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
static void vtscsi_complete_vqs(struct vtscsi_softc *);
static void vtscsi_drain_vqs(struct vtscsi_softc *);
static void vtscsi_cancel_request(struct vtscsi_softc *,
struct vtscsi_request *);
static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
static void vtscsi_stop(struct vtscsi_softc *);
static int vtscsi_reset_bus(struct vtscsi_softc *);
static void vtscsi_init_request(struct vtscsi_softc *,
struct vtscsi_request *);
static int vtscsi_alloc_requests(struct vtscsi_softc *);
static void vtscsi_free_requests(struct vtscsi_softc *);
static void vtscsi_enqueue_request(struct vtscsi_softc *,
struct vtscsi_request *);
static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
static void vtscsi_complete_request(struct vtscsi_request *);
static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
static void vtscsi_control_vq_intr(void *);
static void vtscsi_event_vq_intr(void *);
static void vtscsi_request_vq_intr(void *);
static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);
static void vtscsi_get_tunables(struct vtscsi_softc *);
-static void vtscsi_add_sysctl(struct vtscsi_softc *);
+static void vtscsi_setup_sysctl(struct vtscsi_softc *);
static void vtscsi_printf_req(struct vtscsi_request *, const char *,
const char *, ...);
#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
#define vtscsi_htog16(_sc, _val) virtio_htog16(vtscsi_modern(_sc), _val)
#define vtscsi_htog32(_sc, _val) virtio_htog32(vtscsi_modern(_sc), _val)
#define vtscsi_htog64(_sc, _val) virtio_htog64(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh16(_sc, _val) virtio_gtoh16(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh32(_sc, _val) virtio_gtoh32(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh64(_sc, _val) virtio_gtoh64(vtscsi_modern(_sc), _val)
/* Global tunables. */
/*
* The current QEMU VirtIO SCSI implementation does not cancel in-flight
* IO during virtio_stop(). So in-flight requests still complete after the
* device reset. We would have to wait for all the in-flight IO to complete,
* which defeats the typical purpose of a bus reset. We could simulate the
* bus reset with either I_T_NEXUS_RESET of all the targets, or with
* LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
* control virtqueue). But this isn't very useful if things really go off
* the rails, so default to disabled for now.
*/
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
static struct virtio_feature_desc vtscsi_feature_desc[] = {
{ VIRTIO_SCSI_F_INOUT, "InOut" },
{ VIRTIO_SCSI_F_HOTPLUG, "Hotplug" },
{ VIRTIO_SCSI_F_CHANGE, "ChangeEvent" },
{ VIRTIO_SCSI_F_T10_PI, "T10PI" },
{ 0, NULL }
};
static device_method_t vtscsi_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtscsi_probe),
DEVMETHOD(device_attach, vtscsi_attach),
DEVMETHOD(device_detach, vtscsi_detach),
DEVMETHOD(device_suspend, vtscsi_suspend),
DEVMETHOD(device_resume, vtscsi_resume),
DEVMETHOD_END
};
static driver_t vtscsi_driver = {
"vtscsi",
vtscsi_methods,
sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;
DRIVER_MODULE(virtio_scsi, virtio_mmio, vtscsi_driver, vtscsi_devclass,
vtscsi_modevent, 0);
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_scsi);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi);
static int
vtscsi_modevent(module_t mod, int type, void *unused)
{
int error;
switch (type) {
case MOD_LOAD:
case MOD_QUIESCE:
case MOD_UNLOAD:
case MOD_SHUTDOWN:
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static int
vtscsi_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
static int
vtscsi_attach(device_t dev)
{
struct vtscsi_softc *sc;
struct virtio_scsi_config scsicfg;
int error;
sc = device_get_softc(dev);
sc->vtscsi_dev = dev;
+ virtio_set_feature_desc(dev, vtscsi_feature_desc);
VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
TAILQ_INIT(&sc->vtscsi_req_free);
vtscsi_get_tunables(sc);
- vtscsi_add_sysctl(sc);
-
- virtio_set_feature_desc(dev, vtscsi_feature_desc);
- vtscsi_negotiate_features(sc);
+ vtscsi_setup_sysctl(sc);
- if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
- sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
- if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
- sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
- if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
- sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
+ error = vtscsi_setup_features(sc);
+ if (error) {
+ device_printf(dev, "cannot setup features\n");
+ goto fail;
+ }
vtscsi_read_config(sc, &scsicfg);
sc->vtscsi_max_channel = scsicfg.max_channel;
sc->vtscsi_max_target = scsicfg.max_target;
sc->vtscsi_max_lun = scsicfg.max_lun;
sc->vtscsi_event_buf_size = scsicfg.event_info_size;
vtscsi_write_device_config(sc);
sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
if (sc->vtscsi_sglist == NULL) {
error = ENOMEM;
device_printf(dev, "cannot allocate sglist\n");
goto fail;
}
error = vtscsi_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
vtscsi_check_sizes(sc);
error = vtscsi_init_event_vq(sc);
if (error) {
device_printf(dev, "cannot populate the eventvq\n");
goto fail;
}
error = vtscsi_alloc_requests(sc);
if (error) {
device_printf(dev, "cannot allocate requests\n");
goto fail;
}
error = vtscsi_alloc_cam(sc);
if (error) {
device_printf(dev, "cannot allocate CAM structures\n");
goto fail;
}
error = virtio_setup_intr(dev, INTR_TYPE_CAM);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupts\n");
goto fail;
}
vtscsi_enable_vqs_intr(sc);
/*
* Register with CAM after interrupts are enabled so we will get
* notified of the probe responses.
*/
error = vtscsi_register_cam(sc);
if (error) {
device_printf(dev, "cannot register with CAM\n");
goto fail;
}
fail:
if (error)
vtscsi_detach(dev);
return (error);
}
static int
vtscsi_detach(device_t dev)
{
struct vtscsi_softc *sc;
sc = device_get_softc(dev);
VTSCSI_LOCK(sc);
sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
if (device_is_attached(dev))
vtscsi_stop(sc);
VTSCSI_UNLOCK(sc);
vtscsi_complete_vqs(sc);
vtscsi_drain_vqs(sc);
vtscsi_free_cam(sc);
vtscsi_free_requests(sc);
if (sc->vtscsi_sglist != NULL) {
sglist_free(sc->vtscsi_sglist);
sc->vtscsi_sglist = NULL;
}
VTSCSI_LOCK_DESTROY(sc);
return (0);
}
static int
vtscsi_suspend(device_t dev)
{
return (0);
}
static int
vtscsi_resume(device_t dev)
{
return (0);
}
-static void
+static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtscsi_dev;
features = VTSCSI_FEATURES;
sc->vtscsi_features = virtio_negotiate_features(dev, features);
- virtio_finalize_features(dev);
+ return (virtio_finalize_features(dev));
+}
+
+static int
+vtscsi_setup_features(struct vtscsi_softc *sc)
+{
+ device_t dev;
+ int error;
+
+ dev = sc->vtscsi_dev;
+
+ error = vtscsi_negotiate_features(sc);
+ if (error)
+ return (error);
+
+ if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+ sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
+ if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
+ sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
+ if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
+ sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
+
+ return (0);
}
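/*
 * The resulting flow is: negotiate (mask the host's offer with
 * VTSCSI_FEATURES), finalize, then derive the driver flags.  For
 * example, a host whose indirect-descriptor and hotplug features are
 * accepted during negotiation yields
 * sc->vtscsi_flags == (VTSCSI_FLAG_INDIRECT | VTSCSI_FLAG_HOTPLUG).
 */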
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg) \
virtio_read_device_config(_dev, \
offsetof(struct virtio_scsi_config, _field), \
&(_cfg)->_field, sizeof((_cfg)->_field)) \
static void
vtscsi_read_config(struct vtscsi_softc *sc,
struct virtio_scsi_config *scsicfg)
{
device_t dev;
dev = sc->vtscsi_dev;
bzero(scsicfg, sizeof(struct virtio_scsi_config));
VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}
#undef VTSCSI_GET_CONFIG
static int
vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
{
int nsegs;
nsegs = VTSCSI_MIN_SEGMENTS;
if (seg_max > 0) {
nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
} else
nsegs += 1;
return (nsegs);
}
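/*
 * Worked example (assuming 4 KiB pages and a 1 MiB maxphys): then
 * maxphys / PAGE_SIZE + 1 = 257, so a hypervisor-reported seg_max of
 * 126 gives nsegs = VTSCSI_MIN_SEGMENTS + 126, while seg_max = 1024
 * would be capped at VTSCSI_MIN_SEGMENTS + 257 (and further clamped
 * to VIRTIO_MAX_INDIRECT when indirect descriptors are in use).
 */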
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info[3];
int nvqs;
dev = sc->vtscsi_dev;
nvqs = 3;
VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
&sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
&sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
"%s request", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
int rqsize;
if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
/*
* Ensure the assertions in virtqueue_enqueue() hold,
* even if the hypervisor reports a bad seg_max.
*/
rqsize = virtqueue_size(sc->vtscsi_request_vq);
if (sc->vtscsi_max_nsegs > rqsize) {
device_printf(sc->vtscsi_dev,
"clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
rqsize);
sc->vtscsi_max_nsegs = rqsize;
}
}
}
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{
virtio_write_dev_config_4(sc->vtscsi_dev,
offsetof(struct virtio_scsi_config, sense_size),
VIRTIO_SCSI_SENSE_SIZE);
/*
* This is the size in the virtio_scsi_cmd_req structure. Note
* this value (32) is larger than the maximum CAM CDB size (16).
*/
virtio_write_dev_config_4(sc->vtscsi_dev,
offsetof(struct virtio_scsi_config, cdb_size),
VIRTIO_SCSI_CDB_SIZE);
}
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
device_t dev;
int error;
dev = sc->vtscsi_dev;
error = virtio_reinit(dev, sc->vtscsi_features);
if (error == 0) {
vtscsi_write_device_config(sc);
virtio_reinit_complete(dev);
vtscsi_reinit_event_vq(sc);
vtscsi_enable_vqs_intr(sc);
}
vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
return (error);
}
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
device_t dev;
struct cam_devq *devq;
int openings;
dev = sc->vtscsi_dev;
openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
devq = cam_simq_alloc(openings);
if (devq == NULL) {
device_printf(dev, "cannot allocate SIM queue\n");
return (ENOMEM);
}
sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
"vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
openings, devq);
if (sc->vtscsi_sim == NULL) {
cam_simq_free(devq);
device_printf(dev, "cannot allocate SIM\n");
return (ENOMEM);
}
return (0);
}
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
device_t dev;
int registered, error;
dev = sc->vtscsi_dev;
registered = 0;
VTSCSI_LOCK(sc);
if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
error = ENOMEM;
device_printf(dev, "cannot register XPT bus\n");
goto fail;
}
registered = 1;
if (xpt_create_path(&sc->vtscsi_path, NULL,
cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
error = ENOMEM;
device_printf(dev, "cannot create bus path\n");
goto fail;
}
if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
error = EIO;
device_printf(dev, "cannot register async callback\n");
goto fail;
}
VTSCSI_UNLOCK(sc);
return (0);
fail:
if (sc->vtscsi_path != NULL) {
xpt_free_path(sc->vtscsi_path);
sc->vtscsi_path = NULL;
}
if (registered != 0)
xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
VTSCSI_UNLOCK(sc);
return (error);
}
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{
VTSCSI_LOCK(sc);
if (sc->vtscsi_path != NULL) {
vtscsi_deregister_async(sc);
xpt_free_path(sc->vtscsi_path);
sc->vtscsi_path = NULL;
xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
}
if (sc->vtscsi_sim != NULL) {
cam_sim_free(sc->vtscsi_sim, 1);
sc->vtscsi_sim = NULL;
}
VTSCSI_UNLOCK(sc);
}
static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
struct cam_sim *sim;
struct vtscsi_softc *sc;
sim = cb_arg;
sc = cam_sim_softc(sim);
vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
/*
* TODO Once QEMU supports event reporting, we should
* (un)subscribe to events here.
*/
switch (code) {
case AC_FOUND_DEVICE:
break;
case AC_LOST_DEVICE:
break;
}
}
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
struct ccb_setasync csa;
xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
csa.callback = vtscsi_cam_async;
csa.callback_arg = sc->vtscsi_sim;
xpt_action((union ccb *) &csa);
return (csa.ccb_h.status);
}
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
struct ccb_setasync csa;
xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = 0;
csa.callback = vtscsi_cam_async;
csa.callback_arg = sc->vtscsi_sim;
xpt_action((union ccb *) &csa);
}
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct vtscsi_softc *sc;
struct ccb_hdr *ccbh;
sc = cam_sim_softc(sim);
ccbh = &ccb->ccb_h;
VTSCSI_LOCK_OWNED(sc);
if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
/*
* The VTSCSI_MTX is briefly dropped between setting
* VTSCSI_FLAG_DETACH and deregistering with CAM, so
* drop any CCBs that come in during that window.
*/
ccbh->status = CAM_NO_HBA;
xpt_done(ccb);
return;
}
switch (ccbh->func_code) {
case XPT_SCSI_IO:
vtscsi_cam_scsi_io(sc, sim, ccb);
break;
case XPT_SET_TRAN_SETTINGS:
ccbh->status = CAM_FUNC_NOTAVAIL;
xpt_done(ccb);
break;
case XPT_GET_TRAN_SETTINGS:
vtscsi_cam_get_tran_settings(sc, ccb);
break;
case XPT_RESET_BUS:
vtscsi_cam_reset_bus(sc, ccb);
break;
case XPT_RESET_DEV:
vtscsi_cam_reset_dev(sc, ccb);
break;
case XPT_ABORT:
vtscsi_cam_abort(sc, ccb);
break;
case XPT_CALC_GEOMETRY:
cam_calc_geometry(&ccb->ccg, 1);
xpt_done(ccb);
break;
case XPT_PATH_INQ:
vtscsi_cam_path_inquiry(sc, sim, ccb);
break;
default:
vtscsi_dprintf(sc, VTSCSI_ERROR,
"invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
ccbh->status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
}
static void
vtscsi_cam_poll(struct cam_sim *sim)
{
struct vtscsi_softc *sc;
sc = cam_sim_softc(sim);
vtscsi_complete_vqs_locked(sc);
}
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
union ccb *ccb)
{
struct ccb_hdr *ccbh;
struct ccb_scsiio *csio;
int error;
ccbh = &ccb->ccb_h;
csio = &ccb->csio;
if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
error = EINVAL;
ccbh->status = CAM_REQ_INVALID;
goto done;
}
if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
(sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
error = EINVAL;
ccbh->status = CAM_REQ_INVALID;
goto done;
}
error = vtscsi_start_scsi_cmd(sc, ccb);
done:
if (error) {
vtscsi_dprintf(sc, VTSCSI_ERROR,
"error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
xpt_done(ccb);
}
}
static void
vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
{
struct ccb_trans_settings *cts;
struct ccb_trans_settings_scsi *scsi;
cts = &ccb->cts;
scsi = &cts->proto_specific.scsi;
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_SPC3;
cts->transport = XPORT_SAS;
cts->transport_version = 0;
scsi->valid = CTS_SCSI_VALID_TQ;
scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
}
static void
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
{
int error;
error = vtscsi_reset_bus(sc);
if (error == 0)
ccb->ccb_h.status = CAM_REQ_CMP;
else
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
error, ccb, ccb->ccb_h.status);
xpt_done(ccb);
}
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
struct ccb_hdr *ccbh;
struct vtscsi_request *req;
int error;
ccbh = &ccb->ccb_h;
req = vtscsi_dequeue_request(sc);
if (req == NULL) {
error = EAGAIN;
vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
goto fail;
}
req->vsr_ccb = ccb;
error = vtscsi_execute_reset_dev_cmd(sc, req);
if (error == 0)
return;
vtscsi_enqueue_request(sc, req);
fail:
vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
error, req, ccb);
if (error == EAGAIN)
ccbh->status = CAM_RESRC_UNAVAIL;
else
ccbh->status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
struct vtscsi_request *req;
struct ccb_hdr *ccbh;
int error;
ccbh = &ccb->ccb_h;
req = vtscsi_dequeue_request(sc);
if (req == NULL) {
error = EAGAIN;
vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
goto fail;
}
req->vsr_ccb = ccb;
error = vtscsi_execute_abort_task_cmd(sc, req);
if (error == 0)
return;
vtscsi_enqueue_request(sc, req);
fail:
vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
error, req, ccb);
if (error == EAGAIN)
ccbh->status = CAM_RESRC_UNAVAIL;
else
ccbh->status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
union ccb *ccb)
{
device_t dev;
struct ccb_pathinq *cpi;
dev = sc->vtscsi_dev;
cpi = &ccb->cpi;
vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
cpi->version_num = 1;
cpi->hba_inquiry = PI_TAG_ABLE;
cpi->target_sprt = 0;
cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
if (vtscsi_bus_reset_disable != 0)
cpi->hba_misc |= PIM_NOBUSRESET;
cpi->hba_eng_cnt = 0;
cpi->max_target = sc->vtscsi_max_target;
cpi->max_lun = sc->vtscsi_max_lun;
cpi->initiator_id = cpi->max_target + 1;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 300000;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_SPC3;
cpi->transport = XPORT_SAS;
cpi->transport_version = 0;
cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
PAGE_SIZE;
cpi->hba_vendor = virtio_get_vendor(dev);
cpi->hba_device = virtio_get_device(dev);
cpi->hba_subvendor = virtio_get_subvendor(dev);
cpi->hba_subdevice = virtio_get_subdevice(dev);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
}
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
struct ccb_scsiio *csio)
{
struct ccb_hdr *ccbh;
struct bus_dma_segment *dseg;
int i, error;
ccbh = &csio->ccb_h;
error = 0;
switch ((ccbh->flags & CAM_DATA_MASK)) {
case CAM_DATA_VADDR:
error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
break;
case CAM_DATA_PADDR:
error = sglist_append_phys(sg,
(vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
break;
case CAM_DATA_SG:
for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
error = sglist_append(sg,
(void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
}
break;
case CAM_DATA_SG_PADDR:
for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
error = sglist_append_phys(sg,
(vm_paddr_t) dseg->ds_addr, dseg->ds_len);
}
break;
case CAM_DATA_BIO:
error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
break;
default:
error = EINVAL;
break;
}
return (error);
}
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
int *readable, int *writable)
{
struct sglist *sg;
struct ccb_hdr *ccbh;
struct ccb_scsiio *csio;
struct virtio_scsi_cmd_req *cmd_req;
struct virtio_scsi_cmd_resp *cmd_resp;
int error;
sg = sc->vtscsi_sglist;
csio = &req->vsr_ccb->csio;
ccbh = &csio->ccb_h;
cmd_req = &req->vsr_cmd_req;
cmd_resp = &req->vsr_cmd_resp;
sglist_reset(sg);
sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
/* At least one segment must be left for the response. */
if (error || sg->sg_nseg == sg->sg_maxseg)
goto fail;
}
*readable = sg->sg_nseg;
sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
if (error)
goto fail;
}
*writable = sg->sg_nseg - *readable;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
"writable=%d\n", req, ccbh, *readable, *writable);
return (0);
fail:
/*
* This should never happen unless maxio was incorrectly set.
*/
vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
"nseg=%d maxseg=%d\n",
error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
return (EFBIG);
}
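/*
 * The resulting descriptor chain layout, with the data buffers placed
 * according to the CAM transfer direction:
 *
 *	readable (device reads):  [cmd_req][data-out ...]
 *	writable (device writes): [cmd_resp][data-in ...]
 *
 * which is why a CAM_DIR_OUT transfer must leave at least one free
 * segment for the response header.
 */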
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
struct sglist *sg;
struct virtqueue *vq;
struct ccb_scsiio *csio;
struct ccb_hdr *ccbh;
struct virtio_scsi_cmd_req *cmd_req;
struct virtio_scsi_cmd_resp *cmd_resp;
int readable, writable, error;
sg = sc->vtscsi_sglist;
vq = sc->vtscsi_request_vq;
csio = &req->vsr_ccb->csio;
ccbh = &csio->ccb_h;
cmd_req = &req->vsr_cmd_req;
cmd_resp = &req->vsr_cmd_resp;
vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);
error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
if (error)
return (error);
req->vsr_complete = vtscsi_complete_scsi_cmd;
cmd_resp->response = -1;
error = virtqueue_enqueue(vq, req, sg, readable, writable);
if (error) {
vtscsi_dprintf(sc, VTSCSI_ERROR,
"enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
ccbh->status = CAM_REQUEUE_REQ;
vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
return (error);
}
ccbh->status |= CAM_SIM_QUEUED;
ccbh->ccbh_vtscsi_req = req;
virtqueue_notify(vq);
if (ccbh->timeout != CAM_TIME_INFINITY) {
req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
0, vtscsi_timedout_scsi_cmd, req, 0);
}
vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
req, ccbh);
return (0);
}
static int
vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
{
struct vtscsi_request *req;
int error;
req = vtscsi_dequeue_request(sc);
if (req == NULL) {
ccb->ccb_h.status = CAM_REQUEUE_REQ;
vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
return (ENOBUFS);
}
req->vsr_ccb = ccb;
error = vtscsi_execute_scsi_cmd(sc, req);
if (error)
vtscsi_enqueue_request(sc, req);
return (error);
}
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *req)
{
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
struct vtscsi_request *to_req;
uint8_t response;
tmf_resp = &req->vsr_tmf_resp;
response = tmf_resp->response;
to_req = req->vsr_timedout_req;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
req, to_req, response);
vtscsi_enqueue_request(sc, req);
/*
* The timedout request could have completed between when the
* abort task was sent and when the host processed it.
*/
if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
return;
/* The timedout request was successfully aborted. */
if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
return;
/* Don't bother if the device is going away. */
if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
return;
/* The timedout request will be aborted by the reset. */
if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
return;
vtscsi_reset_bus(sc);
}
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *to_req)
{
struct sglist *sg;
struct ccb_hdr *to_ccbh;
struct vtscsi_request *req;
struct virtio_scsi_ctrl_tmf_req *tmf_req;
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
int error;
sg = sc->vtscsi_sglist;
to_ccbh = &to_req->vsr_ccb->ccb_h;
req = vtscsi_dequeue_request(sc);
if (req == NULL) {
error = ENOBUFS;
goto fail;
}
tmf_req = &req->vsr_tmf_req;
tmf_resp = &req->vsr_tmf_resp;
vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
(uintptr_t) to_ccbh, tmf_req);
sglist_reset(sg);
sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
req->vsr_timedout_req = to_req;
req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
tmf_resp->response = -1;
error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
VTSCSI_EXECUTE_ASYNC);
if (error == 0)
return (0);
vtscsi_enqueue_request(sc, req);
fail:
vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
"timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);
return (error);
}
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
struct vtscsi_softc *sc;
struct vtscsi_request *to_req;
to_req = xreq;
sc = to_req->vsr_softc;
vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
to_req, to_req->vsr_ccb, to_req->vsr_state);
/* Don't bother if the device is going away. */
if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
return;
/*
* Bail if the request is not in use. We likely raced when
* stopping the callout handler or it has already been aborted.
*/
if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
(to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
return;
/*
* Complete the request queue in case the timedout request is
* actually just pending.
*/
vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
return;
sc->vtscsi_stats.scsi_cmd_timeouts++;
to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;
if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
return;
vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
vtscsi_reset_bus(sc);
}
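/*
 * Summary of the escalation above: first complete the request virtqueue
 * in case the timed-out command is merely pending; if it is still
 * outstanding, mark it TIMEDOUT and issue an ABORT_TASK TMF; only when
 * even that cannot be issued does the driver fall back to a full bus
 * reset.
 */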
static cam_status
vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
{
cam_status status;
switch (cmd_resp->response) {
case VIRTIO_SCSI_S_OK:
status = CAM_REQ_CMP;
break;
case VIRTIO_SCSI_S_OVERRUN:
status = CAM_DATA_RUN_ERR;
break;
case VIRTIO_SCSI_S_ABORTED:
status = CAM_REQ_ABORTED;
break;
case VIRTIO_SCSI_S_BAD_TARGET:
status = CAM_SEL_TIMEOUT;
break;
case VIRTIO_SCSI_S_RESET:
status = CAM_SCSI_BUS_RESET;
break;
case VIRTIO_SCSI_S_BUSY:
status = CAM_SCSI_BUSY;
break;
case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
case VIRTIO_SCSI_S_TARGET_FAILURE:
case VIRTIO_SCSI_S_NEXUS_FAILURE:
status = CAM_SCSI_IT_NEXUS_LOST;
break;
default: /* VIRTIO_SCSI_S_FAILURE */
status = CAM_REQ_CMP_ERR;
break;
}
return (status);
}
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
uint32_t resp_sense_length;
cam_status status;
csio->scsi_status = cmd_resp->status;
csio->resid = vtscsi_htog32(sc, cmd_resp->resid);
if (csio->scsi_status == SCSI_STATUS_OK)
status = CAM_REQ_CMP;
else
status = CAM_SCSI_STATUS_ERROR;
resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);
if (resp_sense_length > 0) {
status |= CAM_AUTOSNS_VALID;
if (resp_sense_length < csio->sense_len)
csio->sense_resid = csio->sense_len - resp_sense_length;
else
csio->sense_resid = 0;
memcpy(&csio->sense_data, cmd_resp->sense,
csio->sense_len - csio->sense_resid);
}
vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
"ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
csio, csio->scsi_status, csio->resid, csio->sense_resid);
return (status);
}
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
struct ccb_hdr *ccbh;
struct ccb_scsiio *csio;
struct virtio_scsi_cmd_resp *cmd_resp;
cam_status status;
csio = &req->vsr_ccb->csio;
ccbh = &csio->ccb_h;
cmd_resp = &req->vsr_cmd_resp;
KASSERT(ccbh->ccbh_vtscsi_req == req,
("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));
if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
callout_stop(&req->vsr_callout);
status = vtscsi_scsi_cmd_cam_status(cmd_resp);
if (status == CAM_REQ_ABORTED) {
if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
status = CAM_CMD_TIMEOUT;
} else if (status == CAM_REQ_CMP)
status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);
if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
status |= CAM_DEV_QFRZN;
xpt_freeze_devq(ccbh->path, 1);
}
if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
status |= CAM_RELEASE_SIMQ;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
req, ccbh, status);
ccbh->status = status;
xpt_done(req->vsr_ccb);
vtscsi_enqueue_request(sc, req);
}
static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
/* XXX We probably shouldn't poll forever. */
req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
do
vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}
static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
struct sglist *sg, int readable, int writable, int flag)
{
struct virtqueue *vq;
int error;
vq = sc->vtscsi_control_vq;
MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
error = virtqueue_enqueue(vq, req, sg, readable, writable);
if (error) {
/*
* Return EAGAIN when the virtqueue does not have enough
* descriptors available.
*/
if (error == ENOSPC || error == EMSGSIZE)
error = EAGAIN;
return (error);
}
virtqueue_notify(vq);
if (flag == VTSCSI_EXECUTE_POLL)
vtscsi_poll_ctrl_req(sc, req);
return (0);
}
static void
vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *req)
{
union ccb *ccb;
struct ccb_hdr *ccbh;
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
ccb = req->vsr_ccb;
ccbh = &ccb->ccb_h;
tmf_resp = &req->vsr_tmf_resp;
switch (tmf_resp->response) {
case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
ccbh->status = CAM_REQ_CMP;
break;
case VIRTIO_SCSI_S_FUNCTION_REJECTED:
ccbh->status = CAM_UA_ABORT;
break;
default:
ccbh->status = CAM_REQ_CMP_ERR;
break;
}
xpt_done(ccb);
vtscsi_enqueue_request(sc, req);
}
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *req)
{
struct sglist *sg;
struct ccb_abort *cab;
struct ccb_hdr *ccbh;
struct ccb_hdr *abort_ccbh;
struct vtscsi_request *abort_req;
struct virtio_scsi_ctrl_tmf_req *tmf_req;
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
int error;
sg = sc->vtscsi_sglist;
cab = &req->vsr_ccb->cab;
ccbh = &cab->ccb_h;
tmf_req = &req->vsr_tmf_req;
tmf_resp = &req->vsr_tmf_resp;
/* CCB header and request that's to be aborted. */
abort_ccbh = &cab->abort_ccb->ccb_h;
abort_req = abort_ccbh->ccbh_vtscsi_req;
if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
error = EINVAL;
goto fail;
}
/* Only attempt to abort requests that could be in-flight. */
if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
error = EALREADY;
goto fail;
}
abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
callout_stop(&abort_req->vsr_callout);
vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
(uintptr_t) abort_ccbh, tmf_req);
sglist_reset(sg);
sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
req->vsr_complete = vtscsi_complete_abort_task_cmd;
tmf_resp->response = -1;
error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
VTSCSI_EXECUTE_ASYNC);
fail:
vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
"abort_req=%p\n", error, req, abort_ccbh, abort_req);
return (error);
}
static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *req)
{
union ccb *ccb;
struct ccb_hdr *ccbh;
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
ccb = req->vsr_ccb;
ccbh = &ccb->ccb_h;
tmf_resp = &req->vsr_tmf_resp;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
req, ccb, tmf_resp->response);
if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
ccbh->status = CAM_REQ_CMP;
vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
ccbh->target_lun);
} else
ccbh->status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
vtscsi_enqueue_request(sc, req);
}
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
struct vtscsi_request *req)
{
struct sglist *sg;
struct ccb_resetdev *crd;
struct ccb_hdr *ccbh;
struct virtio_scsi_ctrl_tmf_req *tmf_req;
struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
uint32_t subtype;
int error;
sg = sc->vtscsi_sglist;
crd = &req->vsr_ccb->crd;
ccbh = &crd->ccb_h;
tmf_req = &req->vsr_tmf_req;
tmf_resp = &req->vsr_tmf_resp;
if (ccbh->target_lun == CAM_LUN_WILDCARD)
subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
else
subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);
sglist_reset(sg);
sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
req->vsr_complete = vtscsi_complete_reset_dev_cmd;
tmf_resp->response = -1;
error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
VTSCSI_EXECUTE_ASYNC);
vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
error, req, ccbh);
return (error);
}
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{
*target_id = lun[1];
*lun_id = (lun[2] << 8) | lun[3];
}
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{
lun[0] = 1;
lun[1] = ccbh->target_id;
lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
lun[3] = ccbh->target_lun & 0xFF;
}
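/*
 * Example of the single-level LUN encoding above: target 3, LUN 5 maps
 * to lun[] = { 1, 3, 0x40, 0x05 }.  lun[2] carries the 0x40 flat
 * addressing bit plus the high 6 bits of the LUN, so LUNs up to 16383
 * fit in the 14 bits spread across lun[2] and lun[3].
 */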
static void
vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
struct virtio_scsi_cmd_req *cmd_req)
{
uint8_t attr;
switch (csio->tag_action) {
case MSG_HEAD_OF_Q_TAG:
attr = VIRTIO_SCSI_S_HEAD;
break;
case MSG_ORDERED_Q_TAG:
attr = VIRTIO_SCSI_S_ORDERED;
break;
case MSG_ACA_TASK:
attr = VIRTIO_SCSI_S_ACA;
break;
default: /* MSG_SIMPLE_Q_TAG */
attr = VIRTIO_SCSI_S_SIMPLE;
break;
}
vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
cmd_req->task_attr = attr;
memcpy(cmd_req->cdb,
csio->ccb_h.flags & CAM_CDB_POINTER ?
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
csio->cdb_len);
}
static void
vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{
vtscsi_set_request_lun(ccbh, tmf_req->lun);
tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
tmf_req->tag = vtscsi_gtoh64(sc, tag);
}
static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
int frozen;
frozen = sc->vtscsi_frozen;
if (reason & VTSCSI_REQUEST &&
(sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
if (reason & VTSCSI_REQUEST_VQ &&
(sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
/* Freeze the SIMQ if transitioned to frozen. */
if (frozen == 0 && sc->vtscsi_frozen != 0) {
vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
xpt_freeze_simq(sc->vtscsi_sim, 1);
}
}
static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
int thawed;
if (sc->vtscsi_frozen == 0 || reason == 0)
return (0);
if (reason & VTSCSI_REQUEST &&
sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
if (reason & VTSCSI_REQUEST_VQ &&
sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
thawed = sc->vtscsi_frozen == 0;
if (thawed != 0)
vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
return (thawed);
}
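/*
 * Freeze/thaw sketch: the SIMQ is frozen once, when vtscsi_frozen first
 * becomes nonzero, and released only when every reason bit clears.  For
 * example, if frozen for both NO_REQUESTS and REQUEST_VQ_FULL, a thaw
 * for VTSCSI_REQUEST alone clears one bit and returns 0; a later
 * VTSCSI_REQUEST_VQ thaw clears the last bit, returns 1, and the caller
 * then releases the SIMQ.
 */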
static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
target_id_t target_id, lun_id_t lun_id)
{
struct cam_path *path;
/* Use the wildcard path from our softc for bus announcements. */
if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
xpt_async(ac_code, sc->vtscsi_path, NULL);
return;
}
if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
target_id, lun_id) != CAM_REQ_CMP) {
vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
return;
}
xpt_async(ac_code, path, NULL);
xpt_free_path(path);
}
static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
lun_id_t lun_id)
{
union ccb *ccb;
cam_status status;
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
return;
}
status = xpt_create_path(&ccb->ccb_h.path, NULL,
cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
if (status != CAM_REQ_CMP) {
xpt_free_ccb(ccb);
return;
}
xpt_rescan(ccb);
}
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{
vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
struct virtio_scsi_event *event)
{
target_id_t target_id;
lun_id_t lun_id;
vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
switch (event->reason) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
case VIRTIO_SCSI_EVT_RESET_REMOVED:
vtscsi_execute_rescan(sc, target_id, lun_id);
break;
default:
device_printf(sc->vtscsi_dev,
"unhandled transport event reason: %d\n", event->reason);
break;
}
}
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
int error;
if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
switch (event->event) {
case VIRTIO_SCSI_T_TRANSPORT_RESET:
vtscsi_transport_reset_event(sc, event);
break;
default:
device_printf(sc->vtscsi_dev,
"unhandled event: %d\n", event->event);
break;
}
} else
vtscsi_execute_rescan_bus(sc);
/*
* This should always be successful since the buffer
* was just dequeued.
*/
error = vtscsi_enqueue_event_buf(sc, event);
KASSERT(error == 0,
("cannot requeue event buffer: %d", error));
}
static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
struct virtio_scsi_event *event)
{
struct sglist *sg;
struct virtqueue *vq;
int size, error;
sg = sc->vtscsi_sglist;
vq = sc->vtscsi_event_vq;
size = sc->vtscsi_event_buf_size;
bzero(event, size);
sglist_reset(sg);
error = sglist_append(sg, event, size);
if (error)
return (error);
error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
if (error)
return (error);
virtqueue_notify(vq);
return (0);
}
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
struct virtio_scsi_event *event;
int i, size, error;
/*
* The first release of QEMU with VirtIO SCSI support would crash
* when attempting to notify the event virtqueue. This was fixed
* when hotplug support was added.
*/
if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
size = sc->vtscsi_event_buf_size;
else
size = 0;
if (size < sizeof(struct virtio_scsi_event))
return (0);
for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
event = &sc->vtscsi_event_bufs[i];
error = vtscsi_enqueue_event_buf(sc, event);
if (error)
break;
}
/*
* Even just one buffer is enough. Missed events are
* denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
*/
if (i > 0)
error = 0;
return (error);
}
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
struct virtio_scsi_event *event;
int i, error;
if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
return;
for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
event = &sc->vtscsi_event_bufs[i];
error = vtscsi_enqueue_event_buf(sc, event);
if (error)
break;
}
KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
static void
vtscsi_drain_event_vq(struct vtscsi_softc *sc)
{
struct virtqueue *vq;
int last;
vq = sc->vtscsi_event_vq;
last = 0;
while (virtqueue_drain(vq, &last) != NULL)
;
KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
}
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{
VTSCSI_LOCK_OWNED(sc);
if (sc->vtscsi_request_vq != NULL)
vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
if (sc->vtscsi_control_vq != NULL)
vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{
VTSCSI_LOCK(sc);
vtscsi_complete_vqs_locked(sc);
VTSCSI_UNLOCK(sc);
}
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
union ccb *ccb;
int detach;
ccb = req->vsr_ccb;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);
/*
* The callout must be drained when detaching since the request is
* about to be freed. The VTSCSI_MTX must not be held for this in
* case the callout is pending because there is a deadlock potential.
* Otherwise, the virtqueue is being drained because of a bus reset
* so we only need to attempt to stop the callouts.
*/
detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
if (detach != 0)
VTSCSI_LOCK_NOTOWNED(sc);
else
VTSCSI_LOCK_OWNED(sc);
if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
if (detach != 0)
callout_drain(&req->vsr_callout);
else
callout_stop(&req->vsr_callout);
}
if (ccb != NULL) {
if (detach != 0) {
VTSCSI_LOCK(sc);
ccb->ccb_h.status = CAM_NO_HBA;
} else
ccb->ccb_h.status = CAM_REQUEUE_REQ;
xpt_done(ccb);
if (detach != 0)
VTSCSI_UNLOCK(sc);
}
vtscsi_enqueue_request(sc, req);
}
static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
struct vtscsi_request *req;
int last;
last = 0;
vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
while ((req = virtqueue_drain(vq, &last)) != NULL)
vtscsi_cancel_request(sc, req);
KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{
if (sc->vtscsi_control_vq != NULL)
vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
if (sc->vtscsi_request_vq != NULL)
vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
if (sc->vtscsi_event_vq != NULL)
vtscsi_drain_event_vq(sc);
}
static void
vtscsi_stop(struct vtscsi_softc *sc)
{
vtscsi_disable_vqs_intr(sc);
virtio_stop(sc->vtscsi_dev);
}
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
int error;
VTSCSI_LOCK_OWNED(sc);
if (vtscsi_bus_reset_disable != 0) {
device_printf(sc->vtscsi_dev, "bus reset disabled\n");
return (0);
}
sc->vtscsi_flags |= VTSCSI_FLAG_RESET;
/*
* vtscsi_stop() will cause the in-flight requests to be canceled.
* Those requests are then completed here so CAM will retry them
* after the reset is complete.
*/
vtscsi_stop(sc);
vtscsi_complete_vqs_locked(sc);
/* Rid the virtqueues of any remaining requests. */
vtscsi_drain_vqs(sc);
/*
* Any resource shortage that froze the SIMQ cannot persist across
* a bus reset so ensure it gets thawed here.
*/
if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
xpt_release_simq(sc->vtscsi_sim, 0);
error = vtscsi_reinit(sc);
if (error) {
device_printf(sc->vtscsi_dev,
"reinitialization failed, stopping device...\n");
vtscsi_stop(sc);
} else
vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD);
sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;
return (error);
}
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
#ifdef INVARIANTS
int req_nsegs, resp_nsegs;
req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
KASSERT(req_nsegs == 1, ("request crossed page boundary"));
KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif
req->vsr_softc = sc;
callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
struct vtscsi_request *req;
int i, nreqs;
/*
* Commands destined for either the request or control queues come
* from the same SIM queue. Use the size of the request virtqueue
* since it should be the much more frequently used of the two. Some
* additional requests are allocated for internal (TMF) use.
*/
nreqs = virtqueue_size(sc->vtscsi_request_vq);
if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
nreqs /= VTSCSI_MIN_SEGMENTS;
nreqs += VTSCSI_RESERVED_REQUESTS;
for (i = 0; i < nreqs; i++) {
req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
M_NOWAIT);
if (req == NULL)
return (ENOMEM);
vtscsi_init_request(sc, req);
sc->vtscsi_nrequests++;
vtscsi_enqueue_request(sc, req);
}
return (0);
}
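/*
 * Sizing example (hypothetical numbers): a 128-entry request virtqueue
 * without indirect descriptors yields 128 / VTSCSI_MIN_SEGMENTS requests
 * plus VTSCSI_RESERVED_REQUESTS for internal TMF use; with indirect
 * descriptors the full 128 is used, since each request then occupies
 * only a single descriptor slot in the ring.
 */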
static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
struct vtscsi_request *req;
while ((req = vtscsi_dequeue_request(sc)) != NULL) {
KASSERT(callout_active(&req->vsr_callout) == 0,
("request callout still active"));
sc->vtscsi_nrequests--;
free(req, M_DEVBUF);
}
KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
sc->vtscsi_nrequests));
}
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
KASSERT(req->vsr_softc == sc,
("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
/* A request is available so the SIMQ could be released. */
if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
xpt_release_simq(sc->vtscsi_sim, 1);
req->vsr_ccb = NULL;
req->vsr_complete = NULL;
req->vsr_ptr0 = NULL;
req->vsr_state = VTSCSI_REQ_STATE_FREE;
req->vsr_flags = 0;
bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
/*
* Insert at the tail of the queue so that a just-freed request is
* very unlikely to be reused while we may still be racing with the
* stopping of its callout handler.
*/
TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
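/*
* Pull a request off the free list, or return NULL (and count the
* failure) when the list is empty; callers typically freeze the SIMQ
* in response.
*/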
static struct vtscsi_request *
vtscsi_dequeue_request(struct vtscsi_softc *sc)
{
struct vtscsi_request *req;
req = TAILQ_FIRST(&sc->vtscsi_req_free);
if (req != NULL) {
req->vsr_state = VTSCSI_REQ_STATE_INUSE;
TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
} else
sc->vtscsi_stats.dequeue_no_requests++;
vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
return (req);
}
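/*
* For polled requests, VTSCSI_REQ_FLAG_COMPLETE is what the polling
* loop spins on; interrupt-driven requests are finished through their
* vsr_complete callback.
*/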
static void
vtscsi_complete_request(struct vtscsi_request *req)
{
if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
if (req->vsr_complete != NULL)
req->vsr_complete(req->vsr_softc, req);
}
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
struct vtscsi_request *req;
VTSCSI_LOCK_OWNED(sc);
while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
vtscsi_complete_request(req);
}
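/*
* The three virtqueue interrupt handlers below share the same pattern:
* process completed entries with the lock held, then re-enable the
* interrupt. If virtqueue_enable_intr() reports that more entries
* arrived in the meantime, disable the interrupt again and rescan so
* no completion is lost.
*/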
static void
vtscsi_control_vq_intr(void *xsc)
{
struct vtscsi_softc *sc;
struct virtqueue *vq;
sc = xsc;
vq = sc->vtscsi_control_vq;
again:
VTSCSI_LOCK(sc);
vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
if (virtqueue_enable_intr(vq) != 0) {
virtqueue_disable_intr(vq);
VTSCSI_UNLOCK(sc);
goto again;
}
VTSCSI_UNLOCK(sc);
}
static void
vtscsi_event_vq_intr(void *xsc)
{
struct vtscsi_softc *sc;
struct virtqueue *vq;
struct virtio_scsi_event *event;
sc = xsc;
vq = sc->vtscsi_event_vq;
again:
VTSCSI_LOCK(sc);
while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
vtscsi_handle_event(sc, event);
if (virtqueue_enable_intr(vq) != 0) {
virtqueue_disable_intr(vq);
VTSCSI_UNLOCK(sc);
goto again;
}
VTSCSI_UNLOCK(sc);
}
static void
vtscsi_request_vq_intr(void *xsc)
{
struct vtscsi_softc *sc;
struct virtqueue *vq;
sc = xsc;
vq = sc->vtscsi_request_vq;
again:
VTSCSI_LOCK(sc);
vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
if (virtqueue_enable_intr(vq) != 0) {
virtqueue_disable_intr(vq);
VTSCSI_UNLOCK(sc);
goto again;
}
VTSCSI_UNLOCK(sc);
}
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{
virtqueue_disable_intr(sc->vtscsi_control_vq);
virtqueue_disable_intr(sc->vtscsi_event_vq);
virtqueue_disable_intr(sc->vtscsi_request_vq);
}
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{
virtqueue_enable_intr(sc->vtscsi_control_vq);
virtqueue_enable_intr(sc->vtscsi_event_vq);
virtqueue_enable_intr(sc->vtscsi_request_vq);
}
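/*
* Fetch loader tunables. The per-device dev.vtscsi.<unit>.debug_level
* tunable is fetched last, so it overrides the global
* hw.vtscsi.debug_level when both are set.
*/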
static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
char tmpstr[64];
TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
device_get_unit(sc->vtscsi_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}
static void
-vtscsi_add_sysctl(struct vtscsi_softc *sc)
+vtscsi_setup_sysctl(struct vtscsi_softc *sc)
{
device_t dev;
struct vtscsi_statistics *stats;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = sc->vtscsi_dev;
stats = &sc->vtscsi_stats;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
CTLFLAG_RW, &sc->vtscsi_debug, 0,
"Debug level");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
CTLFLAG_RD, &stats->scsi_cmd_timeouts,
"SCSI command timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
CTLFLAG_RD, &stats->dequeue_no_requests,
"No available requests to dequeue");
}
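/*
* Pretty-print a message for a request, prefixed with its CAM path when
* a CCB is attached. The sbuf is backed by the fixed on-stack buffer,
* so overly long output is truncated rather than reallocated.
*/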
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
const char *fmt, ...)
{
struct vtscsi_softc *sc;
union ccb *ccb;
struct sbuf sb;
va_list ap;
char str[192];
char path_str[64];
if (req == NULL)
return;
sc = req->vsr_softc;
ccb = req->vsr_ccb;
va_start(ap, fmt);
sbuf_new(&sb, str, sizeof(str), 0);
if (ccb == NULL) {
sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
cam_sim_bus(sc->vtscsi_sim));
} else {
xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
sbuf_cat(&sb, path_str);
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
scsi_command_string(&ccb->csio, &sb);
sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
}
}
sbuf_vprintf(&sb, fmt, ap);
va_end(ap);
sbuf_finish(&sb);
printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
sbuf_data(&sb));
}
