Merge pull request #35 from xez/vmnet-tap

Fix and reenable optional TAP device backend for virtio-net
Michael Steil 2015-07-12 08:12:12 -07:00
commit 0a02fbd974
4 changed files with 75 additions and 15 deletions

Makefile

@@ -50,7 +50,8 @@ XHYVE_SRC := \
 	src/pci_lpc.c \
 	src/pci_uart.c \
 	src/pci_virtio_block.c \
-	src/pci_virtio_vmnet.c \
+	src/pci_virtio_net_tap.c \
+	src/pci_virtio_net_vmnet.c \
 	src/pci_virtio_rnd.c \
 	src/pm.c \
 	src/post.c \

README.md

@@ -156,6 +156,18 @@ If you want the same IP address across VM reboots, assign a UUID to a particular
 
     $ xhyve [-U uuid]
 
+**Optional:**
+
+If you need more advanced networking and already have a configured [TAP](http://tuntaposx.sourceforge.net) device you can use it with:
+
+    virtio-tap,tapX
+
+instead of:
+
+    virtio-net
+
+Where *X* is the number of your tap device, i.e. */dev/tapX*.
+
 Issues
 ------
 If you are, or were, running any version of VirtualBox, prior to 5.0, and
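
A complete boot line using the new backend might then look like this (a sketch only: the tap0 device, the kernel and ramdisk names, and the slot numbers are illustrative, modeled on xhyve's usual -s slot syntax rather than taken from this commit):

    $ sudo xhyve -m 1G \
        -s 0:0,hostbridge -s 31,lpc -l com1,stdio \
        -s 2:0,virtio-tap,tap0 \
        -f kexec,vmlinuz,initrd.gz,"console=ttyS0"

sudo is shown because opening */dev/tapX* ordinarily requires root or relaxed permissions on the device node.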
@@ -174,25 +186,26 @@ TODO
 ----
 - vmm:
-  - enable APIC access page to speed up APIC emulation
-  - enable x2APIC MSRs (even faster)
+  - enable APIC access page to speed up APIC emulation (**performance**)
+  - enable x2APIC MSRs (even faster) (**performance**)
   - vmm_callout:
     - is a quick'n'dirty implementation of the FreeBSD kernel callout mechanism
     - seems to be racy
     - fix races or perhaps replace with something better
-    - use per vCPU timer event thread (performance)?
+    - use per vCPU timer event thread (**performance**)?
+    - use hardware VMX preemption timer instead of `pthread_cond_wait` (**performance**)
   - some 32-bit guests are broken (support PAE paging in VMCS)
-  - PCID guest support (performance)
+  - PCID guest support (**performance**)
 - block_if:
-  - OS X does not support preadv/pwritev, we need to serialize reads and writes for the time being until we find a better solution.
+  - OS X does not support `preadv`/`pwritev`; we need to serialize reads and writes for the time being until we find a better solution. (**performance**)
   - support block devices other than plain files
 - virtio_net:
-  - make it not require root
-  - unify TAP and vmnet backends
-  - performance: send/receive more than a single packet at a time
+  - vmnet: make it not require root
+  - vmnet: send/receive more than a single packet at a time (**performance**)
 - virtio_rnd:
   - is untested
 - remove explicit state transitions:
-  - since only the owning task/thread can modify the VM/vCPUs a lot of the synchronization might be unnecessary
+  - since only the owning task/thread can modify the VM/vCPUs a lot of the synchronization might be unnecessary (**performance**)
 - performance, performance and performance
 - remove vestigial code, cleanup
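
The block_if note above is worth one concrete step: without preadv()/pwritev(), a vectored read at a file offset has to be emulated segment by segment, and because that sequence is not atomic, overlapping requests must be serialized by the caller. A minimal sketch in that spirit (the helper name xhyve_preadv and its error handling are hypothetical, not code from this commit):

#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

/*
 * Hypothetical stand-in for the missing preadv(): one pread() per iovec
 * segment. The segments are filled back to back, so a concurrent writer
 * can interleave between calls; that non-atomicity is why block_if has
 * to serialize requests for now.
 */
static ssize_t
xhyve_preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset)
{
	ssize_t total = 0;

	for (int i = 0; i < iovcnt; i++) {
		ssize_t n = pread(fd, iov[i].iov_base, iov[i].iov_len,
		    offset + total);

		if (n < 0)
			return ((total > 0) ? total : -1);
		total += n;
		if ((size_t)n < iov[i].iov_len)
			break; /* short read: end of file or device */
	}
	return (total);
}

A pwritev() counterpart follows the same shape with pwrite(); the window between segment operations is exactly why block_if serializes requests for the time being.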

src/pci_virtio_net_tap.c

@@ -51,6 +51,8 @@
 #include <xhyve/mevent.h>
 #include <xhyve/virtio.h>
 
+#define USE_MEVENT 0
+
 #define VTNET_RINGSZ 1024
 #define VTNET_MAXSEGS 32
@@ -370,6 +372,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
 	vq_endchains(vq, 1);
 }
 
+#if USE_MEVENT
 static void
 pci_vtnet_tap_callback(UNUSED int fd, UNUSED enum ev_type type, void *param)
 {
@@ -383,6 +386,37 @@ pci_vtnet_tap_callback(UNUSED int fd, UNUSED enum ev_type type, void *param)
 }
 
+#else /* !USE_MEVENT */
+
+/*
+ * Dedicated receive thread: block in select() until the tap descriptor
+ * is readable, then drain frames into the rx virtqueue under rx_mtx.
+ */
+static void *
+pci_vtnet_tap_select_func(void *vsc) {
+	struct pci_vtnet_softc *sc;
+	fd_set rfd;
+
+	sc = vsc;
+
+	assert(sc);
+	assert(sc->vsc_tapfd != -1);
+
+	/* one fd, no timeout: the readable bit survives each select() call */
+	FD_ZERO(&rfd);
+	FD_SET(sc->vsc_tapfd, &rfd);
+
+	while (1) {
+		if (select((sc->vsc_tapfd + 1), &rfd, NULL, NULL, NULL) == -1) {
+			abort();
+		}
+
+		/* drain queued frames while holding the rx lock */
+		pthread_mutex_lock(&sc->rx_mtx);
+		sc->rx_in_progress = 1;
+		pci_vtnet_tap_rx(sc);
+		sc->rx_in_progress = 0;
+		pthread_mutex_unlock(&sc->rx_mtx);
+	}
+
+	return (NULL);
+}
+
+#endif
 
 static void
 pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
 {
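
The #if/#else above keeps the kqueue-backed mevent path in the tree but compiles it out, which suggests kqueue could not watch these tap character devices; kqueue support varies by driver, and a blocking select() thread is the conservative fallback. For contrast, waiting for readability through the raw kqueue(2) API looks like this (a standalone illustration, not xhyve's mevent wrapper; wait_readable_kqueue is a hypothetical helper):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

/*
 * Hypothetical helper: block until fd is readable, using kqueue directly.
 * Returns 0 when the event fires, -1 on error.
 */
static int
wait_readable_kqueue(int fd)
{
	struct kevent want, got;
	int kq, n;

	kq = kqueue();
	if (kq == -1)
		return (-1);

	/* register one-shot interest in fd becoming readable */
	EV_SET(&want, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);

	/* NULL timeout: wait indefinitely, like the select() call above */
	n = kevent(kq, &want, 1, &got, 1, NULL);
	close(kq);
	return (n == 1 ? 0 : -1);
}

mevent hides this pattern behind mevent_add() callbacks; when a driver does not implement kqueue filters, registration fails and select() remains the portable way out.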
@@ -542,6 +576,9 @@ pci_vtnet_init(struct pci_devinst *pi, char *opts)
 	char *devname;
 	char *vtopts;
 	int mac_provided;
+#if !USE_MEVENT
+	pthread_t sthrd;
+#endif
 
 	sc = calloc(1, sizeof(struct pci_vtnet_softc));
@@ -601,6 +638,7 @@ pci_vtnet_init(struct pci_devinst *pi, char *opts)
 			sc->vsc_tapfd = -1;
 		}
 
+#if USE_MEVENT
 		sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
 					  EVF_READ,
 					  pci_vtnet_tap_callback,
@@ -610,7 +648,15 @@ pci_vtnet_init(struct pci_devinst *pi, char *opts)
 			close(sc->vsc_tapfd);
 			sc->vsc_tapfd = -1;
 		}
-	}
+#else /* !USE_MEVENT */
+		if (pthread_create(&sthrd, NULL, pci_vtnet_tap_select_func, sc)) {
+			WPRINTF(("Could not create tap receive thread\n"));
+			close(sc->vsc_tapfd);
+			sc->vsc_tapfd = -1;
+		}
+#endif
+	}
 }
 
 /*
@@ -715,10 +761,10 @@ pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
 	}
 }
 
-static struct pci_devemu pci_de_vnet = {
-	.pe_emu = "virtio-net",
+static struct pci_devemu pci_de_vnet_tap = {
+	.pe_emu = "virtio-tap",
 	.pe_init = pci_vtnet_init,
 	.pe_barwrite = vi_pci_write,
 	.pe_barread = vi_pci_read
 };
-PCI_EMUL_SET(pci_de_vnet);
+PCI_EMUL_SET(pci_de_vnet_tap);
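
Renaming the struct and its pe_emu string (mirrored by the vmnet file below) is what lets both backends live in one binary: PCI_EMUL_SET drops each pci_devemu into a linker set, and the PCI layer resolves the name given in -s <slot>,<name> against that set. A simplified model of that lookup (illustrative only; the real xhyve/bhyve code builds the set with DATA_SET-style linker sets and searches it in pci_emul_finddev()):

#include <stdio.h>
#include <string.h>

/* Trimmed-down pci_devemu: only the fields this diff touches. */
struct pci_devemu {
	const char *pe_emu;	/* name matched against -s <slot>,<name> */
	int (*pe_init)(void *pi, char *opts);
};

/* Stand-in for the linker set that PCI_EMUL_SET populates at link time. */
static const struct pci_devemu *pci_set[8];
static int pci_set_len;

static void
pci_register(const struct pci_devemu *pde)
{
	pci_set[pci_set_len++] = pde;
}

/* Simplified analogue of pci_emul_finddev(): linear scan by pe_emu name. */
static const struct pci_devemu *
pci_find(const char *name)
{
	for (int i = 0; i < pci_set_len; i++)
		if (strcmp(pci_set[i]->pe_emu, name) == 0)
			return (pci_set[i]);
	return (NULL);
}

int
main(void)
{
	static const struct pci_devemu tap = { "virtio-tap", NULL };
	static const struct pci_devemu net = { "virtio-net", NULL };

	pci_register(&tap);
	pci_register(&net);
	printf("%s\n", pci_find("virtio-tap")->pe_emu);	/* prints virtio-tap */
	return (0);
}

Note that it is the distinct pe_emu strings, not the struct names, that keep the two backends apart at lookup time; the struct rename just keeps the two files readable side by side.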

src/pci_virtio_net_vmnet.c

@@ -811,10 +811,10 @@ pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
 	}
 }
 
-static struct pci_devemu pci_de_vnet = {
+static struct pci_devemu pci_de_vnet_vmnet = {
 	.pe_emu = "virtio-net",
 	.pe_init = pci_vtnet_init,
 	.pe_barwrite = vi_pci_write,
 	.pe_barread = vi_pci_read
 };
-PCI_EMUL_SET(pci_de_vnet);
+PCI_EMUL_SET(pci_de_vnet_vmnet);