Michael Steil 2015-06-09 21:28:44 -07:00
parent 04180a5a81
commit 6322d86531
173 changed files with 18281 additions and 24451 deletions

Makefile (new file, 103 lines)

@@ -0,0 +1,103 @@
ifeq ($V, 1)
VERBOSE =
else
VERBOSE = @
endif
include config.mk
VMM_SRC := \
src/vmm/x86.c \
src/vmm/vmm.c \
src/vmm/vmm_host.c \
src/vmm/vmm_mem.c \
src/vmm/vmm_lapic.c \
src/vmm/vmm_instruction_emul.c \
src/vmm/vmm_ioport.c \
src/vmm/vmm_callout.c \
src/vmm/vmm_stat.c \
src/vmm/vmm_util.c \
src/vmm/vmm_api.c \
src/vmm/intel/vmx.c \
src/vmm/intel/vmx_msr.c \
src/vmm/intel/vmcs.c \
src/vmm/io/vatpic.c \
src/vmm/io/vatpit.c \
src/vmm/io/vhpet.c \
src/vmm/io/vioapic.c \
src/vmm/io/vlapic.c \
src/vmm/io/vpmtmr.c \
src/vmm/io/vrtc.c
XHYVE_SRC := \
src/acpi.c \
src/atkbdc.c \
src/block_if.c \
src/consport.c \
src/dbgport.c \
src/inout.c \
src/ioapic.c \
src/md5c.c \
src/mem.c \
src/mevent.c \
src/mptbl.c \
src/pci_ahci.c \
src/pci_emul.c \
src/pci_hostbridge.c \
src/pci_irq.c \
src/pci_lpc.c \
src/pci_uart.c \
src/pci_virtio_block.c \
src/pci_virtio_vmnet.c \
src/pci_virtio_rnd.c \
src/pm.c \
src/post.c \
src/rtc.c \
src/smbiostbl.c \
src/task_switch.c \
src/uart_emul.c \
src/xhyve.c \
src/virtio.c \
src/xmsr.c
FIRMWARE_SRC := \
src/firmware/kexec.c
SRC := \
$(VMM_SRC) \
$(XHYVE_SRC) \
$(FIRMWARE_SRC)
OBJ := $(SRC:src/%.c=build/%.o)
DEP := $(OBJ:%.o=%.d)
INC := -Iinclude
TARGET = build/xhyve
all: $(TARGET) | build
.PHONY: clean all
.SUFFIXES:
-include $(DEP)
build:
	@mkdir -p build

build/%.o: src/%.c
	@echo cc $<
	@mkdir -p $(dir $@)
	$(VERBOSE) $(ENV) $(CC) $(CFLAGS) $(INC) $(DEF) -MMD -MT $@ -MF build/$*.d -o $@ -c $<

$(TARGET).sym: $(OBJ)
	@echo ld $(notdir $@)
	$(VERBOSE) $(ENV) $(LD) $(LDFLAGS) -Xlinker $(TARGET).lto.o -o $@ $(OBJ)
	@echo dsym $(notdir $(TARGET).dSYM)
	$(VERBOSE) $(ENV) $(DSYM) $@ -o $(TARGET).dSYM

$(TARGET): $(TARGET).sym
	@echo strip $(notdir $@)
	$(VERBOSE) $(ENV) $(STRIP) $(TARGET).sym -o $@

clean:
	@rm -rf build

README.md (new file, 184 lines)

@@ -0,0 +1,184 @@
# xhyve
![](./xhyve_logo.png)
<!-- https://thenounproject.com/term/squirrel/57718/ -->
About
-----
The *xhyve hypervisor* is a port of [bhyve](http://www.bhyve.org) to OS X. It is built on top of Hypervisor.framework in OS X 10.10 Yosemite and higher, runs entirely in userspace, and has no other dependencies. It can run vanilla Linux distributions and may gain support for other guest operating systems in the future.
License: BSD
Introduction: [http://www.pagetable.com/?p=831](http://www.pagetable.com/?p=831)
Requirements
------------
* OS X 10.10 Yosemite or later
* A 2010 or later Mac
Building
--------

    $ make

The resulting binary will be in build/xhyve.
Usage
-----

    $ xhyve -h

What is bhyve?
--------------
bhyve is the FreeBSD hypervisor, roughly analogous to KVM + QEMU on Linux. It has a focus on simplicity and being legacy free.
It exposes the following peripherals to virtual machines:
- Local x(2)APIC
- IO-APIC
- 8259A PIC
- 8253/8254 PIT
- HPET
- PM Timer
- RTC
- PCI
- host bridge
- passthrough
- UART
- AHCI (i.e. HDD and CD)
- VirtIO block device
- VirtIO networking
- VirtIO RNG
Notably absent are sound, USB, HID and any kind of graphics support. Given the focus on server virtualization, these are not strictly required. bhyve may gain desktop virtualization capabilities in the future, but this doesn't seem to be a priority.
Unlike QEMU, bhyve also currently lacks any kind of guest-side firmware (QEMU uses the GPL3 [SeaBIOS](http://www.seabios.org)), but aims to provide a compatible [OVMF EFI](http://www.linux-kvm.org/page/OVMF) in the near future. It does, however, provide ACPI, SMBIOS and MP Tables.
bhyve architecture
------------------

                                                      Linux
          I/O        VM control       FreeBSD        NetBSD
                                                     OpenBSD
         |    A         |    A           |              |
         V    |         V    |           V              V
    +-------------++-------------++-------------++-------------+
    |             ||             ||             ||             |
    |    bhyve    ||  bhyvectl   ||  bhyveload  || grub2-bhyve |
    |             ||             ||             ||             |
    |             ||             ||             ||             |
    +-------------++-------------++-------------++-------------+
    +----------------------------------------------------------+
    |                        libvmmapi                         |
    +----------------------------------------------------------+
                                  A
                                  |                         user
    ------------------------------┼-----------------------------
                                  | ioctl         FreeBSD kernel
                                  V
                    +----------------------------+
                    |  VMX/SVM host              |
                    |  VMX/SVM guest             |
                    |  VMX/SVM nested paging     |
                    |  Timers                    |
                    |  Interrupts                |
                    +----------------------------+
                               vmm.ko

**vmm.ko**
The bhyve FreeBSD kernel module. Manages VM and vCPU objects, the guest physical address space and handles guest interaction with PIC, PIT, HPET, PM Timer, x(2)APIC and I/O-APIC. Contains a minimal x86 emulator to decode guest MMIO. Executes the two innermost vCPU runloops (VMX/SVM and interrupts/timers/paging). Has backends for Intel VMX and AMD SVM. Provides an ioctl and mmap API to userspace.
**libvmmapi**
Thin abstraction layer between the vmm.ko ioctl interface and the userspace C API.
**bhyve**
The userspace bhyve component (kind of a very light-weight QEMU) that executes virtual machines. Runs the guest I/O vCPU runloops. Manages ACPI, PCI and all non in-kernel devices. Interacts with vmm.ko through libvmmapi.
**bhyvectl**
Somewhat superfluous utility to introspect and manage the life cycle of virtual machines. Virtual machines and vCPUs can exist as kernel objects independently of a bhyve host process. Typically used to delete VM objects after use. Odd architectural choice.
**bhyveload**
Userspace port of the FreeBSD bootloader. Since bhyve still lacks firmware, this is a cumbersome workaround to bootstrap a guest operating system. It creates a VM object, loads the FreeBSD kernel into guest memory, sets up the initial vCPU state and then exits. Only then can a VM be executed by bhyve.
**grub2-bhyve**
Performs the same function as bhyveload but is a userspace port of [GRUB2](http://github.com/grehan-freebsd/grub2-bhyve). It is used to bootstrap guest operating systems other than FreeBSD, i.e. Linux, OpenBSD and NetBSD.
Support for Windows guests is work in progress and dependent on the EFI port.
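
The interplay between these components is easiest to see in code. The sketch below condenses the libvmmapi call sequence that bhyverun.c (deleted further down in this commit) performs: attach to the VM object, wire up guest memory, point vCPU 0 at an entry address and loop on vm_run(). Error handling and the exit-code dispatch are omitted, and the wrapper function name is made up; it is illustrative only, not a complete program.

```c
#include <stdint.h>
#include <machine/vmm.h>
#include <vmmapi.h>

static void
run_vm_sketch(const char *vmname, uint64_t rip)
{
	struct vmctx *ctx;
	struct vm_exit vme;

	ctx = vm_open(vmname);                                 /* attach to the VM object created earlier */
	vm_setup_memory(ctx, 256 * 1024 * 1024, VM_MMAP_ALL);  /* map guest RAM into this process */
	vm_set_register(ctx, 0, VM_REG_GUEST_RIP, rip);        /* vCPU 0 starts at the loader-provided RIP */

	for (;;) {
		vm_run(ctx, 0, &vme);                           /* enter the guest until the next VM exit */
		/* dispatch on vme.exitcode: I/O, MSR access, HLT, ... */
	}
}
```
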
xhyve architecture
------------------

    +----------------------------------------------------------+
    |                          xhyve                           |
    |                                                          |
    |                           I/O                            |
    |                                                          |
    |                                                          |
    |                                                          |
    |+--------------------------------------------------------+|
    ||                vmm          VMX guest                  ||
    ||                             Timers                     ||
    ||                             Interrupts                 ||
    |+--------------------------------------------------------+|
    +----------------------------------------------------------+
    +----------------------------------------------------------+
    |                   Hypervisor.framework                   |
    +----------------------------------------------------------+
                                  A
                                  |                         user
    ------------------------------┼-----------------------------
                                  | syscall            xnu kernel
                                  V
                               VMX host
                          VMX nested paging

xhyve shares most of the code with bhyve but is architecturally very different. Hypervisor.framework provides an interface to the VMX VMCS guest state and a safe subset of the VMCS control fields, which makes userspace hypervisors possible without any additional kernel extensions. The VMX host state and all aspects of nested paging are handled by the OS X kernel; the guest physical address space is managed simply by mapping regions of the host process's own address space.
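
For comparison, a bare-bones Hypervisor.framework session looks roughly like the sketch below (error checking omitted, and the guest register/VMCS setup is intentionally incomplete). The call names are from the public Hypervisor.framework C API; the memory size and initial RIP are arbitrary placeholders. The framework keeps the VMX host side to itself, guest memory is just a mapped region of the process's own address space, and each vCPU must be driven from the thread that created it.

```c
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <stdlib.h>

int main(void) {
	hv_vm_create(HV_VM_DEFAULT);                  /* one VM per process */

	/* guest physical memory is simply a region of our own address space */
	size_t ram_size = 64 * 1024 * 1024;
	void *ram = valloc(ram_size);                 /* page-aligned allocation */
	hv_vm_map(ram, 0, ram_size,
	    HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);

	hv_vcpuid_t vcpu;
	hv_vcpu_create(&vcpu, HV_VCPU_DEFAULT);       /* bound to the calling thread */

	/* guest state lives in the VMCS, e.g. the initial instruction pointer */
	hv_vmx_vcpu_write_vmcs(vcpu, VMCS_GUEST_RIP, 0x1000);

	hv_vcpu_run(vcpu);                            /* returns on every VM exit */

	hv_vcpu_destroy(vcpu);
	hv_vm_unmap(0, ram_size);
	hv_vm_destroy();
	return 0;
}
```
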
*xhyve* is equivalent to the *bhyve* process but gains a subset of a userspace port of the vmm kernel module. SVM, PCI passthrough and the VMX host and EPT aspects are dropped. The vmm component provides a libvmmapi-compatible interface to xhyve. Hypervisor.framework seems to enforce a strict 1:1 relationship between host process and VM and between host thread and vCPU, which means VMs and vCPUs can only be interacted with by the process and threads that created them. Therefore, unlike bhyve, xhyve needs to adhere to a single-process model. Multiple virtual machines can be created by launching multiple instances of xhyve. xhyve retains most of the bhyve command line interface.
*bhyvectl*, *bhyveload* and *grub2-bhyve* are incompatible with a single-process model and are dropped. As a stop-gap solution until we have a proper firmware, xhyve supports the Linux [kexec protocol](http://www.kernel.org/doc/Documentation/x86/boot.txt), a very simple and straightforward way to bootstrap a Linux kernel. It takes a bzImage and, optionally, an initrd image and a kernel parameter string as input.
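
For reference, the header that protocol defines is small and sits at fixed offsets inside the bzImage; a loader validates it before copying the protected-mode kernel into guest memory. The sketch below only performs that validation (offsets are taken from boot.txt); the helper name is made up and this is not the actual loader in src/firmware/kexec.c.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* check a bzImage setup header per Documentation/x86/boot.txt */
static int
check_bzimage(FILE *f)
{
	uint8_t hdr[1024];

	if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr))
		return -1;
	if (hdr[0x1fe] != 0x55 || hdr[0x1ff] != 0xaa)        /* boot_flag */
		return -1;
	if (memcmp(&hdr[0x202], "HdrS", 4) != 0)             /* header magic */
		return -1;

	uint16_t version = (uint16_t)(hdr[0x206] | (hdr[0x207] << 8));
	uint8_t setup_sects = hdr[0x1f1] ? hdr[0x1f1] : 4;   /* 0 means 4 */
	long kernel_offset = ((long)setup_sects + 1) * 512;  /* protected-mode kernel starts here */

	printf("boot protocol %u.%02u, kernel at offset %ld\n",
	    version >> 8, version & 0xff, kernel_offset);
	return 0;
}
```
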
TODO
----
- vmm:
  - enable APIC access page to speed up APIC emulation
  - enable x2APIC MSRs (even faster)
  - vmm_callout:
    - is a quick'n'dirty implementation of the FreeBSD kernel callout mechanism
    - seems to be racy
    - fix races or perhaps replace with something better
    - use per vCPU timer event thread (performance)?
  - some 32-bit guests are broken (support PAE paging in VMCS)
  - PCID guest support (performance)
- block_if:
  - OS X does not support preadv/pwritev, so we need to serialize reads and writes for the time being until we find a better solution (see the sketch after this list)
  - support block devices other than plain files
- virtio_net:
  - make it not require root
  - unify TAP and vmnet backends
  - performance: send/receive more than a single packet at a time
- ACPI tables don't work:
  - bhyve creates ASL on the fly and then calls out to an ASL compiler (iasl) on every VM boot to create the DSDT
  - remove the dependency on iasl by creating AML bytecode directly; shouldn't be too hard since we are only interested in a very small subset of ASL
- virtio_rnd:
  - is untested
- remove explicit state transitions:
  - since only the owning task/thread can modify the VM/vCPUs, a lot of the synchronization might be unnecessary
- performance, performance and performance
- remove vestigial code, cleanup
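
A minimal sketch of one possible preadv() replacement, assuming a plain-file backend, is shown below: issue one pread() per iovec element. The helper name is made up for illustration and this is not the code in src/block_if.c; it also shows why serialization is still needed, since the loop is not atomic with respect to other users of the descriptor.

```c
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t
preadv_emulated(int fd, const struct iovec *iov, int iovcnt, off_t offset)
{
	ssize_t total = 0;

	for (int i = 0; i < iovcnt; i++) {
		ssize_t n = pread(fd, iov[i].iov_base, iov[i].iov_len, offset);
		if (n < 0)
			return -1;
		total += n;
		offset += n;
		if ((size_t)n < iov[i].iov_len)
			break;                /* short read: stop early */
	}
	return total;
}
```
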

Makefile (bhyve userland program, deleted file)

@@ -1,50 +0,0 @@
#
# $FreeBSD$
#
PROG= bhyve
DEBUG_FLAGS= -g -O0
MAN= bhyve.8
SRCS= \
atkbdc.c \
acpi.c \
bhyverun.c \
block_if.c \
consport.c \
dbgport.c \
inout.c \
ioapic.c \
mem.c \
mevent.c \
mptbl.c \
pci_ahci.c \
pci_emul.c \
pci_hostbridge.c \
pci_irq.c \
pci_lpc.c \
pci_passthru.c \
pci_virtio_block.c \
pci_virtio_net.c \
pci_virtio_rnd.c \
pci_uart.c \
pm.c \
post.c \
rtc.c \
smbiostbl.c \
task_switch.c \
uart_emul.c \
virtio.c \
xmsr.c \
spinup_ap.c
.PATH: ${.CURDIR}/../../sys/amd64/vmm
SRCS+= vmm_instruction_emul.c
LIBADD= vmmapi md pthread
WARNS?= 2
.include <bsd.prog.mk>

bhyve.8 (deleted file)

@@ -1,325 +0,0 @@
.\" Copyright (c) 2013 Peter Grehan
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd September 17, 2014
.Dt BHYVE 8
.Os
.Sh NAME
.Nm bhyve
.Nd "run a guest operating system inside a virtual machine"
.Sh SYNOPSIS
.Nm
.Op Fl abehuwxACHPWY
.Op Fl c Ar numcpus
.Op Fl g Ar gdbport
.Op Fl l Ar lpcdev Ns Op , Ns Ar conf
.Op Fl m Ar size Ns Op Ar K|k|M|m|G|g|T|t
.Op Fl p Ar vcpu:hostcpu
.Op Fl s Ar slot,emulation Ns Op , Ns Ar conf
.Op Fl U Ar uuid
.Ar vmname
.Sh DESCRIPTION
.Nm
is a hypervisor that runs guest operating systems inside a
virtual machine.
.Pp
Parameters such as the number of virtual CPUs, amount of guest memory, and
I/O connectivity can be specified with command-line parameters.
.Pp
The guest operating system must be loaded with
.Xr bhyveload 8
or a similar boot loader before running
.Nm .
.Pp
.Nm
runs until the guest operating system reboots or an unhandled hypervisor
exit is detected.
.Sh OPTIONS
.Bl -tag -width 10n
.It Fl a
The guest's local APIC is configured in xAPIC mode.
The xAPIC mode is the default setting so this option is redundant. It will be
deprecated in a future version.
.It Fl A
Generate ACPI tables.
Required for
.Fx Ns /amd64
guests.
.It Fl b
Enable a low-level console device supported by
.Fx
kernels compiled with
.Cd "device bvmconsole" .
This option will be deprecated in a future version.
.It Fl c Ar numcpus
Number of guest virtual CPUs.
The default is 1 and the maximum is 16.
.It Fl C
Include guest memory in core file.
.It Fl e
Force
.Nm
to exit when a guest issues an access to an I/O port that is not emulated.
This is intended for debug purposes.
.It Fl g Ar gdbport
For
.Fx
kernels compiled with
.Cd "device bvmdebug" ,
allow a remote kernel kgdb to be relayed to the guest kernel gdb stub
via a local IPv4 address and this port.
This option will be deprecated in a future version.
.It Fl h
Print help message and exit.
.It Fl H
Yield the virtual CPU thread when a HLT instruction is detected.
If this option is not specified, virtual CPUs will use 100% of a host CPU.
.It Fl l Ar lpcdev Ns Op , Ns Ar conf
Allow devices behind the LPC PCI-ISA bridge to be configured.
The only supported devices are the TTY-class devices,
.Li com1
and
.Li com2 .
.It Fl m Ar size Ns Op Ar K|k|M|m|G|g|T|t
Guest physical memory size in bytes.
This must be the same size that was given to
.Xr bhyveload 8 .
.Pp
The size argument may be suffixed with one of K, M, G or T (either upper
or lower case) to indicate a multiple of kilobytes, megabytes, gigabytes,
or terabytes.
If no suffix is given, the value is assumed to be in megabytes.
.It Fl p Ar vcpu:hostcpu
Pin guest's virtual CPU
.Em vcpu
to
.Em hostcpu .
.It Fl P
Force the guest virtual CPU to exit when a PAUSE instruction is detected.
.It Fl s Ar slot,emulation Ns Op , Ns Ar conf
Configure a virtual PCI slot and function.
.Pp
.Nm bhyve
provides PCI bus emulation and virtual devices that can be attached to
slots on the bus.
There are 32 available slots, with the option of providing up to 8 functions
per slot.
.Bl -tag -width 10n
.It Ar slot
.Ar pcislot[:function]
.Ar bus:pcislot:function
.Pp
The
.Ar pcislot
value is 0 to 31. The optional function value is 0 to 7. The optional
.Ar bus
value is 0 to 255.
If not specified, the function value defaults to 0.
If not specified, the bus value defaults to 0.
.It Ar emulation
.Bl -tag -width 10n
.It Li hostbridge | Li amd_hostbridge
.Pp
Provide a simple host bridge.
This is usually configured at slot 0, and is required by most guest
operating systems.
The
.Li amd_hostbridge
emulation is identical but uses a PCI vendor ID of
.Li AMD .
.It Li passthru
PCI pass-through device.
.It Li virtio-net
Virtio network interface.
.It Li virtio-blk
Virtio block storage interface.
.It Li virtio-rnd
Virtio RNG interface.
.It Li ahci-cd
AHCI controller attached to an ATAPI CD/DVD.
.It Li ahci-hd
AHCI controller attached to a SATA hard-drive.
.It Li uart
PCI 16550 serial device.
.It Li lpc
LPC PCI-ISA bridge with COM1 and COM2 16550 serial ports. The LPC bridge
emulation can only be configured on bus 0.
.El
.It Op Ar conf
This optional parameter describes the backend for device emulations.
If
.Ar conf
is not specified, the device emulation has no backend and can be
considered unconnected.
.Pp
Network devices:
.Bl -tag -width 10n
.It Ar tapN Ns Op , Ns Ar mac=xx:xx:xx:xx:xx:xx
.It Ar vmnetN Ns Op , Ns Ar mac=xx:xx:xx:xx:xx:xx
.Pp
If
.Ar mac
is not specified, the MAC address is derived from a fixed OUI and the
remaining bytes from an MD5 hash of the slot and function numbers and
the device name.
.Pp
The MAC address is an ASCII string in
.Xr ethers 5
format.
.El
.Pp
Block storage devices:
.Bl -tag -width 10n
.It Pa /filename Ns Oo , Ns Ar block-device-options Oc
.It Pa /dev/xxx Ns Oo , Ns Ar block-device-options Oc
.El
.Pp
The
.Ar block-device-options
are:
.Bl -tag -width 8n
.It Li nocache
Open the file with
.Dv O_DIRECT .
.It Li direct
Open the file using
.Dv O_SYNC .
.It Li ro
Force the file to be opened read-only.
.It Li sectorsize= Ns Ar logical Ns Oo / Ns Ar physical Oc
Specify the logical and physical sector sizes of the emulated disk.
The physical sector size is optional and is equal to the logical sector size
if not explicitly specified.
.El
.Pp
TTY devices:
.Bl -tag -width 10n
.It Li stdio
Connect the serial port to the standard input and output of
the bhyve process.
.It Pa /dev/xxx
Use the host TTY device for serial port I/O.
.El
.Pp
Pass-through devices:
.Bl -tag -width 10n
.It Ns Ar slot Ns / Ns Ar bus Ns / Ns Ar function
Connect to a PCI device on the host at the selector described by
.Ar slot ,
.Ar bus ,
and
.Ar function
numbers.
.El
.Pp
The host device must have been reserved at boot-time using the
.Va pptdev
loader variable as described in
.Xr vmm 4 .
.El
.It Fl u
RTC keeps UTC time.
.It Fl U Ar uuid
Set the universally unique identifier
.Pq UUID
in the guest's System Management BIOS System Information structure.
By default a UUID is generated from the host's hostname and
.Ar vmname .
.It Fl w
Ignore accesses to unimplemented Model Specific Registers (MSRs). This is intended for debug purposes.
.It Fl W
Force virtio PCI device emulations to use MSI interrupts instead of MSI-X
interrupts.
.It Fl x
The guest's local APIC is configured in x2APIC mode.
.It Fl Y
Disable MPtable generation.
.It Ar vmname
Alphanumeric name of the guest.
This should be the same as that created by
.Xr bhyveload 8 .
.El
.Sh EXAMPLES
The guest operating system must have been loaded with
.Xr bhyveload 8
or a similar boot loader before
.Xr bhyve 4
can be run.
.Pp
To run a virtual machine with 1GB of memory, two virtual CPUs, a virtio
block device backed by the
.Pa /my/image
filesystem image, and a serial port for the console:
.Bd -literal -offset indent
bhyve -c 2 -s 0,hostbridge -s 1,lpc -s 2,virtio-blk,/my/image \\
-l com1,stdio -A -H -P -m 1G vm1
.Ed
.Pp
Run a 24GB single-CPU virtual machine with three network ports, one of which
has a MAC address specified:
.Bd -literal -offset indent
bhyve -s 0,hostbridge -s 1,lpc -s 2:0,virtio-net,tap0 \\
-s 2:1,virtio-net,tap1 \\
-s 2:2,virtio-net,tap2,mac=00:be:fa:76:45:00 \\
-s 3,virtio-blk,/my/image -l com1,stdio \\
-A -H -P -m 24G bigvm
.Ed
.Pp
Run an 8GB quad-CPU virtual machine with 8 AHCI SATA disks, an AHCI ATAPI
CD-ROM, a single virtio network port, an AMD hostbridge, and the console
port connected to an
.Xr nmdm 4
null-modem device.
.Bd -literal -offset indent
bhyve -c 4 \\
-s 0,amd_hostbridge -s 1,lpc \\
-s 1:0,ahci-hd,/images/disk.1 \\
-s 1:1,ahci-hd,/images/disk.2 \\
-s 1:2,ahci-hd,/images/disk.3 \\
-s 1:3,ahci-hd,/images/disk.4 \\
-s 1:4,ahci-hd,/images/disk.5 \\
-s 1:5,ahci-hd,/images/disk.6 \\
-s 1:6,ahci-hd,/images/disk.7 \\
-s 1:7,ahci-hd,/images/disk.8 \\
-s 2,ahci-cd,/images.install.iso \\
-s 3,virtio-net,tap0 \\
-l com1,/dev/nmdm0A \\
-A -H -P -m 8G
.Ed
.Sh SEE ALSO
.Xr bhyve 4 ,
.Xr nmdm 4 ,
.Xr vmm 4 ,
.Xr ethers 5 ,
.Xr bhyvectl 8 ,
.Xr bhyveload 8
.Sh HISTORY
.Nm
first appeared in
.Fx 10.0 .
.Sh AUTHORS
.An Neel Natu Aq Mt neel@freebsd.org
.An Peter Grehan Aq Mt grehan@freebsd.org

bhyverun.c (deleted file)

@@ -1,892 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <machine/atomic.h>
#include <machine/segments.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#include "smbiostbl.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"
#define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */
#define MB (1024UL * 1024)
#define GB (1024UL * MB)
typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
extern int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);
char *vmname;
int guest_ncpus;
char *guest_uuid_str;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
static int virtio_msix = 1;
static int x2apic_mode = 0; /* default is xAPIC */
static int strictio;
static int strictmsr = 1;
static int acpi;
static char *progname;
static const int BSP = 0;
static cpuset_t cpumask;
static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
static struct vm_exit vmexit[VM_MAXCPU];
struct bhyvestats {
uint64_t vmexit_bogus;
uint64_t vmexit_bogus_switch;
uint64_t vmexit_hlt;
uint64_t vmexit_pause;
uint64_t vmexit_mtrap;
uint64_t vmexit_inst_emul;
uint64_t cpu_switch_rotate;
uint64_t cpu_switch_direct;
} stats;
struct mt_vmm_info {
pthread_t mt_thr;
struct vmctx *mt_ctx;
int mt_vcpu;
} mt_vmm_info[VM_MAXCPU];
static cpuset_t *vcpumap[VM_MAXCPU] = { NULL };
static void
usage(int code)
{
fprintf(stderr,
"Usage: %s [-abehuwxACHPWY] [-c vcpus] [-g <gdb port>] [-l <lpc>]\n"
" %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] <vm>\n"
" -a: local apic is in xAPIC mode (deprecated)\n"
" -A: create ACPI tables\n"
" -c: # cpus (default 1)\n"
" -C: include guest memory in core file\n"
" -e: exit on unhandled I/O access\n"
" -g: gdb port\n"
" -h: help\n"
" -H: vmexit from the guest on hlt\n"
" -l: LPC device configuration\n"
" -m: memory size in MB\n"
" -p: pin 'vcpu' to 'hostcpu'\n"
" -P: vmexit from the guest on pause\n"
" -s: <slot,driver,configinfo> PCI slot config\n"
" -u: RTC keeps UTC time\n"
" -U: uuid\n"
" -w: ignore unimplemented MSRs\n"
" -W: force virtio to use single-vector MSI\n"
" -x: local apic is in x2APIC mode\n"
" -Y: disable MPtable generation\n",
progname, (int)strlen(progname), "");
exit(code);
}
static int
pincpu_parse(const char *opt)
{
int vcpu, pcpu;
if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
fprintf(stderr, "invalid format: %s\n", opt);
return (-1);
}
if (vcpu < 0 || vcpu >= VM_MAXCPU) {
fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
vcpu, VM_MAXCPU - 1);
return (-1);
}
if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
fprintf(stderr, "hostcpu '%d' outside valid range from "
"0 to %d\n", pcpu, CPU_SETSIZE - 1);
return (-1);
}
if (vcpumap[vcpu] == NULL) {
if ((vcpumap[vcpu] = malloc(sizeof(cpuset_t))) == NULL) {
perror("malloc");
return (-1);
}
CPU_ZERO(vcpumap[vcpu]);
}
CPU_SET(pcpu, vcpumap[vcpu]);
return (0);
}
void
vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
int errcode)
{
struct vmctx *ctx;
int error, restart_instruction;
ctx = arg;
restart_instruction = 1;
error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
restart_instruction);
assert(error == 0);
}
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{
return (vm_map_gpa(ctx, gaddr, len));
}
int
fbsdrun_vmexit_on_pause(void)
{
return (guest_vmexit_on_pause);
}
int
fbsdrun_vmexit_on_hlt(void)
{
return (guest_vmexit_on_hlt);
}
int
fbsdrun_virtio_msix(void)
{
return (virtio_msix);
}
static void *
fbsdrun_start_thread(void *param)
{
char tname[MAXCOMLEN + 1];
struct mt_vmm_info *mtp;
int vcpu;
mtp = param;
vcpu = mtp->mt_vcpu;
snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
pthread_set_name_np(mtp->mt_thr, tname);
vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);
/* not reached */
exit(1);
return (NULL);
}
void
fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
{
int error;
assert(fromcpu == BSP);
/*
* The 'newcpu' must be activated in the context of 'fromcpu'. If
* vm_activate_cpu() is delayed until newcpu's pthread starts running
* then vmm.ko is out-of-sync with bhyve and this can create a race
* with vm_suspend().
*/
error = vm_activate_cpu(ctx, newcpu);
assert(error == 0);
CPU_SET_ATOMIC(newcpu, &cpumask);
/*
* Set up the vmexit struct to allow execution to start
* at the given RIP
*/
vmexit[newcpu].rip = rip;
vmexit[newcpu].inst_length = 0;
mt_vmm_info[newcpu].mt_ctx = ctx;
mt_vmm_info[newcpu].mt_vcpu = newcpu;
error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
fbsdrun_start_thread, &mt_vmm_info[newcpu]);
assert(error == 0);
}
static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{
if (!CPU_ISSET(vcpu, &cpumask)) {
fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
exit(1);
}
CPU_CLR_ATOMIC(vcpu, &cpumask);
return (CPU_EMPTY(&cpumask));
}
static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
uint32_t eax)
{
#if BHYVE_DEBUG
/*
* put guest-driven debug here
*/
#endif
return (VMEXIT_CONTINUE);
}
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
int error;
int bytes, port, in, out, string;
int vcpu;
vcpu = *pvcpu;
port = vme->u.inout.port;
bytes = vme->u.inout.bytes;
string = vme->u.inout.string;
in = vme->u.inout.in;
out = !in;
/* Extra-special case of host notifications */
if (out && port == GUEST_NIO_PORT) {
error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
return (error);
}
error = emulate_inout(ctx, vcpu, vme, strictio);
if (error) {
fprintf(stderr, "Unhandled %s%c 0x%04x at 0x%lx\n",
in ? "in" : "out",
bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
port, vmexit->rip);
return (VMEXIT_ABORT);
} else {
return (VMEXIT_CONTINUE);
}
}
static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
uint64_t val;
uint32_t eax, edx;
int error;
val = 0;
error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
if (error != 0) {
fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
vme->u.msr.code, *pvcpu);
if (strictmsr) {
vm_inject_gp(ctx, *pvcpu);
return (VMEXIT_CONTINUE);
}
}
eax = val;
error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
assert(error == 0);
edx = val >> 32;
error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
assert(error == 0);
return (VMEXIT_CONTINUE);
}
static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
int error;
error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
if (error != 0) {
fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
vme->u.msr.code, vme->u.msr.wval, *pvcpu);
if (strictmsr) {
vm_inject_gp(ctx, *pvcpu);
return (VMEXIT_CONTINUE);
}
}
return (VMEXIT_CONTINUE);
}
static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
int newcpu;
int retval = VMEXIT_CONTINUE;
newcpu = spinup_ap(ctx, *pvcpu,
vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
return (retval);
}
#define DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define EXIT_REASON_EPT_MISCONFIG 49
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
#define VMCS_IDENT(x) ((x) | 0x80000000)
static uint64_t ept_misconfig_gpa, ept_misconfig_pte[4];
static int ept_misconfig_ptenum;
#endif
static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
fprintf(stderr, "vm exit[%d]\n", *pvcpu);
fprintf(stderr, "\treason\t\tVMX\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
fprintf(stderr, "\tqualification\t0x%016lx\n",
vmexit->u.vmx.exit_qualification);
fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
if (vmexit->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
vm_get_register(ctx, *pvcpu,
VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
&ept_misconfig_gpa);
vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
&ept_misconfig_ptenum);
fprintf(stderr, "\tEPT misconfiguration:\n");
fprintf(stderr, "\t\tGPA: %#lx\n", ept_misconfig_gpa);
fprintf(stderr, "\t\tPTE(%d): %#lx %#lx %#lx %#lx\n",
ept_misconfig_ptenum, ept_misconfig_pte[0],
ept_misconfig_pte[1], ept_misconfig_pte[2],
ept_misconfig_pte[3]);
}
#endif /* DEBUG_EPT_MISCONFIG */
return (VMEXIT_ABORT);
}
static int
vmexit_svm(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
fprintf(stderr, "vm exit[%d]\n", *pvcpu);
fprintf(stderr, "\treason\t\tSVM\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
fprintf(stderr, "\texitcode\t%#lx\n", vmexit->u.svm.exitcode);
fprintf(stderr, "\texitinfo1\t%#lx\n", vmexit->u.svm.exitinfo1);
fprintf(stderr, "\texitinfo2\t%#lx\n", vmexit->u.svm.exitinfo2);
return (VMEXIT_ABORT);
}
static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
assert(vmexit->inst_length == 0);
stats.vmexit_bogus++;
return (VMEXIT_CONTINUE);
}
static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
stats.vmexit_hlt++;
/*
* Just continue execution with the next instruction. We use
* the HLT VM exit as a way to be friendly with the host
* scheduler.
*/
return (VMEXIT_CONTINUE);
}
static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
stats.vmexit_pause++;
return (VMEXIT_CONTINUE);
}
static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
assert(vmexit->inst_length == 0);
stats.vmexit_mtrap++;
return (VMEXIT_CONTINUE);
}
static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
int err, i;
struct vie *vie;
stats.vmexit_inst_emul++;
vie = &vmexit->u.inst_emul.vie;
err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
vie, &vmexit->u.inst_emul.paging);
if (err) {
if (err == ESRCH) {
fprintf(stderr, "Unhandled memory access to 0x%lx\n",
vmexit->u.inst_emul.gpa);
}
fprintf(stderr, "Failed to emulate instruction [");
for (i = 0; i < vie->num_valid; i++) {
fprintf(stderr, "0x%02x%s", vie->inst[i],
i != (vie->num_valid - 1) ? " " : "");
}
fprintf(stderr, "] at 0x%lx\n", vmexit->rip);
return (VMEXIT_ABORT);
}
return (VMEXIT_CONTINUE);
}
static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;
static int
vmexit_suspend(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
enum vm_suspend_how how;
how = vmexit->u.suspended.how;
fbsdrun_deletecpu(ctx, *pvcpu);
if (*pvcpu != BSP) {
pthread_mutex_lock(&resetcpu_mtx);
pthread_cond_signal(&resetcpu_cond);
pthread_mutex_unlock(&resetcpu_mtx);
pthread_exit(NULL);
}
pthread_mutex_lock(&resetcpu_mtx);
while (!CPU_EMPTY(&cpumask)) {
pthread_cond_wait(&resetcpu_cond, &resetcpu_mtx);
}
pthread_mutex_unlock(&resetcpu_mtx);
switch (how) {
case VM_SUSPEND_RESET:
exit(0);
case VM_SUSPEND_POWEROFF:
exit(1);
case VM_SUSPEND_HALT:
exit(2);
case VM_SUSPEND_TRIPLEFAULT:
exit(3);
default:
fprintf(stderr, "vmexit_suspend: invalid reason %d\n", how);
exit(100);
}
return (0); /* NOTREACHED */
}
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
[VM_EXITCODE_INOUT] = vmexit_inout,
[VM_EXITCODE_INOUT_STR] = vmexit_inout,
[VM_EXITCODE_VMX] = vmexit_vmx,
[VM_EXITCODE_SVM] = vmexit_svm,
[VM_EXITCODE_BOGUS] = vmexit_bogus,
[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
[VM_EXITCODE_MTRAP] = vmexit_mtrap,
[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
[VM_EXITCODE_SUSPENDED] = vmexit_suspend,
[VM_EXITCODE_TASK_SWITCH] = vmexit_task_switch,
};
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t startrip)
{
int error, rc, prevcpu;
enum vm_exitcode exitcode;
cpuset_t active_cpus;
if (vcpumap[vcpu] != NULL) {
error = pthread_setaffinity_np(pthread_self(),
sizeof(cpuset_t), vcpumap[vcpu]);
assert(error == 0);
}
error = vm_active_cpus(ctx, &active_cpus);
assert(CPU_ISSET(vcpu, &active_cpus));
error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, startrip);
assert(error == 0);
while (1) {
error = vm_run(ctx, vcpu, &vmexit[vcpu]);
if (error != 0)
break;
prevcpu = vcpu;
exitcode = vmexit[vcpu].exitcode;
if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
exitcode);
exit(1);
}
rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
switch (rc) {
case VMEXIT_CONTINUE:
break;
case VMEXIT_ABORT:
abort();
default:
exit(1);
}
}
fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}
static int
num_vcpus_allowed(struct vmctx *ctx)
{
int tmp, error;
error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
/*
* The guest is allowed to spinup more than one processor only if the
* UNRESTRICTED_GUEST capability is available.
*/
if (error == 0)
return (VM_MAXCPU);
else
return (1);
}
void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
int err, tmp;
if (fbsdrun_vmexit_on_hlt()) {
err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
if (err < 0) {
fprintf(stderr, "VM exit on HLT not supported\n");
exit(1);
}
vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
if (cpu == BSP)
handler[VM_EXITCODE_HLT] = vmexit_hlt;
}
if (fbsdrun_vmexit_on_pause()) {
/*
* pause exit support required for this mode
*/
err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
if (err < 0) {
fprintf(stderr,
"SMP mux requested, no pause support\n");
exit(1);
}
vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
if (cpu == BSP)
handler[VM_EXITCODE_PAUSE] = vmexit_pause;
}
if (x2apic_mode)
err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
else
err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
if (err) {
fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
exit(1);
}
vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}
int
main(int argc, char *argv[])
{
int c, error, gdb_port, err, bvmcons;
int dump_guest_memory, max_vcpus, mptgen;
int rtc_localtime;
struct vmctx *ctx;
uint64_t rip;
size_t memsize;
bvmcons = 0;
dump_guest_memory = 0;
progname = basename(argv[0]);
gdb_port = 0;
guest_ncpus = 1;
memsize = 256 * MB;
mptgen = 1;
rtc_localtime = 1;
while ((c = getopt(argc, argv, "abehuwxACHIPWYp:g:c:s:m:l:U:")) != -1) {
switch (c) {
case 'a':
x2apic_mode = 0;
break;
case 'A':
acpi = 1;
break;
case 'b':
bvmcons = 1;
break;
case 'p':
if (pincpu_parse(optarg) != 0) {
errx(EX_USAGE, "invalid vcpu pinning "
"configuration '%s'", optarg);
}
break;
case 'c':
guest_ncpus = atoi(optarg);
break;
case 'C':
dump_guest_memory = 1;
break;
case 'g':
gdb_port = atoi(optarg);
break;
case 'l':
if (lpc_device_parse(optarg) != 0) {
errx(EX_USAGE, "invalid lpc device "
"configuration '%s'", optarg);
}
break;
case 's':
if (pci_parse_slot(optarg) != 0)
exit(1);
else
break;
case 'm':
error = vm_parse_memsize(optarg, &memsize);
if (error)
errx(EX_USAGE, "invalid memsize '%s'", optarg);
break;
case 'H':
guest_vmexit_on_hlt = 1;
break;
case 'I':
/*
* The "-I" option was used to add an ioapic to the
* virtual machine.
*
* An ioapic is now provided unconditionally for each
* virtual machine and this option is now deprecated.
*/
break;
case 'P':
guest_vmexit_on_pause = 1;
break;
case 'e':
strictio = 1;
break;
case 'u':
rtc_localtime = 0;
break;
case 'U':
guest_uuid_str = optarg;
break;
case 'w':
strictmsr = 0;
break;
case 'W':
virtio_msix = 0;
break;
case 'x':
x2apic_mode = 1;
break;
case 'Y':
mptgen = 0;
break;
case 'h':
usage(0);
default:
usage(1);
}
}
argc -= optind;
argv += optind;
if (argc != 1)
usage(1);
vmname = argv[0];
ctx = vm_open(vmname);
if (ctx == NULL) {
perror("vm_open");
exit(1);
}
if (guest_ncpus < 1) {
fprintf(stderr, "Invalid guest vCPUs (%d)\n", guest_ncpus);
exit(1);
}
max_vcpus = num_vcpus_allowed(ctx);
if (guest_ncpus > max_vcpus) {
fprintf(stderr, "%d vCPUs requested but only %d available\n",
guest_ncpus, max_vcpus);
exit(1);
}
fbsdrun_set_capabilities(ctx, BSP);
if (dump_guest_memory)
vm_set_memflags(ctx, VM_MEM_F_INCORE);
err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
if (err) {
fprintf(stderr, "Unable to setup memory (%d)\n", err);
exit(1);
}
error = init_msr();
if (error) {
fprintf(stderr, "init_msr error %d", error);
exit(1);
}
init_mem();
init_inout();
pci_irq_init(ctx);
ioapic_init(ctx);
rtc_init(ctx, rtc_localtime);
sci_init(ctx);
/*
* Exit if a device emulation finds an error in it's initilization
*/
if (init_pci(ctx) != 0)
exit(1);
if (gdb_port != 0)
init_dbgport(gdb_port);
if (bvmcons)
init_bvmcons();
error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
assert(error == 0);
/*
* build the guest tables, MP etc.
*/
if (mptgen) {
error = mptable_build(ctx, guest_ncpus);
if (error)
exit(1);
}
error = smbios_build(ctx);
assert(error == 0);
if (acpi) {
error = acpi_build(ctx, guest_ncpus);
assert(error == 0);
}
/*
* Change the proc title to include the VM name.
*/
setproctitle("%s", vmname);
/*
* Add CPU 0
*/
fbsdrun_addcpu(ctx, BSP, BSP, rip);
/*
* Head off to the main event dispatch loop
*/
mevent_dispatch();
exit(1);
}

pci_emul.h (deleted file)

@@ -1,283 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PCI_EMUL_H_
#define _PCI_EMUL_H_
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/_pthreadtypes.h>
#include <dev/pci/pcireg.h>
#include <assert.h>
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct vmctx;
struct pci_devinst;
struct memory_region;
struct pci_devemu {
char *pe_emu; /* Name of device emulation */
/* instance creation */
int (*pe_init)(struct vmctx *, struct pci_devinst *,
char *opts);
/* ACPI DSDT enumeration */
void (*pe_write_dsdt)(struct pci_devinst *);
/* config space read/write callbacks */
int (*pe_cfgwrite)(struct vmctx *ctx, int vcpu,
struct pci_devinst *pi, int offset,
int bytes, uint32_t val);
int (*pe_cfgread)(struct vmctx *ctx, int vcpu,
struct pci_devinst *pi, int offset,
int bytes, uint32_t *retval);
/* BAR read/write callbacks */
void (*pe_barwrite)(struct vmctx *ctx, int vcpu,
struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value);
uint64_t (*pe_barread)(struct vmctx *ctx, int vcpu,
struct pci_devinst *pi, int baridx,
uint64_t offset, int size);
};
#define PCI_EMUL_SET(x) DATA_SET(pci_devemu_set, x);
enum pcibar_type {
PCIBAR_NONE,
PCIBAR_IO,
PCIBAR_MEM32,
PCIBAR_MEM64,
PCIBAR_MEMHI64
};
struct pcibar {
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
};
#define PI_NAMESZ 40
struct msix_table_entry {
uint64_t addr;
uint32_t msg_data;
uint32_t vector_control;
} __packed;
/*
* In case the structure is modified to hold extra information, use a define
* for the size that should be emulated.
*/
#define MSIX_TABLE_ENTRY_SIZE 16
#define MAX_MSIX_TABLE_ENTRIES 2048
#define PBA_SIZE(msgnum) (roundup2((msgnum), 64) / 8)
enum lintr_stat {
IDLE,
ASSERTED,
PENDING
};
struct pci_devinst {
struct pci_devemu *pi_d;
struct vmctx *pi_vmctx;
uint8_t pi_bus, pi_slot, pi_func;
char pi_name[PI_NAMESZ];
int pi_bar_getsize;
int pi_prevcap;
int pi_capend;
struct {
int8_t pin;
enum lintr_stat state;
int pirq_pin;
int ioapic_irq;
pthread_mutex_t lock;
} pi_lintr;
struct {
int enabled;
uint64_t addr;
uint64_t msg_data;
int maxmsgnum;
} pi_msi;
struct {
int enabled;
int table_bar;
int pba_bar;
uint32_t table_offset;
int table_count;
uint32_t pba_offset;
int pba_size;
int function_mask;
struct msix_table_entry *table; /* allocated at runtime */
} pi_msix;
void *pi_arg; /* devemu-private data */
u_char pi_cfgdata[PCI_REGMAX + 1];
struct pcibar pi_bar[PCI_BARMAX + 1];
};
struct msicap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t addrlo;
uint32_t addrhi;
uint16_t msgdata;
} __packed;
struct msixcap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t table_info; /* bar index and offset within it */
uint32_t pba_info; /* bar index and offset within it */
} __packed;
struct pciecap {
uint8_t capid;
uint8_t nextptr;
uint16_t pcie_capabilities;
uint32_t dev_capabilities; /* all devices */
uint16_t dev_control;
uint16_t dev_status;
uint32_t link_capabilities; /* devices with links */
uint16_t link_control;
uint16_t link_status;
uint32_t slot_capabilities; /* ports with slots */
uint16_t slot_control;
uint16_t slot_status;
uint16_t root_control; /* root ports */
uint16_t root_capabilities;
uint32_t root_status;
uint32_t dev_capabilities2; /* all devices */
uint16_t dev_control2;
uint16_t dev_status2;
uint32_t link_capabilities2; /* devices with links */
uint16_t link_control2;
uint16_t link_status2;
uint32_t slot_capabilities2; /* ports with slots */
uint16_t slot_control2;
uint16_t slot_status2;
} __packed;
typedef void (*pci_lintr_cb)(int b, int s, int pin, int pirq_pin,
int ioapic_irq, void *arg);
int init_pci(struct vmctx *ctx);
void msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_devinst *pdi, int idx,
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx,
uint64_t hostbase, enum pcibar_type type, uint64_t size);
int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
void pci_generate_msi(struct pci_devinst *pi, int msgnum);
void pci_generate_msix(struct pci_devinst *pi, int msgnum);
void pci_lintr_assert(struct pci_devinst *pi);
void pci_lintr_deassert(struct pci_devinst *pi);
void pci_lintr_request(struct pci_devinst *pi);
int pci_msi_enabled(struct pci_devinst *pi);
int pci_msix_enabled(struct pci_devinst *pi);
int pci_msix_table_bar(struct pci_devinst *pi);
int pci_msix_pba_bar(struct pci_devinst *pi);
int pci_msi_msgnum(struct pci_devinst *pi);
int pci_parse_slot(char *opt);
void pci_populate_msicap(struct msicap *cap, int msgs, int nextptr);
int pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum);
int pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
uint64_t value);
uint64_t pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size);
int pci_count_lintr(int bus);
void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg);
void pci_write_dsdt(void);
uint64_t pci_ecfg_base(void);
int pci_bus_configured(int bus);
static __inline void
pci_set_cfgdata8(struct pci_devinst *pi, int offset, uint8_t val)
{
assert(offset <= PCI_REGMAX);
*(uint8_t *)(pi->pi_cfgdata + offset) = val;
}
static __inline void
pci_set_cfgdata16(struct pci_devinst *pi, int offset, uint16_t val)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
*(uint16_t *)(pi->pi_cfgdata + offset) = val;
}
static __inline void
pci_set_cfgdata32(struct pci_devinst *pi, int offset, uint32_t val)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
*(uint32_t *)(pi->pi_cfgdata + offset) = val;
}
static __inline uint8_t
pci_get_cfgdata8(struct pci_devinst *pi, int offset)
{
assert(offset <= PCI_REGMAX);
return (*(uint8_t *)(pi->pi_cfgdata + offset));
}
static __inline uint16_t
pci_get_cfgdata16(struct pci_devinst *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
return (*(uint16_t *)(pi->pi_cfgdata + offset));
}
static __inline uint32_t
pci_get_cfgdata32(struct pci_devinst *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
return (*(uint32_t *)(pi->pi_cfgdata + offset));
}
#endif /* _PCI_EMUL_H_ */

pci_passthru.c (deleted file)

@@ -1,790 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#include <sys/pciio.h>
#include <sys/ioctl.h>
#include <dev/io/iodev.h>
#include <dev/pci/pcireg.h>
#include <machine/iodev.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include "pci_emul.h"
#include "mem.h"
#ifndef _PATH_DEVPCI
#define _PATH_DEVPCI "/dev/pci"
#endif
#ifndef _PATH_DEVIO
#define _PATH_DEVIO "/dev/io"
#endif
#define LEGACY_SUPPORT 1
#define MSIX_TABLE_COUNT(ctrl) (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
#define MSIX_CAPLEN 12
static int pcifd = -1;
static int iofd = -1;
struct passthru_softc {
struct pci_devinst *psc_pi;
struct pcibar psc_bar[PCI_BARMAX + 1];
struct {
int capoff;
int msgctrl;
int emulated;
} psc_msi;
struct {
int capoff;
} psc_msix;
struct pcisel psc_sel;
};
static int
msi_caplen(int msgctrl)
{
int len;
len = 10; /* minimum length of msi capability */
if (msgctrl & PCIM_MSICTRL_64BIT)
len += 4;
#if 0
/*
* Ignore the 'mask' and 'pending' bits in the MSI capability.
* We'll let the guest manipulate them directly.
*/
if (msgctrl & PCIM_MSICTRL_VECTOR)
len += 10;
#endif
return (len);
}
static uint32_t
read_config(const struct pcisel *sel, long reg, int width)
{
struct pci_io pi;
bzero(&pi, sizeof(pi));
pi.pi_sel = *sel;
pi.pi_reg = reg;
pi.pi_width = width;
if (ioctl(pcifd, PCIOCREAD, &pi) < 0)
return (0); /* XXX */
else
return (pi.pi_data);
}
static void
write_config(const struct pcisel *sel, long reg, int width, uint32_t data)
{
struct pci_io pi;
bzero(&pi, sizeof(pi));
pi.pi_sel = *sel;
pi.pi_reg = reg;
pi.pi_width = width;
pi.pi_data = data;
(void)ioctl(pcifd, PCIOCWRITE, &pi); /* XXX */
}
#ifdef LEGACY_SUPPORT
static int
passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
{
int capoff, i;
struct msicap msicap;
u_char *capdata;
pci_populate_msicap(&msicap, msgnum, nextptr);
/*
* XXX
* Copy the msi capability structure in the last 16 bytes of the
* config space. This is wrong because it could shadow something
* useful to the device.
*/
capoff = 256 - roundup(sizeof(msicap), 4);
capdata = (u_char *)&msicap;
for (i = 0; i < sizeof(msicap); i++)
pci_set_cfgdata8(pi, capoff + i, capdata[i]);
return (capoff);
}
#endif /* LEGACY_SUPPORT */
static int
cfginitmsi(struct passthru_softc *sc)
{
int i, ptr, capptr, cap, sts, caplen, table_size;
uint32_t u32;
struct pcisel sel;
struct pci_devinst *pi;
struct msixcap msixcap;
uint32_t *msixcap_ptr;
pi = sc->psc_pi;
sel = sc->psc_sel;
/*
* Parse the capabilities and cache the location of the MSI
* and MSI-X capabilities.
*/
sts = read_config(&sel, PCIR_STATUS, 2);
if (sts & PCIM_STATUS_CAPPRESENT) {
ptr = read_config(&sel, PCIR_CAP_PTR, 1);
while (ptr != 0 && ptr != 0xff) {
cap = read_config(&sel, ptr + PCICAP_ID, 1);
if (cap == PCIY_MSI) {
/*
* Copy the MSI capability into the config
* space of the emulated pci device
*/
sc->psc_msi.capoff = ptr;
sc->psc_msi.msgctrl = read_config(&sel,
ptr + 2, 2);
sc->psc_msi.emulated = 0;
caplen = msi_caplen(sc->psc_msi.msgctrl);
capptr = ptr;
while (caplen > 0) {
u32 = read_config(&sel, capptr, 4);
pci_set_cfgdata32(pi, capptr, u32);
caplen -= 4;
capptr += 4;
}
} else if (cap == PCIY_MSIX) {
/*
* Copy the MSI-X capability
*/
sc->psc_msix.capoff = ptr;
caplen = 12;
msixcap_ptr = (uint32_t*) &msixcap;
capptr = ptr;
while (caplen > 0) {
u32 = read_config(&sel, capptr, 4);
*msixcap_ptr = u32;
pci_set_cfgdata32(pi, capptr, u32);
caplen -= 4;
capptr += 4;
msixcap_ptr++;
}
}
ptr = read_config(&sel, ptr + PCICAP_NEXTPTR, 1);
}
}
if (sc->psc_msix.capoff != 0) {
pi->pi_msix.pba_bar =
msixcap.pba_info & PCIM_MSIX_BIR_MASK;
pi->pi_msix.pba_offset =
msixcap.pba_info & ~PCIM_MSIX_BIR_MASK;
pi->pi_msix.table_bar =
msixcap.table_info & PCIM_MSIX_BIR_MASK;
pi->pi_msix.table_offset =
msixcap.table_info & ~PCIM_MSIX_BIR_MASK;
pi->pi_msix.table_count = MSIX_TABLE_COUNT(msixcap.msgctrl);
pi->pi_msix.pba_size = PBA_SIZE(pi->pi_msix.table_count);
/* Allocate the emulated MSI-X table array */
table_size = pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
pi->pi_msix.table = calloc(1, table_size);
/* Mask all table entries */
for (i = 0; i < pi->pi_msix.table_count; i++) {
pi->pi_msix.table[i].vector_control |=
PCIM_MSIX_VCTRL_MASK;
}
}
#ifdef LEGACY_SUPPORT
/*
* If the passthrough device does not support MSI then craft a
* MSI capability for it. We link the new MSI capability at the
* head of the list of capabilities.
*/
if ((sts & PCIM_STATUS_CAPPRESENT) != 0 && sc->psc_msi.capoff == 0) {
int origptr, msiptr;
origptr = read_config(&sel, PCIR_CAP_PTR, 1);
msiptr = passthru_add_msicap(pi, 1, origptr);
sc->psc_msi.capoff = msiptr;
sc->psc_msi.msgctrl = pci_get_cfgdata16(pi, msiptr + 2);
sc->psc_msi.emulated = 1;
pci_set_cfgdata8(pi, PCIR_CAP_PTR, msiptr);
}
#endif
/* Make sure one of the capabilities is present */
if (sc->psc_msi.capoff == 0 && sc->psc_msix.capoff == 0)
return (-1);
else
return (0);
}
static uint64_t
msix_table_read(struct passthru_softc *sc, uint64_t offset, int size)
{
struct pci_devinst *pi;
struct msix_table_entry *entry;
uint8_t *src8;
uint16_t *src16;
uint32_t *src32;
uint64_t *src64;
uint64_t data;
size_t entry_offset;
int index;
pi = sc->psc_pi;
if (offset < pi->pi_msix.table_offset)
return (-1);
offset -= pi->pi_msix.table_offset;
index = offset / MSIX_TABLE_ENTRY_SIZE;
if (index >= pi->pi_msix.table_count)
return (-1);
entry = &pi->pi_msix.table[index];
entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;
switch(size) {
case 1:
src8 = (uint8_t *)((void *)entry + entry_offset);
data = *src8;
break;
case 2:
src16 = (uint16_t *)((void *)entry + entry_offset);
data = *src16;
break;
case 4:
src32 = (uint32_t *)((void *)entry + entry_offset);
data = *src32;
break;
case 8:
src64 = (uint64_t *)((void *)entry + entry_offset);
data = *src64;
break;
default:
return (-1);
}
return (data);
}
static void
msix_table_write(struct vmctx *ctx, int vcpu, struct passthru_softc *sc,
uint64_t offset, int size, uint64_t data)
{
struct pci_devinst *pi;
struct msix_table_entry *entry;
uint32_t *dest;
size_t entry_offset;
uint32_t vector_control;
int error, index;
pi = sc->psc_pi;
if (offset < pi->pi_msix.table_offset)
return;
offset -= pi->pi_msix.table_offset;
index = offset / MSIX_TABLE_ENTRY_SIZE;
if (index >= pi->pi_msix.table_count)
return;
entry = &pi->pi_msix.table[index];
entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;
/* Only 4 byte naturally-aligned writes are supported */
assert(size == 4);
assert(entry_offset % 4 == 0);
vector_control = entry->vector_control;
dest = (uint32_t *)((void *)entry + entry_offset);
*dest = data;
/* If MSI-X hasn't been enabled, do nothing */
if (pi->pi_msix.enabled) {
/* If the entry is masked, don't set it up */
if ((entry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0 ||
(vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
error = vm_setup_pptdev_msix(ctx, vcpu,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, index, entry->addr,
entry->msg_data, entry->vector_control);
}
}
}
static int
init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base)
{
int b, s, f;
int error, idx;
size_t len, remaining;
uint32_t table_size, table_offset;
uint32_t pba_size, pba_offset;
vm_paddr_t start;
struct pci_devinst *pi = sc->psc_pi;
assert(pci_msix_table_bar(pi) >= 0 && pci_msix_pba_bar(pi) >= 0);
b = sc->psc_sel.pc_bus;
s = sc->psc_sel.pc_dev;
f = sc->psc_sel.pc_func;
/*
* If the MSI-X table BAR maps memory intended for
* other uses, it is at least assured that the table
* either resides in its own page within the region,
* or it resides in a page shared with only the PBA.
*/
table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
table_size = pi->pi_msix.table_offset - table_offset;
table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
table_size = roundup2(table_size, 4096);
if (pi->pi_msix.pba_bar == pi->pi_msix.table_bar) {
pba_offset = pi->pi_msix.pba_offset;
pba_size = pi->pi_msix.pba_size;
if (pba_offset >= table_offset + table_size ||
table_offset >= pba_offset + pba_size) {
/*
* The PBA can reside in the same BAR as the MSI-x
* tables as long as it does not overlap with any
* naturally aligned page occupied by the tables.
*/
} else {
/* Need to also emulate the PBA, not supported yet */
printf("Unsupported MSI-X configuration: %d/%d/%d\n",
b, s, f);
return (-1);
}
}
idx = pi->pi_msix.table_bar;
start = pi->pi_bar[idx].addr;
remaining = pi->pi_bar[idx].size;
/* Map everything before the MSI-X table */
if (table_offset > 0) {
len = table_offset;
error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
if (error)
return (error);
base += len;
start += len;
remaining -= len;
}
/* Skip the MSI-X table */
base += table_size;
start += table_size;
remaining -= table_size;
/* Map everything beyond the end of the MSI-X table */
if (remaining > 0) {
len = remaining;
error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
if (error)
return (error);
}
return (0);
}
static int
cfginitbar(struct vmctx *ctx, struct passthru_softc *sc)
{
int i, error;
struct pci_devinst *pi;
struct pci_bar_io bar;
enum pcibar_type bartype;
uint64_t base, size;
pi = sc->psc_pi;
/*
* Initialize BAR registers
*/
for (i = 0; i <= PCI_BARMAX; i++) {
bzero(&bar, sizeof(bar));
bar.pbi_sel = sc->psc_sel;
bar.pbi_reg = PCIR_BAR(i);
if (ioctl(pcifd, PCIOCGETBAR, &bar) < 0)
continue;
if (PCI_BAR_IO(bar.pbi_base)) {
bartype = PCIBAR_IO;
base = bar.pbi_base & PCIM_BAR_IO_BASE;
} else {
switch (bar.pbi_base & PCIM_BAR_MEM_TYPE) {
case PCIM_BAR_MEM_64:
bartype = PCIBAR_MEM64;
break;
default:
bartype = PCIBAR_MEM32;
break;
}
base = bar.pbi_base & PCIM_BAR_MEM_BASE;
}
size = bar.pbi_length;
if (bartype != PCIBAR_IO) {
if (((base | size) & PAGE_MASK) != 0) {
printf("passthru device %d/%d/%d BAR %d: "
"base %#lx or size %#lx not page aligned\n",
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, i, base, size);
return (-1);
}
}
/* Cache information about the "real" BAR */
sc->psc_bar[i].type = bartype;
sc->psc_bar[i].size = size;
sc->psc_bar[i].addr = base;
/* Allocate the BAR in the guest I/O or MMIO space */
error = pci_emul_alloc_pbar(pi, i, base, bartype, size);
if (error)
return (-1);
/* The MSI-X table needs special handling */
if (i == pci_msix_table_bar(pi)) {
error = init_msix_table(ctx, sc, base);
if (error)
return (-1);
} else if (bartype != PCIBAR_IO) {
/* Map the physical BAR in the guest MMIO space */
error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
pi->pi_bar[i].addr, pi->pi_bar[i].size, base);
if (error)
return (-1);
}
/*
* 64-bit BAR takes up two slots so skip the next one.
*/
if (bartype == PCIBAR_MEM64) {
i++;
assert(i <= PCI_BARMAX);
sc->psc_bar[i].type = PCIBAR_MEMHI64;
}
}
return (0);
}
static int
cfginit(struct vmctx *ctx, struct pci_devinst *pi, int bus, int slot, int func)
{
int error;
struct passthru_softc *sc;
error = 1;
sc = pi->pi_arg;
bzero(&sc->psc_sel, sizeof(struct pcisel));
sc->psc_sel.pc_bus = bus;
sc->psc_sel.pc_dev = slot;
sc->psc_sel.pc_func = func;
if (cfginitmsi(sc) != 0)
goto done;
if (cfginitbar(ctx, sc) != 0)
goto done;
error = 0; /* success */
done:
return (error);
}
static int
passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
int bus, slot, func, error;
struct passthru_softc *sc;
sc = NULL;
error = 1;
if (pcifd < 0) {
pcifd = open(_PATH_DEVPCI, O_RDWR, 0);
if (pcifd < 0)
goto done;
}
if (iofd < 0) {
iofd = open(_PATH_DEVIO, O_RDWR, 0);
if (iofd < 0)
goto done;
}
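/* opts names the host device to pass through as "bus/slot/function", e.g. "2/0/0" */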
if (opts == NULL ||
sscanf(opts, "%d/%d/%d", &bus, &slot, &func) != 3)
goto done;
if (vm_assign_pptdev(ctx, bus, slot, func) != 0)
goto done;
sc = calloc(1, sizeof(struct passthru_softc));
pi->pi_arg = sc;
sc->psc_pi = pi;
/* initialize config space */
if ((error = cfginit(ctx, pi, bus, slot, func)) != 0)
goto done;
error = 0; /* success */
done:
if (error) {
free(sc);
vm_unassign_pptdev(ctx, bus, slot, func);
}
return (error);
}
static int
bar_access(int coff)
{
if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1))
return (1);
else
return (0);
}
static int
msicap_access(struct passthru_softc *sc, int coff)
{
int caplen;
if (sc->psc_msi.capoff == 0)
return (0);
caplen = msi_caplen(sc->psc_msi.msgctrl);
if (coff >= sc->psc_msi.capoff && coff < sc->psc_msi.capoff + caplen)
return (1);
else
return (0);
}
static int
msixcap_access(struct passthru_softc *sc, int coff)
{
if (sc->psc_msix.capoff == 0)
return (0);
return (coff >= sc->psc_msix.capoff &&
coff < sc->psc_msix.capoff + MSIX_CAPLEN);
}
static int
passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int coff, int bytes, uint32_t *rv)
{
struct passthru_softc *sc;
sc = pi->pi_arg;
/*
* PCI BARs and MSI capability are emulated.
*/
if (bar_access(coff) || msicap_access(sc, coff))
return (-1);
#ifdef LEGACY_SUPPORT
/*
* Emulate PCIR_CAP_PTR if this device does not support MSI capability
* natively.
*/
if (sc->psc_msi.emulated) {
if (coff >= PCIR_CAP_PTR && coff < PCIR_CAP_PTR + 4)
return (-1);
}
#endif
/* Everything else just read from the device's config space */
*rv = read_config(&sc->psc_sel, coff, bytes);
return (0);
}
static int
passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int coff, int bytes, uint32_t val)
{
int error, msix_table_entries, i;
struct passthru_softc *sc;
sc = pi->pi_arg;
/*
* PCI BARs are emulated
*/
if (bar_access(coff))
return (-1);
/*
* MSI capability is emulated
*/
if (msicap_access(sc, coff)) {
msicap_cfgwrite(pi, sc->psc_msi.capoff, coff, bytes, val);
error = vm_setup_pptdev_msi(ctx, vcpu, sc->psc_sel.pc_bus,
sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
pi->pi_msi.addr, pi->pi_msi.msg_data,
pi->pi_msi.maxmsgnum);
if (error != 0) {
printf("vm_setup_pptdev_msi error %d\r\n", errno);
exit(1);
}
return (0);
}
if (msixcap_access(sc, coff)) {
msixcap_cfgwrite(pi, sc->psc_msix.capoff, coff, bytes, val);
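/* if MSI-X is enabled, resync every guest table entry with the host */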
if (pi->pi_msix.enabled) {
msix_table_entries = pi->pi_msix.table_count;
for (i = 0; i < msix_table_entries; i++) {
error = vm_setup_pptdev_msix(ctx, vcpu,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, i,
pi->pi_msix.table[i].addr,
pi->pi_msix.table[i].msg_data,
pi->pi_msix.table[i].vector_control);
if (error) {
printf("vm_setup_pptdev_msix error "
"%d\r\n", errno);
exit(1);
}
}
}
return (0);
}
#ifdef LEGACY_SUPPORT
/*
* If this device does not support MSI natively then we cannot let
* the guest disable legacy interrupts from the device. It is the
* legacy interrupt that is triggering the virtual MSI to the guest.
*/
if (sc->psc_msi.emulated && pci_msi_enabled(pi)) {
if (coff == PCIR_COMMAND && bytes == 2)
val &= ~PCIM_CMD_INTxDIS;
}
#endif
write_config(&sc->psc_sel, coff, bytes, val);
return (0);
}
static void
passthru_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value)
{
struct passthru_softc *sc;
struct iodev_pio_req pio;
sc = pi->pi_arg;
if (baridx == pci_msix_table_bar(pi)) {
msix_table_write(ctx, vcpu, sc, offset, size, value);
} else {
assert(pi->pi_bar[baridx].type == PCIBAR_IO);
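/* forward the access to the host I/O port via the /dev/io IODEV_PIO ioctl */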
bzero(&pio, sizeof(struct iodev_pio_req));
pio.access = IODEV_PIO_WRITE;
pio.port = sc->psc_bar[baridx].addr + offset;
pio.width = size;
pio.val = value;
(void)ioctl(iofd, IODEV_PIO, &pio);
}
}
static uint64_t
passthru_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size)
{
struct passthru_softc *sc;
struct iodev_pio_req pio;
uint64_t val;
sc = pi->pi_arg;
if (baridx == pci_msix_table_bar(pi)) {
val = msix_table_read(sc, offset, size);
} else {
assert(pi->pi_bar[baridx].type == PCIBAR_IO);
bzero(&pio, sizeof(struct iodev_pio_req));
pio.access = IODEV_PIO_READ;
pio.port = sc->psc_bar[baridx].addr + offset;
pio.width = size;
pio.val = 0;
(void)ioctl(iofd, IODEV_PIO, &pio);
val = pio.val;
}
return (val);
}
struct pci_devemu passthru = {
.pe_emu = "passthru",
.pe_init = passthru_init,
.pe_cfgwrite = passthru_cfgwrite,
.pe_cfgread = passthru_cfgread,
.pe_barwrite = passthru_write,
.pe_barread = passthru_read,
};
PCI_EMUL_SET(passthru);


@@ -1,104 +0,0 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "bhyverun.h"
#include "spinup_ap.h"
static void
spinup_ap_realmode(struct vmctx *ctx, int newcpu, uint64_t *rip)
{
int vector, error;
uint16_t cs;
uint64_t desc_base;
uint32_t desc_limit, desc_access;
vector = *rip >> PAGE_SHIFT;
*rip = 0;
/*
* Update the %cs and %rip of the guest so that it starts
* executing real mode code at 'vector << 12'.
*/
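/* e.g. a SIPI vector of 0x09 yields %cs:%ip = 0x0900:0x0000, i.e. physical address 0x9000 */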
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_RIP, *rip);
assert(error == 0);
error = vm_get_desc(ctx, newcpu, VM_REG_GUEST_CS, &desc_base,
&desc_limit, &desc_access);
assert(error == 0);
desc_base = vector << PAGE_SHIFT;
error = vm_set_desc(ctx, newcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
assert(error == 0);
cs = (vector << PAGE_SHIFT) >> 4;
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_CS, cs);
assert(error == 0);
}
int
spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip)
{
int error;
assert(newcpu != 0);
assert(newcpu < guest_ncpus);
error = vcpu_reset(ctx, newcpu);
assert(error == 0);
fbsdrun_set_capabilities(ctx, newcpu);
/*
* Enable the 'unrestricted guest' mode for 'newcpu'.
*
* Set up the processor state in power-on 16-bit mode, with the CS:IP
* init'd to the specified low-mem 4K page.
*/
error = vm_set_capability(ctx, newcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
assert(error == 0);
spinup_ap_realmode(ctx, newcpu, &rip);
fbsdrun_addcpu(ctx, vcpu, newcpu, rip);
return (newcpu);
}


@@ -1,230 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/cpufunc.h>
#include <machine/vmm.h>
#include <machine/specialreg.h>
#include <vmmapi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "xmsr.h"
static int cpu_vendor_intel, cpu_vendor_amd;
int
emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t val)
{
if (cpu_vendor_intel) {
switch (num) {
case 0xd04: /* Sandy Bridge uncore PMCs */
case 0xc24:
return (0);
case MSR_BIOS_UPDT_TRIG:
return (0);
case MSR_BIOS_SIGN:
return (0);
default:
break;
}
} else if (cpu_vendor_amd) {
switch (num) {
case MSR_HWCR:
/*
* Ignore writes to hardware configuration MSR.
*/
return (0);
case MSR_NB_CFG1:
case MSR_IC_CFG:
return (0); /* Ignore writes */
case MSR_PERFEVSEL0:
case MSR_PERFEVSEL1:
case MSR_PERFEVSEL2:
case MSR_PERFEVSEL3:
/* Ignore writes to the PerfEvtSel MSRs */
return (0);
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
/* Ignore writes to the PerfCtr MSRs */
return (0);
case MSR_P_STATE_CONTROL:
/* Ignore write to change the P-state */
return (0);
default:
break;
}
}
return (-1);
}
int
emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t *val)
{
int error = 0;
if (cpu_vendor_intel) {
switch (num) {
case MSR_BIOS_SIGN:
case MSR_IA32_PLATFORM_ID:
case MSR_PKG_ENERGY_STATUS:
case MSR_PP0_ENERGY_STATUS:
case MSR_PP1_ENERGY_STATUS:
case MSR_DRAM_ENERGY_STATUS:
*val = 0;
break;
case MSR_RAPL_POWER_UNIT:
/*
* Use the default value documented in section
* "RAPL Interfaces" in Intel SDM vol3.
*/
*val = 0x000a1003;
break;
default:
error = -1;
break;
}
} else if (cpu_vendor_amd) {
switch (num) {
case MSR_BIOS_SIGN:
*val = 0;
break;
case MSR_HWCR:
/*
* Bios and Kernel Developer's Guides for AMD Families
* 12H, 14H, 15H and 16H.
*/
*val = 0x01000010; /* Reset value */
*val |= 1 << 9; /* MONITOR/MWAIT disable */
break;
case MSR_NB_CFG1:
case MSR_IC_CFG:
/*
* The reset value is processor family dependent so
* just return 0.
*/
*val = 0;
break;
case MSR_PERFEVSEL0:
case MSR_PERFEVSEL1:
case MSR_PERFEVSEL2:
case MSR_PERFEVSEL3:
/*
* PerfEvtSel MSRs are not properly virtualized so just
* return zero.
*/
*val = 0;
break;
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
/*
* PerfCtr MSRs are not properly virtualized so just
* return zero.
*/
*val = 0;
break;
case MSR_SMM_ADDR:
case MSR_SMM_MASK:
/*
* Return the reset value defined in the AMD Bios and
* Kernel Developer's Guide.
*/
*val = 0;
break;
case MSR_P_STATE_LIMIT:
case MSR_P_STATE_CONTROL:
case MSR_P_STATE_STATUS:
case MSR_P_STATE_CONFIG(0): /* P0 configuration */
*val = 0;
break;
/*
* OpenBSD guests test bit 0 of this MSR to detect if the
* workaround for erratum 721 is already applied.
* http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf
*/
case 0xC0011029:
*val = 1;
break;
default:
error = -1;
break;
}
} else {
error = -1;
}
return (error);
}
int
init_msr(void)
{
int error;
u_int regs[4];
char cpu_vendor[13];
do_cpuid(0, regs);
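/* CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX order */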
((u_int *)&cpu_vendor)[0] = regs[1];
((u_int *)&cpu_vendor)[1] = regs[3];
((u_int *)&cpu_vendor)[2] = regs[2];
cpu_vendor[12] = '\0';
error = 0;
if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
cpu_vendor_amd = 1;
} else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
cpu_vendor_intel = 1;
} else {
fprintf(stderr, "Unknown cpu vendor \"%s\"\n", cpu_vendor);
error = -1;
}
return (error);
}


@@ -1,16 +0,0 @@
#
# $FreeBSD$
#
PROG= bhyvectl
SRCS= bhyvectl.c
MAN=
LIBADD= vmmapi
WARNS?= 3
CFLAGS+= -I${.CURDIR}/../../sys/amd64/vmm
.include <bsd.prog.mk>

File diff suppressed because it is too large


@@ -1,13 +0,0 @@
# $FreeBSD$
PROG= bhyveload
SRCS= bhyveload.c
MAN= bhyveload.8
LIBADD= vmmapi
WARNS?= 3
CFLAGS+=-I${.CURDIR}/../../sys/boot/userboot
.include <bsd.prog.mk>


@@ -1,157 +0,0 @@
.\"
.\" Copyright (c) 2012 NetApp Inc
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd January 7, 2012
.Dt BHYVELOAD 8
.Os
.Sh NAME
.Nm bhyveload
.Nd load a
.Fx
guest inside a bhyve virtual machine
.Sh SYNOPSIS
.Nm
.Op Fl c Ar cons-dev
.Op Fl d Ar disk-path
.Op Fl e Ar name=value
.Op Fl h Ar host-path
.Op Fl m Ar mem-size
.Ar vmname
.Sh DESCRIPTION
.Nm
is used to load a
.Fx
guest inside a
.Xr bhyve 4
virtual machine.
.Pp
.Nm
is based on
.Xr loader 8
and will present an interface identical to the
.Fx
loader on the user's terminal.
.Pp
The virtual machine is identified as
.Ar vmname
and will be created if it does not already exist.
.Sh OPTIONS
The following options are available:
.Bl -tag -width indent
.It Fl c Ar cons-dev
.Ar cons-dev
is a
.Xr tty 4
device to use for
.Nm
terminal I/O.
.Pp
The text string "stdio" is also accepted and selects the use of
unbuffered standard I/O. This is the default value.
.It Fl d Ar disk-path
The
.Ar disk-path
is the pathname of the guest's boot disk image.
.It Fl e Ar name=value
Set the FreeBSD loader environment variable
.Ar name
to
.Ar value .
.Pp
The option may be used more than once to set more than one environment
variable.
.It Fl h Ar host-path
The
.Ar host-path
is the directory at the top of the guest's boot filesystem.
.It Fl m Ar mem-size Xo
.Sm off
.Op Cm K | k | M | m | G | g | T | t
.Xc
.Sm on
.Ar mem-size
is the amount of memory allocated to the guest.
.Pp
The
.Ar mem-size
argument may be suffixed with one of
.Cm K ,
.Cm M ,
.Cm G
or
.Cm T
(either upper or lower case) to indicate a multiple of
Kilobytes, Megabytes, Gigabytes or Terabytes
respectively.
.Pp
The default value of
.Ar mem-size
is 256M.
.El
.Sh EXAMPLES
To create a virtual machine named
.Ar freebsd-vm
that boots off the ISO image
.Pa /freebsd/release.iso
and has 1GB memory allocated to it:
.Pp
.Dl "bhyveload -m 1G -d /freebsd/release.iso freebsd-vm"
.Pp
To create a virtual machine named
.Ar test-vm
with 256MB of memory allocated, the guest root filesystem under the host
directory
.Pa /usr/images/test
and terminal I/O sent to the
.Xr nmdm 4
device
.Pa /dev/nmdm1B
.Pp
.Dl "bhyveload -m 256MB -h /usr/images/test -c /dev/nmdm1B test-vm"
.Sh SEE ALSO
.Xr bhyve 4 ,
.Xr nmdm 4 ,
.Xr vmm 4 ,
.Xr bhyve 8 ,
.Xr loader 8
.Sh HISTORY
.Nm
first appeared in
.Fx 10.0 ,
and was developed at NetApp Inc.
.Sh AUTHORS
.Nm
was developed by
.An -nosplit
.An Neel Natu Aq Mt neel@FreeBSD.org
at NetApp Inc with a lot of help from
.An Doug Rabson Aq Mt dfr@FreeBSD.org .
.Sh BUGS
.Nm
can only load
.Fx
as a guest.


@@ -1,746 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*-
* Copyright (c) 2011 Google, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/disk.h>
#include <sys/queue.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <termios.h>
#include <unistd.h>
#include <vmmapi.h>
#include "userboot.h"
#define MB (1024 * 1024UL)
#define GB (1024 * 1024 * 1024UL)
#define BSP 0
#define NDISKS 32
static char *host_base;
static struct termios term, oldterm;
static int disk_fd[NDISKS];
static int ndisks;
static int consin_fd, consout_fd;
static char *vmname, *progname;
static struct vmctx *ctx;
static uint64_t gdtbase, cr3, rsp;
static void cb_exit(void *arg, int v);
/*
* Console i/o callbacks
*/
static void
cb_putc(void *arg, int ch)
{
char c = ch;
(void) write(consout_fd, &c, 1);
}
static int
cb_getc(void *arg)
{
char c;
if (read(consin_fd, &c, 1) == 1)
return (c);
return (-1);
}
static int
cb_poll(void *arg)
{
int n;
if (ioctl(consin_fd, FIONREAD, &n) >= 0)
return (n > 0);
return (0);
}
/*
* Host filesystem i/o callbacks
*/
struct cb_file {
int cf_isdir;
size_t cf_size;
struct stat cf_stat;
union {
int fd;
DIR *dir;
} cf_u;
};
static int
cb_open(void *arg, const char *filename, void **hp)
{
struct cb_file *cf;
char path[PATH_MAX];
if (!host_base)
return (ENOENT);
strlcpy(path, host_base, PATH_MAX);
if (path[strlen(path) - 1] == '/')
path[strlen(path) - 1] = 0;
strlcat(path, filename, PATH_MAX);
cf = malloc(sizeof(struct cb_file));
if (stat(path, &cf->cf_stat) < 0) {
free(cf);
return (errno);
}
cf->cf_size = cf->cf_stat.st_size;
if (S_ISDIR(cf->cf_stat.st_mode)) {
cf->cf_isdir = 1;
cf->cf_u.dir = opendir(path);
if (!cf->cf_u.dir)
goto out;
*hp = cf;
return (0);
}
if (S_ISREG(cf->cf_stat.st_mode)) {
cf->cf_isdir = 0;
cf->cf_u.fd = open(path, O_RDONLY);
if (cf->cf_u.fd < 0)
goto out;
*hp = cf;
return (0);
}
out:
free(cf);
return (EINVAL);
}
static int
cb_close(void *arg, void *h)
{
struct cb_file *cf = h;
if (cf->cf_isdir)
closedir(cf->cf_u.dir);
else
close(cf->cf_u.fd);
free(cf);
return (0);
}
static int
cb_isdir(void *arg, void *h)
{
struct cb_file *cf = h;
return (cf->cf_isdir);
}
static int
cb_read(void *arg, void *h, void *buf, size_t size, size_t *resid)
{
struct cb_file *cf = h;
ssize_t sz;
if (cf->cf_isdir)
return (EINVAL);
sz = read(cf->cf_u.fd, buf, size);
if (sz < 0)
return (EINVAL);
*resid = size - sz;
return (0);
}
static int
cb_readdir(void *arg, void *h, uint32_t *fileno_return, uint8_t *type_return,
size_t *namelen_return, char *name)
{
struct cb_file *cf = h;
struct dirent *dp;
if (!cf->cf_isdir)
return (EINVAL);
dp = readdir(cf->cf_u.dir);
if (!dp)
return (ENOENT);
/*
* Note: d_namlen is in the range 0..255 and therefore less
* than PATH_MAX so we don't need to test before copying.
*/
*fileno_return = dp->d_fileno;
*type_return = dp->d_type;
*namelen_return = dp->d_namlen;
memcpy(name, dp->d_name, dp->d_namlen);
name[dp->d_namlen] = 0;
return (0);
}
static int
cb_seek(void *arg, void *h, uint64_t offset, int whence)
{
struct cb_file *cf = h;
if (cf->cf_isdir)
return (EINVAL);
if (lseek(cf->cf_u.fd, offset, whence) < 0)
return (errno);
return (0);
}
static int
cb_stat(void *arg, void *h, int *mode, int *uid, int *gid, uint64_t *size)
{
struct cb_file *cf = h;
*mode = cf->cf_stat.st_mode;
*uid = cf->cf_stat.st_uid;
*gid = cf->cf_stat.st_gid;
*size = cf->cf_stat.st_size;
return (0);
}
/*
* Disk image i/o callbacks
*/
static int
cb_diskread(void *arg, int unit, uint64_t from, void *to, size_t size,
size_t *resid)
{
ssize_t n;
if (unit < 0 || unit >= ndisks)
return (EIO);
n = pread(disk_fd[unit], to, size, from);
if (n < 0)
return (errno);
*resid = size - n;
return (0);
}
static int
cb_diskioctl(void *arg, int unit, u_long cmd, void *data)
{
struct stat sb;
if (unit < 0 || unit >= ndisks)
return (EBADF);
switch (cmd) {
case DIOCGSECTORSIZE:
*(u_int *)data = 512;
break;
case DIOCGMEDIASIZE:
if (fstat(disk_fd[unit], &sb) == 0)
*(off_t *)data = sb.st_size;
else
return (ENOTTY);
break;
default:
return (ENOTTY);
}
return (0);
}
/*
* Guest virtual machine i/o callbacks
*/
static int
cb_copyin(void *arg, const void *from, uint64_t to, size_t size)
{
char *ptr;
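/* mask the address down to the low 2 GB of guest physical memory (the loader may pass kernel virtual addresses) */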
to &= 0x7fffffff;
ptr = vm_map_gpa(ctx, to, size);
if (ptr == NULL)
return (EFAULT);
memcpy(ptr, from, size);
return (0);
}
static int
cb_copyout(void *arg, uint64_t from, void *to, size_t size)
{
char *ptr;
from &= 0x7fffffff;
ptr = vm_map_gpa(ctx, from, size);
if (ptr == NULL)
return (EFAULT);
memcpy(to, ptr, size);
return (0);
}
static void
cb_setreg(void *arg, int r, uint64_t v)
{
int error;
enum vm_reg_name vmreg;
vmreg = VM_REG_LAST;
switch (r) {
case 4:
vmreg = VM_REG_GUEST_RSP;
rsp = v;
break;
default:
break;
}
if (vmreg == VM_REG_LAST) {
printf("test_setreg(%d): not implemented\n", r);
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
error = vm_set_register(ctx, BSP, vmreg, v);
if (error) {
perror("vm_set_register");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
}
static void
cb_setmsr(void *arg, int r, uint64_t v)
{
int error;
enum vm_reg_name vmreg;
vmreg = VM_REG_LAST;
switch (r) {
case MSR_EFER:
vmreg = VM_REG_GUEST_EFER;
break;
default:
break;
}
if (vmreg == VM_REG_LAST) {
printf("test_setmsr(%d): not implemented\n", r);
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
error = vm_set_register(ctx, BSP, vmreg, v);
if (error) {
perror("vm_set_msr");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
}
static void
cb_setcr(void *arg, int r, uint64_t v)
{
int error;
enum vm_reg_name vmreg;
vmreg = VM_REG_LAST;
switch (r) {
case 0:
vmreg = VM_REG_GUEST_CR0;
break;
case 3:
vmreg = VM_REG_GUEST_CR3;
cr3 = v;
break;
case 4:
vmreg = VM_REG_GUEST_CR4;
break;
default:
break;
}
if (vmreg == VM_REG_LAST) {
printf("test_setcr(%d): not implemented\n", r);
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
error = vm_set_register(ctx, BSP, vmreg, v);
if (error) {
perror("vm_set_cr");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
}
static void
cb_setgdt(void *arg, uint64_t base, size_t size)
{
int error;
error = vm_set_desc(ctx, BSP, VM_REG_GUEST_GDTR, base, size - 1, 0);
if (error != 0) {
perror("vm_set_desc(gdt)");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
gdtbase = base;
}
static void
cb_exec(void *arg, uint64_t rip)
{
int error;
if (cr3 == 0)
error = vm_setup_freebsd_registers_i386(ctx, BSP, rip, gdtbase,
rsp);
else
error = vm_setup_freebsd_registers(ctx, BSP, rip, cr3, gdtbase,
rsp);
if (error) {
perror("vm_setup_freebsd_registers");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
cb_exit(NULL, 0);
}
/*
* Misc
*/
static void
cb_delay(void *arg, int usec)
{
usleep(usec);
}
static void
cb_exit(void *arg, int v)
{
tcsetattr(consout_fd, TCSAFLUSH, &oldterm);
exit(v);
}
static void
cb_getmem(void *arg, uint64_t *ret_lowmem, uint64_t *ret_highmem)
{
*ret_lowmem = vm_get_lowmem_size(ctx);
*ret_highmem = vm_get_highmem_size(ctx);
}
struct env {
const char *str; /* name=value */
SLIST_ENTRY(env) next;
};
static SLIST_HEAD(envhead, env) envhead;
static void
addenv(const char *str)
{
struct env *env;
env = malloc(sizeof(struct env));
env->str = str;
SLIST_INSERT_HEAD(&envhead, env, next);
}
static const char *
cb_getenv(void *arg, int num)
{
int i;
struct env *env;
i = 0;
SLIST_FOREACH(env, &envhead, next) {
if (i == num)
return (env->str);
i++;
}
return (NULL);
}
static struct loader_callbacks cb = {
.getc = cb_getc,
.putc = cb_putc,
.poll = cb_poll,
.open = cb_open,
.close = cb_close,
.isdir = cb_isdir,
.read = cb_read,
.readdir = cb_readdir,
.seek = cb_seek,
.stat = cb_stat,
.diskread = cb_diskread,
.diskioctl = cb_diskioctl,
.copyin = cb_copyin,
.copyout = cb_copyout,
.setreg = cb_setreg,
.setmsr = cb_setmsr,
.setcr = cb_setcr,
.setgdt = cb_setgdt,
.exec = cb_exec,
.delay = cb_delay,
.exit = cb_exit,
.getmem = cb_getmem,
.getenv = cb_getenv,
};
static int
altcons_open(char *path)
{
struct stat sb;
int err;
int fd;
/*
* Allow stdio to be passed in so that the same string
* can be used for the bhyveload console and bhyve com-port
* parameters
*/
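/* e.g. "-c /dev/nmdm0A" here and "-l com1,/dev/nmdm0A" on the bhyve command line */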
if (!strcmp(path, "stdio"))
return (0);
err = stat(path, &sb);
if (err == 0) {
if (!S_ISCHR(sb.st_mode))
err = ENOTSUP;
else {
fd = open(path, O_RDWR | O_NONBLOCK);
if (fd < 0)
err = errno;
else
consin_fd = consout_fd = fd;
}
}
return (err);
}
static int
disk_open(char *path)
{
int err, fd;
if (ndisks >= NDISKS)
return (ERANGE);
err = 0;
fd = open(path, O_RDONLY);
if (fd > 0) {
disk_fd[ndisks] = fd;
ndisks++;
} else
err = errno;
return (err);
}
static void
usage(void)
{
fprintf(stderr,
"usage: %s [-c <console-device>] [-d <disk-path>] [-e <name=value>]\n"
" %*s [-h <host-path>] [-m mem-size] <vmname>\n",
progname,
(int)strlen(progname), "");
exit(1);
}
int
main(int argc, char** argv)
{
void *h;
void (*func)(struct loader_callbacks *, void *, int, int);
uint64_t mem_size;
int opt, error, need_reinit;
progname = basename(argv[0]);
mem_size = 256 * MB;
consin_fd = STDIN_FILENO;
consout_fd = STDOUT_FILENO;
while ((opt = getopt(argc, argv, "c:d:e:h:m:")) != -1) {
switch (opt) {
case 'c':
error = altcons_open(optarg);
if (error != 0)
errx(EX_USAGE, "Could not open '%s'", optarg);
break;
case 'd':
error = disk_open(optarg);
if (error != 0)
errx(EX_USAGE, "Could not open '%s'", optarg);
break;
case 'e':
addenv(optarg);
break;
case 'h':
host_base = optarg;
break;
case 'm':
error = vm_parse_memsize(optarg, &mem_size);
if (error != 0)
errx(EX_USAGE, "Invalid memsize '%s'", optarg);
break;
case '?':
usage();
}
}
argc -= optind;
argv += optind;
if (argc != 1)
usage();
vmname = argv[0];
need_reinit = 0;
error = vm_create(vmname);
if (error) {
if (errno != EEXIST) {
perror("vm_create");
exit(1);
}
need_reinit = 1;
}
ctx = vm_open(vmname);
if (ctx == NULL) {
perror("vm_open");
exit(1);
}
if (need_reinit) {
error = vm_reinit(ctx);
if (error) {
perror("vm_reinit");
exit(1);
}
}
error = vm_setup_memory(ctx, mem_size, VM_MMAP_ALL);
if (error) {
perror("vm_setup_memory");
exit(1);
}
tcgetattr(consout_fd, &term);
oldterm = term;
cfmakeraw(&term);
term.c_cflag |= CLOCAL;
tcsetattr(consout_fd, TCSAFLUSH, &term);
h = dlopen("/boot/userboot.so", RTLD_LOCAL);
if (!h) {
printf("%s\n", dlerror());
return (1);
}
func = dlsym(h, "loader_main");
if (!func) {
printf("%s\n", dlerror());
return (1);
}
addenv("smbios.bios.vendor=BHYVE");
addenv("boot_serial=1");
func(&cb, NULL, USERBOOT_VERSION_3, ndisks);
}

73
config.mk Normal file

@@ -0,0 +1,73 @@
###############################################################################
# Config #
# #
# [XHYVE_CONFIG_ASSERT] VMM asserts (disable for release builds?) #
# [XHYVE_CONFIG_TRACE] VMM event tracer #
# [XHYVE_CONFIG_STATS] VMM event profiler #
###############################################################################
DEFINES := \
-DXHYVE_CONFIG_ASSERT
###############################################################################
# Toolchain #
###############################################################################
CC := clang
AS := clang
LD := clang
STRIP := strip
DSYM := dsymutil
ENV := \
LANG=en_US.US-ASCII
###############################################################################
# CFLAGS #
###############################################################################
CFLAGS_OPT := \
-Os \
-flto \
-fstrict-aliasing
CFLAGS_WARN := \
-Weverything \
-Werror \
-Wno-unknown-warning-option \
-Wno-reserved-id-macro \
-pedantic
CFLAGS_DIAG := \
-fmessage-length=152 \
-fdiagnostics-show-note-include-stack \
-fmacro-backtrace-limit=0 \
-fcolor-diagnostics
CFLAGS_DBG := \
-g
CFLAGS := \
-arch x86_64 \
-x c \
-std=c11 \
-fno-common \
-fvisibility=hidden \
$(DEFINES) \
$(CFLAGS_OPT) \
$(CFLAGS_WARN) \
$(CFLAGS_DIAG) \
$(CFLAGS_DBG)
###############################################################################
# LDFLAGS #
###############################################################################
LDFLAGS_DBG := \
-Xlinker -object_path_lto
LDFLAGS := \
-arch x86_64 \
-framework Hypervisor \
-framework vmnet \
$(LDFLAGS_DBG)


@@ -26,29 +26,27 @@
* $FreeBSD$
*/
#ifndef _ACPI_H_
#define _ACPI_H_
#pragma once
#define SCI_INT 9
#include <stdint.h>
#define SMI_CMD 0xb2
#define BHYVE_ACPI_ENABLE 0xa0
#define BHYVE_ACPI_DISABLE 0xa1
#define SCI_INT 9
#define PM1A_EVT_ADDR 0x400
#define PM1A_CNT_ADDR 0x404
#define SMI_CMD 0xb2
#define BHYVE_ACPI_ENABLE 0xa0
#define BHYVE_ACPI_DISABLE 0xa1
#define IO_PMTMR 0x408 /* 4-byte i/o port for the timer */
#define PM1A_EVT_ADDR 0x400
#define PM1A_EVT_ADDR2 0x402
#define PM1A_CNT_ADDR 0x404
struct vmctx;
#define IO_PMTMR 0x408 /* 4-byte i/o port for the timer */
int acpi_build(struct vmctx *ctx, int ncpu);
void dsdt_line(const char *fmt, ...);
void dsdt_fixed_ioport(uint16_t iobase, uint16_t length);
void dsdt_fixed_irq(uint8_t irq);
void dsdt_fixed_mem32(uint32_t base, uint32_t length);
void dsdt_indent(int levels);
void dsdt_unindent(int levels);
void sci_init(struct vmctx *ctx);
#endif /* _ACPI_H_ */
int acpi_build(int ncpu);
void dsdt_line(const char *fmt, ...);
void dsdt_fixed_ioport(uint16_t iobase, uint16_t length);
void dsdt_fixed_irq(uint8_t irq);
void dsdt_fixed_mem32(uint32_t base, uint32_t length);
void dsdt_indent(int levels);
void dsdt_unindent(int levels);
void sci_init(void);


@@ -27,8 +27,7 @@
* $FreeBSD$
*/
#ifndef _AHCI_H_
#define _AHCI_H_
#pragma once
/* ATA register defines */
#define ATA_DATA 0 /* (RW) data */
@@ -306,17 +305,15 @@
/* Just to be sure, if building as module. */
#if MAXPHYS < 512 * 1024
#undef MAXPHYS
#define MAXPHYS 512 * 1024
#define MAXPHYS 512 * 1024
#endif
/* Pessimistic prognosis on number of required S/G entries */
#define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8))
#define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8))
/* Command list. 32 commands. First, 1Kbyte aligned. */
#define AHCI_CL_OFFSET 0
#define AHCI_CL_SIZE 32
#define AHCI_CL_OFFSET 0
#define AHCI_CL_SIZE 32
/* Command tables. Up to 32 commands, Each, 128byte aligned. */
#define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS)
#define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16)
#define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS)
#define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16)
/* Total main work area. */
#define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots)
#endif /* _AHCI_H_ */
#define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots)


@@ -33,38 +33,37 @@
* another thread.
*/
#ifndef _BLOCK_IF_H_
#define _BLOCK_IF_H_
#pragma once
#include <sys/uio.h>
#include <sys/unistd.h>
#define BLOCKIF_IOV_MAX 33 /* not practical to be IOV_MAX */
#define BLOCKIF_IOV_MAX 33 /* not practical to be IOV_MAX */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct blockif_req {
struct iovec br_iov[BLOCKIF_IOV_MAX];
int br_iovcnt;
off_t br_offset;
ssize_t br_resid;
void (*br_callback)(struct blockif_req *req, int err);
void *br_param;
struct iovec br_iov[BLOCKIF_IOV_MAX];
int br_iovcnt;
off_t br_offset;
ssize_t br_resid;
void (*br_callback)(struct blockif_req *req, int err);
void *br_param;
};
#pragma clang diagnostic pop
struct blockif_ctxt;
struct blockif_ctxt *blockif_open(const char *optstr, const char *ident);
off_t blockif_size(struct blockif_ctxt *bc);
void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h,
uint8_t *s);
int blockif_sectsz(struct blockif_ctxt *bc);
void blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off);
int blockif_queuesz(struct blockif_ctxt *bc);
int blockif_is_ro(struct blockif_ctxt *bc);
int blockif_candelete(struct blockif_ctxt *bc);
int blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_close(struct blockif_ctxt *bc);
#endif /* _BLOCK_IF_H_ */
off_t blockif_size(struct blockif_ctxt *bc);
void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s);
int blockif_sectsz(struct blockif_ctxt *bc);
void blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off);
int blockif_queuesz(struct blockif_ctxt *bc);
int blockif_is_ro(struct blockif_ctxt *bc);
int blockif_candelete(struct blockif_ctxt *bc);
int blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_close(struct blockif_ctxt *bc);


@@ -26,9 +26,6 @@
* $FreeBSD$
*/
#ifndef _DBGPORT_H_
#define _DBGPORT_H_
#pragma once
void init_dbgport(int port);
#endif
void init_dbgport(int port);


@@ -0,0 +1,89 @@
#pragma once
#include <stdint.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
struct setup_header {
uint8_t setup_sects; /* The size of the setup in sectors */
uint16_t root_flags; /* If set, the root is mounted readonly */
uint32_t syssize; /* The size of the 32-bit code in 16-byte paras */
uint16_t ram_size; /* DO NOT USE - for bootsect.S use only */
uint16_t vid_mode; /* Video mode control */
uint16_t root_dev; /* Default root device number */
uint16_t boot_flag; /* 0xAA55 magic number */
uint16_t jump; /* Jump instruction */
uint32_t header; /* Magic signature "HdrS" */
uint16_t version; /* Boot protocol version supported */
uint32_t realmode_swtch; /* Boot loader hook (see below) */
uint16_t start_sys_seg; /* The load-low segment (0x1000) (obsolete) */
uint16_t kernel_version; /* Pointer to kernel version string */
uint8_t type_of_loader; /* Boot loader identifier */
uint8_t loadflags; /* Boot protocol option flags */
uint16_t setup_move_size; /* Move to high memory size (used with hooks) */
uint32_t code32_start; /* Boot loader hook (see below) */
uint32_t ramdisk_image; /* initrd load address (set by boot loader) */
uint32_t ramdisk_size; /* initrd size (set by boot loader) */
uint32_t bootsect_kludge; /* DO NOT USE - for bootsect.S use only */
uint16_t heap_end_ptr; /* Free memory after setup end */
uint8_t ext_loader_ver; /* Extended boot loader version */
uint8_t ext_loader_type; /* Extended boot loader ID */
uint32_t cmd_line_ptr; /* 32-bit pointer to the kernel command line */
uint32_t initrd_addr_max; /* Highest legal initrd address */
uint32_t kernel_alignment; /* Physical addr alignment required for kernel */
uint8_t relocatable_kernel; /* Whether kernel is relocatable or not */
uint8_t min_alignment; /* Minimum alignment, as a power of two */
uint16_t xloadflags; /* Boot protocol option flags */
uint32_t cmdline_size; /* Maximum size of the kernel command line */
uint32_t hardware_subarch; /* Hardware subarchitecture */
uint64_t hardware_subarch_data; /* Subarchitecture-specific data */
uint32_t payload_offset; /* Offset of kernel payload */
uint32_t payload_length; /* Length of kernel payload */
uint64_t setup_data; /* 64bit pointer to linked list of struct setup_data */
uint64_t pref_address; /* Preferred loading address */
uint32_t init_size; /* Linear memory required during initialization */
uint32_t handover_offset; /* Offset of handover entry point */
} __attribute__((packed));
struct zero_page {
uint8_t screen_info[64];
uint8_t apm_bios_info[20];
uint8_t _0[4];
uint64_t tboot_addr;
uint8_t ist_info[16];
uint8_t _1[16];
uint8_t hd0_info[16];
uint8_t hd1_info[16];
uint8_t sys_desc_table[16];
uint8_t olpc_ofw_header[16];
uint32_t ext_ramdisk_image;
uint32_t ext_ramdisk_size;
uint32_t ext_cmd_line_ptr;
uint8_t _2[116];
uint8_t edid_info[128];
uint8_t efi_info[32];
uint32_t alt_mem_k;
uint32_t scratch;
uint8_t e820_entries;
uint8_t eddbuf_entries;
uint8_t edd_mbr_sig_buf_entries;
uint8_t kbd_status;
uint8_t _3[3];
uint8_t sentinel;
uint8_t _4[1];
struct setup_header setup_header;
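/* the boot protocol places setup_header at offset 0x1f1; pad up to the EDD MBR signature buffer at 0x290 */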
uint8_t _5[(0x290 - 0x1f1 - sizeof(struct setup_header))];
uint32_t edd_mbr_sig_buffer[16];
struct {
uint64_t addr;
uint64_t size;
uint32_t type;
} __attribute__((packed)) e820_map[128];
uint8_t _6[48];
uint8_t eddbuf[492];
uint8_t _7[276];
} __attribute__((packed));
#pragma clang diagnostic pop
void kexec_init(char *kernel_path, char *initrd_path, char *cmdline);
uint64_t kexec(void);


@@ -26,54 +26,54 @@
* $FreeBSD$
*/
#ifndef _INOUT_H_
#define _INOUT_H_
#pragma once
#include <sys/linker_set.h>
#include <stdint.h>
#include <xhyve/support/linker_set.h>
struct vmctx;
struct vm_exit;
/*
* inout emulation handlers return 0 on success and -1 on failure.
*/
typedef int (*inout_func_t)(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
typedef int (*inout_func_t)(int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct inout_port {
const char *name;
int port;
int size;
int flags;
inout_func_t handler;
void *arg;
const char *name;
int port;
int size;
int flags;
inout_func_t handler;
void *arg;
};
#define IOPORT_F_IN 0x1
#define IOPORT_F_OUT 0x2
#define IOPORT_F_INOUT (IOPORT_F_IN | IOPORT_F_OUT)
#pragma clang diagnostic pop
#define IOPORT_F_IN 0x1
#define IOPORT_F_OUT 0x2
#define IOPORT_F_INOUT (IOPORT_F_IN | IOPORT_F_OUT)
/*
* The following flags are used internally and must not be used by
* device models.
*/
#define IOPORT_F_DEFAULT 0x80000000 /* claimed by default handler */
#define IOPORT_F_DEFAULT 0x80000000 /* claimed by default handler */
#define INOUT_PORT(name, port, flags, handler) \
static struct inout_port __CONCAT(__inout_port, __LINE__) = { \
#name, \
(port), \
1, \
(flags), \
(handler), \
0 \
}; \
DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
void init_inout(void);
int emulate_inout(struct vmctx *, int vcpu, struct vm_exit *vmexit,
int strict);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
void init_bvmcons(void);
#define INOUT_PORT(name, port, flags, handler) \
static struct inout_port __CONCAT(__inout_port, port) = { \
#name, \
(port), \
1, \
(flags), \
(handler), \
0 \
}; \
DATA_SET(inout_port_set, __CONCAT(__inout_port, port))
#endif /* _INOUT_H_ */
void init_inout(void);
int emulate_inout(int vcpu, struct vm_exit *vmexit, int strict);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
void init_bvmcons(void);


@@ -27,13 +27,10 @@
* $FreeBSD$
*/
#ifndef _IOAPIC_H_
#define _IOAPIC_H_
#pragma once
/*
* Allocate a PCI IRQ from the I/O APIC.
*/
void ioapic_init(struct vmctx *ctx);
int ioapic_pci_alloc_irq(void);
#endif
void ioapic_init(void);
int ioapic_pci_alloc_irq(void);


@@ -26,36 +26,36 @@
* $FreeBSD$
*/
#ifndef _MEM_H_
#define _MEM_H_
#pragma once
#include <sys/linker_set.h>
#include <stdint.h>
#include <xhyve/support/linker_set.h>
struct vmctx;
typedef int (*mem_func_t)(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2);
typedef int (*mem_func_t)(int vcpu, int dir, uint64_t addr, int size,
uint64_t *val, void *arg1, long arg2);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct mem_range {
const char *name;
int flags;
mem_func_t handler;
void *arg1;
long arg2;
uint64_t base;
uint64_t size;
const char *name;
int flags;
mem_func_t handler;
void *arg1;
long arg2;
uint64_t base;
uint64_t size;
};
#define MEM_F_READ 0x1
#define MEM_F_WRITE 0x2
#define MEM_F_RW 0x3
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
#pragma clang diagnostic pop
void init_mem(void);
int emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
#define MEM_F_READ 0x1
#define MEM_F_WRITE 0x2
#define MEM_F_RW 0x3
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
#endif /* _MEM_H_ */
void init_mem(void);
int emulate_mem(int vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);


@@ -26,8 +26,7 @@
* $FreeBSD$
*/
#ifndef _MEVENT_H_
#define _MEVENT_H_
#pragma once
enum ev_type {
EVF_READ,
@@ -38,14 +37,11 @@ enum ev_type {
struct mevent;
struct mevent *mevent_add(int fd, enum ev_type type,
void (*func)(int, enum ev_type, void *),
void *param);
int mevent_enable(struct mevent *evp);
int mevent_disable(struct mevent *evp);
int mevent_delete(struct mevent *evp);
int mevent_delete_close(struct mevent *evp);
struct mevent *mevent_add(int fd, enum ev_type type,
void (*func)(int, enum ev_type, void *), void *param);
int mevent_enable(struct mevent *evp);
int mevent_disable(struct mevent *evp);
int mevent_delete(struct mevent *evp);
int mevent_delete_close(struct mevent *evp);
void mevent_dispatch(void);
#endif /* _MEVENT_H_ */
void mevent_dispatch(void);


@@ -26,10 +26,7 @@
* $FreeBSD$
*/
#ifndef _MPTBL_H_
#define _MPTBL_H_
#pragma once
int mptable_build(struct vmctx *ctx, int ncpu);
void mptable_add_oemtbl(void *tbl, int tblsz);
#endif /* _MPTBL_H_ */
int mptable_build(int ncpu);
void mptable_add_oemtbl(void *tbl, int tblsz);

278
include/xhyve/pci_emul.h Normal file

@@ -0,0 +1,278 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <pthread.h>
#include <assert.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/pcireg.h>
#include <xhyve/support/linker_set.h>
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct pci_devinst;
struct memory_region;
struct pci_devemu {
/* name of device emulation */
char *pe_emu;
/* instance creation */
int (*pe_init)(struct pci_devinst *, char *opts);
/* ACPI DSDT enumeration */
void (*pe_write_dsdt)(struct pci_devinst *);
/* config space read/write callbacks */
int (*pe_cfgwrite)(int vcpu, struct pci_devinst *pi,
int offset, int bytes, uint32_t val);
int (*pe_cfgread)(int vcpu, struct pci_devinst *pi, int offset, int bytes,
uint32_t *retval);
/* BAR read/write callbacks */
void (*pe_barwrite)(int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value);
uint64_t (*pe_barread)(int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size);
};
#define PCI_EMUL_SET(x) DATA_SET(pci_devemu_set, x)
enum pcibar_type {
PCIBAR_NONE,
PCIBAR_IO,
PCIBAR_MEM32,
PCIBAR_MEM64,
PCIBAR_MEMHI64
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct pcibar {
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
};
#pragma clang diagnostic pop
#define PI_NAMESZ 40
struct msix_table_entry {
uint64_t addr;
uint32_t msg_data;
uint32_t vector_control;
};
/*
* In case the structure is modified to hold extra information, use a define
* for the size that should be emulated.
*/
#define MSIX_TABLE_ENTRY_SIZE 16
#define MAX_MSIX_TABLE_ENTRIES 2048
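/* PBA_SIZE: one pending bit per vector, rounded up to a 64-bit boundary, in bytes */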
#define PBA_SIZE(msgnum) (roundup2((msgnum), 64) / 8)
enum lintr_stat {
IDLE,
ASSERTED,
PENDING
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct pci_devinst {
struct pci_devemu *pi_d;
uint8_t pi_bus, pi_slot, pi_func;
char pi_name[PI_NAMESZ];
int pi_bar_getsize;
int pi_prevcap;
int pi_capend;
struct {
int8_t pin;
enum lintr_stat state;
int pirq_pin;
int ioapic_irq;
pthread_mutex_t lock;
} pi_lintr;
struct {
int enabled;
uint64_t addr;
uint64_t msg_data;
int maxmsgnum;
} pi_msi;
struct {
int enabled;
int table_bar;
int pba_bar;
uint32_t table_offset;
int table_count;
uint32_t pba_offset;
int pba_size;
int function_mask;
struct msix_table_entry *table; /* allocated at runtime */
} pi_msix;
void *pi_arg; /* devemu-private data */
u_char pi_cfgdata[PCI_REGMAX + 1];
struct pcibar pi_bar[PCI_BARMAX + 1];
};
#pragma clang diagnostic pop
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
struct msicap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t addrlo;
uint32_t addrhi;
uint16_t msgdata;
} __packed;
struct msixcap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t table_info; /* bar index and offset within it */
uint32_t pba_info; /* bar index and offset within it */
} __packed;
struct pciecap {
uint8_t capid;
uint8_t nextptr;
uint16_t pcie_capabilities;
uint32_t dev_capabilities; /* all devices */
uint16_t dev_control;
uint16_t dev_status;
uint32_t link_capabilities; /* devices with links */
uint16_t link_control;
uint16_t link_status;
uint32_t slot_capabilities; /* ports with slots */
uint16_t slot_control;
uint16_t slot_status;
uint16_t root_control; /* root ports */
uint16_t root_capabilities;
uint32_t root_status;
uint32_t dev_capabilities2; /* all devices */
uint16_t dev_control2;
uint16_t dev_status2;
uint32_t link_capabilities2; /* devices with links */
uint16_t link_control2;
uint16_t link_status2;
uint32_t slot_capabilities2; /* ports with slots */
uint16_t slot_control2;
uint16_t slot_status2;
} __packed;
#pragma clang diagnostic pop
typedef void (*pci_lintr_cb)(int b, int s, int pin, int pirq_pin,
int ioapic_irq, void *arg);
int init_pci(void);
void msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_devinst *pdi, int idx,
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx,
uint64_t hostbase, enum pcibar_type type, uint64_t size);
int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
void pci_generate_msi(struct pci_devinst *pi, int msgnum);
void pci_generate_msix(struct pci_devinst *pi, int msgnum);
void pci_lintr_assert(struct pci_devinst *pi);
void pci_lintr_deassert(struct pci_devinst *pi);
void pci_lintr_request(struct pci_devinst *pi);
int pci_msi_enabled(struct pci_devinst *pi);
int pci_msix_enabled(struct pci_devinst *pi);
int pci_msix_table_bar(struct pci_devinst *pi);
int pci_msix_pba_bar(struct pci_devinst *pi);
int pci_msi_msgnum(struct pci_devinst *pi);
int pci_parse_slot(char *opt);
void pci_populate_msicap(struct msicap *cap, int msgs, int nextptr);
int pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum);
int pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
uint64_t value);
uint64_t pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size);
int pci_count_lintr(int bus);
void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg);
void pci_write_dsdt(void);
uint64_t pci_ecfg_base(void);
int pci_bus_configured(int bus);
static __inline void
pci_set_cfgdata8(struct pci_devinst *pi, int offset, uint8_t val)
{
assert(offset <= PCI_REGMAX);
*(uint8_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)) = val;
}
static __inline void
pci_set_cfgdata16(struct pci_devinst *pi, int offset, uint16_t val)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
*(uint16_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)) = val;
}
static __inline void
pci_set_cfgdata32(struct pci_devinst *pi, int offset, uint32_t val)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
*(uint32_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)) = val;
}
static __inline uint8_t
pci_get_cfgdata8(struct pci_devinst *pi, int offset)
{
assert(offset <= PCI_REGMAX);
return (*(uint8_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)));
}
static __inline uint16_t
pci_get_cfgdata16(struct pci_devinst *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
return (*(uint16_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)));
}
static __inline uint32_t
pci_get_cfgdata32(struct pci_devinst *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
return (*(uint32_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)));
}


@@ -27,19 +27,16 @@
* $FreeBSD$
*/
#ifndef __PCI_IRQ_H__
#define __PCI_IRQ_H__
#pragma once
struct pci_devinst;
void pci_irq_assert(struct pci_devinst *pi);
void pci_irq_deassert(struct pci_devinst *pi);
void pci_irq_init(struct vmctx *ctx);
void pci_irq_reserve(int irq);
void pci_irq_use(int irq);
int pirq_alloc_pin(struct vmctx *ctx);
int pirq_irq(int pin);
uint8_t pirq_read(int pin);
void pirq_write(struct vmctx *ctx, int pin, uint8_t val);
#endif
void pci_irq_assert(struct pci_devinst *pi);
void pci_irq_deassert(struct pci_devinst *pi);
void pci_irq_init(void);
void pci_irq_reserve(int irq);
void pci_irq_use(int irq);
int pirq_alloc_pin(void);
int pirq_irq(int pin);
uint8_t pirq_read(int pin);
void pirq_write(int pin, uint8_t val);


@@ -26,10 +26,10 @@
* $FreeBSD$
*/
#ifndef _LPC_H_
#define _LPC_H_
#pragma once
#include <sys/linker_set.h>
#include <stdint.h>
#include <xhyve/support/linker_set.h>
typedef void (*lpc_write_dsdt_t)(void);
@@ -37,36 +37,37 @@ struct lpc_dsdt {
lpc_write_dsdt_t handler;
};
#define LPC_DSDT(handler) \
static struct lpc_dsdt __CONCAT(__lpc_dsdt, __LINE__) = { \
(handler), \
}; \
DATA_SET(lpc_dsdt_set, __CONCAT(__lpc_dsdt, __LINE__))
#define LPC_DSDT(handler) \
static struct lpc_dsdt __CONCAT(__lpc_dsdt, handler) = { \
(handler), \
}; \
DATA_SET(lpc_dsdt_set, __CONCAT(__lpc_dsdt, handler))
enum lpc_sysres_type {
LPC_SYSRES_IO,
LPC_SYSRES_MEM
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct lpc_sysres {
enum lpc_sysres_type type;
uint32_t base;
uint32_t length;
};
#pragma clang diagnostic pop
#define LPC_SYSRES(type, base, length) \
static struct lpc_sysres __CONCAT(__lpc_sysres, __LINE__) = { \
(type), \
(base), \
(length) \
}; \
DATA_SET(lpc_sysres_set, __CONCAT(__lpc_sysres, __LINE__))
#define LPC_SYSRES(type, base, length) \
static struct lpc_sysres __CONCAT(__lpc_sysres, base) = {\
(type), \
(base), \
(length) \
}; \
DATA_SET(lpc_sysres_set, __CONCAT(__lpc_sysres, base))
#define SYSRES_IO(base, length) LPC_SYSRES(LPC_SYSRES_IO, base, length)
#define SYSRES_MEM(base, length) LPC_SYSRES(LPC_SYSRES_MEM, base, length)
#define SYSRES_IO(base, length) LPC_SYSRES(LPC_SYSRES_IO, base, length)
#define SYSRES_MEM(base, length) LPC_SYSRES(LPC_SYSRES_MEM, base, length)
int lpc_device_parse(const char *opt);
char *lpc_pirq_name(int pin);
void lpc_pirq_routed(void);
#endif
int lpc_device_parse(const char *opt);
char *lpc_pirq_name(int pin);
void lpc_pirq_routed(void);

View file

@ -26,9 +26,7 @@
* $FreeBSD$
*/
#ifndef _RTC_H_
#define _RTC_H_
#pragma once
void rtc_init(struct vmctx *ctx, int use_localtime);
void rtc_init(int use_localtime);
#endif /* _RTC_H_ */

View file

@ -26,11 +26,6 @@
* $FreeBSD$
*/
#ifndef _SMBIOSTBL_H_
#define _SMBIOSTBL_H_
#pragma once
struct vmctx;
int smbios_build(struct vmctx *ctx);
#endif /* _SMBIOSTBL_H_ */
int smbios_build(void);

View file

@ -0,0 +1,64 @@
/*-
* Copyright (c) 2005 Poul-Henning Kamp
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#define HPET_MEM_WIDTH 0x400 /* Expected memory region size */
/* General registers */
#define HPET_CAPABILITIES 0x0 /* General capabilities and ID */
#define HPET_CAP_VENDOR_ID 0xffff0000
#define HPET_CAP_LEG_RT 0x00008000
#define HPET_CAP_COUNT_SIZE 0x00002000 /* 1 = 64-bit, 0 = 32-bit */
#define HPET_CAP_NUM_TIM 0x00001f00
#define HPET_CAP_REV_ID 0x000000ff
#define HPET_PERIOD 0x4 /* Period (1/hz) of timer */
#define HPET_CONFIG 0x10 /* General configuration register */
#define HPET_CNF_LEG_RT 0x00000002
#define HPET_CNF_ENABLE 0x00000001
#define HPET_ISR 0x20 /* General interrupt status register */
#define HPET_MAIN_COUNTER 0xf0 /* Main counter register */
/* Timer registers */
#define HPET_TIMER_CAP_CNF(x) ((x) * 0x20 + 0x100)
#define HPET_TCAP_INT_ROUTE 0xffffffff00000000
#define HPET_TCAP_FSB_INT_DEL 0x00008000
#define HPET_TCNF_FSB_EN 0x00004000
#define HPET_TCNF_INT_ROUTE 0x00003e00
#define HPET_TCNF_32MODE 0x00000100
#define HPET_TCNF_VAL_SET 0x00000040
#define HPET_TCAP_SIZE 0x00000020 /* 1 = 64-bit, 0 = 32-bit */
#define HPET_TCAP_PER_INT 0x00000010 /* Supports periodic interrupts */
#define HPET_TCNF_TYPE 0x00000008 /* 1 = periodic, 0 = one-shot */
#define HPET_TCNF_INT_ENB 0x00000004
#define HPET_TCNF_INT_TYPE 0x00000002 /* 1 = level triggered, 0 = edge */
#define HPET_TIMER_COMPARATOR(x) ((x) * 0x20 + 0x108)
#define HPET_TIMER_FSB_VAL(x) ((x) * 0x20 + 0x110)
#define HPET_TIMER_FSB_ADDR(x) ((x) * 0x20 + 0x114)
#define HPET_MIN_CYCLES 128 /* Period considered reliable. */
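/*
 * Worked example of the per-timer layout (informational, not part of the
 * original header): each timer occupies a 0x20-byte slot starting at offset
 * 0x100, so for timer 2 the capability/config register sits at
 * 2 * 0x20 + 0x100 = 0x140 and its comparator at 2 * 0x20 + 0x108 = 0x148.
 */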

View file

@ -0,0 +1,509 @@
/*-
* Copyright (c) 1996, by Peter Wemm and Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
/*
* Local && I/O APIC definitions.
*/
/*
* Pentium P54C+ Built-in APIC
* (Advanced Programmable Interrupt Controller)
*
* Base Address of Built-in APIC in memory location
* is 0xfee00000.
*
* Map of APIC Registers:
*
* Offset (hex) Description Read/Write state
* 000 Reserved
* 010 Reserved
* 020 ID Local APIC ID R/W
* 030 VER Local APIC Version R
* 040 Reserved
* 050 Reserved
* 060 Reserved
* 070 Reserved
* 080 Task Priority Register R/W
* 090 Arbitration Priority Register R
* 0A0 Processor Priority Register R
* 0B0 EOI Register W
* 0C0 RRR Remote read R
* 0D0 Logical Destination R/W
* 0E0 Destination Format Register 0..27 R; 28..31 R/W
* 0F0 SVR Spurious Interrupt Vector Reg. 0..3 R; 4..9 R/W
* 100 ISR 000-031 R
* 110 ISR 032-063 R
* 120 ISR 064-095 R
* 130 ISR 096-127 R
* 140 ISR 128-159 R
* 150 ISR 160-191 R
* 160 ISR 192-223 R
* 170 ISR 224-255 R
* 180 TMR 000-031 R
* 190 TMR 032-063 R
* 1A0 TMR 064-095 R
* 1B0 TMR 096-127 R
* 1C0 TMR 128-159 R
* 1D0 TMR 160-191 R
* 1E0 TMR 192-223 R
* 1F0 TMR 224-255 R
* 200 IRR 000-031 R
* 210 IRR 032-063 R
* 220 IRR 064-095 R
* 230 IRR 096-127 R
* 240 IRR 128-159 R
* 250 IRR 160-191 R
* 260 IRR 192-223 R
* 270 IRR 224-255 R
* 280 Error Status Register R
* 290 Reserved
* 2A0 Reserved
* 2B0 Reserved
* 2C0 Reserved
* 2D0 Reserved
* 2E0 Reserved
* 2F0 Local Vector Table (CMCI) R/W
* 300 ICR_LOW Interrupt Command Reg. (0-31) R/W
* 310 ICR_HI Interrupt Command Reg. (32-63) R/W
* 320 Local Vector Table (Timer) R/W
* 330 Local Vector Table (Thermal) R/W (PIV+)
* 340 Local Vector Table (Performance) R/W (P6+)
* 350 LVT1 Local Vector Table (LINT0) R/W
* 360 LVT2 Local Vector Table (LINT1) R/W
* 370 LVT3 Local Vector Table (ERROR) R/W
* 380 Initial Count Reg. for Timer R/W
* 390 Current Count of Timer R
* 3A0 Reserved
* 3B0 Reserved
* 3C0 Reserved
* 3D0 Reserved
* 3E0 Timer Divide Configuration Reg. R/W
* 3F0 Reserved
*/
/******************************************************************************
* global defines, etc.
*/
/******************************************************************************
* LOCAL APIC structure
*/
#define PAD3 int : 32; int : 32; int : 32
#define PAD4 int : 32; int : 32; int : 32; int : 32
struct LAPIC {
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t id; PAD3;
uint32_t version; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t tpr; PAD3;
uint32_t apr; PAD3;
uint32_t ppr; PAD3;
uint32_t eoi; PAD3;
/* reserved */ PAD4;
uint32_t ldr; PAD3;
uint32_t dfr; PAD3;
uint32_t svr; PAD3;
uint32_t isr0; PAD3;
uint32_t isr1; PAD3;
uint32_t isr2; PAD3;
uint32_t isr3; PAD3;
uint32_t isr4; PAD3;
uint32_t isr5; PAD3;
uint32_t isr6; PAD3;
uint32_t isr7; PAD3;
uint32_t tmr0; PAD3;
uint32_t tmr1; PAD3;
uint32_t tmr2; PAD3;
uint32_t tmr3; PAD3;
uint32_t tmr4; PAD3;
uint32_t tmr5; PAD3;
uint32_t tmr6; PAD3;
uint32_t tmr7; PAD3;
uint32_t irr0; PAD3;
uint32_t irr1; PAD3;
uint32_t irr2; PAD3;
uint32_t irr3; PAD3;
uint32_t irr4; PAD3;
uint32_t irr5; PAD3;
uint32_t irr6; PAD3;
uint32_t irr7; PAD3;
uint32_t esr; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t lvt_cmci; PAD3;
uint32_t icr_lo; PAD3;
uint32_t icr_hi; PAD3;
uint32_t lvt_timer; PAD3;
uint32_t lvt_thermal; PAD3;
uint32_t lvt_pcint; PAD3;
uint32_t lvt_lint0; PAD3;
uint32_t lvt_lint1; PAD3;
uint32_t lvt_error; PAD3;
uint32_t icr_timer; PAD3;
uint32_t ccr_timer; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t dcr_timer; PAD3;
/* reserved */ PAD4;
};
typedef struct LAPIC lapic_t;
enum LAPIC_REGISTERS {
LAPIC_ID = 0x2,
LAPIC_VERSION = 0x3,
LAPIC_TPR = 0x8,
LAPIC_APR = 0x9,
LAPIC_PPR = 0xa,
LAPIC_EOI = 0xb,
LAPIC_LDR = 0xd,
LAPIC_DFR = 0xe, /* Not in x2APIC */
LAPIC_SVR = 0xf,
LAPIC_ISR0 = 0x10,
LAPIC_ISR1 = 0x11,
LAPIC_ISR2 = 0x12,
LAPIC_ISR3 = 0x13,
LAPIC_ISR4 = 0x14,
LAPIC_ISR5 = 0x15,
LAPIC_ISR6 = 0x16,
LAPIC_ISR7 = 0x17,
LAPIC_TMR0 = 0x18,
LAPIC_TMR1 = 0x19,
LAPIC_TMR2 = 0x1a,
LAPIC_TMR3 = 0x1b,
LAPIC_TMR4 = 0x1c,
LAPIC_TMR5 = 0x1d,
LAPIC_TMR6 = 0x1e,
LAPIC_TMR7 = 0x1f,
LAPIC_IRR0 = 0x20,
LAPIC_IRR1 = 0x21,
LAPIC_IRR2 = 0x22,
LAPIC_IRR3 = 0x23,
LAPIC_IRR4 = 0x24,
LAPIC_IRR5 = 0x25,
LAPIC_IRR6 = 0x26,
LAPIC_IRR7 = 0x27,
LAPIC_ESR = 0x28,
LAPIC_LVT_CMCI = 0x2f,
LAPIC_ICR_LO = 0x30,
LAPIC_ICR_HI = 0x31, /* Not in x2APIC */
LAPIC_LVT_TIMER = 0x32,
LAPIC_LVT_THERMAL = 0x33,
LAPIC_LVT_PCINT = 0x34,
LAPIC_LVT_LINT0 = 0x35,
LAPIC_LVT_LINT1 = 0x36,
LAPIC_LVT_ERROR = 0x37,
LAPIC_ICR_TIMER = 0x38,
LAPIC_CCR_TIMER = 0x39,
LAPIC_DCR_TIMER = 0x3e,
LAPIC_SELF_IPI = 0x3f, /* Only in x2APIC */
};
/*
* The LAPIC_SELF_IPI register only exists in x2APIC mode. The
* formula below is applicable only to reserve the memory region,
* i.e. for xAPIC mode, where LAPIC_SELF_IPI finely serves as the
* address past end of the region.
*/
#define LAPIC_MEM_REGION (LAPIC_SELF_IPI * 0x10)
#define LAPIC_MEM_MUL 0x10
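/*
 * Worked example (informational): an xAPIC register index maps to its MMIO
 * offset as index * LAPIC_MEM_MUL, e.g. LAPIC_ID (0x2) sits at offset 0x20
 * and LAPIC_ICR_LO (0x30) at 0x300, matching the register map above.
 * LAPIC_MEM_REGION therefore evaluates to 0x3f * 0x10 = 0x3f0.
 */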
/******************************************************************************
* I/O APIC structure
*/
struct IOAPIC {
uint32_t ioregsel; PAD3;
uint32_t iowin; PAD3;
};
typedef struct IOAPIC ioapic_t;
#undef PAD4
#undef PAD3
/******************************************************************************
* various code 'logical' values
*/
/******************************************************************************
* LOCAL APIC defines
*/
/* default physical locations of LOCAL (CPU) APICs */
#define DEFAULT_APIC_BASE 0xfee00000
/* constants relating to APIC ID registers */
#define APIC_ID_MASK 0xff000000
#define APIC_ID_SHIFT 24
#define APIC_ID_CLUSTER 0xf0
#define APIC_ID_CLUSTER_ID 0x0f
#define APIC_MAX_CLUSTER 0xe
#define APIC_MAX_INTRACLUSTER_ID 3
#define APIC_ID_CLUSTER_SHIFT 4
/* fields in VER */
#define APIC_VER_VERSION 0x000000ff
#define APIC_VER_MAXLVT 0x00ff0000
#define MAXLVTSHIFT 16
#define APIC_VER_EOI_SUPPRESSION 0x01000000
/* fields in LDR */
#define APIC_LDR_RESERVED 0x00ffffff
/* fields in DFR */
#define APIC_DFR_RESERVED 0x0fffffff
#define APIC_DFR_MODEL_MASK 0xf0000000
#define APIC_DFR_MODEL_FLAT 0xf0000000
#define APIC_DFR_MODEL_CLUSTER 0x00000000
/* fields in SVR */
#define APIC_SVR_VECTOR 0x000000ff
#define APIC_SVR_VEC_PROG 0x000000f0
#define APIC_SVR_VEC_FIX 0x0000000f
#define APIC_SVR_ENABLE 0x00000100
# define APIC_SVR_SWDIS 0x00000000
# define APIC_SVR_SWEN 0x00000100
#define APIC_SVR_FOCUS 0x00000200
# define APIC_SVR_FEN 0x00000000
# define APIC_SVR_FDIS 0x00000200
#define APIC_SVR_EOI_SUPPRESSION 0x00001000
/* fields in TPR */
#define APIC_TPR_PRIO 0x000000ff
# define APIC_TPR_INT 0x000000f0
# define APIC_TPR_SUB 0x0000000f
/* fields in ESR */
#define APIC_ESR_SEND_CS_ERROR 0x00000001
#define APIC_ESR_RECEIVE_CS_ERROR 0x00000002
#define APIC_ESR_SEND_ACCEPT 0x00000004
#define APIC_ESR_RECEIVE_ACCEPT 0x00000008
#define APIC_ESR_SEND_ILLEGAL_VECTOR 0x00000020
#define APIC_ESR_RECEIVE_ILLEGAL_VECTOR 0x00000040
#define APIC_ESR_ILLEGAL_REGISTER 0x00000080
/* fields in ICR_LOW */
#define APIC_VECTOR_MASK 0x000000ff
#define APIC_DELMODE_MASK 0x00000700
# define APIC_DELMODE_FIXED 0x00000000
# define APIC_DELMODE_LOWPRIO 0x00000100
# define APIC_DELMODE_SMI 0x00000200
# define APIC_DELMODE_RR 0x00000300
# define APIC_DELMODE_NMI 0x00000400
# define APIC_DELMODE_INIT 0x00000500
# define APIC_DELMODE_STARTUP 0x00000600
# define APIC_DELMODE_RESV 0x00000700
#define APIC_DESTMODE_MASK 0x00000800
# define APIC_DESTMODE_PHY 0x00000000
# define APIC_DESTMODE_LOG 0x00000800
#define APIC_DELSTAT_MASK 0x00001000
# define APIC_DELSTAT_IDLE 0x00000000
# define APIC_DELSTAT_PEND 0x00001000
#define APIC_RESV1_MASK 0x00002000
#define APIC_LEVEL_MASK 0x00004000
# define APIC_LEVEL_DEASSERT 0x00000000
# define APIC_LEVEL_ASSERT 0x00004000
#define APIC_TRIGMOD_MASK 0x00008000
# define APIC_TRIGMOD_EDGE 0x00000000
# define APIC_TRIGMOD_LEVEL 0x00008000
#define APIC_RRSTAT_MASK 0x00030000
# define APIC_RRSTAT_INVALID 0x00000000
# define APIC_RRSTAT_INPROG 0x00010000
# define APIC_RRSTAT_VALID 0x00020000
# define APIC_RRSTAT_RESV 0x00030000
#define APIC_DEST_MASK 0x000c0000
# define APIC_DEST_DESTFLD 0x00000000
# define APIC_DEST_SELF 0x00040000
# define APIC_DEST_ALLISELF 0x00080000
# define APIC_DEST_ALLESELF 0x000c0000
#define APIC_RESV2_MASK 0xfff00000
#define APIC_ICRLO_RESV_MASK (APIC_RESV1_MASK | APIC_RESV2_MASK)
/* fields in LVT1/2 */
#define APIC_LVT_VECTOR 0x000000ff
#define APIC_LVT_DM 0x00000700
# define APIC_LVT_DM_FIXED 0x00000000
# define APIC_LVT_DM_SMI 0x00000200
# define APIC_LVT_DM_NMI 0x00000400
# define APIC_LVT_DM_INIT 0x00000500
# define APIC_LVT_DM_EXTINT 0x00000700
#define APIC_LVT_DS 0x00001000
#define APIC_LVT_IIPP 0x00002000
#define APIC_LVT_IIPP_INTALO 0x00002000
#define APIC_LVT_IIPP_INTAHI 0x00000000
#define APIC_LVT_RIRR 0x00004000
#define APIC_LVT_TM 0x00008000
#define APIC_LVT_M 0x00010000
/* fields in LVT Timer */
#define APIC_LVTT_VECTOR 0x000000ff
#define APIC_LVTT_DS 0x00001000
#define APIC_LVTT_M 0x00010000
#define APIC_LVTT_TM 0x00020000
# define APIC_LVTT_TM_ONE_SHOT 0x00000000
# define APIC_LVTT_TM_PERIODIC 0x00020000
/* APIC timer current count */
#define APIC_TIMER_MAX_COUNT 0xffffffff
/* fields in TDCR */
#define APIC_TDCR_2 0x00
#define APIC_TDCR_4 0x01
#define APIC_TDCR_8 0x02
#define APIC_TDCR_16 0x03
#define APIC_TDCR_32 0x08
#define APIC_TDCR_64 0x09
#define APIC_TDCR_128 0x0a
#define APIC_TDCR_1 0x0b
/* LVT table indices */
#define APIC_LVT_LINT0 0
#define APIC_LVT_LINT1 1
#define APIC_LVT_TIMER 2
#define APIC_LVT_ERROR 3
#define APIC_LVT_PMC 4
#define APIC_LVT_THERMAL 5
#define APIC_LVT_CMCI 6
#define APIC_LVT_MAX APIC_LVT_CMCI
/******************************************************************************
* I/O APIC defines
*/
/* default physical locations of an IO APIC */
#define DEFAULT_IO_APIC_BASE 0xfec00000
/* window register offset */
#define IOAPIC_WINDOW 0x10
#define IOAPIC_EOIR 0x40
/* indexes into IO APIC */
#define IOAPIC_ID 0x00
#define IOAPIC_VER 0x01
#define IOAPIC_ARB 0x02
#define IOAPIC_REDTBL 0x10
#define IOAPIC_REDTBL0 IOAPIC_REDTBL
#define IOAPIC_REDTBL1 (IOAPIC_REDTBL+0x02)
#define IOAPIC_REDTBL2 (IOAPIC_REDTBL+0x04)
#define IOAPIC_REDTBL3 (IOAPIC_REDTBL+0x06)
#define IOAPIC_REDTBL4 (IOAPIC_REDTBL+0x08)
#define IOAPIC_REDTBL5 (IOAPIC_REDTBL+0x0a)
#define IOAPIC_REDTBL6 (IOAPIC_REDTBL+0x0c)
#define IOAPIC_REDTBL7 (IOAPIC_REDTBL+0x0e)
#define IOAPIC_REDTBL8 (IOAPIC_REDTBL+0x10)
#define IOAPIC_REDTBL9 (IOAPIC_REDTBL+0x12)
#define IOAPIC_REDTBL10 (IOAPIC_REDTBL+0x14)
#define IOAPIC_REDTBL11 (IOAPIC_REDTBL+0x16)
#define IOAPIC_REDTBL12 (IOAPIC_REDTBL+0x18)
#define IOAPIC_REDTBL13 (IOAPIC_REDTBL+0x1a)
#define IOAPIC_REDTBL14 (IOAPIC_REDTBL+0x1c)
#define IOAPIC_REDTBL15 (IOAPIC_REDTBL+0x1e)
#define IOAPIC_REDTBL16 (IOAPIC_REDTBL+0x20)
#define IOAPIC_REDTBL17 (IOAPIC_REDTBL+0x22)
#define IOAPIC_REDTBL18 (IOAPIC_REDTBL+0x24)
#define IOAPIC_REDTBL19 (IOAPIC_REDTBL+0x26)
#define IOAPIC_REDTBL20 (IOAPIC_REDTBL+0x28)
#define IOAPIC_REDTBL21 (IOAPIC_REDTBL+0x2a)
#define IOAPIC_REDTBL22 (IOAPIC_REDTBL+0x2c)
#define IOAPIC_REDTBL23 (IOAPIC_REDTBL+0x2e)
/* fields in VER */
#define IOART_VER_VERSION 0x000000ff
#define IOART_VER_MAXREDIR 0x00ff0000
#define MAXREDIRSHIFT 16
/*
* fields in the IO APIC's redirection table entries
*/
#define IOART_DEST APIC_ID_MASK /* broadcast addr: all APICs */
#define IOART_RESV 0x00fe0000 /* reserved */
#define IOART_INTMASK 0x00010000 /* R/W: INTerrupt mask */
# define IOART_INTMCLR 0x00000000 /* clear, allow INTs */
# define IOART_INTMSET 0x00010000 /* set, inhibit INTs */
#define IOART_TRGRMOD 0x00008000 /* R/W: trigger mode */
# define IOART_TRGREDG 0x00000000 /* edge */
# define IOART_TRGRLVL 0x00008000 /* level */
#define IOART_REM_IRR 0x00004000 /* RO: remote IRR */
#define IOART_INTPOL 0x00002000 /* R/W: INT input pin polarity */
# define IOART_INTAHI 0x00000000 /* active high */
# define IOART_INTALO 0x00002000 /* active low */
#define IOART_DELIVS 0x00001000 /* RO: delivery status */
#define IOART_DESTMOD 0x00000800 /* R/W: destination mode */
# define IOART_DESTPHY 0x00000000 /* physical */
# define IOART_DESTLOG 0x00000800 /* logical */
#define IOART_DELMOD 0x00000700 /* R/W: delivery mode */
# define IOART_DELFIXED 0x00000000 /* fixed */
# define IOART_DELLOPRI 0x00000100 /* lowest priority */
# define IOART_DELSMI 0x00000200 /* System Management INT */
# define IOART_DELRSV1 0x00000300 /* reserved */
# define IOART_DELNMI 0x00000400 /* NMI signal */
# define IOART_DELINIT 0x00000500 /* INIT signal */
# define IOART_DELRSV2 0x00000600 /* reserved */
# define IOART_DELEXINT 0x00000700 /* External INTerrupt */
#define IOART_INTVEC 0x000000ff /* R/W: INTerrupt vector field */

643
include/xhyve/support/ata.h Normal file
View file

@ -0,0 +1,643 @@
/*-
* Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <xhyve/support/misc.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wpacked"
/* ATA/ATAPI device parameters */
struct ata_params {
/*000*/ u_int16_t config; /* configuration info */
#define ATA_PROTO_MASK 0x8003
#define ATA_PROTO_ATAPI 0x8000
#define ATA_PROTO_ATAPI_12 0x8000
#define ATA_PROTO_ATAPI_16 0x8001
#define ATA_PROTO_CFA 0x848a
#define ATA_ATAPI_TYPE_MASK 0x1f00
#define ATA_ATAPI_TYPE_DIRECT 0x0000 /* disk/floppy */
#define ATA_ATAPI_TYPE_TAPE 0x0100 /* streaming tape */
#define ATA_ATAPI_TYPE_CDROM 0x0500 /* CD-ROM device */
#define ATA_ATAPI_TYPE_OPTICAL 0x0700 /* optical disk */
#define ATA_DRQ_MASK 0x0060
#define ATA_DRQ_SLOW 0x0000 /* cpu 3 ms delay */
#define ATA_DRQ_INTR 0x0020 /* interrupt 10 ms delay */
#define ATA_DRQ_FAST 0x0040 /* accel 50 us delay */
#define ATA_RESP_INCOMPLETE 0x0004
/*001*/ u_int16_t cylinders; /* # of cylinders */
/*002*/ u_int16_t specconf; /* specific configuration */
/*003*/ u_int16_t heads; /* # heads */
u_int16_t obsolete4;
u_int16_t obsolete5;
/*006*/ u_int16_t sectors; /* # sectors/track */
/*007*/ u_int16_t vendor7[3];
/*010*/ u_int8_t serial[20]; /* serial number */
/*020*/ u_int16_t retired20;
u_int16_t retired21;
u_int16_t obsolete22;
/*023*/ u_int8_t revision[8]; /* firmware revision */
/*027*/ u_int8_t model[40]; /* model name */
/*047*/ u_int16_t sectors_intr; /* sectors per interrupt */
/*048*/ u_int16_t usedmovsd; /* double word read/write? */
/*049*/ u_int16_t capabilities1;
#define ATA_SUPPORT_DMA 0x0100
#define ATA_SUPPORT_LBA 0x0200
#define ATA_SUPPORT_IORDY 0x0400
#define ATA_SUPPORT_IORDYDIS 0x0800
#define ATA_SUPPORT_OVERLAP 0x4000
/*050*/ u_int16_t capabilities2;
/*051*/ u_int16_t retired_piomode; /* PIO modes 0-2 */
#define ATA_RETIRED_PIO_MASK 0x0300
/*052*/ u_int16_t retired_dmamode; /* DMA modes */
#define ATA_RETIRED_DMA_MASK 0x0003
/*053*/ u_int16_t atavalid; /* fields valid */
#define ATA_FLAG_54_58 0x0001 /* words 54-58 valid */
#define ATA_FLAG_64_70 0x0002 /* words 64-70 valid */
#define ATA_FLAG_88 0x0004 /* word 88 valid */
/*054*/ u_int16_t current_cylinders;
/*055*/ u_int16_t current_heads;
/*056*/ u_int16_t current_sectors;
/*057*/ u_int16_t current_size_1;
/*058*/ u_int16_t current_size_2;
/*059*/ u_int16_t multi;
#define ATA_MULTI_VALID 0x0100
/*060*/ u_int16_t lba_size_1;
u_int16_t lba_size_2;
u_int16_t obsolete62;
/*063*/ u_int16_t mwdmamodes; /* multiword DMA modes */
/*064*/ u_int16_t apiomodes; /* advanced PIO modes */
/*065*/ u_int16_t mwdmamin; /* min. M/W DMA time/word ns */
/*066*/ u_int16_t mwdmarec; /* rec. M/W DMA time ns */
/*067*/ u_int16_t pioblind; /* min. PIO cycle w/o flow */
/*068*/ u_int16_t pioiordy; /* min. PIO cycle IORDY flow */
/*069*/ u_int16_t support3;
#define ATA_SUPPORT_RZAT 0x0020
#define ATA_SUPPORT_DRAT 0x4000
u_int16_t reserved70;
/*071*/ u_int16_t rlsovlap; /* rel time (us) for overlap */
/*072*/ u_int16_t rlsservice; /* rel time (us) for service */
u_int16_t reserved73;
u_int16_t reserved74;
/*075*/ u_int16_t queue;
#define ATA_QUEUE_LEN(x) ((x) & 0x001f)
/*76*/ u_int16_t satacapabilities;
#define ATA_SATA_GEN1 0x0002
#define ATA_SATA_GEN2 0x0004
#define ATA_SATA_GEN3 0x0008
#define ATA_SUPPORT_NCQ 0x0100
#define ATA_SUPPORT_IFPWRMNGTRCV 0x0200
#define ATA_SUPPORT_PHYEVENTCNT 0x0400
#define ATA_SUPPORT_NCQ_UNLOAD 0x0800
#define ATA_SUPPORT_NCQ_PRIO 0x1000
#define ATA_SUPPORT_HAPST 0x2000
#define ATA_SUPPORT_DAPST 0x4000
#define ATA_SUPPORT_READLOGDMAEXT 0x8000
/*77*/ u_int16_t satacapabilities2;
#define ATA_SATA_CURR_GEN_MASK 0x0006
#define ATA_SUPPORT_NCQ_STREAM 0x0010
#define ATA_SUPPORT_NCQ_QMANAGEMENT 0x0020
#define ATA_SUPPORT_RCVSND_FPDMA_QUEUED 0x0040
/*78*/ u_int16_t satasupport;
#define ATA_SUPPORT_NONZERO 0x0002
#define ATA_SUPPORT_AUTOACTIVATE 0x0004
#define ATA_SUPPORT_IFPWRMNGT 0x0008
#define ATA_SUPPORT_INORDERDATA 0x0010
#define ATA_SUPPORT_ASYNCNOTIF 0x0020
#define ATA_SUPPORT_SOFTSETPRESERVE 0x0040
/*79*/ u_int16_t sataenabled;
#define ATA_ENABLED_DAPST 0x0080
/*080*/ u_int16_t version_major;
/*081*/ u_int16_t version_minor;
struct {
/*082/085*/ u_int16_t command1;
#define ATA_SUPPORT_SMART 0x0001
#define ATA_SUPPORT_SECURITY 0x0002
#define ATA_SUPPORT_REMOVABLE 0x0004
#define ATA_SUPPORT_POWERMGT 0x0008
#define ATA_SUPPORT_PACKET 0x0010
#define ATA_SUPPORT_WRITECACHE 0x0020
#define ATA_SUPPORT_LOOKAHEAD 0x0040
#define ATA_SUPPORT_RELEASEIRQ 0x0080
#define ATA_SUPPORT_SERVICEIRQ 0x0100
#define ATA_SUPPORT_RESET 0x0200
#define ATA_SUPPORT_PROTECTED 0x0400
#define ATA_SUPPORT_WRITEBUFFER 0x1000
#define ATA_SUPPORT_READBUFFER 0x2000
#define ATA_SUPPORT_NOP 0x4000
/*083/086*/ u_int16_t command2;
#define ATA_SUPPORT_MICROCODE 0x0001
#define ATA_SUPPORT_QUEUED 0x0002
#define ATA_SUPPORT_CFA 0x0004
#define ATA_SUPPORT_APM 0x0008
#define ATA_SUPPORT_NOTIFY 0x0010
#define ATA_SUPPORT_STANDBY 0x0020
#define ATA_SUPPORT_SPINUP 0x0040
#define ATA_SUPPORT_MAXSECURITY 0x0100
#define ATA_SUPPORT_AUTOACOUSTIC 0x0200
#define ATA_SUPPORT_ADDRESS48 0x0400
#define ATA_SUPPORT_OVERLAY 0x0800
#define ATA_SUPPORT_FLUSHCACHE 0x1000
#define ATA_SUPPORT_FLUSHCACHE48 0x2000
/*084/087*/ u_int16_t extension;
#define ATA_SUPPORT_SMARTLOG 0x0001
#define ATA_SUPPORT_SMARTTEST 0x0002
#define ATA_SUPPORT_MEDIASN 0x0004
#define ATA_SUPPORT_MEDIAPASS 0x0008
#define ATA_SUPPORT_STREAMING 0x0010
#define ATA_SUPPORT_GENLOG 0x0020
#define ATA_SUPPORT_WRITEDMAFUAEXT 0x0040
#define ATA_SUPPORT_WRITEDMAQFUAEXT 0x0080
#define ATA_SUPPORT_64BITWWN 0x0100
#define ATA_SUPPORT_UNLOAD 0x2000
} __packed support, enabled;
/*088*/ u_int16_t udmamodes; /* UltraDMA modes */
/*089*/ u_int16_t erase_time; /* time req'd in 2min units */
/*090*/ u_int16_t enhanced_erase_time; /* time req'd in 2min units */
/*091*/ u_int16_t apm_value;
/*092*/ u_int16_t master_passwd_revision; /* password revision code */
/*093*/ u_int16_t hwres;
#define ATA_CABLE_ID 0x2000
/*094*/ u_int16_t acoustic;
#define ATA_ACOUSTIC_CURRENT(x) ((x) & 0x00ff)
#define ATA_ACOUSTIC_VENDOR(x) (((x) & 0xff00) >> 8)
/*095*/ u_int16_t stream_min_req_size;
/*096*/ u_int16_t stream_transfer_time;
/*097*/ u_int16_t stream_access_latency;
/*098*/ u_int32_t stream_granularity;
/*100*/ u_int16_t lba_size48_1;
u_int16_t lba_size48_2;
u_int16_t lba_size48_3;
u_int16_t lba_size48_4;
u_int16_t reserved104;
/*105*/ u_int16_t max_dsm_blocks;
/*106*/ u_int16_t pss;
#define ATA_PSS_LSPPS 0x000F
#define ATA_PSS_LSSABOVE512 0x1000
#define ATA_PSS_MULTLS 0x2000
#define ATA_PSS_VALID_MASK 0xC000
#define ATA_PSS_VALID_VALUE 0x4000
/*107*/ u_int16_t isd;
/*108*/ u_int16_t wwn[4];
u_int16_t reserved112[5];
/*117*/ u_int16_t lss_1;
/*118*/ u_int16_t lss_2;
/*119*/ u_int16_t support2;
#define ATA_SUPPORT_WRITEREADVERIFY 0x0002
#define ATA_SUPPORT_WRITEUNCORREXT 0x0004
#define ATA_SUPPORT_RWLOGDMAEXT 0x0008
#define ATA_SUPPORT_MICROCODE3 0x0010
#define ATA_SUPPORT_FREEFALL 0x0020
/*120*/ u_int16_t enabled2;
u_int16_t reserved121[6];
/*127*/ u_int16_t removable_status;
/*128*/ u_int16_t security_status;
#define ATA_SECURITY_LEVEL 0x0100 /* 0: high, 1: maximum */
#define ATA_SECURITY_ENH_SUPP 0x0020 /* enhanced erase supported */
#define ATA_SECURITY_COUNT_EXP 0x0010 /* count expired */
#define ATA_SECURITY_FROZEN 0x0008 /* security config is frozen */
#define ATA_SECURITY_LOCKED 0x0004 /* drive is locked */
#define ATA_SECURITY_ENABLED 0x0002 /* ATA Security is enabled */
#define ATA_SECURITY_SUPPORTED 0x0001 /* ATA Security is supported */
u_int16_t reserved129[31];
/*160*/ u_int16_t cfa_powermode1;
u_int16_t reserved161;
/*162*/ u_int16_t cfa_kms_support;
/*163*/ u_int16_t cfa_trueide_modes;
/*164*/ u_int16_t cfa_memory_modes;
u_int16_t reserved165[4];
/*169*/ u_int16_t support_dsm;
#define ATA_SUPPORT_DSM_TRIM 0x0001
u_int16_t reserved170[6];
/*176*/ u_int8_t media_serial[60];
/*206*/ u_int16_t sct;
u_int16_t reserved206[2];
/*209*/ u_int16_t lsalign;
/*210*/ u_int16_t wrv_sectors_m3_1;
u_int16_t wrv_sectors_m3_2;
/*212*/ u_int16_t wrv_sectors_m2_1;
u_int16_t wrv_sectors_m2_2;
/*214*/ u_int16_t nv_cache_caps;
/*215*/ u_int16_t nv_cache_size_1;
u_int16_t nv_cache_size_2;
/*217*/ u_int16_t media_rotation_rate;
#define ATA_RATE_NOT_REPORTED 0x0000
#define ATA_RATE_NON_ROTATING 0x0001
u_int16_t reserved218;
/*219*/ u_int16_t nv_cache_opt;
/*220*/ u_int16_t wrv_mode;
u_int16_t reserved221;
/*222*/ u_int16_t transport_major;
/*223*/ u_int16_t transport_minor;
u_int16_t reserved224[31];
/*255*/ u_int16_t integrity;
} __packed;
/* ATA Dataset Management */
#define ATA_DSM_BLK_SIZE 512
#define ATA_DSM_BLK_RANGES 64
#define ATA_DSM_RANGE_SIZE 8
#define ATA_DSM_RANGE_MAX 65535
/*
* ATA Device Register
*
* bit 7 Obsolete (was 1 in early ATA specs)
* bit 6 Sets LBA/CHS mode. 1=LBA, 0=CHS
* bit 5 Obsolete (was 1 in early ATA specs)
* bit 4 1 = Slave Drive, 0 = Master Drive
* bit 3-0 In LBA mode, 27-24 of address. In CHS mode, head number
*/
#define ATA_DEV_MASTER 0x00
#define ATA_DEV_SLAVE 0x10
#define ATA_DEV_LBA 0x40
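/*
 * Usage sketch (illustrative, not in the original header): composing the
 * device register for a 28-bit LBA command per the bit layout above. The
 * helper name and its parameters are made up for the example.
 */
static __inline u_int8_t
example_ata_device_reg(u_int64_t lba, int slave)
{
	return ((u_int8_t)(ATA_DEV_LBA |
	    (slave ? ATA_DEV_SLAVE : ATA_DEV_MASTER) |
	    ((lba >> 24) & 0x0f)));
}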
/* ATA limits */
#define ATA_MAX_28BIT_LBA 268435455UL
/* ATA Status Register */
#define ATA_STATUS_ERROR 0x01
#define ATA_STATUS_DEVICE_FAULT 0x20
/* ATA Error Register */
#define ATA_ERROR_ABORT 0x04
#define ATA_ERROR_ID_NOT_FOUND 0x10
/* ATA HPA Features */
#define ATA_HPA_FEAT_MAX_ADDR 0x00
#define ATA_HPA_FEAT_SET_PWD 0x01
#define ATA_HPA_FEAT_LOCK 0x02
#define ATA_HPA_FEAT_UNLOCK 0x03
#define ATA_HPA_FEAT_FREEZE 0x04
/* ATA transfer modes */
#define ATA_MODE_MASK 0x0f
#define ATA_DMA_MASK 0xf0
#define ATA_PIO 0x00
#define ATA_PIO0 0x08
#define ATA_PIO1 0x09
#define ATA_PIO2 0x0a
#define ATA_PIO3 0x0b
#define ATA_PIO4 0x0c
#define ATA_PIO_MAX 0x0f
#define ATA_DMA 0x10
#define ATA_WDMA0 0x20
#define ATA_WDMA1 0x21
#define ATA_WDMA2 0x22
#define ATA_UDMA0 0x40
#define ATA_UDMA1 0x41
#define ATA_UDMA2 0x42
#define ATA_UDMA3 0x43
#define ATA_UDMA4 0x44
#define ATA_UDMA5 0x45
#define ATA_UDMA6 0x46
#define ATA_SA150 0x47
#define ATA_SA300 0x48
#define ATA_DMA_MAX 0x4f
/* ATA commands */
#define ATA_NOP 0x00 /* NOP */
#define ATA_NF_FLUSHQUEUE 0x00 /* flush queued cmd's */
#define ATA_NF_AUTOPOLL 0x01 /* start autopoll function */
#define ATA_DATA_SET_MANAGEMENT 0x06
#define ATA_DSM_TRIM 0x01
#define ATA_DEVICE_RESET 0x08 /* reset device */
#define ATA_READ 0x20 /* read */
#define ATA_READ48 0x24 /* read 48bit LBA */
#define ATA_READ_DMA48 0x25 /* read DMA 48bit LBA */
#define ATA_READ_DMA_QUEUED48 0x26 /* read DMA QUEUED 48bit LBA */
#define ATA_READ_NATIVE_MAX_ADDRESS48 0x27 /* read native max addr 48bit */
#define ATA_READ_MUL48 0x29 /* read multi 48bit LBA */
#define ATA_READ_STREAM_DMA48 0x2a /* read DMA stream 48bit LBA */
#define ATA_READ_LOG_EXT 0x2f /* read log ext - PIO Data-In */
#define ATA_READ_STREAM48 0x2b /* read stream 48bit LBA */
#define ATA_WRITE 0x30 /* write */
#define ATA_WRITE48 0x34 /* write 48bit LBA */
#define ATA_WRITE_DMA48 0x35 /* write DMA 48bit LBA */
#define ATA_WRITE_DMA_QUEUED48 0x36 /* write DMA QUEUED 48bit LBA*/
#define ATA_SET_MAX_ADDRESS48 0x37 /* set max address 48bit */
#define ATA_WRITE_MUL48 0x39 /* write multi 48bit LBA */
#define ATA_WRITE_STREAM_DMA48 0x3a
#define ATA_WRITE_STREAM48 0x3b
#define ATA_WRITE_DMA_FUA48 0x3d
#define ATA_WRITE_DMA_QUEUED_FUA48 0x3e
#define ATA_WRITE_LOG_EXT 0x3f
#define ATA_READ_VERIFY 0x40
#define ATA_READ_VERIFY48 0x42
#define ATA_READ_LOG_DMA_EXT 0x47 /* read log DMA ext - PIO Data-In */
#define ATA_READ_FPDMA_QUEUED 0x60 /* read DMA NCQ */
#define ATA_WRITE_FPDMA_QUEUED 0x61 /* write DMA NCQ */
#define ATA_NCQ_NON_DATA 0x63 /* NCQ non-data command */
#define ATA_SEND_FPDMA_QUEUED 0x64 /* send DMA NCQ */
#define ATA_SFPDMA_DSM 0x00 /* Data set management */
#define ATA_SFPDMA_DSM_TRIM 0x01 /* Set trim bit in auxiliary */
#define ATA_SFPDMA_HYBRID_EVICT 0x01 /* Hybrid Evict */
#define ATA_SFPDMA_WLDMA 0x02 /* Write Log DMA EXT */
#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */
#define ATA_SEP_ATTN 0x67 /* SEP request */
#define ATA_SEEK 0x70 /* seek */
#define ATA_PACKET_CMD 0xa0 /* packet command */
#define ATA_ATAPI_IDENTIFY 0xa1 /* get ATAPI params */
#define ATA_SERVICE 0xa2 /* service command */
#define ATA_SMART_CMD 0xb0 /* SMART command */
#define ATA_CFA_ERASE 0xc0 /* CFA erase */
#define ATA_READ_MUL 0xc4 /* read multi */
#define ATA_WRITE_MUL 0xc5 /* write multi */
#define ATA_SET_MULTI 0xc6 /* set multi size */
#define ATA_READ_DMA_QUEUED 0xc7 /* read DMA QUEUED */
#define ATA_READ_DMA 0xc8 /* read DMA */
#define ATA_WRITE_DMA 0xca /* write DMA */
#define ATA_WRITE_DMA_QUEUED 0xcc /* write DMA QUEUED */
#define ATA_WRITE_MUL_FUA48 0xce
#define ATA_STANDBY_IMMEDIATE 0xe0 /* standby immediate */
#define ATA_IDLE_IMMEDIATE 0xe1 /* idle immediate */
#define ATA_STANDBY_CMD 0xe2 /* standby */
#define ATA_IDLE_CMD 0xe3 /* idle */
#define ATA_READ_BUFFER 0xe4 /* read buffer */
#define ATA_READ_PM 0xe4 /* read portmultiplier */
#define ATA_SLEEP 0xe6 /* sleep */
#define ATA_FLUSHCACHE 0xe7 /* flush cache to disk */
#define ATA_WRITE_PM 0xe8 /* write portmultiplier */
#define ATA_FLUSHCACHE48 0xea /* flush cache to disk */
#define ATA_ATA_IDENTIFY 0xec /* get ATA params */
#define ATA_SETFEATURES 0xef /* features command */
#define ATA_SF_SETXFER 0x03 /* set transfer mode */
#define ATA_SF_ENAB_WCACHE 0x02 /* enable write cache */
#define ATA_SF_DIS_WCACHE 0x82 /* disable write cache */
#define ATA_SF_ENAB_PUIS 0x06 /* enable PUIS */
#define ATA_SF_DIS_PUIS 0x86 /* disable PUIS */
#define ATA_SF_PUIS_SPINUP 0x07 /* PUIS spin-up */
#define ATA_SF_ENAB_RCACHE 0xaa /* enable readahead cache */
#define ATA_SF_DIS_RCACHE 0x55 /* disable readahead cache */
#define ATA_SF_ENAB_RELIRQ 0x5d /* enable release interrupt */
#define ATA_SF_DIS_RELIRQ 0xdd /* disable release interrupt */
#define ATA_SF_ENAB_SRVIRQ 0x5e /* enable service interrupt */
#define ATA_SF_DIS_SRVIRQ 0xde /* disable service interrupt */
#define ATA_SECURITY_SET_PASSWORD 0xf1 /* set drive password */
#define ATA_SECURITY_UNLOCK 0xf2 /* unlock drive using passwd */
#define ATA_SECURITY_ERASE_PREPARE 0xf3 /* prepare to erase drive */
#define ATA_SECURITY_ERASE_UNIT 0xf4 /* erase all blocks on drive */
#define ATA_SECURITY_FREEZE_LOCK 0xf5 /* freeze security config */
#define ATA_SECURITY_DISABLE_PASSWORD 0xf6 /* disable drive password */
#define ATA_READ_NATIVE_MAX_ADDRESS 0xf8 /* read native max address */
#define ATA_SET_MAX_ADDRESS 0xf9 /* set max address */
/* ATAPI commands */
#define ATAPI_TEST_UNIT_READY 0x00 /* check if device is ready */
#define ATAPI_REZERO 0x01 /* rewind */
#define ATAPI_REQUEST_SENSE 0x03 /* get sense data */
#define ATAPI_FORMAT 0x04 /* format unit */
#define ATAPI_READ 0x08 /* read data */
#define ATAPI_WRITE 0x0a /* write data */
#define ATAPI_WEOF 0x10 /* write filemark */
#define ATAPI_WF_WRITE 0x01
#define ATAPI_SPACE 0x11 /* space command */
#define ATAPI_SP_FM 0x01
#define ATAPI_SP_EOD 0x03
#define ATAPI_INQUIRY 0x12 /* get inquiry data */
#define ATAPI_MODE_SELECT 0x15 /* mode select */
#define ATAPI_ERASE 0x19 /* erase */
#define ATAPI_MODE_SENSE 0x1a /* mode sense */
#define ATAPI_START_STOP 0x1b /* start/stop unit */
#define ATAPI_SS_LOAD 0x01
#define ATAPI_SS_RETENSION 0x02
#define ATAPI_SS_EJECT 0x04
#define ATAPI_PREVENT_ALLOW 0x1e /* media removal */
#define ATAPI_READ_FORMAT_CAPACITIES 0x23 /* get format capacities */
#define ATAPI_READ_CAPACITY 0x25 /* get volume capacity */
#define ATAPI_READ_BIG 0x28 /* read data */
#define ATAPI_WRITE_BIG 0x2a /* write data */
#define ATAPI_LOCATE 0x2b /* locate to position */
#define ATAPI_READ_POSITION 0x34 /* read position */
#define ATAPI_SYNCHRONIZE_CACHE 0x35 /* flush buf, close channel */
#define ATAPI_WRITE_BUFFER 0x3b /* write device buffer */
#define ATAPI_READ_BUFFER 0x3c /* read device buffer */
#define ATAPI_READ_SUBCHANNEL 0x42 /* get subchannel info */
#define ATAPI_READ_TOC 0x43 /* get table of contents */
#define ATAPI_PLAY_10 0x45 /* play by lba */
#define ATAPI_PLAY_MSF 0x47 /* play by MSF address */
#define ATAPI_PLAY_TRACK 0x48 /* play by track number */
#define ATAPI_PAUSE 0x4b /* pause audio operation */
#define ATAPI_READ_DISK_INFO 0x51 /* get disk info structure */
#define ATAPI_READ_TRACK_INFO 0x52 /* get track info structure */
#define ATAPI_RESERVE_TRACK 0x53 /* reserve track */
#define ATAPI_SEND_OPC_INFO 0x54 /* send OPC structure */
#define ATAPI_MODE_SELECT_BIG 0x55 /* set device parameters */
#define ATAPI_REPAIR_TRACK 0x58 /* repair track */
#define ATAPI_READ_MASTER_CUE 0x59 /* read master CUE info */
#define ATAPI_MODE_SENSE_BIG 0x5a /* get device parameters */
#define ATAPI_CLOSE_TRACK 0x5b /* close track/session */
#define ATAPI_READ_BUFFER_CAPACITY 0x5c /* get buffer capacity */
#define ATAPI_SEND_CUE_SHEET 0x5d /* send CUE sheet */
#define ATAPI_SERVICE_ACTION_IN 0x96 /* get service data */
#define ATAPI_BLANK 0xa1 /* blank the media */
#define ATAPI_SEND_KEY 0xa3 /* send DVD key structure */
#define ATAPI_REPORT_KEY 0xa4 /* get DVD key structure */
#define ATAPI_PLAY_12 0xa5 /* play by lba */
#define ATAPI_LOAD_UNLOAD 0xa6 /* changer control command */
#define ATAPI_READ_STRUCTURE 0xad /* get DVD structure */
#define ATAPI_PLAY_CD 0xb4 /* universal play command */
#define ATAPI_SET_SPEED 0xbb /* set drive speed */
#define ATAPI_MECH_STATUS 0xbd /* get changer status */
#define ATAPI_READ_CD 0xbe /* read data */
#define ATAPI_POLL_DSC 0xff /* poll DSC status bit */
struct ata_ioc_devices {
int channel;
char name[2][32];
struct ata_params params[2];
};
/* per-channel ATA ioctl calls */
#define IOCATAGMAXCHANNEL _IOR('a', 1, int)
#define IOCATAREINIT _IOW('a', 2, int)
#define IOCATAATTACH _IOW('a', 3, int)
#define IOCATADETACH _IOW('a', 4, int)
#define IOCATADEVICES _IOWR('a', 5, struct ata_ioc_devices)
/* ATAPI request sense structure */
struct atapi_sense {
u_int8_t error; /* current or deferred errors */
#define ATA_SENSE_VALID 0x80
u_int8_t segment; /* segment number */
u_int8_t key; /* sense key */
#define ATA_SENSE_KEY_MASK 0x0f /* sense key mask */
#define ATA_SENSE_NO_SENSE 0x00 /* no specific sense key info */
#define ATA_SENSE_RECOVERED_ERROR 0x01 /* command OK, data recovered */
#define ATA_SENSE_NOT_READY 0x02 /* no access to drive */
#define ATA_SENSE_MEDIUM_ERROR 0x03 /* non-recovered data error */
#define ATA_SENSE_HARDWARE_ERROR 0x04 /* non-recoverable HW failure */
#define ATA_SENSE_ILLEGAL_REQUEST 0x05 /* invalid command param(s) */
#define ATA_SENSE_UNIT_ATTENTION 0x06 /* media changed */
#define ATA_SENSE_DATA_PROTECT 0x07 /* write protect */
#define ATA_SENSE_BLANK_CHECK 0x08 /* blank check */
#define ATA_SENSE_VENDOR_SPECIFIC 0x09 /* vendor specific skey */
#define ATA_SENSE_COPY_ABORTED 0x0a /* copy aborted */
#define ATA_SENSE_ABORTED_COMMAND 0x0b /* command aborted, try again */
#define ATA_SENSE_EQUAL 0x0c /* equal */
#define ATA_SENSE_VOLUME_OVERFLOW 0x0d /* volume overflow */
#define ATA_SENSE_MISCOMPARE 0x0e /* data don't match the medium */
#define ATA_SENSE_RESERVED 0x0f
#define ATA_SENSE_ILI 0x20
#define ATA_SENSE_EOM 0x40
#define ATA_SENSE_FILEMARK 0x80
u_int32_t cmd_info; /* cmd information */
u_int8_t sense_length; /* additional sense len (n-7) */
u_int32_t cmd_specific_info; /* additional cmd spec info */
u_int8_t asc; /* additional sense code */
u_int8_t ascq; /* additional sense code qual */
u_int8_t replaceable_unit_code; /* replaceable unit code */
u_int8_t specific; /* sense key specific */
#define ATA_SENSE_SPEC_VALID 0x80
#define ATA_SENSE_SPEC_MASK 0x7f
u_int8_t specific1; /* sense key specific */
u_int8_t specific2; /* sense key specific */
} __packed;
struct ata_ioc_request {
union {
struct {
u_int8_t command;
u_int8_t feature;
u_int64_t lba;
u_int16_t count;
} ata;
struct {
char ccb[16];
struct atapi_sense sense;
} atapi;
} u;
caddr_t data;
int count;
int flags;
#define ATA_CMD_CONTROL 0x01
#define ATA_CMD_READ 0x02
#define ATA_CMD_WRITE 0x04
#define ATA_CMD_ATAPI 0x08
int timeout;
int error;
};
struct ata_security_password {
u_int16_t ctrl;
#define ATA_SECURITY_PASSWORD_USER 0x0000
#define ATA_SECURITY_PASSWORD_MASTER 0x0001
#define ATA_SECURITY_ERASE_NORMAL 0x0000
#define ATA_SECURITY_ERASE_ENHANCED 0x0002
#define ATA_SECURITY_LEVEL_HIGH 0x0000
#define ATA_SECURITY_LEVEL_MAXIMUM 0x0100
u_int8_t password[32];
u_int16_t revision;
u_int16_t reserved[238];
};
/* per-device ATA ioctl calls */
#define IOCATAREQUEST _IOWR('a', 100, struct ata_ioc_request)
#define IOCATAGPARM _IOR('a', 101, struct ata_params)
#define IOCATAGMODE _IOR('a', 102, int)
#define IOCATASMODE _IOW('a', 103, int)
#define IOCATAGSPINDOWN _IOR('a', 104, int)
#define IOCATASSPINDOWN _IOW('a', 105, int)
struct ata_ioc_raid_config {
int lun;
int type;
#define AR_JBOD 0x0001
#define AR_SPAN 0x0002
#define AR_RAID0 0x0004
#define AR_RAID1 0x0008
#define AR_RAID01 0x0010
#define AR_RAID3 0x0020
#define AR_RAID4 0x0040
#define AR_RAID5 0x0080
int interleave;
int status;
#define AR_READY 1
#define AR_DEGRADED 2
#define AR_REBUILDING 4
int progress;
int total_disks;
int disks[16];
};
struct ata_ioc_raid_status {
int lun;
int type;
int interleave;
int status;
int progress;
int total_disks;
struct {
int state;
#define AR_DISK_ONLINE 0x01
#define AR_DISK_PRESENT 0x02
#define AR_DISK_SPARE 0x04
int lun;
} disks[16];
};
/* ATA RAID ioctl calls */
#define IOCATARAIDCREATE _IOWR('a', 200, struct ata_ioc_raid_config)
#define IOCATARAIDDELETE _IOW('a', 201, int)
#define IOCATARAIDSTATUS _IOWR('a', 202, struct ata_ioc_raid_status)
#define IOCATARAIDADDSPARE _IOW('a', 203, struct ata_ioc_raid_config)
#define IOCATARAIDREBUILD _IOW('a', 204, int)
#pragma clang diagnostic pop

View file

@ -0,0 +1,443 @@
/*-
* Copyright (c) 1998 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <xhyve/support/misc.h>
#define __compiler_membar() __asm __volatile(" " : : : "memory")
#define mb() __asm __volatile("mfence;" : : : "memory")
#define wmb() __asm __volatile("sfence;" : : : "memory")
#define rmb() __asm __volatile("lfence;" : : : "memory")
/*
* Various simple operations on memory, each of which is atomic in the
* presence of interrupts and multiple processors.
*
* atomic_set_char(P, V) (*(u_char *)(P) |= (V))
* atomic_clear_char(P, V) (*(u_char *)(P) &= ~(V))
* atomic_add_char(P, V) (*(u_char *)(P) += (V))
* atomic_subtract_char(P, V) (*(u_char *)(P) -= (V))
*
* atomic_set_short(P, V) (*(u_short *)(P) |= (V))
* atomic_clear_short(P, V) (*(u_short *)(P) &= ~(V))
* atomic_add_short(P, V) (*(u_short *)(P) += (V))
* atomic_subtract_short(P, V) (*(u_short *)(P) -= (V))
*
* atomic_set_int(P, V) (*(u_int *)(P) |= (V))
* atomic_clear_int(P, V) (*(u_int *)(P) &= ~(V))
* atomic_add_int(P, V) (*(u_int *)(P) += (V))
* atomic_subtract_int(P, V) (*(u_int *)(P) -= (V))
* atomic_swap_int(P, V) (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
* atomic_readandclear_int(P) (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
*
* atomic_set_long(P, V) (*(u_long *)(P) |= (V))
* atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V))
* atomic_add_long(P, V) (*(u_long *)(P) += (V))
* atomic_subtract_long(P, V) (*(u_long *)(P) -= (V))
* atomic_swap_long(P, V) (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
* atomic_readandclear_long(P) (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
*/
#define MPLOCKED "lock ; "
/*
* The assembly is volatilized to avoid code chunk removal by the compiler.
* GCC aggressively reorders operations and memory clobbering is necessary
* in order to avoid that for memory barriers.
*/
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
__asm __volatile(MPLOCKED OP \
: "+m" (*p) \
: CONS (V) \
: "cc"); \
} \
\
static __inline void \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
__asm __volatile(MPLOCKED OP \
: "+m" (*p) \
: CONS (V) \
: "memory", "cc"); \
} \
struct __hack
/*
* Atomic compare and set, used by the mutex functions
*
* if (*dst == expect) *dst = src (all 32 bit words)
*
* Returns 0 on failure, non-zero on success
*/
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
u_char res;
__asm __volatile(
" " MPLOCKED " "
" cmpxchgl %3,%1 ; "
" sete %0 ; "
"# atomic_cmpset_int"
: "=q" (res), /* 0 */
"+m" (*dst), /* 1 */
"+a" (expect) /* 2 */
: "r" (src) /* 3 */
: "memory", "cc");
return (res);
}
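/*
 * Usage sketch (illustrative, not part of the original header): the classic
 * compare-and-set loop, here as a minimal spin-lock acquire. The function
 * name is made up; 0 means free, 1 means held.
 */
static __inline void
example_spin_acquire(volatile u_int *lock)
{
	while (atomic_cmpset_int(lock, 0, 1) == 0)
		; /* retry until the 0 -> 1 transition succeeds */
}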
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
u_char res;
__asm __volatile(
" " MPLOCKED " "
" cmpxchgq %3,%1 ; "
" sete %0 ; "
"# atomic_cmpset_long"
: "=q" (res), /* 0 */
"+m" (*dst), /* 1 */
"+a" (expect) /* 2 */
: "r" (src) /* 3 */
: "memory", "cc");
return (res);
}
/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
*/
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{
__asm __volatile(
" " MPLOCKED " "
" xaddl %0,%1 ; "
"# atomic_fetchadd_int"
: "+r" (v), /* 0 */
"+m" (*p) /* 1 */
: : "cc");
return (v);
}
/*
* Atomically add the value of v to the long integer pointed to by p and return
* the previous value of *p.
*/
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{
__asm __volatile(
" " MPLOCKED " "
" xaddq %0,%1 ; "
"# atomic_fetchadd_long"
: "+r" (v), /* 0 */
"+m" (*p) /* 1 */
: : "cc");
return (v);
}
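/*
 * Usage sketch (illustrative): atomic_fetchadd_*() returns the value held
 * *before* the addition, which makes it a natural ticket/sequence counter.
 * The helper name and the shared counter are hypothetical.
 */
static __inline u_int
example_next_ticket(volatile u_int *counter)
{
	return (atomic_fetchadd_int(counter, 1));
}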
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
u_char res;
__asm __volatile(
" " MPLOCKED " "
" btsl %2,%1 ; "
" setc %0 ; "
"# atomic_testandset_int"
: "=q" (res), /* 0 */
"+m" (*p) /* 1 */
: "Ir" (v & 0x1f) /* 2 */
: "cc");
return (res);
}
static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
u_char res;
__asm __volatile(
" " MPLOCKED " "
" btsq %2,%1 ; "
" setc %0 ; "
"# atomic_testandset_long"
: "=q" (res), /* 0 */
"+m" (*p) /* 1 */
: "Jr" ((u_long)(v & 0x3f)) /* 2 */
: "cc");
return (res);
}
/*
* We assume that a = b will do atomic loads and stores. Due to the
* IA32 memory model, a simple store guarantees release semantics.
*
* However, loads may pass stores, so for atomic_load_acq we have to
* ensure a Store/Load barrier to do the load in SMP kernels. We use
* "lock cmpxchg" as recommended by the AMD Software Optimization
* Guide, and not mfence. For UP kernels, however, the cache of the
* single processor is always consistent, so we only need to take care
* of the compiler.
*/
#define ATOMIC_STORE(TYPE) \
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
__compiler_membar(); \
*p = v; \
} \
struct __hack
#define ATOMIC_LOAD(TYPE, LOP) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
u_##TYPE res; \
\
__asm __volatile(MPLOCKED LOP \
: "=a" (res), /* 0 */ \
"+m" (*p) /* 1 */ \
: : "memory", "cc"); \
return (res); \
} \
struct __hack
ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);
ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);
ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);
ATOMIC_ASM(set, long, "orq %1,%0", "ir", v);
ATOMIC_ASM(clear, long, "andq %1,%0", "ir", ~v);
ATOMIC_ASM(add, long, "addq %1,%0", "ir", v);
ATOMIC_ASM(subtract, long, "subq %1,%0", "ir", v);
ATOMIC_LOAD(char, "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int, "cmpxchgl %0,%1");
ATOMIC_LOAD(long, "cmpxchgq %0,%1");
ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);
#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
/* Read the current value and store a new value in the destination. */
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{
__asm __volatile(
" xchgl %1,%0 ; "
"# atomic_swap_int"
: "+r" (v), /* 0 */
"+m" (*p)); /* 1 */
return (v);
}
static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{
__asm __volatile(
" xchgq %1,%0 ; "
"# atomic_swap_long"
: "+r" (v), /* 0 */
"+m" (*p)); /* 1 */
return (v);
}
#define atomic_set_acq_char atomic_set_barr_char
#define atomic_set_rel_char atomic_set_barr_char
#define atomic_clear_acq_char atomic_clear_barr_char
#define atomic_clear_rel_char atomic_clear_barr_char
#define atomic_add_acq_char atomic_add_barr_char
#define atomic_add_rel_char atomic_add_barr_char
#define atomic_subtract_acq_char atomic_subtract_barr_char
#define atomic_subtract_rel_char atomic_subtract_barr_char
#define atomic_set_acq_short atomic_set_barr_short
#define atomic_set_rel_short atomic_set_barr_short
#define atomic_clear_acq_short atomic_clear_barr_short
#define atomic_clear_rel_short atomic_clear_barr_short
#define atomic_add_acq_short atomic_add_barr_short
#define atomic_add_rel_short atomic_add_barr_short
#define atomic_subtract_acq_short atomic_subtract_barr_short
#define atomic_subtract_rel_short atomic_subtract_barr_short
#define atomic_set_acq_int atomic_set_barr_int
#define atomic_set_rel_int atomic_set_barr_int
#define atomic_clear_acq_int atomic_clear_barr_int
#define atomic_clear_rel_int atomic_clear_barr_int
#define atomic_add_acq_int atomic_add_barr_int
#define atomic_add_rel_int atomic_add_barr_int
#define atomic_subtract_acq_int atomic_subtract_barr_int
#define atomic_subtract_rel_int atomic_subtract_barr_int
#define atomic_cmpset_acq_int atomic_cmpset_int
#define atomic_cmpset_rel_int atomic_cmpset_int
#define atomic_set_acq_long atomic_set_barr_long
#define atomic_set_rel_long atomic_set_barr_long
#define atomic_clear_acq_long atomic_clear_barr_long
#define atomic_clear_rel_long atomic_clear_barr_long
#define atomic_add_acq_long atomic_add_barr_long
#define atomic_add_rel_long atomic_add_barr_long
#define atomic_subtract_acq_long atomic_subtract_barr_long
#define atomic_subtract_rel_long atomic_subtract_barr_long
#define atomic_cmpset_acq_long atomic_cmpset_long
#define atomic_cmpset_rel_long atomic_cmpset_long
#define atomic_readandclear_int(p) atomic_swap_int(p, 0)
#define atomic_readandclear_long(p) atomic_swap_long(p, 0)
/* Operations on 8-bit bytes. */
#define atomic_set_8 atomic_set_char
#define atomic_set_acq_8 atomic_set_acq_char
#define atomic_set_rel_8 atomic_set_rel_char
#define atomic_clear_8 atomic_clear_char
#define atomic_clear_acq_8 atomic_clear_acq_char
#define atomic_clear_rel_8 atomic_clear_rel_char
#define atomic_add_8 atomic_add_char
#define atomic_add_acq_8 atomic_add_acq_char
#define atomic_add_rel_8 atomic_add_rel_char
#define atomic_subtract_8 atomic_subtract_char
#define atomic_subtract_acq_8 atomic_subtract_acq_char
#define atomic_subtract_rel_8 atomic_subtract_rel_char
#define atomic_load_acq_8 atomic_load_acq_char
#define atomic_store_rel_8 atomic_store_rel_char
/* Operations on 16-bit words. */
#define atomic_set_16 atomic_set_short
#define atomic_set_acq_16 atomic_set_acq_short
#define atomic_set_rel_16 atomic_set_rel_short
#define atomic_clear_16 atomic_clear_short
#define atomic_clear_acq_16 atomic_clear_acq_short
#define atomic_clear_rel_16 atomic_clear_rel_short
#define atomic_add_16 atomic_add_short
#define atomic_add_acq_16 atomic_add_acq_short
#define atomic_add_rel_16 atomic_add_rel_short
#define atomic_subtract_16 atomic_subtract_short
#define atomic_subtract_acq_16 atomic_subtract_acq_short
#define atomic_subtract_rel_16 atomic_subtract_rel_short
#define atomic_load_acq_16 atomic_load_acq_short
#define atomic_store_rel_16 atomic_store_rel_short
/* Operations on 32-bit double words. */
#define atomic_set_32 atomic_set_int
#define atomic_set_acq_32 atomic_set_acq_int
#define atomic_set_rel_32 atomic_set_rel_int
#define atomic_clear_32 atomic_clear_int
#define atomic_clear_acq_32 atomic_clear_acq_int
#define atomic_clear_rel_32 atomic_clear_rel_int
#define atomic_add_32 atomic_add_int
#define atomic_add_acq_32 atomic_add_acq_int
#define atomic_add_rel_32 atomic_add_rel_int
#define atomic_subtract_32 atomic_subtract_int
#define atomic_subtract_acq_32 atomic_subtract_acq_int
#define atomic_subtract_rel_32 atomic_subtract_rel_int
#define atomic_load_acq_32 atomic_load_acq_int
#define atomic_store_rel_32 atomic_store_rel_int
#define atomic_cmpset_32 atomic_cmpset_int
#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
#define atomic_swap_32 atomic_swap_int
#define atomic_readandclear_32 atomic_readandclear_int
#define atomic_fetchadd_32 atomic_fetchadd_int
#define atomic_testandset_32 atomic_testandset_int
/* Operations on 64-bit quad words. */
#define atomic_set_64 atomic_set_long
#define atomic_set_acq_64 atomic_set_acq_long
#define atomic_set_rel_64 atomic_set_rel_long
#define atomic_clear_64 atomic_clear_long
#define atomic_clear_acq_64 atomic_clear_acq_long
#define atomic_clear_rel_64 atomic_clear_rel_long
#define atomic_add_64 atomic_add_long
#define atomic_add_acq_64 atomic_add_acq_long
#define atomic_add_rel_64 atomic_add_rel_long
#define atomic_subtract_64 atomic_subtract_long
#define atomic_subtract_acq_64 atomic_subtract_acq_long
#define atomic_subtract_rel_64 atomic_subtract_rel_long
#define atomic_load_acq_64 atomic_load_acq_long
#define atomic_store_rel_64 atomic_store_rel_long
#define atomic_cmpset_64 atomic_cmpset_long
#define atomic_cmpset_acq_64 atomic_cmpset_acq_long
#define atomic_cmpset_rel_64 atomic_cmpset_rel_long
#define atomic_swap_64 atomic_swap_long
#define atomic_readandclear_64 atomic_readandclear_long
#define atomic_testandset_64 atomic_testandset_long
/* Operations on pointers. */
#define atomic_set_ptr atomic_set_long
#define atomic_set_acq_ptr atomic_set_acq_long
#define atomic_set_rel_ptr atomic_set_rel_long
#define atomic_clear_ptr atomic_clear_long
#define atomic_clear_acq_ptr atomic_clear_acq_long
#define atomic_clear_rel_ptr atomic_clear_rel_long
#define atomic_add_ptr atomic_add_long
#define atomic_add_acq_ptr atomic_add_acq_long
#define atomic_add_rel_ptr atomic_add_rel_long
#define atomic_subtract_ptr atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_load_acq_ptr atomic_load_acq_long
#define atomic_store_rel_ptr atomic_store_rel_long
#define atomic_cmpset_ptr atomic_cmpset_long
#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long
#define atomic_swap_ptr atomic_swap_long
#define atomic_readandclear_ptr atomic_readandclear_long

View file

@ -0,0 +1,215 @@
/*-
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Copyright (c) 2008 Nokia Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <strings.h>
#include <sys/types.h>
/*
* Macros addressing word and bit within it, tuned to make compiler
* optimize cases when SETSIZE fits into single machine word.
*/
#define _BITSET_BITS (sizeof(long) * NBBY)
#define __bitset_words(_s) (howmany(_s, _BITSET_BITS))
#define __bitset_mask(_s, n) \
(1L << ((__bitset_words((_s)) == 1) ? \
(size_t)(n) : ((n) % _BITSET_BITS)))
#define __bitset_word(_s, n) \
((__bitset_words((_s)) == 1) ? 0 : ((n) / _BITSET_BITS))
#define BITSET_DEFINE(t, _s) \
struct t { \
long __bits[__bitset_words((_s))]; \
}
#define BITSET_T_INITIALIZER(x) \
{ .__bits = { x } }
#define BITSET_FSET(n) \
[ 0 ... ((n) - 1) ] = (-1L)
#define BIT_CLR(_s, n, p) \
((p)->__bits[__bitset_word(_s, n)] &= ~__bitset_mask((_s), (n)))
#define BIT_COPY(_s, f, t) (void)(*(t) = *(f))
#define BIT_ISSET(_s, n, p) \
((((p)->__bits[__bitset_word(_s, n)] & __bitset_mask((_s), (n))) != 0))
#define BIT_SET(_s, n, p) \
((p)->__bits[__bitset_word(_s, n)] |= __bitset_mask((_s), (n)))
#define BIT_ZERO(_s, p) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
(p)->__bits[__i] = 0L; \
} while (0)
#define BIT_FILL(_s, p) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
(p)->__bits[__i] = -1L; \
} while (0)
#define BIT_SETOF(_s, n, p) do { \
BIT_ZERO(_s, p); \
(p)->__bits[__bitset_word(_s, n)] = __bitset_mask((_s), (n)); \
} while (0)
/* Is p empty. */
#define BIT_EMPTY(_s, p) __extension__ ({ \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
if ((p)->__bits[__i]) \
break; \
__i == __bitset_words((_s)); \
})
/* Is p full set. */
#define BIT_ISFULLSET(_s, p) __extension__ ({ \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
if ((p)->__bits[__i] != (long)-1) \
break; \
__i == __bitset_words((_s)); \
})
/* Is c a subset of p. */
#define BIT_SUBSET(_s, p, c) __extension__ ({ \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
if (((c)->__bits[__i] & \
(p)->__bits[__i]) != \
(c)->__bits[__i]) \
break; \
__i == __bitset_words((_s)); \
})
/* Are there any common bits between p & c? */
#define BIT_OVERLAP(_s, p, c) __extension__ ({ \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
if (((c)->__bits[__i] & \
(p)->__bits[__i]) != 0) \
break; \
__i != __bitset_words((_s)); \
})
/* Compare two sets; returns 0 if equal, 1 otherwise. */
#define BIT_CMP(_s, p, c) __extension__ ({ \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
if (((c)->__bits[__i] != \
(p)->__bits[__i])) \
break; \
__i != __bitset_words((_s)); \
})
#define BIT_OR(_s, d, s) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
(d)->__bits[__i] |= (s)->__bits[__i]; \
} while (0)
#define BIT_AND(_s, d, s) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
(d)->__bits[__i] &= (s)->__bits[__i]; \
} while (0)
#define BIT_NAND(_s, d, s) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
(d)->__bits[__i] &= ~(s)->__bits[__i]; \
} while (0)
#define BIT_CLR_ATOMIC(_s, n, p) \
atomic_clear_long(((volatile u_long *) \
&(p)->__bits[__bitset_word(_s, n)]), __bitset_mask((_s), n))
#define BIT_SET_ATOMIC(_s, n, p) \
atomic_set_long(((volatile u_long *) &(p)->__bits[__bitset_word(_s, n)]), \
__bitset_mask((_s), n))
#define BIT_SET_ATOMIC_ACQ(_s, n, p) \
atomic_set_acq_long(&(p)->__bits[__bitset_word(_s, n)], \
__bitset_mask((_s), n))
/* Convenience functions catering to special cases. */
#define BIT_AND_ATOMIC(_s, d, s) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
atomic_clear_long(&(d)->__bits[__i], \
~(s)->__bits[__i]); \
} while (0)
#define BIT_OR_ATOMIC(_s, d, s) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
atomic_set_long(&(d)->__bits[__i], \
(s)->__bits[__i]); \
} while (0)
#define BIT_COPY_STORE_REL(_s, f, t) do { \
size_t __i; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
atomic_store_rel_long(&(t)->__bits[__i], \
(f)->__bits[__i]); \
} while (0)
#define BIT_FFS(_s, p) __extension__ ({ \
size_t __i; \
int __bit; \
\
__bit = 0; \
for (__i = 0; __i < __bitset_words((_s)); __i++) { \
if ((p)->__bits[__i] != 0) { \
__bit = ffsl((p)->__bits[__i]); \
__bit += __i * _BITSET_BITS; \
break; \
} \
} \
__bit; \
})
#define BIT_COUNT(_s, p) __extension__ ({ \
size_t __i; \
int __count; \
\
__count = 0; \
for (__i = 0; __i < __bitset_words((_s)); __i++) \
__count += __bitcountl((p)->__bits[__i]); \
__count; \
})
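/*
* A minimal usage sketch (illustrative only; the example_* names below are
* hypothetical): declare a 64-bit wide set type, mark a few members and query
* it. Note that BIT_FFS() follows ffsl() semantics: it returns a 1-based bit
* index, or 0 for an empty set.
*/
BITSET_DEFINE(example_set, 64);

static inline int
example_bitset_probe(void)
{
	struct example_set s;

	BIT_ZERO(64, &s);
	BIT_SET(64, 3, &s);
	BIT_SET(64, 42, &s);
	if (!BIT_ISSET(64, 3, &s))
		return (-1);
	return (BIT_FFS(64, &s)); /* lowest set bit is 3, so this yields 4 */
}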

View file

@ -0,0 +1,150 @@
/*-
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Copyright (c) 2008 Nokia Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <xhyve/support/bitset.h>
#define CPU_MAXSIZE 32
#ifndef CPU_SETSIZE
#define CPU_SETSIZE CPU_MAXSIZE
#endif
// #define _NCPUBITS _BITSET_BITS
// #define _NCPUWORDS __bitset_words(CPU_SETSIZE)
BITSET_DEFINE(_cpuset, CPU_SETSIZE);
typedef struct _cpuset cpuset_t;
// #define CPUSET_FSET BITSET_FSET(_NCPUWORDS)
// #define CPUSET_T_INITIALIZER BITSET_T_INITIALIZER
// #define CPUSETBUFSIZ ((2 + sizeof(long) * 2) * _NCPUWORDS)
#define CPU_CLR(n, p) BIT_CLR(CPU_SETSIZE, n, p)
// #define CPU_COPY(f, t) BIT_COPY(CPU_SETSIZE, f, t)
#define CPU_ISSET(n, p) BIT_ISSET(CPU_SETSIZE, n, p)
#define CPU_SET(n, p) BIT_SET(CPU_SETSIZE, n, p)
#define CPU_ZERO(p) BIT_ZERO(CPU_SETSIZE, p)
// #define CPU_FILL(p) BIT_FILL(CPU_SETSIZE, p)
#define CPU_SETOF(n, p) BIT_SETOF(CPU_SETSIZE, n, p)
#define CPU_EMPTY(p) BIT_EMPTY(CPU_SETSIZE, p)
// #define CPU_ISFULLSET(p) BIT_ISFULLSET(CPU_SETSIZE, p)
// #define CPU_SUBSET(p, c) BIT_SUBSET(CPU_SETSIZE, p, c)
// #define CPU_OVERLAP(p, c) BIT_OVERLAP(CPU_SETSIZE, p, c)
#define CPU_CMP(p, c) BIT_CMP(CPU_SETSIZE, p, c)
// #define CPU_OR(d, s) BIT_OR(CPU_SETSIZE, d, s)
#define CPU_AND(d, s) BIT_AND(CPU_SETSIZE, d, s)
// #define CPU_NAND(d, s) BIT_NAND(CPU_SETSIZE, d, s)
#define CPU_CLR_ATOMIC(n, p) BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
#define CPU_SET_ATOMIC(n, p) BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
// #define CPU_SET_ATOMIC_ACQ(n, p) BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
// #define CPU_AND_ATOMIC(n, p) BIT_AND_ATOMIC(CPU_SETSIZE, n, p)
// #define CPU_OR_ATOMIC(d, s) BIT_OR_ATOMIC(CPU_SETSIZE, d, s)
// #define CPU_COPY_STORE_REL(f, t) BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
#define CPU_FFS(p) BIT_FFS(CPU_SETSIZE, p)
// #define CPU_COUNT(p) BIT_COUNT(CPU_SETSIZE, p)
// /*
// * Valid cpulevel_t values.
// */
// #define CPU_LEVEL_ROOT 1 /* All system cpus. */
// #define CPU_LEVEL_CPUSET 2 /* Available cpus for which. */
// #define CPU_LEVEL_WHICH 3 /* Actual mask/id for which. */
// /*
// * Valid cpuwhich_t values.
// */
// #define CPU_WHICH_TID 1 /* Specifies a thread id. */
// #define CPU_WHICH_PID 2 /* Specifies a process id. */
// #define CPU_WHICH_CPUSET 3 /* Specifies a set id. */
// #define CPU_WHICH_IRQ 4 /* Specifies an irq #. */
// #define CPU_WHICH_JAIL 5 /* Specifies a jail id. */
// #define CPU_WHICH_DOMAIN 6 /* Specifies a NUMA domain id. */
// /*
// * Reserved cpuset identifiers.
// */
// #define CPUSET_INVALID -1
// #define CPUSET_DEFAULT 0
// #ifdef _KERNEL
// LIST_HEAD(setlist, cpuset);
// /*
// * cpusets encapsulate cpu binding information for one or more threads.
// *
// * a - Accessed with atomics.
// * s - Set at creation, never modified. Only a ref required to read.
// * c - Locked internally by a cpuset lock.
// *
// * The bitmask is only modified while holding the cpuset lock. It may be
// * read while only a reference is held but the consumer must be prepared
// * to deal with inconsistent results.
// */
// struct cpuset {
// cpuset_t cs_mask; /* bitmask of valid cpus. */
// volatile u_int cs_ref; /* (a) Reference count. */
// int cs_flags; /* (s) Flags from below. */
// cpusetid_t cs_id; /* (s) Id or INVALID. */
// struct cpuset *cs_parent; /* (s) Pointer to our parent. */
// LIST_ENTRY(cpuset) cs_link; /* (c) All identified sets. */
// LIST_ENTRY(cpuset) cs_siblings; /* (c) Sibling set link. */
// struct setlist cs_children; /* (c) List of children. */
// };
// #define CPU_SET_ROOT 0x0001 /* Set is a root set. */
// #define CPU_SET_RDONLY 0x0002 /* No modification allowed. */
// extern cpuset_t *cpuset_root;
// struct prison;
// struct proc;
// struct cpuset *cpuset_thread0(void);
// struct cpuset *cpuset_ref(struct cpuset *);
// void cpuset_rel(struct cpuset *);
// int cpuset_setthread(lwpid_t id, cpuset_t *);
// int cpuset_setithread(lwpid_t id, int cpu);
// int cpuset_create_root(struct prison *, struct cpuset **);
// int cpuset_setproc_update_set(struct proc *, struct cpuset *);
// char *cpusetobj_strprint(char *, const cpuset_t *);
// int cpusetobj_strscan(cpuset_t *, const char *);
// #else
// __BEGIN_DECLS
// int cpuset(cpusetid_t *);
// int cpuset_setid(cpuwhich_t, id_t, cpusetid_t);
// int cpuset_getid(cpulevel_t, cpuwhich_t, id_t, cpusetid_t *);
// int cpuset_getaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, cpuset_t *);
// int cpuset_setaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, const cpuset_t *);
// __END_DECLS
// #endif
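/*
* A minimal usage sketch (illustrative only; the example_* name is
* hypothetical): build a cpuset_t for two vCPUs and query it. CPU_FFS() is
* 1-based like ffsl(), so the lowest set CPU id is CPU_FFS(&set) - 1.
*/
static inline int
example_cpuset_lowest(void)
{
	cpuset_t active;

	CPU_ZERO(&active);
	CPU_SET(0, &active);
	CPU_SET(2, &active);
	if (CPU_ISSET(1, &active))
		return (-1);
	return (CPU_FFS(&active) - 1); /* lowest active vCPU, here 0 */
}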

View file

@ -0,0 +1,81 @@
/*-
* Copyright (c) 1993 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: timerreg.h,v 1.2 93/02/28 15:08:58 mccanne Exp
* $FreeBSD$
*/
/*
* Register definitions for the Intel 8253 Programmable Interval Timer.
*
* This chip has three independent 16-bit down counters that can be
* read on the fly. There are three mode registers and three countdown
* registers. The countdown registers are addressed directly, via the
* first three I/O ports. The three mode registers are accessed via
* the fourth I/O port, with two bits in the mode byte indicating the
* register. (Why are hardware interfaces always so braindead?).
*
* To write a value into the countdown register, the mode register
* is first programmed with a command indicating which byte of
* the two-byte register is to be modified. The three possibilities
* are load msb (TIMER_MSB), load lsb (TIMER_LSB), or load lsb then
* msb (TIMER_16BIT).
*
* To read the current value ("on the fly") from the countdown register,
* you write a "latch" command into the mode register, then read the stable
* value from the corresponding I/O port. For example, you write
* TIMER_LATCH (together with the counter select) into the mode register. Presumably,
* after doing this, a write operation to the I/O port would result
* in undefined behavior (but hopefully not fry the chip).
* Reading in this manner has no side effects.
*/
/*
* Macros for specifying values to be written into a mode register.
*/
#pragma once
#define TIMER_REG_CNTR0 0 /* timer 0 counter port */
#define TIMER_REG_CNTR1 1 /* timer 1 counter port */
#define TIMER_REG_CNTR2 2 /* timer 2 counter port */
#define TIMER_REG_MODE 3 /* timer mode port */
#define TIMER_SEL0 0x00 /* select counter 0 */
#define TIMER_SEL1 0x40 /* select counter 1 */
#define TIMER_SEL2 0x80 /* select counter 2 */
#define TIMER_INTTC 0x00 /* mode 0, intr on terminal cnt */
#define TIMER_ONESHOT 0x02 /* mode 1, one shot */
#define TIMER_RATEGEN 0x04 /* mode 2, rate generator */
#define TIMER_SQWAVE 0x06 /* mode 3, square wave */
#define TIMER_SWSTROBE 0x08 /* mode 4, s/w triggered strobe */
#define TIMER_HWSTROBE 0x0a /* mode 5, h/w triggered strobe */
#define TIMER_LATCH 0x00 /* latch counter for reading */
#define TIMER_LSB 0x10 /* r/w counter LSB */
#define TIMER_MSB 0x20 /* r/w counter MSB */
#define TIMER_16BIT 0x30 /* r/w counter 16 bits, LSB first */
#define TIMER_BCD 0x01 /* count in BCD */
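/*
* A minimal programming sketch (illustrative only): set counter 0 up as a
* rate generator, then latch and read it back LSB first. The outb()/inb()
* helpers, the legacy i8254 base port 0x40 and the example_* names are
* assumptions of this sketch, not part of the header.
*/
#define EXAMPLE_PIT_BASE 0x40 /* hypothetical i8254 base I/O port */
extern void outb(unsigned int port, unsigned char val); /* hypothetical helper */
extern unsigned char inb(unsigned int port); /* hypothetical helper */

static inline unsigned int
example_pit_program_and_latch(unsigned int count)
{
	/* select counter 0, mode 2 (rate generator), 16-bit LSB-then-MSB access */
	outb(EXAMPLE_PIT_BASE + TIMER_REG_MODE,
	    TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
	outb(EXAMPLE_PIT_BASE + TIMER_REG_CNTR0, count & 0xff);
	outb(EXAMPLE_PIT_BASE + TIMER_REG_CNTR0, (count >> 8) & 0xff);

	/* latch counter 0, then read the frozen value from its counter port */
	outb(EXAMPLE_PIT_BASE + TIMER_REG_MODE, TIMER_SEL0 | TIMER_LATCH);
	return (inb(EXAMPLE_PIT_BASE + TIMER_REG_CNTR0) |
	    ((unsigned int)inb(EXAMPLE_PIT_BASE + TIMER_REG_CNTR0) << 8));
}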

View file

@ -0,0 +1,83 @@
/*-
* Copyright (c) 2003 Peter Wemm
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Register definitions for the i8259A programmable interrupt controller.
*/
#pragma once
/* Initialization control word 1. Written to even address. */
#define ICW1_IC4 0x01 /* ICW4 present */
#define ICW1_SNGL 0x02 /* 1 = single, 0 = cascaded */
#define ICW1_ADI 0x04 /* 1 = 4, 0 = 8 byte vectors */
#define ICW1_LTIM 0x08 /* 1 = level trigger, 0 = edge */
#define ICW1_RESET 0x10 /* must be 1 */
/* 0x20 - 0x80 - in 8080/8085 mode only */
/* Initialization control word 2. Written to the odd address. */
/* No definitions, it is the base vector of the IDT for 8086 mode */
/* Initialization control word 3. Written to the odd address. */
/* For a master PIC, bitfield indicating a slave 8259 on a given input */
/* For a slave, the lower 3 bits are the slave's binary ID on the master */
/* Initialization control word 4. Written to the odd address. */
#define ICW4_8086 0x01 /* 1 = 8086, 0 = 8080 */
#define ICW4_AEOI 0x02 /* 1 = Auto EOI */
#define ICW4_MS 0x04 /* 1 = buffered master, 0 = slave */
#define ICW4_BUF 0x08 /* 1 = enable buffer mode */
#define ICW4_SFNM 0x10 /* 1 = special fully nested mode */
/* Operation control words. Written after initialization. */
/* Operation control word type 1 */
/*
* No definitions. Written to the odd address. Bitmask for interrupts.
* 1 = disabled.
*/
/* Operation control word type 2. Bit 3 (0x08) must be zero. Even address. */
#define OCW2_L0 0x01 /* Level */
#define OCW2_L1 0x02
#define OCW2_L2 0x04
/* 0x08 must be 0 to select OCW2 vs OCW3 */
/* 0x10 must be 0 to select OCW2 vs ICW1 */
#define OCW2_EOI 0x20 /* 1 = EOI */
#define OCW2_SL 0x40 /* EOI mode */
#define OCW2_R 0x80 /* EOI mode */
/* Operation control word type 3. Bit 3 (0x08) must be set. Even address. */
#define OCW3_RIS 0x01 /* 1 = read IS, 0 = read IR */
#define OCW3_RR 0x02 /* register read */
#define OCW3_P 0x04 /* poll mode command */
/* 0x08 must be 1 to select OCW3 vs OCW2 */
#define OCW3_SEL 0x08 /* must be 1 */
/* 0x10 must be 0 to select OCW3 vs ICW1 */
#define OCW3_SMM 0x20 /* special mode mask */
#define OCW3_ESMM 0x40 /* enable SMM */
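/*
* A minimal initialization sketch (illustrative only) for a master 8259A:
* ICW1..ICW4 are written in order, then OCW1 sets the interrupt mask. The
* outb() helper, the legacy master ports 0x20/0x21 and the example_* name
* are assumptions of this sketch, not part of the header.
*/
extern void outb(unsigned int port, unsigned char val); /* hypothetical helper */

static inline void
example_pic_init_master(unsigned char vector_base)
{
	outb(0x20, ICW1_RESET | ICW1_IC4); /* ICW1: edge triggered, ICW4 follows */
	outb(0x21, vector_base);           /* ICW2: IDT vector base */
	outb(0x21, 1 << 2);                /* ICW3: slave cascaded on input 2 */
	outb(0x21, ICW4_8086);             /* ICW4: 8086/8088 mode */
	outb(0x21, 0xff);                  /* OCW1: mask all interrupt lines */
}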

View file

@ -0,0 +1,96 @@
/*-
* Copyright (c) 1999 John D. Polstra
* Copyright (c) 1999,2001 Peter Wemm <peter@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/* xhyve: sort of working linker sets for MachO */
#pragma once
#define __GLOBL1(sym) __asm__(".globl " #sym)
#define __GLOBL(sym) __GLOBL1(sym)
#define __section(x) __attribute__((__section__(x)))
/*
* The following macros are used to declare global sets of objects, which
* are collected by the linker into a `linker_set' as defined below.
* For this Mach-O port, this is done by placing each set in its own segment.
*/
#define __MAKE_SET_CONST const
/*
* Private macros, not to be used outside this header file.
*/
#define __MAKE_SET(set, sym) \
__GLOBL(__CONCAT(__start_set_,set)); \
__GLOBL(__CONCAT(__stop_set_,set)); \
static void const * __MAKE_SET_CONST \
__set_##set##_sym_##sym __section("__"#set",__set") \
__used = &(sym)
/*
* Public macros.
*/
#define TEXT_SET(set, sym) __MAKE_SET(set, sym)
#define DATA_SET(set, sym) __MAKE_SET(set, sym)
#define BSS_SET(set, sym) __MAKE_SET(set, sym)
#define ABS_SET(set, sym) __MAKE_SET(set, sym)
#define SET_ENTRY(set, sym) __MAKE_SET(set, sym)
/*
* Initialize before referring to a given linker set.
*/
#define SET_DECLARE(set, ptype) \
extern ptype __weak *__CONCAT(__start_set_,set) \
__asm("segment$start$__"#set); \
extern ptype __weak *__CONCAT(__stop_set_,set) \
__asm("segment$end$__"#set)
#define SET_BEGIN(set) \
(&__CONCAT(__start_set_,set))
#define SET_LIMIT(set) \
(&__CONCAT(__stop_set_,set))
/*
* Iterate over all the elements of a set.
*
* Sets always contain addresses of things, and "pvar" points to words
* containing those addresses. Thus it must be declared as "type **pvar",
* and the address of each set item is obtained inside the loop by "*pvar".
*/
#define SET_FOREACH(pvar, set) \
for (pvar = SET_BEGIN(set); pvar < SET_LIMIT(set); pvar++)
#define SET_ITEM(set, i) \
((SET_BEGIN(set))[i])
/*
* Provide a count of the items in a set.
*/
#define SET_COUNT(set) \
(SET_LIMIT(set) - SET_BEGIN(set))
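/*
* A minimal usage sketch (illustrative only; the example_* names are
* hypothetical): any translation unit can contribute an entry to the set
* with DATA_SET(), and a consumer walks the collected pointers with
* SET_FOREACH().
*/
struct example_item {
	const char *name;
};

static struct example_item example_a = { "a" };
DATA_SET(example_set, example_a);

SET_DECLARE(example_set, struct example_item);

static inline int
example_count_items(void)
{
	struct example_item **iter;
	int n = 0;

	SET_FOREACH(iter, example_set)
		n++;
	return (n);
}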

View file

@ -0,0 +1,52 @@
/* MD5.H - header file for MD5C.C
* $FreeBSD$
*/
/*-
Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
#pragma once
#include <xhyve/support/misc.h>
#define MD5_BLOCK_LENGTH 64
#define MD5_DIGEST_LENGTH 16
#define MD5_DIGEST_STRING_LENGTH (MD5_DIGEST_LENGTH * 2 + 1)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/* MD5 context. */
typedef struct MD5Context {
u_int32_t state[4]; /* state (ABCD) */
u_int32_t count[2]; /* number of bits, modulo 2^64 (lsb first) */
unsigned char buffer[64]; /* input buffer */
} MD5_CTX;
#pragma clang diagnostic pop
void MD5Init(MD5_CTX *);
void MD5Update(MD5_CTX *, const void *, unsigned int);
void MD5Final(unsigned char [16], MD5_CTX *);
char * MD5End(MD5_CTX *, char *);
char * MD5File(const char *, char *);
char * MD5FileChunk(const char *, char *, off_t, off_t);
char * MD5Data(const void *, unsigned int, char *);
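/*
* A minimal usage sketch (illustrative only; the example_* name is
* hypothetical): the three-call Init/Update/Final interface declared above
* digests an in-memory buffer in one pass.
*/
static inline void
example_md5_buffer(const void *buf, unsigned int len,
    unsigned char digest[MD5_DIGEST_LENGTH])
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	MD5Update(&ctx, buf, len);
	MD5Final(digest, &ctx);
}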

View file

@ -0,0 +1,64 @@
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#define UNUSED __attribute__ ((unused))
#define CTASSERT(x) _Static_assert ((x), "CTASSERT")
#define XHYVE_PAGE_SIZE 0x1000
#define XHYVE_PAGE_MASK (XHYVE_PAGE_SIZE - 1)
#define XHYVE_PAGE_SHIFT 12
#define __aligned(x) __attribute__ ((aligned ((x))))
#define __packed __attribute__ ((packed))
#define nitems(x) (sizeof((x)) / sizeof((x)[0]))
#define powerof2(x) ((((x)-1)&(x))==0)
#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is a power of two */
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define xhyve_abort(...) \
do { \
fprintf(stderr, __VA_ARGS__); \
abort(); \
} while (0)
#define xhyve_warn(...) \
do { \
fprintf(stderr, __VA_ARGS__); \
} while (0)
#ifdef XHYVE_CONFIG_ASSERT
#define KASSERT(exp, msg) if (!(exp)) xhyve_abort msg
#define KWARN(exp, msg) if (!(exp)) xhyve_warn msg
#else
#define KASSERT(exp, msg) if (0) xhyve_abort msg
#define KWARN(exp, msg) if (0) xhyve_warn msg
#endif
#define FALSE 0
#define TRUE 1
#define XHYVE_PROT_READ 1
#define XHYVE_PROT_WRITE 2
#define XHYVE_PROT_EXECUTE 4
#define VM_SUCCESS 0
/* sys/sys/types.h */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
static inline void cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p) {
__asm__ __volatile__ ("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
}
static inline void do_cpuid(unsigned ax, unsigned *p) {
__asm__ __volatile__ ("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
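/*
* A minimal usage sketch (illustrative only; the example_* name is
* hypothetical): CPUID leaf 0 returns the highest supported basic leaf in
* EAX and the 12-byte vendor string in EBX, EDX, ECX, in that order.
*/
static inline uint32_t
example_cpu_vendor(char vendor[13])
{
	u_int regs[4];
	int i;

	do_cpuid(0, regs);
	for (i = 0; i < 4; i++) {
		vendor[i + 0] = (char)(regs[1] >> (i * 8)); /* EBX */
		vendor[i + 4] = (char)(regs[3] >> (i * 8)); /* EDX */
		vendor[i + 8] = (char)(regs[2] >> (i * 8)); /* ECX */
	}
	vendor[12] = '\0';
	return (regs[0]); /* highest supported basic leaf */
}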

View file

@ -0,0 +1,194 @@
/*-
* Copyright (c) 1996, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <xhyve/support/misc.h>
enum busTypes {
NOBUS = 0,
CBUS = 1,
CBUSII = 2,
EISA = 3,
ISA = 6,
MCA = 9,
PCI = 13,
XPRESS = 18,
MAX_BUSTYPE = 18,
UNKNOWN_BUSTYPE = 0xff
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
/* MP Floating Pointer Structure */
typedef struct MPFPS {
uint8_t signature[4];
uint32_t pap;
uint8_t length;
uint8_t spec_rev;
uint8_t checksum;
uint8_t config_type;
uint8_t mpfb2;
uint8_t mpfb3;
uint8_t mpfb4;
uint8_t mpfb5;
} __packed *mpfps_t;
#define MPFB2_IMCR_PRESENT 0x80
#define MPFB2_MUL_CLK_SRCS 0x40
/* MP Configuration Table Header */
typedef struct MPCTH {
uint8_t signature[4];
uint16_t base_table_length;
uint8_t spec_rev;
uint8_t checksum;
uint8_t oem_id[8];
uint8_t product_id[12];
uint32_t oem_table_pointer;
uint16_t oem_table_size;
uint16_t entry_count;
uint32_t apic_address;
uint16_t extended_table_length;
uint8_t extended_table_checksum;
uint8_t reserved;
} __packed *mpcth_t;
/* Base table entries */
#define MPCT_ENTRY_PROCESSOR 0
#define MPCT_ENTRY_BUS 1
#define MPCT_ENTRY_IOAPIC 2
#define MPCT_ENTRY_INT 3
#define MPCT_ENTRY_LOCAL_INT 4
typedef struct PROCENTRY {
uint8_t type;
uint8_t apic_id;
uint8_t apic_version;
uint8_t cpu_flags;
uint32_t cpu_signature;
uint32_t feature_flags;
uint32_t reserved1;
uint32_t reserved2;
} __packed *proc_entry_ptr;
#define PROCENTRY_FLAG_EN 0x01
#define PROCENTRY_FLAG_BP 0x02
typedef struct BUSENTRY {
uint8_t type;
uint8_t bus_id;
uint8_t bus_type[6];
} __packed *bus_entry_ptr;
typedef struct IOAPICENTRY {
uint8_t type;
uint8_t apic_id;
uint8_t apic_version;
uint8_t apic_flags;
uint32_t apic_address;
} __packed *io_apic_entry_ptr;
#define IOAPICENTRY_FLAG_EN 0x01
typedef struct INTENTRY {
uint8_t type;
uint8_t int_type;
uint16_t int_flags;
uint8_t src_bus_id;
uint8_t src_bus_irq;
uint8_t dst_apic_id;
uint8_t dst_apic_int;
} __packed *int_entry_ptr;
#define INTENTRY_TYPE_INT 0
#define INTENTRY_TYPE_NMI 1
#define INTENTRY_TYPE_SMI 2
#define INTENTRY_TYPE_EXTINT 3
#define INTENTRY_FLAGS_POLARITY 0x3
#define INTENTRY_FLAGS_POLARITY_CONFORM 0x0
#define INTENTRY_FLAGS_POLARITY_ACTIVEHI 0x1
#define INTENTRY_FLAGS_POLARITY_ACTIVELO 0x3
#define INTENTRY_FLAGS_TRIGGER 0xc
#define INTENTRY_FLAGS_TRIGGER_CONFORM 0x0
#define INTENTRY_FLAGS_TRIGGER_EDGE 0x4
#define INTENTRY_FLAGS_TRIGGER_LEVEL 0xc
/* Extended table entries */
typedef struct EXTENTRY {
uint8_t type;
uint8_t length;
} __packed *ext_entry_ptr;
#define MPCT_EXTENTRY_SAS 0x80
#define MPCT_EXTENTRY_BHD 0x81
#define MPCT_EXTENTRY_CBASM 0x82
typedef struct SASENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t address_type;
uint64_t address_base;
uint64_t address_length;
} __packed *sas_entry_ptr;
#define SASENTRY_TYPE_IO 0
#define SASENTRY_TYPE_MEMORY 1
#define SASENTRY_TYPE_PREFETCH 2
typedef struct BHDENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t bus_info;
uint8_t parent_bus;
uint8_t reserved[3];
} __packed *bhd_entry_ptr;
#define BHDENTRY_INFO_SUBTRACTIVE_DECODE 0x1
typedef struct CBASMENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t address_mod;
uint32_t predefined_range;
} __packed *cbasm_entry_ptr;
#define CBASMENTRY_ADDRESS_MOD_ADD 0x0
#define CBASMENTRY_ADDRESS_MOD_SUBTRACT 0x1
#define CBASMENTRY_RANGE_ISA_IO 0
#define CBASMENTRY_RANGE_VGA_IO 1
#pragma clang diagnostic pop
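/*
* A minimal validation sketch (illustrative only; the example_* name is
* hypothetical): per the MultiProcessor specification, the floating pointer
* structure starts with the "_MP_" signature, its length field counts
* 16-byte units, and all of its bytes must sum to zero modulo 256.
*/
static inline int
example_mpfps_valid(const struct MPFPS *fps)
{
	const uint8_t *p = (const uint8_t *)fps;
	unsigned int len = (unsigned int)fps->length * 16;
	uint8_t sum = 0;

	if (fps->signature[0] != '_' || fps->signature[1] != 'M' ||
	    fps->signature[2] != 'P' || fps->signature[3] != '_')
		return (0);
	while (len-- > 0)
		sum = (uint8_t)(sum + *p++);
	return (sum == 0);
}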

View file

@ -0,0 +1,242 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)ns16550.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
/*
* NS8250... UART registers.
*/
/* 8250 registers #[0-6]. */
#pragma once
#define com_data 0 /* data register (R/W) */
#define REG_DATA com_data
#define com_ier 1 /* interrupt enable register (W) */
#define REG_IER com_ier
#define IER_ERXRDY 0x1
#define IER_ETXRDY 0x2
#define IER_ERLS 0x4
#define IER_EMSC 0x8
#define IER_BITS "\20\1ERXRDY\2ETXRDY\3ERLS\4EMSC"
#define com_iir 2 /* interrupt identification register (R) */
#define REG_IIR com_iir
#define IIR_IMASK 0xf
#define IIR_RXTOUT 0xc
#define IIR_BUSY 0x7
#define IIR_RLS 0x6
#define IIR_RXRDY 0x4
#define IIR_TXRDY 0x2
#define IIR_NOPEND 0x1
#define IIR_MLSC 0x0
#define IIR_FIFO_MASK 0xc0 /* set if FIFOs are enabled */
#define IIR_BITS "\20\1NOPEND\2TXRDY\3RXRDY"
#define com_lcr 3 /* line control register (R/W) */
#define com_cfcr com_lcr /* character format control register (R/W) */
#define REG_LCR com_lcr
#define LCR_DLAB 0x80
#define CFCR_DLAB LCR_DLAB
#define LCR_EFR_ENABLE 0xbf /* magic to enable EFR on 16650 up */
#define CFCR_EFR_ENABLE LCR_EFR_ENABLE
#define LCR_SBREAK 0x40
#define CFCR_SBREAK LCR_SBREAK
#define LCR_PZERO 0x30
#define CFCR_PZERO LCR_PZERO
#define LCR_PONE 0x20
#define CFCR_PONE LCR_PONE
#define LCR_PEVEN 0x10
#define CFCR_PEVEN LCR_PEVEN
#define LCR_PODD 0x00
#define CFCR_PODD LCR_PODD
#define LCR_PENAB 0x08
#define CFCR_PENAB LCR_PENAB
#define LCR_STOPB 0x04
#define CFCR_STOPB LCR_STOPB
#define LCR_8BITS 0x03
#define CFCR_8BITS LCR_8BITS
#define LCR_7BITS 0x02
#define CFCR_7BITS LCR_7BITS
#define LCR_6BITS 0x01
#define CFCR_6BITS LCR_6BITS
#define LCR_5BITS 0x00
#define CFCR_5BITS LCR_5BITS
#define com_mcr 4 /* modem control register (R/W) */
#define REG_MCR com_mcr
#define MCR_PRESCALE 0x80 /* only available on 16650 up */
#define MCR_LOOPBACK 0x10
#define MCR_IE 0x08
#define MCR_IENABLE MCR_IE
#define MCR_DRS 0x04
#define MCR_RTS 0x02
#define MCR_DTR 0x01
#define MCR_BITS "\20\1DTR\2RTS\3DRS\4IE\5LOOPBACK\10PRESCALE"
#define com_lsr 5 /* line status register (R/W) */
#define REG_LSR com_lsr
#define LSR_RCV_FIFO 0x80
#define LSR_TEMT 0x40
#define LSR_TSRE LSR_TEMT
#define LSR_THRE 0x20
#define LSR_TXRDY LSR_THRE
#define LSR_BI 0x10
#define LSR_FE 0x08
#define LSR_PE 0x04
#define LSR_OE 0x02
#define LSR_RXRDY 0x01
#define LSR_RCV_MASK 0x1f
#define LSR_BITS "\20\1RXRDY\2OE\3PE\4FE\5BI\6THRE\7TEMT\10RCV_FIFO"
#define com_msr 6 /* modem status register (R/W) */
#define REG_MSR com_msr
#define MSR_DCD 0x80
#define MSR_RI 0x40
#define MSR_DSR 0x20
#define MSR_CTS 0x10
#define MSR_DDCD 0x08
#define MSR_TERI 0x04
#define MSR_DDSR 0x02
#define MSR_DCTS 0x01
#define MSR_BITS "\20\1DCTS\2DDSR\3TERI\4DDCD\5CTS\6DSR\7RI\10DCD"
/* 8250 multiplexed registers #[0-1]. Access enabled by LCR[7]. */
#define com_dll 0 /* divisor latch low (R/W) */
#define com_dlbl com_dll
#define com_dlm 1 /* divisor latch high (R/W) */
#define com_dlbh com_dlm
#define REG_DLL com_dll
#define REG_DLH com_dlm
/* 16450 register #7. Not multiplexed. */
#define com_scr 7 /* scratch register (R/W) */
/* 16550 register #2. Not multiplexed. */
#define com_fcr 2 /* FIFO control register (W) */
#define com_fifo com_fcr
#define REG_FCR com_fcr
#define FCR_ENABLE 0x01
#define FIFO_ENABLE FCR_ENABLE
#define FCR_RCV_RST 0x02
#define FIFO_RCV_RST FCR_RCV_RST
#define FCR_XMT_RST 0x04
#define FIFO_XMT_RST FCR_XMT_RST
#define FCR_DMA 0x08
#define FIFO_DMA_MODE FCR_DMA
#define FCR_RX_LOW 0x00
#define FIFO_RX_LOW FCR_RX_LOW
#define FCR_RX_MEDL 0x40
#define FIFO_RX_MEDL FCR_RX_MEDL
#define FCR_RX_MEDH 0x80
#define FIFO_RX_MEDH FCR_RX_MEDH
#define FCR_RX_HIGH 0xc0
#define FIFO_RX_HIGH FCR_RX_HIGH
#define FCR_BITS "\20\1ENABLE\2RCV_RST\3XMT_RST\4DMA"
/* 16650 registers #2,[4-7]. Access enabled by LCR_EFR_ENABLE. */
#define com_efr 2 /* enhanced features register (R/W) */
#define REG_EFR com_efr
#define EFR_CTS 0x80
#define EFR_AUTOCTS EFR_CTS
#define EFR_RTS 0x40
#define EFR_AUTORTS EFR_RTS
#define EFR_EFE 0x10 /* enhanced functions enable */
#define com_xon1 4 /* XON 1 character (R/W) */
#define com_xon2 5 /* XON 2 character (R/W) */
#define com_xoff1 6 /* XOFF 1 character (R/W) */
#define com_xoff2 7 /* XOFF 2 character (R/W) */
#define DW_REG_USR 31 /* DesignWare derived Uart Status Reg */
#define com_usr 39 /* Octeon 16750/16550 Uart Status Reg */
#define REG_USR com_usr
#define USR_BUSY 1 /* Uart Busy. Serial transfer in progress */
#define USR_TXFIFO_NOTFULL 2 /* Uart TX FIFO Not full */
/* 16950 register #1. Access enabled by ACR[7]. Also requires !LCR[7]. */
#define com_asr 1 /* additional status register (R[0-7]/W[0-1]) */
/* 16950 register #3. R/W access enabled by ACR[7]. */
#define com_rfl 3 /* receiver fifo level (R) */
/*
* 16950 register #4. Access enabled by ACR[7]. Also requires
* !LCR_EFR_ENABLE.
*/
#define com_tfl 4 /* transmitter fifo level (R) */
/*
* 16950 register #5. Accessible if !LCR_EFR_ENABLE. Read access also
* requires ACR[6].
*/
#define com_icr 5 /* index control register (R/W) */
/*
* 16950 register #7. It is the same as com_scr except it has a different
* abbreviation in the manufacturer's data sheet and it also serves as an
* index into the Indexed Control register set.
*/
#define com_spr com_scr /* scratch pad (and index) register (R/W) */
#define REG_SPR com_scr
/*
* 16950 indexed control registers #[0-0x13]. Access is via index in SPR,
* data in ICR (if ICR is accessible).
*/
#define com_acr 0 /* additional control register (R/W) */
#define ACR_ASE 0x80 /* ASR/RFL/TFL enable */
#define ACR_ICRE 0x40 /* ICR enable */
#define ACR_TLE 0x20 /* TTL/RTL enable */
#define com_cpr 1 /* clock prescaler register (R/W) */
#define com_tcr 2 /* times clock register (R/W) */
#define com_ttl 4 /* transmitter trigger level (R/W) */
#define com_rtl 5 /* receiver trigger level (R/W) */
/* ... */
/* Hardware extension mode register for RSB-2000/3000. */
#define com_emr com_msr
#define EMR_EXBUFF 0x04
#define EMR_CTSFLW 0x08
#define EMR_DSRFLW 0x10
#define EMR_RTSFLW 0x20
#define EMR_DTRFLW 0x40
#define EMR_EFMODE 0x80
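/*
* A minimal programming sketch (illustrative only): program the divisor
* latch for a given baud rate and select 8N1 framing. The outb() helper,
* the COM1 base port 0x3f8, the 115200 divisor base (1.8432 MHz reference
* clock) and the example_* names are assumptions of this sketch, not part
* of the header.
*/
#define EXAMPLE_COM1_BASE 0x3f8 /* hypothetical base I/O port */
extern void outb(unsigned int port, unsigned char val); /* hypothetical helper */

static inline void
example_uart_set_baud(unsigned int baud)
{
	unsigned int divisor = 115200 / baud;

	outb(EXAMPLE_COM1_BASE + REG_LCR, LCR_DLAB);      /* expose DLL/DLM */
	outb(EXAMPLE_COM1_BASE + REG_DLL, divisor & 0xff);
	outb(EXAMPLE_COM1_BASE + REG_DLH, (divisor >> 8) & 0xff);
	outb(EXAMPLE_COM1_BASE + REG_LCR, LCR_8BITS);     /* 8N1, DLAB cleared */
}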

View file

@ -0,0 +1,945 @@
/*-
* Copyright (c) 1997, Stefan Esser <se@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#pragma once
/*
* PCIM_xxx: mask to locate subfield in register
* PCIR_xxx: config register offset
* PCIC_xxx: device class
* PCIS_xxx: device subclass
* PCIP_xxx: device programming interface
* PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
* PCID_xxx: device ID
* PCIY_xxx: capability identification number
* PCIZ_xxx: extended capability identification number
*/
/* some PCI bus constants */
#define PCI_DOMAINMAX 65535 /* highest supported domain number */
#define PCI_BUSMAX 255 /* highest supported bus number */
#define PCI_SLOTMAX 31 /* highest supported slot number */
#define PCI_FUNCMAX 7 /* highest supported function number */
#define PCI_REGMAX 255 /* highest supported config register addr. */
#define PCIE_REGMAX 4095 /* highest supported config register addr. */
#define PCI_MAXHDRTYPE 2
#define PCIE_ARI_SLOTMAX 0
#define PCIE_ARI_FUNCMAX 255
#define PCI_RID_BUS_SHIFT 8
#define PCI_RID_SLOT_SHIFT 3
#define PCI_RID_FUNC_SHIFT 0
#define PCI_RID(bus, slot, func) \
((((bus) & PCI_BUSMAX) << PCI_RID_BUS_SHIFT) | \
(((slot) & PCI_SLOTMAX) << PCI_RID_SLOT_SHIFT) | \
(((func) & PCI_FUNCMAX) << PCI_RID_FUNC_SHIFT))
#define PCI_ARI_RID(bus, func) \
((((bus) & PCI_BUSMAX) << PCI_RID_BUS_SHIFT) | \
(((func) & PCIE_ARI_FUNCMAX) << PCI_RID_FUNC_SHIFT))
#define PCI_RID2BUS(rid) (((rid) >> PCI_RID_BUS_SHIFT) & PCI_BUSMAX)
#define PCI_RID2SLOT(rid) (((rid) >> PCI_RID_SLOT_SHIFT) & PCI_SLOTMAX)
#define PCI_RID2FUNC(rid) (((rid) >> PCI_RID_FUNC_SHIFT) & PCI_FUNCMAX)
#define PCIE_ARI_RID2SLOT(rid) (0)
#define PCIE_ARI_RID2FUNC(rid) \
(((rid) >> PCI_RID_FUNC_SHIFT) & PCIE_ARI_FUNCMAX)
#define PCIE_ARI_SLOT(func) (((func) >> PCI_RID_SLOT_SHIFT) & PCI_SLOTMAX)
#define PCIE_ARI_FUNC(func) (((func) >> PCI_RID_FUNC_SHIFT) & PCI_FUNCMAX)
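/*
* A minimal usage sketch (illustrative only; the example_* name is
* hypothetical): a conventional requester ID packs bus/slot/function into
* 16 bits; PCI_RID() builds one and the PCI_RID2*() macros take it apart
* again.
*/
static inline int
example_rid_roundtrip(unsigned int bus, unsigned int slot, unsigned int func)
{
	unsigned int rid = PCI_RID(bus, slot, func);

	return (PCI_RID2BUS(rid) == (bus & PCI_BUSMAX) &&
	    PCI_RID2SLOT(rid) == (slot & PCI_SLOTMAX) &&
	    PCI_RID2FUNC(rid) == (func & PCI_FUNCMAX));
}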
/* PCI config header registers for all devices */
#define PCIR_DEVVENDOR 0x00
#define PCIR_VENDOR 0x00
#define PCIR_DEVICE 0x02
#define PCIR_COMMAND 0x04
#define PCIM_CMD_PORTEN 0x0001
#define PCIM_CMD_MEMEN 0x0002
#define PCIM_CMD_BUSMASTEREN 0x0004
#define PCIM_CMD_SPECIALEN 0x0008
#define PCIM_CMD_MWRICEN 0x0010
#define PCIM_CMD_PERRESPEN 0x0040
#define PCIM_CMD_SERRESPEN 0x0100
#define PCIM_CMD_BACKTOBACK 0x0200
#define PCIM_CMD_INTxDIS 0x0400
#define PCIR_STATUS 0x06
#define PCIM_STATUS_INTxSTATE 0x0008
#define PCIM_STATUS_CAPPRESENT 0x0010
#define PCIM_STATUS_66CAPABLE 0x0020
#define PCIM_STATUS_BACKTOBACK 0x0080
#define PCIM_STATUS_MDPERR 0x0100
#define PCIM_STATUS_SEL_FAST 0x0000
#define PCIM_STATUS_SEL_MEDIMUM 0x0200
#define PCIM_STATUS_SEL_SLOW 0x0400
#define PCIM_STATUS_SEL_MASK 0x0600
#define PCIM_STATUS_STABORT 0x0800
#define PCIM_STATUS_RTABORT 0x1000
#define PCIM_STATUS_RMABORT 0x2000
#define PCIM_STATUS_SERR 0x4000
#define PCIM_STATUS_PERR 0x8000
#define PCIR_REVID 0x08
#define PCIR_PROGIF 0x09
#define PCIR_SUBCLASS 0x0a
#define PCIR_CLASS 0x0b
#define PCIR_CACHELNSZ 0x0c
#define PCIR_LATTIMER 0x0d
#define PCIR_HDRTYPE 0x0e
#define PCIM_HDRTYPE 0x7f
#define PCIM_HDRTYPE_NORMAL 0x00
#define PCIM_HDRTYPE_BRIDGE 0x01
#define PCIM_HDRTYPE_CARDBUS 0x02
#define PCIM_MFDEV 0x80
#define PCIR_BIST 0x0f
/* Capability Register Offsets */
#define PCICAP_ID 0x0
#define PCICAP_NEXTPTR 0x1
/* Capability Identification Numbers */
#define PCIY_PMG 0x01 /* PCI Power Management */
#define PCIY_AGP 0x02 /* AGP */
#define PCIY_VPD 0x03 /* Vital Product Data */
#define PCIY_SLOTID 0x04 /* Slot Identification */
#define PCIY_MSI 0x05 /* Message Signaled Interrupts */
#define PCIY_CHSWP 0x06 /* CompactPCI Hot Swap */
#define PCIY_PCIX 0x07 /* PCI-X */
#define PCIY_HT 0x08 /* HyperTransport */
#define PCIY_VENDOR 0x09 /* Vendor Unique */
#define PCIY_DEBUG 0x0a /* Debug port */
#define PCIY_CRES 0x0b /* CompactPCI central resource control */
#define PCIY_HOTPLUG 0x0c /* PCI Hot-Plug */
#define PCIY_SUBVENDOR 0x0d /* PCI-PCI bridge subvendor ID */
#define PCIY_AGP8X 0x0e /* AGP 8x */
#define PCIY_SECDEV 0x0f /* Secure Device */
#define PCIY_EXPRESS 0x10 /* PCI Express */
#define PCIY_MSIX 0x11 /* MSI-X */
#define PCIY_SATA 0x12 /* SATA */
#define PCIY_PCIAF 0x13 /* PCI Advanced Features */
/* Extended Capability Register Fields */
#define PCIR_EXTCAP 0x100
#define PCIM_EXTCAP_ID 0x0000ffff
#define PCIM_EXTCAP_VER 0x000f0000
#define PCIM_EXTCAP_NEXTPTR 0xfff00000
#define PCI_EXTCAP_ID(ecap) ((ecap) & PCIM_EXTCAP_ID)
#define PCI_EXTCAP_VER(ecap) (((ecap) & PCIM_EXTCAP_VER) >> 16)
#define PCI_EXTCAP_NEXTPTR(ecap) (((ecap) & PCIM_EXTCAP_NEXTPTR) >> 20)
/* Extended Capability Identification Numbers */
#define PCIZ_AER 0x0001 /* Advanced Error Reporting */
#define PCIZ_VC 0x0002 /* Virtual Channel if MFVC Ext Cap not set */
#define PCIZ_SERNUM 0x0003 /* Device Serial Number */
#define PCIZ_PWRBDGT 0x0004 /* Power Budgeting */
#define PCIZ_RCLINK_DCL 0x0005 /* Root Complex Link Declaration */
#define PCIZ_RCLINK_CTL 0x0006 /* Root Complex Internal Link Control */
#define PCIZ_RCEC_ASSOC 0x0007 /* Root Complex Event Collector Association */
#define PCIZ_MFVC 0x0008 /* Multi-Function Virtual Channel */
#define PCIZ_VC2 0x0009 /* Virtual Channel if MFVC Ext Cap set */
#define PCIZ_RCRB 0x000a /* RCRB Header */
#define PCIZ_VENDOR 0x000b /* Vendor Unique */
#define PCIZ_CAC 0x000c /* Configuration Access Correction -- obsolete */
#define PCIZ_ACS 0x000d /* Access Control Services */
#define PCIZ_ARI 0x000e /* Alternative Routing-ID Interpretation */
#define PCIZ_ATS 0x000f /* Address Translation Services */
#define PCIZ_SRIOV 0x0010 /* Single Root IO Virtualization */
#define PCIZ_MRIOV 0x0011 /* Multiple Root IO Virtualization */
#define PCIZ_MULTICAST 0x0012 /* Multicast */
#define PCIZ_PAGE_REQ 0x0013 /* Page Request */
#define PCIZ_AMD 0x0014 /* Reserved for AMD */
#define PCIZ_RESIZE_BAR 0x0015 /* Resizable BAR */
#define PCIZ_DPA 0x0016 /* Dynamic Power Allocation */
#define PCIZ_TPH_REQ 0x0017 /* TPH Requester */
#define PCIZ_LTR 0x0018 /* Latency Tolerance Reporting */
#define PCIZ_SEC_PCIE 0x0019 /* Secondary PCI Express */
#define PCIZ_PMUX 0x001a /* Protocol Multiplexing */
#define PCIZ_PASID 0x001b /* Process Address Space ID */
#define PCIZ_LN_REQ 0x001c /* LN Requester */
#define PCIZ_DPC 0x001d /* Downstream Port Containment */
#define PCIZ_L1PM 0x001e /* L1 PM Substates */
/* config registers for header type 0 devices */
#define PCIR_BARS 0x10
#define PCIR_BAR(x) (PCIR_BARS + (x) * 4)
#define PCIR_MAX_BAR_0 5
#define PCI_RID2BAR(rid) (((rid) - PCIR_BARS) / 4)
#define PCI_BAR_IO(x) (((x) & PCIM_BAR_SPACE) == PCIM_BAR_IO_SPACE)
#define PCI_BAR_MEM(x) (((x) & PCIM_BAR_SPACE) == PCIM_BAR_MEM_SPACE)
#define PCIM_BAR_SPACE 0x00000001
#define PCIM_BAR_MEM_SPACE 0
#define PCIM_BAR_IO_SPACE 1
#define PCIM_BAR_MEM_TYPE 0x00000006
#define PCIM_BAR_MEM_32 0
#define PCIM_BAR_MEM_1MB 2 /* Locate below 1MB in PCI <= 2.1 */
#define PCIM_BAR_MEM_64 4
#define PCIM_BAR_MEM_PREFETCH 0x00000008
#define PCIM_BAR_MEM_BASE 0xfffffffffffffff0ULL
#define PCIM_BAR_IO_RESERVED 0x00000002
#define PCIM_BAR_IO_BASE 0xfffffffc
#define PCIR_CIS 0x28
#define PCIM_CIS_ASI_MASK 0x00000007
#define PCIM_CIS_ASI_CONFIG 0
#define PCIM_CIS_ASI_BAR0 1
#define PCIM_CIS_ASI_BAR1 2
#define PCIM_CIS_ASI_BAR2 3
#define PCIM_CIS_ASI_BAR3 4
#define PCIM_CIS_ASI_BAR4 5
#define PCIM_CIS_ASI_BAR5 6
#define PCIM_CIS_ASI_ROM 7
#define PCIM_CIS_ADDR_MASK 0x0ffffff8
#define PCIM_CIS_ROM_MASK 0xf0000000
#define PCIM_CIS_CONFIG_MASK 0xff
#define PCIR_SUBVEND_0 0x2c
#define PCIR_SUBDEV_0 0x2e
#define PCIR_BIOS 0x30
#define PCIM_BIOS_ENABLE 0x01
#define PCIM_BIOS_ADDR_MASK 0xfffff800
#define PCIR_CAP_PTR 0x34
#define PCIR_INTLINE 0x3c
#define PCIR_INTPIN 0x3d
#define PCIR_MINGNT 0x3e
#define PCIR_MAXLAT 0x3f
/* config registers for header type 1 (PCI-to-PCI bridge) devices */
#define PCIR_MAX_BAR_1 1
#define PCIR_SECSTAT_1 0x1e
#define PCIR_PRIBUS_1 0x18
#define PCIR_SECBUS_1 0x19
#define PCIR_SUBBUS_1 0x1a
#define PCIR_SECLAT_1 0x1b
#define PCIR_IOBASEL_1 0x1c
#define PCIR_IOLIMITL_1 0x1d
#define PCIR_IOBASEH_1 0x30
#define PCIR_IOLIMITH_1 0x32
#define PCIM_BRIO_16 0x0
#define PCIM_BRIO_32 0x1
#define PCIM_BRIO_MASK 0xf
#define PCIR_MEMBASE_1 0x20
#define PCIR_MEMLIMIT_1 0x22
#define PCIR_PMBASEL_1 0x24
#define PCIR_PMLIMITL_1 0x26
#define PCIR_PMBASEH_1 0x28
#define PCIR_PMLIMITH_1 0x2c
#define PCIM_BRPM_32 0x0
#define PCIM_BRPM_64 0x1
#define PCIM_BRPM_MASK 0xf
#define PCIR_BIOS_1 0x38
#define PCIR_BRIDGECTL_1 0x3e
/* config registers for header type 2 (CardBus) devices */
#define PCIR_MAX_BAR_2 0
#define PCIR_CAP_PTR_2 0x14
#define PCIR_SECSTAT_2 0x16
#define PCIR_PRIBUS_2 0x18
#define PCIR_SECBUS_2 0x19
#define PCIR_SUBBUS_2 0x1a
#define PCIR_SECLAT_2 0x1b
#define PCIR_MEMBASE0_2 0x1c
#define PCIR_MEMLIMIT0_2 0x20
#define PCIR_MEMBASE1_2 0x24
#define PCIR_MEMLIMIT1_2 0x28
#define PCIR_IOBASE0_2 0x2c
#define PCIR_IOLIMIT0_2 0x30
#define PCIR_IOBASE1_2 0x34
#define PCIR_IOLIMIT1_2 0x38
#define PCIR_BRIDGECTL_2 0x3e
#define PCIR_SUBVEND_2 0x40
#define PCIR_SUBDEV_2 0x42
#define PCIR_PCCARDIF_2 0x44
/* PCI device class, subclass and programming interface definitions */
#define PCIC_OLD 0x00
#define PCIS_OLD_NONVGA 0x00
#define PCIS_OLD_VGA 0x01
#define PCIC_STORAGE 0x01
#define PCIS_STORAGE_SCSI 0x00
#define PCIS_STORAGE_IDE 0x01
#define PCIP_STORAGE_IDE_MODEPRIM 0x01
#define PCIP_STORAGE_IDE_PROGINDPRIM 0x02
#define PCIP_STORAGE_IDE_MODESEC 0x04
#define PCIP_STORAGE_IDE_PROGINDSEC 0x08
#define PCIP_STORAGE_IDE_MASTERDEV 0x80
#define PCIS_STORAGE_FLOPPY 0x02
#define PCIS_STORAGE_IPI 0x03
#define PCIS_STORAGE_RAID 0x04
#define PCIS_STORAGE_ATA_ADMA 0x05
#define PCIS_STORAGE_SATA 0x06
#define PCIP_STORAGE_SATA_AHCI_1_0 0x01
#define PCIS_STORAGE_SAS 0x07
#define PCIS_STORAGE_NVM 0x08
#define PCIP_STORAGE_NVM_NVMHCI_1_0 0x01
#define PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0 0x02
#define PCIS_STORAGE_OTHER 0x80
#define PCIC_NETWORK 0x02
#define PCIS_NETWORK_ETHERNET 0x00
#define PCIS_NETWORK_TOKENRING 0x01
#define PCIS_NETWORK_FDDI 0x02
#define PCIS_NETWORK_ATM 0x03
#define PCIS_NETWORK_ISDN 0x04
#define PCIS_NETWORK_WORLDFIP 0x05
#define PCIS_NETWORK_PICMG 0x06
#define PCIS_NETWORK_OTHER 0x80
#define PCIC_DISPLAY 0x03
#define PCIS_DISPLAY_VGA 0x00
#define PCIS_DISPLAY_XGA 0x01
#define PCIS_DISPLAY_3D 0x02
#define PCIS_DISPLAY_OTHER 0x80
#define PCIC_MULTIMEDIA 0x04
#define PCIS_MULTIMEDIA_VIDEO 0x00
#define PCIS_MULTIMEDIA_AUDIO 0x01
#define PCIS_MULTIMEDIA_TELE 0x02
#define PCIS_MULTIMEDIA_HDA 0x03
#define PCIS_MULTIMEDIA_OTHER 0x80
#define PCIC_MEMORY 0x05
#define PCIS_MEMORY_RAM 0x00
#define PCIS_MEMORY_FLASH 0x01
#define PCIS_MEMORY_OTHER 0x80
#define PCIC_BRIDGE 0x06
#define PCIS_BRIDGE_HOST 0x00
#define PCIS_BRIDGE_ISA 0x01
#define PCIS_BRIDGE_EISA 0x02
#define PCIS_BRIDGE_MCA 0x03
#define PCIS_BRIDGE_PCI 0x04
#define PCIP_BRIDGE_PCI_SUBTRACTIVE 0x01
#define PCIS_BRIDGE_PCMCIA 0x05
#define PCIS_BRIDGE_NUBUS 0x06
#define PCIS_BRIDGE_CARDBUS 0x07
#define PCIS_BRIDGE_RACEWAY 0x08
#define PCIS_BRIDGE_PCI_TRANSPARENT 0x09
#define PCIS_BRIDGE_INFINIBAND 0x0a
#define PCIS_BRIDGE_OTHER 0x80
#define PCIC_SIMPLECOMM 0x07
#define PCIS_SIMPLECOMM_UART 0x00
#define PCIP_SIMPLECOMM_UART_8250 0x00
#define PCIP_SIMPLECOMM_UART_16450A 0x01
#define PCIP_SIMPLECOMM_UART_16550A 0x02
#define PCIP_SIMPLECOMM_UART_16650A 0x03
#define PCIP_SIMPLECOMM_UART_16750A 0x04
#define PCIP_SIMPLECOMM_UART_16850A 0x05
#define PCIP_SIMPLECOMM_UART_16950A 0x06
#define PCIS_SIMPLECOMM_PAR 0x01
#define PCIS_SIMPLECOMM_MULSER 0x02
#define PCIS_SIMPLECOMM_MODEM 0x03
#define PCIS_SIMPLECOMM_GPIB 0x04
#define PCIS_SIMPLECOMM_SMART_CARD 0x05
#define PCIS_SIMPLECOMM_OTHER 0x80
#define PCIC_BASEPERIPH 0x08
#define PCIS_BASEPERIPH_PIC 0x00
#define PCIP_BASEPERIPH_PIC_8259A 0x00
#define PCIP_BASEPERIPH_PIC_ISA 0x01
#define PCIP_BASEPERIPH_PIC_EISA 0x02
#define PCIP_BASEPERIPH_PIC_IO_APIC 0x10
#define PCIP_BASEPERIPH_PIC_IOX_APIC 0x20
#define PCIS_BASEPERIPH_DMA 0x01
#define PCIS_BASEPERIPH_TIMER 0x02
#define PCIS_BASEPERIPH_RTC 0x03
#define PCIS_BASEPERIPH_PCIHOT 0x04
#define PCIS_BASEPERIPH_SDHC 0x05
#define PCIS_BASEPERIPH_IOMMU 0x06
#define PCIS_BASEPERIPH_OTHER 0x80
#define PCIC_INPUTDEV 0x09
#define PCIS_INPUTDEV_KEYBOARD 0x00
#define PCIS_INPUTDEV_DIGITIZER 0x01
#define PCIS_INPUTDEV_MOUSE 0x02
#define PCIS_INPUTDEV_SCANNER 0x03
#define PCIS_INPUTDEV_GAMEPORT 0x04
#define PCIS_INPUTDEV_OTHER 0x80
#define PCIC_DOCKING 0x0a
#define PCIS_DOCKING_GENERIC 0x00
#define PCIS_DOCKING_OTHER 0x80
#define PCIC_PROCESSOR 0x0b
#define PCIS_PROCESSOR_386 0x00
#define PCIS_PROCESSOR_486 0x01
#define PCIS_PROCESSOR_PENTIUM 0x02
#define PCIS_PROCESSOR_ALPHA 0x10
#define PCIS_PROCESSOR_POWERPC 0x20
#define PCIS_PROCESSOR_MIPS 0x30
#define PCIS_PROCESSOR_COPROC 0x40
#define PCIC_SERIALBUS 0x0c
#define PCIS_SERIALBUS_FW 0x00
#define PCIS_SERIALBUS_ACCESS 0x01
#define PCIS_SERIALBUS_SSA 0x02
#define PCIS_SERIALBUS_USB 0x03
#define PCIP_SERIALBUS_USB_UHCI 0x00
#define PCIP_SERIALBUS_USB_OHCI 0x10
#define PCIP_SERIALBUS_USB_EHCI 0x20
#define PCIP_SERIALBUS_USB_XHCI 0x30
#define PCIP_SERIALBUS_USB_DEVICE 0xfe
#define PCIS_SERIALBUS_FC 0x04
#define PCIS_SERIALBUS_SMBUS 0x05
#define PCIS_SERIALBUS_INFINIBAND 0x06
#define PCIS_SERIALBUS_IPMI 0x07
#define PCIP_SERIALBUS_IPMI_SMIC 0x00
#define PCIP_SERIALBUS_IPMI_KCS 0x01
#define PCIP_SERIALBUS_IPMI_BT 0x02
#define PCIS_SERIALBUS_SERCOS 0x08
#define PCIS_SERIALBUS_CANBUS 0x09
#define PCIC_WIRELESS 0x0d
#define PCIS_WIRELESS_IRDA 0x00
#define PCIS_WIRELESS_IR 0x01
#define PCIS_WIRELESS_RF 0x10
#define PCIS_WIRELESS_BLUETOOTH 0x11
#define PCIS_WIRELESS_BROADBAND 0x12
#define PCIS_WIRELESS_80211A 0x20
#define PCIS_WIRELESS_80211B 0x21
#define PCIS_WIRELESS_OTHER 0x80
#define PCIC_INTELLIIO 0x0e
#define PCIS_INTELLIIO_I2O 0x00
#define PCIC_SATCOM 0x0f
#define PCIS_SATCOM_TV 0x01
#define PCIS_SATCOM_AUDIO 0x02
#define PCIS_SATCOM_VOICE 0x03
#define PCIS_SATCOM_DATA 0x04
#define PCIC_CRYPTO 0x10
#define PCIS_CRYPTO_NETCOMP 0x00
#define PCIS_CRYPTO_ENTERTAIN 0x10
#define PCIS_CRYPTO_OTHER 0x80
#define PCIC_DASP 0x11
#define PCIS_DASP_DPIO 0x00
#define PCIS_DASP_PERFCNTRS 0x01
#define PCIS_DASP_COMM_SYNC 0x10
#define PCIS_DASP_MGMT_CARD 0x20
#define PCIS_DASP_OTHER 0x80
#define PCIC_OTHER 0xff
/* Bridge Control Values. */
#define PCIB_BCR_PERR_ENABLE 0x0001
#define PCIB_BCR_SERR_ENABLE 0x0002
#define PCIB_BCR_ISA_ENABLE 0x0004
#define PCIB_BCR_VGA_ENABLE 0x0008
#define PCIB_BCR_MASTER_ABORT_MODE 0x0020
#define PCIB_BCR_SECBUS_RESET 0x0040
#define PCIB_BCR_SECBUS_BACKTOBACK 0x0080
#define PCIB_BCR_PRI_DISCARD_TIMEOUT 0x0100
#define PCIB_BCR_SEC_DISCARD_TIMEOUT 0x0200
#define PCIB_BCR_DISCARD_TIMER_STATUS 0x0400
#define PCIB_BCR_DISCARD_TIMER_SERREN 0x0800
/* PCI power management */
#define PCIR_POWER_CAP 0x2
#define PCIM_PCAP_SPEC 0x0007
#define PCIM_PCAP_PMEREQCLK 0x0008
#define PCIM_PCAP_DEVSPECINIT 0x0020
#define PCIM_PCAP_AUXPWR_0 0x0000
#define PCIM_PCAP_AUXPWR_55 0x0040
#define PCIM_PCAP_AUXPWR_100 0x0080
#define PCIM_PCAP_AUXPWR_160 0x00c0
#define PCIM_PCAP_AUXPWR_220 0x0100
#define PCIM_PCAP_AUXPWR_270 0x0140
#define PCIM_PCAP_AUXPWR_320 0x0180
#define PCIM_PCAP_AUXPWR_375 0x01c0
#define PCIM_PCAP_AUXPWRMASK 0x01c0
#define PCIM_PCAP_D1SUPP 0x0200
#define PCIM_PCAP_D2SUPP 0x0400
#define PCIM_PCAP_D0PME 0x0800
#define PCIM_PCAP_D1PME 0x1000
#define PCIM_PCAP_D2PME 0x2000
#define PCIM_PCAP_D3PME_HOT 0x4000
#define PCIM_PCAP_D3PME_COLD 0x8000
#define PCIR_POWER_STATUS 0x4
#define PCIM_PSTAT_D0 0x0000
#define PCIM_PSTAT_D1 0x0001
#define PCIM_PSTAT_D2 0x0002
#define PCIM_PSTAT_D3 0x0003
#define PCIM_PSTAT_DMASK 0x0003
#define PCIM_PSTAT_NOSOFTRESET 0x0008
#define PCIM_PSTAT_PMEENABLE 0x0100
#define PCIM_PSTAT_D0POWER 0x0000
#define PCIM_PSTAT_D1POWER 0x0200
#define PCIM_PSTAT_D2POWER 0x0400
#define PCIM_PSTAT_D3POWER 0x0600
#define PCIM_PSTAT_D0HEAT 0x0800
#define PCIM_PSTAT_D1HEAT 0x0a00
#define PCIM_PSTAT_D2HEAT 0x0c00
#define PCIM_PSTAT_D3HEAT 0x0e00
#define PCIM_PSTAT_DATASELMASK 0x1e00
#define PCIM_PSTAT_DATAUNKN 0x0000
#define PCIM_PSTAT_DATADIV10 0x2000
#define PCIM_PSTAT_DATADIV100 0x4000
#define PCIM_PSTAT_DATADIV1000 0x6000
#define PCIM_PSTAT_DATADIVMASK 0x6000
#define PCIM_PSTAT_PME 0x8000
#define PCIR_POWER_BSE 0x6
#define PCIM_PMCSR_BSE_D3B3 0x00
#define PCIM_PMCSR_BSE_D3B2 0x40
#define PCIM_PMCSR_BSE_BPCCE 0x80
#define PCIR_POWER_DATA 0x7
/* VPD capability registers */
#define PCIR_VPD_ADDR 0x2
#define PCIR_VPD_DATA 0x4
/* PCI Message Signalled Interrupts (MSI) */
#define PCIR_MSI_CTRL 0x2
#define PCIM_MSICTRL_VECTOR 0x0100
#define PCIM_MSICTRL_64BIT 0x0080
#define PCIM_MSICTRL_MME_MASK 0x0070
#define PCIM_MSICTRL_MME_1 0x0000
#define PCIM_MSICTRL_MME_2 0x0010
#define PCIM_MSICTRL_MME_4 0x0020
#define PCIM_MSICTRL_MME_8 0x0030
#define PCIM_MSICTRL_MME_16 0x0040
#define PCIM_MSICTRL_MME_32 0x0050
#define PCIM_MSICTRL_MMC_MASK 0x000E
#define PCIM_MSICTRL_MMC_1 0x0000
#define PCIM_MSICTRL_MMC_2 0x0002
#define PCIM_MSICTRL_MMC_4 0x0004
#define PCIM_MSICTRL_MMC_8 0x0006
#define PCIM_MSICTRL_MMC_16 0x0008
#define PCIM_MSICTRL_MMC_32 0x000A
#define PCIM_MSICTRL_MSI_ENABLE 0x0001
#define PCIR_MSI_ADDR 0x4
#define PCIR_MSI_ADDR_HIGH 0x8
#define PCIR_MSI_DATA 0x8
#define PCIR_MSI_DATA_64BIT 0xc
#define PCIR_MSI_MASK 0x10
#define PCIR_MSI_PENDING 0x14
/* PCI-X definitions */
/* For header type 0 devices */
#define PCIXR_COMMAND 0x2
#define PCIXM_COMMAND_DPERR_E 0x0001 /* Data Parity Error Recovery */
#define PCIXM_COMMAND_ERO 0x0002 /* Enable Relaxed Ordering */
#define PCIXM_COMMAND_MAX_READ 0x000c /* Maximum Burst Read Count */
#define PCIXM_COMMAND_MAX_READ_512 0x0000
#define PCIXM_COMMAND_MAX_READ_1024 0x0004
#define PCIXM_COMMAND_MAX_READ_2048 0x0008
#define PCIXM_COMMAND_MAX_READ_4096 0x000c
#define PCIXM_COMMAND_MAX_SPLITS 0x0070 /* Maximum Split Transactions */
#define PCIXM_COMMAND_MAX_SPLITS_1 0x0000
#define PCIXM_COMMAND_MAX_SPLITS_2 0x0010
#define PCIXM_COMMAND_MAX_SPLITS_3 0x0020
#define PCIXM_COMMAND_MAX_SPLITS_4 0x0030
#define PCIXM_COMMAND_MAX_SPLITS_8 0x0040
#define PCIXM_COMMAND_MAX_SPLITS_12 0x0050
#define PCIXM_COMMAND_MAX_SPLITS_16 0x0060
#define PCIXM_COMMAND_MAX_SPLITS_32 0x0070
#define PCIXM_COMMAND_VERSION 0x3000
#define PCIXR_STATUS 0x4
#define PCIXM_STATUS_DEVFN 0x000000FF
#define PCIXM_STATUS_BUS 0x0000FF00
#define PCIXM_STATUS_64BIT 0x00010000
#define PCIXM_STATUS_133CAP 0x00020000
#define PCIXM_STATUS_SC_DISCARDED 0x00040000
#define PCIXM_STATUS_UNEXP_SC 0x00080000
#define PCIXM_STATUS_COMPLEX_DEV 0x00100000
#define PCIXM_STATUS_MAX_READ 0x00600000
#define PCIXM_STATUS_MAX_READ_512 0x00000000
#define PCIXM_STATUS_MAX_READ_1024 0x00200000
#define PCIXM_STATUS_MAX_READ_2048 0x00400000
#define PCIXM_STATUS_MAX_READ_4096 0x00600000
#define PCIXM_STATUS_MAX_SPLITS 0x03800000
#define PCIXM_STATUS_MAX_SPLITS_1 0x00000000
#define PCIXM_STATUS_MAX_SPLITS_2 0x00800000
#define PCIXM_STATUS_MAX_SPLITS_3 0x01000000
#define PCIXM_STATUS_MAX_SPLITS_4 0x01800000
#define PCIXM_STATUS_MAX_SPLITS_8 0x02000000
#define PCIXM_STATUS_MAX_SPLITS_12 0x02800000
#define PCIXM_STATUS_MAX_SPLITS_16 0x03000000
#define PCIXM_STATUS_MAX_SPLITS_32 0x03800000
#define PCIXM_STATUS_MAX_CUM_READ 0x1C000000
#define PCIXM_STATUS_RCVD_SC_ERR 0x20000000
#define PCIXM_STATUS_266CAP 0x40000000
#define PCIXM_STATUS_533CAP 0x80000000
/* For header type 1 devices (PCI-X bridges) */
#define PCIXR_SEC_STATUS 0x2
#define PCIXM_SEC_STATUS_64BIT 0x0001
#define PCIXM_SEC_STATUS_133CAP 0x0002
#define PCIXM_SEC_STATUS_SC_DISC 0x0004
#define PCIXM_SEC_STATUS_UNEXP_SC 0x0008
#define PCIXM_SEC_STATUS_SC_OVERRUN 0x0010
#define PCIXM_SEC_STATUS_SR_DELAYED 0x0020
#define PCIXM_SEC_STATUS_BUS_MODE 0x03c0
#define PCIXM_SEC_STATUS_VERSION 0x3000
#define PCIXM_SEC_STATUS_266CAP 0x4000
#define PCIXM_SEC_STATUS_533CAP 0x8000
#define PCIXR_BRIDGE_STATUS 0x4
#define PCIXM_BRIDGE_STATUS_DEVFN 0x000000FF
#define PCIXM_BRIDGE_STATUS_BUS 0x0000FF00
#define PCIXM_BRIDGE_STATUS_64BIT 0x00010000
#define PCIXM_BRIDGE_STATUS_133CAP 0x00020000
#define PCIXM_BRIDGE_STATUS_SC_DISCARDED 0x00040000
#define PCIXM_BRIDGE_STATUS_UNEXP_SC 0x00080000
#define PCIXM_BRIDGE_STATUS_SC_OVERRUN 0x00100000
#define PCIXM_BRIDGE_STATUS_SR_DELAYED 0x00200000
#define PCIXM_BRIDGE_STATUS_DEVID_MSGCAP 0x20000000
#define PCIXM_BRIDGE_STATUS_266CAP 0x40000000
#define PCIXM_BRIDGE_STATUS_533CAP 0x80000000
/* HT (HyperTransport) Capability definitions */
#define PCIR_HT_COMMAND 0x2
#define PCIM_HTCMD_CAP_MASK 0xf800 /* Capability type. */
#define PCIM_HTCAP_SLAVE 0x0000 /* 000xx */
#define PCIM_HTCAP_HOST 0x2000 /* 001xx */
#define PCIM_HTCAP_SWITCH 0x4000 /* 01000 */
#define PCIM_HTCAP_INTERRUPT 0x8000 /* 10000 */
#define PCIM_HTCAP_REVISION_ID 0x8800 /* 10001 */
#define PCIM_HTCAP_UNITID_CLUMPING 0x9000 /* 10010 */
#define PCIM_HTCAP_EXT_CONFIG_SPACE 0x9800 /* 10011 */
#define PCIM_HTCAP_ADDRESS_MAPPING 0xa000 /* 10100 */
#define PCIM_HTCAP_MSI_MAPPING 0xa800 /* 10101 */
#define PCIM_HTCAP_DIRECT_ROUTE 0xb000 /* 10110 */
#define PCIM_HTCAP_VCSET 0xb800 /* 10111 */
#define PCIM_HTCAP_RETRY_MODE 0xc000 /* 11000 */
#define PCIM_HTCAP_X86_ENCODING 0xc800 /* 11001 */
#define PCIM_HTCAP_GEN3 0xd000 /* 11010 */
#define PCIM_HTCAP_FLE 0xd800 /* 11011 */
#define PCIM_HTCAP_PM 0xe000 /* 11100 */
#define PCIM_HTCAP_HIGH_NODE_COUNT 0xe800 /* 11101 */
/* HT MSI Mapping Capability definitions. */
#define PCIM_HTCMD_MSI_ENABLE 0x0001
#define PCIM_HTCMD_MSI_FIXED 0x0002
#define PCIR_HTMSI_ADDRESS_LO 0x4
#define PCIR_HTMSI_ADDRESS_HI 0x8
/* PCI Vendor capability definitions */
#define PCIR_VENDOR_LENGTH 0x2
#define PCIR_VENDOR_DATA 0x3
/* PCI EHCI Debug Port definitions */
#define PCIR_DEBUG_PORT 0x2
#define PCIM_DEBUG_PORT_OFFSET 0x1FFF
#define PCIM_DEBUG_PORT_BAR 0xe000
/* PCI-PCI Bridge Subvendor definitions */
#define PCIR_SUBVENDCAP_ID 0x4
/* PCI Express definitions */
#define PCIER_FLAGS 0x2
#define PCIEM_FLAGS_VERSION 0x000F
#define PCIEM_FLAGS_TYPE 0x00F0
#define PCIEM_TYPE_ENDPOINT 0x0000
#define PCIEM_TYPE_LEGACY_ENDPOINT 0x0010
#define PCIEM_TYPE_ROOT_PORT 0x0040
#define PCIEM_TYPE_UPSTREAM_PORT 0x0050
#define PCIEM_TYPE_DOWNSTREAM_PORT 0x0060
#define PCIEM_TYPE_PCI_BRIDGE 0x0070
#define PCIEM_TYPE_PCIE_BRIDGE 0x0080
#define PCIEM_TYPE_ROOT_INT_EP 0x0090
#define PCIEM_TYPE_ROOT_EC 0x00a0
#define PCIEM_FLAGS_SLOT 0x0100
#define PCIEM_FLAGS_IRQ 0x3e00
#define PCIER_DEVICE_CAP 0x4
#define PCIEM_CAP_MAX_PAYLOAD 0x00000007
#define PCIEM_CAP_PHANTHOM_FUNCS 0x00000018
#define PCIEM_CAP_EXT_TAG_FIELD 0x00000020
#define PCIEM_CAP_L0S_LATENCY 0x000001c0
#define PCIEM_CAP_L1_LATENCY 0x00000e00
#define PCIEM_CAP_ROLE_ERR_RPT 0x00008000
#define PCIEM_CAP_SLOT_PWR_LIM_VAL 0x03fc0000
#define PCIEM_CAP_SLOT_PWR_LIM_SCALE 0x0c000000
#define PCIEM_CAP_FLR 0x10000000
#define PCIER_DEVICE_CTL 0x8
#define PCIEM_CTL_COR_ENABLE 0x0001
#define PCIEM_CTL_NFER_ENABLE 0x0002
#define PCIEM_CTL_FER_ENABLE 0x0004
#define PCIEM_CTL_URR_ENABLE 0x0008
#define PCIEM_CTL_RELAXED_ORD_ENABLE 0x0010
#define PCIEM_CTL_MAX_PAYLOAD 0x00e0
#define PCIEM_CTL_EXT_TAG_FIELD 0x0100
#define PCIEM_CTL_PHANTHOM_FUNCS 0x0200
#define PCIEM_CTL_AUX_POWER_PM 0x0400
#define PCIEM_CTL_NOSNOOP_ENABLE 0x0800
#define PCIEM_CTL_MAX_READ_REQUEST 0x7000
#define PCIEM_CTL_BRDG_CFG_RETRY 0x8000 /* PCI-E - PCI/PCI-X bridges */
#define PCIEM_CTL_INITIATE_FLR 0x8000 /* FLR capable endpoints */
#define PCIER_DEVICE_STA 0xa
#define PCIEM_STA_CORRECTABLE_ERROR 0x0001
#define PCIEM_STA_NON_FATAL_ERROR 0x0002
#define PCIEM_STA_FATAL_ERROR 0x0004
#define PCIEM_STA_UNSUPPORTED_REQ 0x0008
#define PCIEM_STA_AUX_POWER 0x0010
#define PCIEM_STA_TRANSACTION_PND 0x0020
#define PCIER_LINK_CAP 0xc
#define PCIEM_LINK_CAP_MAX_SPEED 0x0000000f
#define PCIEM_LINK_CAP_MAX_WIDTH 0x000003f0
#define PCIEM_LINK_CAP_ASPM 0x00000c00
#define PCIEM_LINK_CAP_L0S_EXIT 0x00007000
#define PCIEM_LINK_CAP_L1_EXIT 0x00038000
#define PCIEM_LINK_CAP_CLOCK_PM 0x00040000
#define PCIEM_LINK_CAP_SURPRISE_DOWN 0x00080000
#define PCIEM_LINK_CAP_DL_ACTIVE 0x00100000
#define PCIEM_LINK_CAP_LINK_BW_NOTIFY 0x00200000
#define PCIEM_LINK_CAP_ASPM_COMPLIANCE 0x00400000
#define PCIEM_LINK_CAP_PORT 0xff000000
#define PCIER_LINK_CTL 0x10
#define PCIEM_LINK_CTL_ASPMC_DIS 0x0000
#define PCIEM_LINK_CTL_ASPMC_L0S 0x0001
#define PCIEM_LINK_CTL_ASPMC_L1 0x0002
#define PCIEM_LINK_CTL_ASPMC 0x0003
#define PCIEM_LINK_CTL_RCB 0x0008
#define PCIEM_LINK_CTL_LINK_DIS 0x0010
#define PCIEM_LINK_CTL_RETRAIN_LINK 0x0020
#define PCIEM_LINK_CTL_COMMON_CLOCK 0x0040
#define PCIEM_LINK_CTL_EXTENDED_SYNC 0x0080
#define PCIEM_LINK_CTL_ECPM 0x0100
#define PCIEM_LINK_CTL_HAWD 0x0200
#define PCIEM_LINK_CTL_LBMIE 0x0400
#define PCIEM_LINK_CTL_LABIE 0x0800
#define PCIER_LINK_STA 0x12
#define PCIEM_LINK_STA_SPEED 0x000f
#define PCIEM_LINK_STA_WIDTH 0x03f0
#define PCIEM_LINK_STA_TRAINING_ERROR 0x0400
#define PCIEM_LINK_STA_TRAINING 0x0800
#define PCIEM_LINK_STA_SLOT_CLOCK 0x1000
#define PCIEM_LINK_STA_DL_ACTIVE 0x2000
#define PCIEM_LINK_STA_LINK_BW_MGMT 0x4000
#define PCIEM_LINK_STA_LINK_AUTO_BW 0x8000
#define PCIER_SLOT_CAP 0x14
#define PCIEM_SLOT_CAP_APB 0x00000001
#define PCIEM_SLOT_CAP_PCP 0x00000002
#define PCIEM_SLOT_CAP_MRLSP 0x00000004
#define PCIEM_SLOT_CAP_AIP 0x00000008
#define PCIEM_SLOT_CAP_PIP 0x00000010
#define PCIEM_SLOT_CAP_HPS 0x00000020
#define PCIEM_SLOT_CAP_HPC 0x00000040
#define PCIEM_SLOT_CAP_SPLV 0x00007f80
#define PCIEM_SLOT_CAP_SPLS 0x00018000
#define PCIEM_SLOT_CAP_EIP 0x00020000
#define PCIEM_SLOT_CAP_NCCS 0x00040000
#define PCIEM_SLOT_CAP_PSN 0xfff80000
#define PCIER_SLOT_CTL 0x18
#define PCIEM_SLOT_CTL_ABPE 0x0001
#define PCIEM_SLOT_CTL_PFDE 0x0002
#define PCIEM_SLOT_CTL_MRLSCE 0x0004
#define PCIEM_SLOT_CTL_PDCE 0x0008
#define PCIEM_SLOT_CTL_CCIE 0x0010
#define PCIEM_SLOT_CTL_HPIE 0x0020
#define PCIEM_SLOT_CTL_AIC 0x00c0
#define PCIEM_SLOT_CTL_PIC 0x0300
#define PCIEM_SLOT_CTL_PCC 0x0400
#define PCIEM_SLOT_CTL_EIC 0x0800
#define PCIEM_SLOT_CTL_DLLSCE 0x1000
#define PCIER_SLOT_STA 0x1a
#define PCIEM_SLOT_STA_ABP 0x0001
#define PCIEM_SLOT_STA_PFD 0x0002
#define PCIEM_SLOT_STA_MRLSC 0x0004
#define PCIEM_SLOT_STA_PDC 0x0008
#define PCIEM_SLOT_STA_CC 0x0010
#define PCIEM_SLOT_STA_MRLSS 0x0020
#define PCIEM_SLOT_STA_PDS 0x0040
#define PCIEM_SLOT_STA_EIS 0x0080
#define PCIEM_SLOT_STA_DLLSC 0x0100
#define PCIER_ROOT_CTL 0x1c
#define PCIEM_ROOT_CTL_SERR_CORR 0x0001
#define PCIEM_ROOT_CTL_SERR_NONFATAL 0x0002
#define PCIEM_ROOT_CTL_SERR_FATAL 0x0004
#define PCIEM_ROOT_CTL_PME 0x0008
#define PCIEM_ROOT_CTL_CRS_VIS 0x0010
#define PCIER_ROOT_CAP 0x1e
#define PCIEM_ROOT_CAP_CRS_VIS 0x0001
#define PCIER_ROOT_STA 0x20
#define PCIEM_ROOT_STA_PME_REQID_MASK 0x0000ffff
#define PCIEM_ROOT_STA_PME_STATUS 0x00010000
#define PCIEM_ROOT_STA_PME_PEND 0x00020000
#define PCIER_DEVICE_CAP2 0x24
#define PCIEM_CAP2_ARI 0x20
#define PCIER_DEVICE_CTL2 0x28
#define PCIEM_CTL2_COMP_TIMEOUT_VAL 0x000f
#define PCIEM_CTL2_COMP_TIMEOUT_DIS 0x0010
#define PCIEM_CTL2_ARI 0x0020
#define PCIEM_CTL2_ATOMIC_REQ_ENABLE 0x0040
#define PCIEM_CTL2_ATOMIC_EGR_BLOCK 0x0080
#define PCIEM_CTL2_ID_ORDERED_REQ_EN 0x0100
#define PCIEM_CTL2_ID_ORDERED_CMP_EN 0x0200
#define PCIEM_CTL2_LTR_ENABLE 0x0400
#define PCIEM_CTL2_OBFF 0x6000
#define PCIEM_OBFF_DISABLE 0x0000
#define PCIEM_OBFF_MSGA_ENABLE 0x2000
#define PCIEM_OBFF_MSGB_ENABLE 0x4000
#define PCIEM_OBFF_WAKE_ENABLE 0x6000
#define PCIEM_CTL2_END2END_TLP 0x8000
#define PCIER_DEVICE_STA2 0x2a
#define PCIER_LINK_CAP2 0x2c
#define PCIER_LINK_CTL2 0x30
#define PCIER_LINK_STA2 0x32
#define PCIER_SLOT_CAP2 0x34
#define PCIER_SLOT_CTL2 0x38
#define PCIER_SLOT_STA2 0x3a
/* MSI-X definitions */
#define PCIR_MSIX_CTRL 0x2
#define PCIM_MSIXCTRL_MSIX_ENABLE 0x8000
#define PCIM_MSIXCTRL_FUNCTION_MASK 0x4000
#define PCIM_MSIXCTRL_TABLE_SIZE 0x07FF
#define PCIR_MSIX_TABLE 0x4
#define PCIR_MSIX_PBA 0x8
#define PCIM_MSIX_BIR_MASK 0x7
#define PCIM_MSIX_BIR_BAR_10 0
#define PCIM_MSIX_BIR_BAR_14 1
#define PCIM_MSIX_BIR_BAR_18 2
#define PCIM_MSIX_BIR_BAR_1C 3
#define PCIM_MSIX_BIR_BAR_20 4
#define PCIM_MSIX_BIR_BAR_24 5
#define PCIM_MSIX_VCTRL_MASK 0x1
/* PCI Advanced Features definitions */
#define PCIR_PCIAF_CAP 0x3
#define PCIM_PCIAFCAP_TP 0x01
#define PCIM_PCIAFCAP_FLR 0x02
#define PCIR_PCIAF_CTRL 0x4
#define PCIR_PCIAFCTRL_FLR 0x01
#define PCIR_PCIAF_STATUS 0x5
#define PCIR_PCIAFSTATUS_TP 0x01
/* Advanced Error Reporting */
#define PCIR_AER_UC_STATUS 0x04
#define PCIM_AER_UC_TRAINING_ERROR 0x00000001
#define PCIM_AER_UC_DL_PROTOCOL_ERROR 0x00000010
#define PCIM_AER_UC_SURPRISE_LINK_DOWN 0x00000020
#define PCIM_AER_UC_POISONED_TLP 0x00001000
#define PCIM_AER_UC_FC_PROTOCOL_ERROR 0x00002000
#define PCIM_AER_UC_COMPLETION_TIMEOUT 0x00004000
#define PCIM_AER_UC_COMPLETER_ABORT 0x00008000
#define PCIM_AER_UC_UNEXPECTED_COMPLETION 0x00010000
#define PCIM_AER_UC_RECEIVER_OVERFLOW 0x00020000
#define PCIM_AER_UC_MALFORMED_TLP 0x00040000
#define PCIM_AER_UC_ECRC_ERROR 0x00080000
#define PCIM_AER_UC_UNSUPPORTED_REQUEST 0x00100000
#define PCIM_AER_UC_ACS_VIOLATION 0x00200000
#define PCIM_AER_UC_INTERNAL_ERROR 0x00400000
#define PCIM_AER_UC_MC_BLOCKED_TLP 0x00800000
#define PCIM_AER_UC_ATOMIC_EGRESS_BLK 0x01000000
#define PCIM_AER_UC_TLP_PREFIX_BLOCKED 0x02000000
#define PCIR_AER_UC_MASK 0x08 /* Shares bits with UC_STATUS */
#define PCIR_AER_UC_SEVERITY 0x0c /* Shares bits with UC_STATUS */
#define PCIR_AER_COR_STATUS 0x10
#define PCIM_AER_COR_RECEIVER_ERROR 0x00000001
#define PCIM_AER_COR_BAD_TLP 0x00000040
#define PCIM_AER_COR_BAD_DLLP 0x00000080
#define PCIM_AER_COR_REPLAY_ROLLOVER 0x00000100
#define PCIM_AER_COR_REPLAY_TIMEOUT 0x00001000
#define PCIM_AER_COR_ADVISORY_NF_ERROR 0x00002000
#define PCIM_AER_COR_INTERNAL_ERROR 0x00004000
#define PCIM_AER_COR_HEADER_LOG_OVFLOW 0x00008000
#define PCIR_AER_COR_MASK 0x14 /* Shares bits with COR_STATUS */
#define PCIR_AER_CAP_CONTROL 0x18
#define PCIM_AER_FIRST_ERROR_PTR 0x0000001f
#define PCIM_AER_ECRC_GEN_CAPABLE 0x00000020
#define PCIM_AER_ECRC_GEN_ENABLE 0x00000040
#define PCIM_AER_ECRC_CHECK_CAPABLE 0x00000080
#define PCIM_AER_ECRC_CHECK_ENABLE 0x00000100
#define PCIM_AER_MULT_HDR_CAPABLE 0x00000200
#define PCIM_AER_MULT_HDR_ENABLE 0x00000400
#define PCIM_AER_TLP_PREFIX_LOG_PRESENT 0x00000800
#define PCIR_AER_HEADER_LOG 0x1c
#define PCIR_AER_ROOTERR_CMD 0x2c /* Only for root complex ports */
#define PCIM_AER_ROOTERR_COR_ENABLE 0x00000001
#define PCIM_AER_ROOTERR_NF_ENABLE 0x00000002
#define PCIM_AER_ROOTERR_F_ENABLE 0x00000004
#define PCIR_AER_ROOTERR_STATUS 0x30 /* Only for root complex ports */
#define PCIM_AER_ROOTERR_COR_ERR 0x00000001
#define PCIM_AER_ROOTERR_MULTI_COR_ERR 0x00000002
#define PCIM_AER_ROOTERR_UC_ERR 0x00000004
#define PCIM_AER_ROOTERR_MULTI_UC_ERR 0x00000008
#define PCIM_AER_ROOTERR_FIRST_UC_FATAL 0x00000010
#define PCIM_AER_ROOTERR_NF_ERR 0x00000020
#define PCIM_AER_ROOTERR_F_ERR 0x00000040
#define PCIM_AER_ROOTERR_INT_MESSAGE 0xf8000000
#define PCIR_AER_COR_SOURCE_ID 0x34 /* Only for root complex ports */
#define PCIR_AER_ERR_SOURCE_ID 0x36 /* Only for root complex ports */
#define PCIR_AER_TLP_PREFIX_LOG 0x38 /* Only for TLP prefix functions */
/* Virtual Channel definitions */
#define PCIR_VC_CAP1 0x04
#define PCIM_VC_CAP1_EXT_COUNT 0x00000007
#define PCIM_VC_CAP1_LOWPRI_EXT_COUNT 0x00000070
#define PCIR_VC_CAP2 0x08
#define PCIR_VC_CONTROL 0x0C
#define PCIR_VC_STATUS 0x0E
#define PCIR_VC_RESOURCE_CAP(n) (0x10 + (n) * 0x0C)
#define PCIR_VC_RESOURCE_CTL(n) (0x14 + (n) * 0x0C)
#define PCIR_VC_RESOURCE_STA(n) (0x18 + (n) * 0x0C)
/* Serial Number definitions */
#define PCIR_SERIAL_LOW 0x04
#define PCIR_SERIAL_HIGH 0x08
/* SR-IOV definitions */
#define PCIR_SRIOV_CTL 0x08
#define PCIM_SRIOV_VF_EN 0x01
#define PCIM_SRIOV_VF_MSE 0x08 /* Memory space enable. */
#define PCIM_SRIOV_ARI_EN 0x10
#define PCIR_SRIOV_TOTAL_VFS 0x0E
#define PCIR_SRIOV_NUM_VFS 0x10
#define PCIR_SRIOV_VF_OFF 0x14
#define PCIR_SRIOV_VF_STRIDE 0x16
#define PCIR_SRIOV_VF_DID 0x1A
#define PCIR_SRIOV_PAGE_CAP 0x1C
#define PCIR_SRIOV_PAGE_SIZE 0x20
#define PCI_SRIOV_BASE_PAGE_SHIFT 12
#define PCIR_SRIOV_BARS 0x24
#define PCIR_SRIOV_BAR(x) (PCIR_SRIOV_BARS + (x) * 4)

View file

@ -0,0 +1,89 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)psl.h 5.2 (Berkeley) 1/18/91
* $FreeBSD$
*/
#pragma once
/*
* 386 processor status longword.
*/
#define PSL_C 0x00000001 /* carry bit */
#define PSL_PF 0x00000004 /* parity bit */
#define PSL_AF 0x00000010 /* bcd carry bit */
#define PSL_Z 0x00000040 /* zero bit */
#define PSL_N 0x00000080 /* negative bit */
#define PSL_T 0x00000100 /* trace enable bit */
#define PSL_I 0x00000200 /* interrupt enable bit */
#define PSL_D 0x00000400 /* string instruction direction bit */
#define PSL_V 0x00000800 /* overflow bit */
#define PSL_IOPL 0x00003000 /* i/o privilege level */
#define PSL_NT 0x00004000 /* nested task bit */
#define PSL_RF 0x00010000 /* resume flag bit */
#define PSL_VM 0x00020000 /* virtual 8086 mode bit */
#define PSL_AC 0x00040000 /* alignment checking */
#define PSL_VIF 0x00080000 /* virtual interrupt enable */
#define PSL_VIP 0x00100000 /* virtual interrupt pending */
#define PSL_ID 0x00200000 /* identification bit */
/*
* The i486 manual says that we are not supposed to change reserved flags,
* but this is too much trouble since the reserved flags depend on the cpu
* and setting them to their historical values works in practice.
*/
#define PSL_RESERVED_DEFAULT 0x00000002
/*
* Initial flags for kernel and user mode. The kernel later inherits
* PSL_I and some other flags from user mode.
*/
#define PSL_KERNEL PSL_RESERVED_DEFAULT
#define PSL_USER (PSL_RESERVED_DEFAULT | PSL_I)
/*
* Bits that can be changed in user mode on 486's. We allow these bits
* to be changed using ptrace(), sigreturn() and procfs. Setting PS_NT
* is undesirable but it may as well be allowed since users can inflict
* it on the kernel directly. Changes to PSL_AC are silently ignored on
* 386's.
*
* Users are allowed to change the privileged flag PSL_RF. The cpu sets PSL_RF
* in tf_eflags for faults. Debuggers should sometimes set it there too.
* tf_eflags is kept in the signal context during signal handling and there is
* no other place to remember it, so the PSL_RF bit may be corrupted by the
* signal handler without us knowing. Corruption of the PSL_RF bit at worst
* causes one more or one less debugger trap, so allowing it is fairly
* harmless.
*/
#define PSL_USERCHANGE (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_T \
| PSL_D | PSL_V | PSL_NT | PSL_RF | PSL_AC | PSL_ID)
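/*
 * Editorial usage sketch (not part of the original header): PSL_USERCHANGE
 * is typically applied when merging user-supplied flags into a saved eflags
 * value, keeping the privileged bits intact.  The helper name below is
 * hypothetical.
 */
static inline unsigned int
psl_merge_user_flags(unsigned int cur_eflags, unsigned int user_eflags)
{
	/* privileged bits come from the current value, the rest from the user */
	return ((cur_eflags & ~PSL_USERCHANGE) |
	    (user_eflags & PSL_USERCHANGE));
}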

111
include/xhyve/support/rtc.h Normal file
View file

@ -0,0 +1,111 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)rtc.h 7.1 (Berkeley) 5/12/91
* $FreeBSD$
*/
/*
* MC146818 RTC Register locations
*/
#pragma once
#define RTC_SEC 0x00 /* seconds */
#define RTC_SECALRM 0x01 /* seconds alarm */
#define RTC_MIN 0x02 /* minutes */
#define RTC_MINALRM 0x03 /* minutes alarm */
#define RTC_HRS 0x04 /* hours */
#define RTC_HRSALRM 0x05 /* hours alarm */
#define RTC_WDAY 0x06 /* week day */
#define RTC_DAY 0x07 /* day of month */
#define RTC_MONTH 0x08 /* month of year */
#define RTC_YEAR 0x09 /* year */
#define RTC_STATUSA 0x0a /* status register A */
#define RTCSA_TUP 0x80 /* time update, don't look now */
#define RTCSA_RESET 0x70 /* reset divider */
#define RTCSA_DIVIDER 0x20 /* divider correct for 32768 Hz */
#define RTCSA_8192 0x03 /* 8192 Hz interrupt */
#define RTCSA_4096 0x04
#define RTCSA_2048 0x05
#define RTCSA_1024 0x06 /* default for profiling */
#define RTCSA_PROF RTCSA_1024
#define RTC_PROFRATE 1024
#define RTCSA_512 0x07
#define RTCSA_256 0x08
#define RTCSA_128 0x09
#define RTCSA_NOPROF RTCSA_128
#define RTC_NOPROFRATE 128
#define RTCSA_64 0x0a
#define RTCSA_32 0x0b /* 32 Hz interrupt */
#define RTC_STATUSB 0x0b /* status register B */
#define RTCSB_DST 0x01 /* USA Daylight Savings Time enable */
#define RTCSB_24HR 0x02 /* 0 = 12 hours, 1 = 24 hours */
#define RTCSB_BCD 0x04 /* 0 = BCD, 1 = Binary coded time */
#define RTCSB_SQWE 0x08 /* 1 = output square wave at SQW pin */
#define RTCSB_UINTR 0x10 /* 1 = enable update-ended interrupt */
#define RTCSB_AINTR 0x20 /* 1 = enable alarm interrupt */
#define RTCSB_PINTR 0x40 /* 1 = enable periodic clock interrupt */
#define RTCSB_HALT 0x80 /* stop clock updates */
#define RTC_INTR 0x0c /* status register C (R) interrupt source */
#define RTCIR_UPDATE 0x10 /* update intr */
#define RTCIR_ALARM 0x20 /* alarm intr */
#define RTCIR_PERIOD 0x40 /* periodic intr */
#define RTCIR_INT 0x80 /* interrupt output signal */
#define RTC_STATUSD 0x0d /* status register D (R) Lost Power */
#define RTCSD_PWR 0x80 /* clock power OK */
#define RTC_DIAG 0x0e /* status register E - bios diagnostic */
#define RTCDG_BITS "\020\010clock_battery\007ROM_cksum\006config_unit\005memory_size\004fixed_disk\003invalid_time"
#define RTC_RESET 0x0f /* status register F - reset code byte */
#define RTCRS_RST 0x00 /* normal reset */
#define RTCRS_LOAD 0x04 /* load system */
#define RTC_FDISKETTE 0x10 /* diskette drive type in upper/lower nibble */
#define RTCFDT_NONE 0 /* none present */
#define RTCFDT_360K 0x10 /* 360K */
#define RTCFDT_12M 0x20 /* 1.2M */
#define RTCFDT_720K 0x30 /* 720K */
#define RTCFDT_144M 0x40 /* 1.44M */
#define RTCFDT_288M_1 0x50 /* 2.88M, some BIOSes */
#define RTCFDT_288M 0x60 /* 2.88M */
#define RTC_BASELO 0x15 /* low byte of basemem size */
#define RTC_BASEHI 0x16 /* high byte of basemem size */
#define RTC_EXTLO 0x17 /* low byte of extended mem size */
#define RTC_EXTHI 0x18 /* high byte of extended mem size */
#define RTC_CENTURY 0x32 /* current century */

View file

@ -0,0 +1,277 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)segments.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <xhyve/support/misc.h>
// /*
// * X86 Segmentation Data Structures and definitions
// */
/* Selectors */
#define SEL_RPL_MASK 3 /* requester priv level */
#define ISPL(s) ((s) & 3) /* priority level of a selector */
#define SEL_KPL 0 /* kernel priority level */
#define SEL_UPL 3 /* user priority level */
#define ISLDT(s) ((s) & SEL_LDT) /* is it local or global */
#define SEL_LDT 4 /* local descriptor table */
#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
#define LSEL(s,r) (((s)<<3) | SEL_LDT | r) /* a local selector */
#define GSEL(s,r) (((s)<<3) | r) /* a global selector */
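/*
 * Worked example (editorial, not part of the original header): GDT entry 4
 * at kernel privilege gives selector 0x20, and the macros above recover its
 * pieces.  The entry number is only an illustration.
 *
 *	GSEL(4, SEL_KPL) == (4 << 3) | 0 == 0x20
 *	IDXSEL(0x20)     == 4
 *	ISPL(0x20)       == SEL_KPL
 *	ISLDT(0x20)      == 0	(global descriptor table)
 */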
// /*
// * User segment descriptors (%cs, %ds etc for i386 apps. 64 bit wide)
// * For long-mode apps, %cs only has the conforming bit in sd_type, the sd_dpl,
// * sd_p, sd_l and sd_def32 (which must be zero). %ds only has sd_p.
// */
// struct segment_descriptor {
// unsigned sd_lolimit:16; /* segment extent (lsb) */
// unsigned sd_lobase:24; /* segment base address (lsb) */
// unsigned sd_type:5; /* segment type */
// unsigned sd_dpl:2; /* segment descriptor priority level */
// unsigned sd_p:1; /* segment descriptor present */
// unsigned sd_hilimit:4; /* segment extent (msb) */
// unsigned sd_xx:2; /* unused */
// unsigned sd_def32:1; /* default 32 vs 16 bit size */
// unsigned sd_gran:1; /* limit granularity (byte/page units)*/
// unsigned sd_hibase:8; /* segment base address (msb) */
// } __packed;
struct user_segment_descriptor {
uint64_t sd_lolimit:16; /* segment extent (lsb) */
uint64_t sd_lobase:24; /* segment base address (lsb) */
uint64_t sd_type:5; /* segment type */
uint64_t sd_dpl:2; /* segment descriptor priority level */
uint64_t sd_p:1; /* segment descriptor present */
uint64_t sd_hilimit:4; /* segment extent (msb) */
uint64_t sd_xx:1; /* unused */
uint64_t sd_long:1; /* long mode (cs only) */
uint64_t sd_def32:1; /* default 32 vs 16 bit size */
uint64_t sd_gran:1; /* limit granularity (byte/page units)*/
uint64_t sd_hibase:8; /* segment base address (msb) */
};
#define USD_GETBASE(sd) (((sd)->sd_lobase) | (sd)->sd_hibase << 24)
#define USD_SETBASE(sd, b) (sd)->sd_lobase = (b); \
(sd)->sd_hibase = ((b) >> 24);
#define USD_GETLIMIT(sd) (((sd)->sd_lolimit) | (sd)->sd_hilimit << 16)
#define USD_SETLIMIT(sd, l) (sd)->sd_lolimit = (l); \
(sd)->sd_hilimit = ((l) >> 16);
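/*
 * Usage sketch (editorial, not part of the original header): fill in a flat
 * user code descriptor with the accessors above.  The helper name and the
 * concrete field values are illustrative only.
 */
static inline void
usd_example_flat_code(struct user_segment_descriptor *sd)
{
	USD_SETBASE(sd, 0x0);		/* base 0 */
	USD_SETLIMIT(sd, 0xfffff);	/* 20-bit limit, page granular */
	sd->sd_type = 0x1b;		/* code, readable, accessed */
	sd->sd_dpl = SEL_UPL;
	sd->sd_p = 1;
	sd->sd_def32 = 1;
	sd->sd_gran = 1;
	/* USD_GETBASE(sd) == 0x0 and USD_GETLIMIT(sd) == 0xfffff afterwards */
}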
// #ifdef __i386__
// /*
// * Gate descriptors (e.g. indirect descriptors)
// */
// struct gate_descriptor {
// unsigned gd_looffset:16; /* gate offset (lsb) */
// unsigned gd_selector:16; /* gate segment selector */
// unsigned gd_stkcpy:5; /* number of stack wds to cpy */
// unsigned gd_xx:3; /* unused */
// unsigned gd_type:5; /* segment type */
// unsigned gd_dpl:2; /* segment descriptor priority level */
// unsigned gd_p:1; /* segment descriptor present */
// unsigned gd_hioffset:16; /* gate offset (msb) */
// } __packed;
// /*
// * Generic descriptor
// */
// union descriptor {
// struct segment_descriptor sd;
// struct gate_descriptor gd;
// };
// #else
// /*
// * Gate descriptors (e.g. indirect descriptors, trap, interrupt etc. 128 bit)
// * Only interrupt and trap gates have gd_ist.
// */
// struct gate_descriptor {
// uint64_t gd_looffset:16; /* gate offset (lsb) */
// uint64_t gd_selector:16; /* gate segment selector */
// uint64_t gd_ist:3; /* IST table index */
// uint64_t gd_xx:5; /* unused */
// uint64_t gd_type:5; /* segment type */
// uint64_t gd_dpl:2; /* segment descriptor priority level */
// uint64_t gd_p:1; /* segment descriptor present */
// uint64_t gd_hioffset:48; /* gate offset (msb) */
// uint64_t sd_xx1:32;
// } __packed;
// /*
// * Generic descriptor
// */
// union descriptor {
// struct user_segment_descriptor sd;
// struct gate_descriptor gd;
// };
// #endif
/* system segments and gate types */
#define SDT_SYSNULL 0 /* system null */
#define SDT_SYS286TSS 1 /* system 286 TSS available */
#define SDT_SYSLDT 2 /* system local descriptor table */
#define SDT_SYS286BSY 3 /* system 286 TSS busy */
#define SDT_SYS286CGT 4 /* system 286 call gate */
#define SDT_SYSTASKGT 5 /* system task gate */
#define SDT_SYS286IGT 6 /* system 286 interrupt gate */
#define SDT_SYS286TGT 7 /* system 286 trap gate */
#define SDT_SYSNULL2 8 /* system null again */
#define SDT_SYS386TSS 9 /* system 386 TSS available */
#define SDT_SYSTSS 9 /* system available 64 bit TSS */
#define SDT_SYSNULL3 10 /* system null again */
#define SDT_SYS386BSY 11 /* system 386 TSS busy */
#define SDT_SYSBSY 11 /* system busy 64 bit TSS */
#define SDT_SYS386CGT 12 /* system 386 call gate */
#define SDT_SYSCGT 12 /* system 64 bit call gate */
#define SDT_SYSNULL4 13 /* system null again */
#define SDT_SYS386IGT 14 /* system 386 interrupt gate */
#define SDT_SYSIGT 14 /* system 64 bit interrupt gate */
#define SDT_SYS386TGT 15 /* system 386 trap gate */
#define SDT_SYSTGT 15 /* system 64 bit trap gate */
// /* memory segment types */
// #define SDT_MEMRO 16 /* memory read only */
// #define SDT_MEMROA 17 /* memory read only accessed */
// #define SDT_MEMRW 18 /* memory read write */
// #define SDT_MEMRWA 19 /* memory read write accessed */
// #define SDT_MEMROD 20 /* memory read only expand dwn limit */
// #define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
// #define SDT_MEMRWD 22 /* memory read write expand dwn limit */
// #define SDT_MEMRWDA 23 /* memory read write expand dwn limit accessed*/
// #define SDT_MEME 24 /* memory execute only */
// #define SDT_MEMEA 25 /* memory execute only accessed */
// #define SDT_MEMER 26 /* memory execute read */
// #define SDT_MEMERA 27 /* memory execute read accessed */
// #define SDT_MEMEC 28 /* memory execute only conforming */
// #define SDT_MEMEAC 29 /* memory execute only accessed conforming */
// #define SDT_MEMERC 30 /* memory execute read conforming */
// #define SDT_MEMERAC 31 /* memory execute read accessed conforming */
// /*
// * Size of IDT table
// */
// #define NIDT 256 /* 32 reserved, 0x80 syscall, most are h/w */
// #define NRSVIDT 32 /* reserved entries for cpu exceptions */
/*
* Entries in the Interrupt Descriptor Table (IDT)
*/
#define IDT_DE 0 /* #DE: Divide Error */
#define IDT_DB 1 /* #DB: Debug */
#define IDT_NMI 2 /* Nonmaskable External Interrupt */
#define IDT_BP 3 /* #BP: Breakpoint */
#define IDT_OF 4 /* #OF: Overflow */
#define IDT_BR 5 /* #BR: Bound Range Exceeded */
#define IDT_UD 6 /* #UD: Undefined/Invalid Opcode */
#define IDT_NM 7 /* #NM: No Math Coprocessor */
#define IDT_DF 8 /* #DF: Double Fault */
#define IDT_FPUGP 9 /* Coprocessor Segment Overrun */
#define IDT_TS 10 /* #TS: Invalid TSS */
#define IDT_NP 11 /* #NP: Segment Not Present */
#define IDT_SS 12 /* #SS: Stack Segment Fault */
#define IDT_GP 13 /* #GP: General Protection Fault */
#define IDT_PF 14 /* #PF: Page Fault */
#define IDT_MF 16 /* #MF: FPU Floating-Point Error */
#define IDT_AC 17 /* #AC: Alignment Check */
#define IDT_MC 18 /* #MC: Machine Check */
#define IDT_XF 19 /* #XF: SIMD Floating-Point Exception */
#define IDT_IO_INTS NRSVIDT /* Base of IDT entries for I/O interrupts. */
#define IDT_SYSCALL 0x80 /* System Call Interrupt Vector */
#define IDT_DTRACE_RET 0x92 /* DTrace pid provider Interrupt Vector */
#define IDT_EVTCHN 0x93 /* Xen HVM Event Channel Interrupt Vector */
// #if defined(__i386__)
// /*
// * Entries in the Global Descriptor Table (GDT)
// * Note that each 4 entries share a single 32 byte L1 cache line.
// * Some of the fast syscall instructions require a specific order here.
// */
// #define GNULL_SEL 0 /* Null Descriptor */
// #define GPRIV_SEL 1 /* SMP Per-Processor Private Data */
// #define GUFS_SEL 2 /* User %fs Descriptor (order critical: 1) */
// #define GUGS_SEL 3 /* User %gs Descriptor (order critical: 2) */
// #define GCODE_SEL 4 /* Kernel Code Descriptor (order critical: 1) */
// #define GDATA_SEL 5 /* Kernel Data Descriptor (order critical: 2) */
// #define GUCODE_SEL 6 /* User Code Descriptor (order critical: 3) */
// #define GUDATA_SEL 7 /* User Data Descriptor (order critical: 4) */
// #define GBIOSLOWMEM_SEL 8 /* BIOS low memory access (must be entry 8) */
// #define GPROC0_SEL 9 /* Task state process slot zero and up */
// #define GLDT_SEL 10 /* Default User LDT */
// #define GUSERLDT_SEL 11 /* User LDT */
// #define GPANIC_SEL 12 /* Task state to consider panic from */
// #define GBIOSCODE32_SEL 13 /* BIOS interface (32bit Code) */
// #define GBIOSCODE16_SEL 14 /* BIOS interface (16bit Code) */
// #define GBIOSDATA_SEL 15 /* BIOS interface (Data) */
// #define GBIOSUTIL_SEL 16 /* BIOS interface (Utility) */
// #define GBIOSARGS_SEL 17 /* BIOS interface (Arguments) */
// #define GNDIS_SEL 18 /* For the NDIS layer */
// #define NGDT 19
// /*
// * Entries in the Local Descriptor Table (LDT)
// */
// #define LSYS5CALLS_SEL 0 /* forced by intel BCS */
// #define LSYS5SIGR_SEL 1
// #define L43BSDCALLS_SEL 2 /* notyet */
// #define LUCODE_SEL 3
// #define LSOL26CALLS_SEL 4 /* Solaris >= 2.6 system call gate */
// #define LUDATA_SEL 5
// /* separate stack, es,fs,gs sels ? */
// /* #define LPOSIXCALLS_SEL 5*/ /* notyet */
// #define LBSDICALLS_SEL 16 /* BSDI system call gate */
// #define NLDT (LBSDICALLS_SEL + 1)
// #else /* !__i386__ */
// /*
// * Entries in the Global Descriptor Table (GDT)
// */
// #define GNULL_SEL 0 /* Null Descriptor */
// #define GNULL2_SEL 1 /* Null Descriptor */
// #define GUFS32_SEL 2 /* User 32 bit %fs Descriptor */
// #define GUGS32_SEL 3 /* User 32 bit %gs Descriptor */
// #define GCODE_SEL 4 /* Kernel Code Descriptor */
// #define GDATA_SEL 5 /* Kernel Data Descriptor */
// #define GUCODE32_SEL 6 /* User 32 bit code Descriptor */
// #define GUDATA_SEL 7 /* User 32/64 bit Data Descriptor */
// #define GUCODE_SEL 8 /* User 64 bit Code Descriptor */
// #define GPROC0_SEL 9 /* TSS for entering kernel etc */
// /* slot 10 is second half of GPROC0_SEL */
// #define GUSERLDT_SEL 11 /* LDT */
// /* slot 12 is second half of GUSERLDT_SEL */
// #define NGDT 13
// #endif /* __i386__ */

View file

@ -0,0 +1,845 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#pragma once
/*
* Bits in 386 special registers:
*/
#define CR0_PE 0x00000001 /* Protected mode Enable */
#define CR0_MP 0x00000002 /* "Math" (fpu) Present */
#define CR0_EM 0x00000004 /* EMulate FPU instructions. (trap ESC only) */
#define CR0_TS 0x00000008 /* Task Switched (if MP, trap ESC and WAIT) */
#define CR0_PG 0x80000000 /* PaGing enable */
/*
* Bits in 486 special registers:
*/
#define CR0_ET 0x00000010 /* Extension type */
#define CR0_NE 0x00000020 /* Numeric Error enable (EX16 vs IRQ13) */
#define CR0_WP 0x00010000 /* Write Protect (honor page protect in
all modes) */
#define CR0_AM 0x00040000 /* Alignment Mask (set to enable AC flag) */
#define CR0_NW 0x20000000 /* Not Write-through */
#define CR0_CD 0x40000000 /* Cache Disable */
#define CR3_PCID_SAVE 0x8000000000000000
#define CR3_PCID_MASK 0xfff
/*
* Bits in PPro special registers
*/
#define CR4_VME 0x00000001 /* Virtual 8086 mode extensions */
#define CR4_PVI 0x00000002 /* Protected-mode virtual interrupts */
#define CR4_TSD 0x00000004 /* Time stamp disable */
#define CR4_DE 0x00000008 /* Debugging extensions */
#define CR4_PSE 0x00000010 /* Page size extensions */
#define CR4_PAE 0x00000020 /* Physical address extension */
#define CR4_MCE 0x00000040 /* Machine check enable */
#define CR4_PGE 0x00000080 /* Page global enable */
#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
#define CR4_FXSR 0x00000200 /* Fast FPU save/restore used by OS */
#define CR4_XMM 0x00000400 /* enable SIMD/MMX2 to use exception 16 */
#define CR4_VMXE 0x00002000 /* enable VMX operation (Intel-specific) */
#define CR4_FSGSBASE 0x00010000 /* Enable FS/GS BASE accessing instructions */
#define CR4_PCIDE 0x00020000 /* Enable Context ID */
#define CR4_XSAVE 0x00040000 /* XSETBV/XGETBV */
#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execution Prevention */
/*
* Bits in AMD64 special registers. EFER is 64 bits wide.
*/
#define EFER_SCE 0x000000001 /* System Call Extensions (R/W) */
#define EFER_LME 0x000000100 /* Long mode enable (R/W) */
#define EFER_LMA 0x000000400 /* Long mode active (R) */
#define EFER_NXE 0x000000800 /* PTE No-Execute bit enable (R/W) */
#define EFER_SVM 0x000001000 /* SVM enable bit for AMD, reserved for Intel */
#define EFER_LMSLE 0x000002000 /* Long Mode Segment Limit Enable */
#define EFER_FFXSR 0x000004000 /* Fast FXSAVE/FSRSTOR */
#define EFER_TCE 0x000008000 /* Translation Cache Extension */
/*
* Intel Extended Features registers
*/
#define XCR0 0 /* XFEATURE_ENABLED_MASK register */
#define XFEATURE_ENABLED_X87 0x00000001
#define XFEATURE_ENABLED_SSE 0x00000002
#define XFEATURE_ENABLED_YMM_HI128 0x00000004
#define XFEATURE_ENABLED_AVX XFEATURE_ENABLED_YMM_HI128
#define XFEATURE_ENABLED_BNDREGS 0x00000008
#define XFEATURE_ENABLED_BNDCSR 0x00000010
#define XFEATURE_ENABLED_OPMASK 0x00000020
#define XFEATURE_ENABLED_ZMM_HI256 0x00000040
#define XFEATURE_ENABLED_HI16_ZMM 0x00000080
#define XFEATURE_AVX \
(XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE | XFEATURE_ENABLED_AVX)
#define XFEATURE_AVX512 \
(XFEATURE_ENABLED_OPMASK | XFEATURE_ENABLED_ZMM_HI256 | \
XFEATURE_ENABLED_HI16_ZMM)
#define XFEATURE_MPX \
(XFEATURE_ENABLED_BNDREGS | XFEATURE_ENABLED_BNDCSR)
/*
* CPUID instruction features register
*/
#define CPUID_FPU 0x00000001
#define CPUID_VME 0x00000002
#define CPUID_DE 0x00000004
#define CPUID_PSE 0x00000008
#define CPUID_TSC 0x00000010
#define CPUID_MSR 0x00000020
#define CPUID_PAE 0x00000040
#define CPUID_MCE 0x00000080
#define CPUID_CX8 0x00000100
#define CPUID_APIC 0x00000200
#define CPUID_B10 0x00000400
#define CPUID_SEP 0x00000800
#define CPUID_MTRR 0x00001000
#define CPUID_PGE 0x00002000
#define CPUID_MCA 0x00004000
#define CPUID_CMOV 0x00008000
#define CPUID_PAT 0x00010000
#define CPUID_PSE36 0x00020000
#define CPUID_PSN 0x00040000
#define CPUID_CLFSH 0x00080000
#define CPUID_B20 0x00100000
#define CPUID_DS 0x00200000
#define CPUID_ACPI 0x00400000
#define CPUID_MMX 0x00800000
#define CPUID_FXSR 0x01000000
#define CPUID_SSE 0x02000000
#define CPUID_XMM 0x02000000
#define CPUID_SSE2 0x04000000
#define CPUID_SS 0x08000000
#define CPUID_HTT 0x10000000
#define CPUID_TM 0x20000000
#define CPUID_IA64 0x40000000
#define CPUID_PBE 0x80000000
#define CPUID2_SSE3 0x00000001
#define CPUID2_PCLMULQDQ 0x00000002
#define CPUID2_DTES64 0x00000004
#define CPUID2_MON 0x00000008
#define CPUID2_DS_CPL 0x00000010
#define CPUID2_VMX 0x00000020
#define CPUID2_SMX 0x00000040
#define CPUID2_EST 0x00000080
#define CPUID2_TM2 0x00000100
#define CPUID2_SSSE3 0x00000200
#define CPUID2_CNXTID 0x00000400
#define CPUID2_SDBG 0x00000800
#define CPUID2_FMA 0x00001000
#define CPUID2_CX16 0x00002000
#define CPUID2_XTPR 0x00004000
#define CPUID2_PDCM 0x00008000
#define CPUID2_PCID 0x00020000
#define CPUID2_DCA 0x00040000
#define CPUID2_SSE41 0x00080000
#define CPUID2_SSE42 0x00100000
#define CPUID2_X2APIC 0x00200000
#define CPUID2_MOVBE 0x00400000
#define CPUID2_POPCNT 0x00800000
#define CPUID2_TSCDLT 0x01000000
#define CPUID2_AESNI 0x02000000
#define CPUID2_XSAVE 0x04000000
#define CPUID2_OSXSAVE 0x08000000
#define CPUID2_AVX 0x10000000
#define CPUID2_F16C 0x20000000
#define CPUID2_RDRAND 0x40000000
#define CPUID2_HV 0x80000000
/*
* Important bits in the Thermal and Power Management flags
* CPUID.6 EAX and ECX.
*/
#define CPUTPM1_SENSOR 0x00000001
#define CPUTPM1_TURBO 0x00000002
#define CPUTPM1_ARAT 0x00000004
#define CPUTPM2_EFFREQ 0x00000001
/*
* Important bits in the AMD extended cpuid flags
*/
#define AMDID_SYSCALL 0x00000800
#define AMDID_MP 0x00080000
#define AMDID_NX 0x00100000
#define AMDID_EXT_MMX 0x00400000
#define AMDID_FFXSR 0x02000000
#define AMDID_PAGE1GB 0x04000000
#define AMDID_RDTSCP 0x08000000
#define AMDID_LM 0x20000000
#define AMDID_EXT_3DNOW 0x40000000
#define AMDID_3DNOW 0x80000000
#define AMDID2_LAHF 0x00000001
#define AMDID2_CMP 0x00000002
#define AMDID2_SVM 0x00000004
#define AMDID2_EXT_APIC 0x00000008
#define AMDID2_CR8 0x00000010
#define AMDID2_ABM 0x00000020
#define AMDID2_SSE4A 0x00000040
#define AMDID2_MAS 0x00000080
#define AMDID2_PREFETCH 0x00000100
#define AMDID2_OSVW 0x00000200
#define AMDID2_IBS 0x00000400
#define AMDID2_XOP 0x00000800
#define AMDID2_SKINIT 0x00001000
#define AMDID2_WDT 0x00002000
#define AMDID2_LWP 0x00008000
#define AMDID2_FMA4 0x00010000
#define AMDID2_TCE 0x00020000
#define AMDID2_NODE_ID 0x00080000
#define AMDID2_TBM 0x00200000
#define AMDID2_TOPOLOGY 0x00400000
#define AMDID2_PCXC 0x00800000
#define AMDID2_PNXC 0x01000000
#define AMDID2_DBE 0x04000000
#define AMDID2_PTSC 0x08000000
#define AMDID2_PTSCEL2I 0x10000000
/*
* CPUID instruction 1 eax info
*/
#define CPUID_STEPPING 0x0000000f
#define CPUID_MODEL 0x000000f0
#define CPUID_FAMILY 0x00000f00
#define CPUID_EXT_MODEL 0x000f0000
#define CPUID_EXT_FAMILY 0x0ff00000
// #ifdef __i386__
// #define CPUID_TO_MODEL(id) \
// ((((id) & CPUID_MODEL) >> 4) | \
// ((((id) & CPUID_FAMILY) >= 0x600) ? \
// (((id) & CPUID_EXT_MODEL) >> 12) : 0))
// #define CPUID_TO_FAMILY(id) \
// ((((id) & CPUID_FAMILY) >> 8) + \
// ((((id) & CPUID_FAMILY) == 0xf00) ? \
// (((id) & CPUID_EXT_FAMILY) >> 20) : 0))
// #else
// #define CPUID_TO_MODEL(id) \
// ((((id) & CPUID_MODEL) >> 4) | \
// (((id) & CPUID_EXT_MODEL) >> 12))
// #define CPUID_TO_FAMILY(id) \
// ((((id) & CPUID_FAMILY) >> 8) + \
// (((id) & CPUID_EXT_FAMILY) >> 20))
// #endif
/*
* CPUID instruction 1 ebx info
*/
#define CPUID_BRAND_INDEX 0x000000ff
#define CPUID_CLFUSH_SIZE 0x0000ff00
#define CPUID_HTT_CORES 0x00ff0000
#define CPUID_LOCAL_APIC_ID 0xff000000
/*
* CPUID instruction 5 info
*/
#define CPUID5_MON_MIN_SIZE 0x0000ffff /* eax */
#define CPUID5_MON_MAX_SIZE 0x0000ffff /* ebx */
#define CPUID5_MON_MWAIT_EXT 0x00000001 /* ecx */
#define CPUID5_MWAIT_INTRBREAK 0x00000002 /* ecx */
/*
* MWAIT cpu power states. Lower 4 bits are sub-states.
*/
#define MWAIT_C0 0xf0
#define MWAIT_C1 0x00
#define MWAIT_C2 0x10
#define MWAIT_C3 0x20
#define MWAIT_C4 0x30
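/*
 * Worked example (editorial, not part of the original header): an MWAIT
 * hint is a C-state selector above OR'ed with a sub-state in the low four
 * bits, e.g. C2 with sub-state 1:
 *
 *	(MWAIT_C2 | 1) == 0x11
 */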
/*
* MWAIT extensions.
*/
/* Interrupt breaks MWAIT even when masked. */
#define MWAIT_INTRBREAK 0x00000001
/*
* CPUID instruction 6 ecx info
*/
#define CPUID_PERF_STAT 0x00000001
#define CPUID_PERF_BIAS 0x00000008
/*
* CPUID instruction 0xb ebx info.
*/
#define CPUID_TYPE_INVAL 0
#define CPUID_TYPE_SMT 1
#define CPUID_TYPE_CORE 2
/*
* CPUID instruction 0xd Processor Extended State Enumeration Sub-leaf 1
*/
#define CPUID_EXTSTATE_XSAVEOPT 0x00000001
#define CPUID_EXTSTATE_XSAVEC 0x00000002
#define CPUID_EXTSTATE_XINUSE 0x00000004
#define CPUID_EXTSTATE_XSAVES 0x00000008
/*
* AMD extended function 8000_0007h edx info
*/
#define AMDPM_TS 0x00000001
#define AMDPM_FID 0x00000002
#define AMDPM_VID 0x00000004
#define AMDPM_TTP 0x00000008
#define AMDPM_TM 0x00000010
#define AMDPM_STC 0x00000020
#define AMDPM_100MHZ_STEPS 0x00000040
#define AMDPM_HW_PSTATE 0x00000080
#define AMDPM_TSC_INVARIANT 0x00000100
#define AMDPM_CPB 0x00000200
/*
* AMD extended function 8000_0008h ecx info
*/
#define AMDID_CMP_CORES 0x000000ff
#define AMDID_COREID_SIZE 0x0000f000
#define AMDID_COREID_SIZE_SHIFT 12
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 ebx info
*/
#define CPUID_STDEXT_FSGSBASE 0x00000001
#define CPUID_STDEXT_TSC_ADJUST 0x00000002
#define CPUID_STDEXT_BMI1 0x00000008
#define CPUID_STDEXT_HLE 0x00000010
#define CPUID_STDEXT_AVX2 0x00000020
#define CPUID_STDEXT_SMEP 0x00000080
#define CPUID_STDEXT_BMI2 0x00000100
#define CPUID_STDEXT_ERMS 0x00000200
#define CPUID_STDEXT_INVPCID 0x00000400
#define CPUID_STDEXT_RTM 0x00000800
#define CPUID_STDEXT_MPX 0x00004000
#define CPUID_STDEXT_AVX512F 0x00010000
#define CPUID_STDEXT_RDSEED 0x00040000
#define CPUID_STDEXT_ADX 0x00080000
#define CPUID_STDEXT_SMAP 0x00100000
#define CPUID_STDEXT_CLFLUSHOPT 0x00800000
#define CPUID_STDEXT_PROCTRACE 0x02000000
#define CPUID_STDEXT_AVX512PF 0x04000000
#define CPUID_STDEXT_AVX512ER 0x08000000
#define CPUID_STDEXT_AVX512CD 0x10000000
#define CPUID_STDEXT_SHA 0x20000000
/*
* CPUID manufacturers identifiers
*/
#define AMD_VENDOR_ID "AuthenticAMD"
#define CENTAUR_VENDOR_ID "CentaurHauls"
#define CYRIX_VENDOR_ID "CyrixInstead"
#define INTEL_VENDOR_ID "GenuineIntel"
#define NEXGEN_VENDOR_ID "NexGenDriven"
#define NSC_VENDOR_ID "Geode by NSC"
#define RISE_VENDOR_ID "RiseRiseRise"
#define SIS_VENDOR_ID "SiS SiS SiS "
#define TRANSMETA_VENDOR_ID "GenuineTMx86"
#define UMC_VENDOR_ID "UMC UMC UMC "
/*
* Model-specific registers for the i386 family
*/
#define MSR_P5_MC_ADDR 0x000
#define MSR_P5_MC_TYPE 0x001
#define MSR_TSC 0x010
#define MSR_P5_CESR 0x011
#define MSR_P5_CTR0 0x012
#define MSR_P5_CTR1 0x013
#define MSR_IA32_PLATFORM_ID 0x017
#define MSR_APICBASE 0x01b
#define MSR_EBL_CR_POWERON 0x02a
#define MSR_TEST_CTL 0x033
#define MSR_IA32_FEATURE_CONTROL 0x03a
#define MSR_BIOS_UPDT_TRIG 0x079
#define MSR_BBL_CR_D0 0x088
#define MSR_BBL_CR_D1 0x089
#define MSR_BBL_CR_D2 0x08a
#define MSR_BIOS_SIGN 0x08b
#define MSR_PERFCTR0 0x0c1
#define MSR_PERFCTR1 0x0c2
#define MSR_PLATFORM_INFO 0x0ce
#define MSR_MPERF 0x0e7
#define MSR_APERF 0x0e8
#define MSR_IA32_EXT_CONFIG 0x0ee /* Undocumented. Core Solo/Duo only */
#define MSR_MTRRcap 0x0fe
#define MSR_BBL_CR_ADDR 0x116
#define MSR_BBL_CR_DECC 0x118
#define MSR_BBL_CR_CTL 0x119
#define MSR_BBL_CR_TRIG 0x11a
#define MSR_BBL_CR_BUSY 0x11b
#define MSR_BBL_CR_CTL3 0x11e
#define MSR_SYSENTER_CS_MSR 0x174
#define MSR_SYSENTER_ESP_MSR 0x175
#define MSR_SYSENTER_EIP_MSR 0x176
#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_EVNTSEL0 0x186
#define MSR_EVNTSEL1 0x187
#define MSR_THERM_CONTROL 0x19a
#define MSR_THERM_INTERRUPT 0x19b
#define MSR_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#define MSR_IA32_TEMPERATURE_TARGET 0x1a2
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TURBO_RATIO_LIMIT1 0x1ae
#define MSR_DEBUGCTLMSR 0x1d9
#define MSR_LASTBRANCHFROMIP 0x1db
#define MSR_LASTBRANCHTOIP 0x1dc
#define MSR_LASTINTFROMIP 0x1dd
#define MSR_LASTINTTOIP 0x1de
#define MSR_ROB_CR_BKUPTMPDR6 0x1e0
#define MSR_MTRRVarBase 0x200
#define MSR_MTRR64kBase 0x250
#define MSR_MTRR16kBase 0x258
#define MSR_MTRR4kBase 0x268
#define MSR_PAT 0x277
#define MSR_MC0_CTL2 0x280
#define MSR_MTRRdefType 0x2ff
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403
#define MSR_MC1_CTL 0x404
#define MSR_MC1_STATUS 0x405
#define MSR_MC1_ADDR 0x406
#define MSR_MC1_MISC 0x407
#define MSR_MC2_CTL 0x408
#define MSR_MC2_STATUS 0x409
#define MSR_MC2_ADDR 0x40a
#define MSR_MC2_MISC 0x40b
#define MSR_MC3_CTL 0x40c
#define MSR_MC3_STATUS 0x40d
#define MSR_MC3_ADDR 0x40e
#define MSR_MC3_MISC 0x40f
#define MSR_MC4_CTL 0x410
#define MSR_MC4_STATUS 0x411
#define MSR_MC4_ADDR 0x412
#define MSR_MC4_MISC 0x413
#define MSR_RAPL_POWER_UNIT 0x606
#define MSR_PKG_ENERGY_STATUS 0x611
#define MSR_DRAM_ENERGY_STATUS 0x619
#define MSR_PP0_ENERGY_STATUS 0x639
#define MSR_PP1_ENERGY_STATUS 0x641
/*
* VMX MSRs
*/
#define MSR_VMX_BASIC 0x480
#define MSR_VMX_PINBASED_CTLS 0x481
#define MSR_VMX_PROCBASED_CTLS 0x482
#define MSR_VMX_EXIT_CTLS 0x483
#define MSR_VMX_ENTRY_CTLS 0x484
#define MSR_VMX_CR0_FIXED0 0x486
#define MSR_VMX_CR0_FIXED1 0x487
#define MSR_VMX_CR4_FIXED0 0x488
#define MSR_VMX_CR4_FIXED1 0x489
#define MSR_VMX_PROCBASED_CTLS2 0x48b
#define MSR_VMX_EPT_VPID_CAP 0x48c
#define MSR_VMX_TRUE_PINBASED_CTLS 0x48d
#define MSR_VMX_TRUE_PROCBASED_CTLS 0x48e
#define MSR_VMX_TRUE_EXIT_CTLS 0x48f
#define MSR_VMX_TRUE_ENTRY_CTLS 0x490
/*
* X2APIC MSRs
*/
#define MSR_APIC_000 0x800
#define MSR_APIC_ID 0x802
#define MSR_APIC_VERSION 0x803
#define MSR_APIC_TPR 0x808
#define MSR_APIC_EOI 0x80b
#define MSR_APIC_LDR 0x80d
#define MSR_APIC_SVR 0x80f
#define MSR_APIC_ISR0 0x810
#define MSR_APIC_ISR1 0x811
#define MSR_APIC_ISR2 0x812
#define MSR_APIC_ISR3 0x813
#define MSR_APIC_ISR4 0x814
#define MSR_APIC_ISR5 0x815
#define MSR_APIC_ISR6 0x816
#define MSR_APIC_ISR7 0x817
#define MSR_APIC_TMR0 0x818
#define MSR_APIC_IRR0 0x820
#define MSR_APIC_ESR 0x828
#define MSR_APIC_LVT_CMCI 0x82F
#define MSR_APIC_ICR 0x830
#define MSR_APIC_LVT_TIMER 0x832
#define MSR_APIC_LVT_THERMAL 0x833
#define MSR_APIC_LVT_PCINT 0x834
#define MSR_APIC_LVT_LINT0 0x835
#define MSR_APIC_LVT_LINT1 0x836
#define MSR_APIC_LVT_ERROR 0x837
#define MSR_APIC_ICR_TIMER 0x838
#define MSR_APIC_CCR_TIMER 0x839
#define MSR_APIC_DCR_TIMER 0x83e
#define MSR_APIC_SELF_IPI 0x83f
#define MSR_IA32_XSS 0xda0
#define MSR_IA32_TSC_AUX 0xc0000103
/*
* Constants related to MSR's.
*/
#define APICBASE_RESERVED 0x000002ff
#define APICBASE_BSP 0x00000100
#define APICBASE_X2APIC 0x00000400
#define APICBASE_ENABLED 0x00000800
#define APICBASE_ADDRESS 0xfffff000
/* MSR_IA32_FEATURE_CONTROL related */
#define IA32_FEATURE_CONTROL_LOCK 0x01 /* lock bit */
#define IA32_FEATURE_CONTROL_SMX_EN 0x02 /* enable VMX inside SMX */
#define IA32_FEATURE_CONTROL_VMX_EN 0x04 /* enable VMX outside SMX */
/*
* PAT modes.
*/
#define PAT_UNCACHEABLE 0x00
#define PAT_WRITE_COMBINING 0x01
#define PAT_WRITE_THROUGH 0x04
#define PAT_WRITE_PROTECTED 0x05
#define PAT_WRITE_BACK 0x06
#define PAT_UNCACHED 0x07
#define PAT_VALUE(i, m) ((long long)(m) << (8 * (i)))
#define PAT_MASK(i) PAT_VALUE(i, 0xff)
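/*
 * Usage sketch (editorial, not part of the original header): PAT_VALUE()
 * places a memory type into one of the eight 8-bit PAT slots, e.g.
 *
 *	PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_COMBINING)
 *	    == 0x0000000000000106
 *
 * and PAT_MASK(1) == 0x000000000000ff00 selects slot 1 again.
 */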
/*
* Constants related to MTRRs
*/
#define MTRR_UNCACHEABLE 0x00
#define MTRR_WRITE_COMBINING 0x01
#define MTRR_WRITE_THROUGH 0x04
#define MTRR_WRITE_PROTECTED 0x05
#define MTRR_WRITE_BACK 0x06
#define MTRR_N64K 8 /* numbers of fixed-size entries */
#define MTRR_N16K 16
#define MTRR_N4K 64
#define MTRR_CAP_WC 0x0000000000000400
#define MTRR_CAP_FIXED 0x0000000000000100
#define MTRR_CAP_VCNT 0x00000000000000ff
#define MTRR_DEF_ENABLE 0x0000000000000800
#define MTRR_DEF_FIXED_ENABLE 0x0000000000000400
#define MTRR_DEF_TYPE 0x00000000000000ff
#define MTRR_PHYSBASE_PHYSBASE 0x000ffffffffff000
#define MTRR_PHYSBASE_TYPE 0x00000000000000ff
#define MTRR_PHYSMASK_PHYSMASK 0x000ffffffffff000
#define MTRR_PHYSMASK_VALID 0x0000000000000800
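/*
 * Usage sketch (editorial, not part of the original header): for a
 * power-of-two sized, size-aligned region the variable-range mask register
 * is composed as
 *
 *	physmask = (~(size - 1) & MTRR_PHYSMASK_PHYSMASK) | MTRR_PHYSMASK_VALID
 *
 * e.g. a 1 MiB region yields 0x000ffffffff00800.
 */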
/*
* Cyrix configuration registers, accessible as IO ports.
*/
#define CCR0 0xc0 /* Configuration control register 0 */
#define CCR0_NC0 0x01 /* First 64K of each 1M memory region is
non-cacheable */
#define CCR0_NC1 0x02 /* 640K-1M region is non-cacheable */
#define CCR0_A20M 0x04 /* Enables A20M# input pin */
#define CCR0_KEN 0x08 /* Enables KEN# input pin */
#define CCR0_FLUSH 0x10 /* Enables FLUSH# input pin */
#define CCR0_BARB 0x20 /* Flushes internal cache when entering hold
state */
#define CCR0_CO 0x40 /* Cache org: 1=direct mapped, 0=2x set
assoc */
#define CCR0_SUSPEND 0x80 /* Enables SUSP# and SUSPA# pins */
#define CCR1 0xc1 /* Configuration control register 1 */
#define CCR1_RPL 0x01 /* Enables RPLSET and RPLVAL# pins */
#define CCR1_SMI 0x02 /* Enables SMM pins */
#define CCR1_SMAC 0x04 /* System management memory access */
#define CCR1_MMAC 0x08 /* Main memory access */
#define CCR1_NO_LOCK 0x10 /* Negate LOCK# */
#define CCR1_SM3 0x80 /* SMM address space address region 3 */
#define CCR2 0xc2
#define CCR2_WB 0x02 /* Enables WB cache interface pins */
#define CCR2_SADS 0x02 /* Slow ADS */
#define CCR2_LOCK_NW 0x04 /* LOCK NW Bit */
#define CCR2_SUSP_HLT 0x08 /* Suspend on HALT */
#define CCR2_WT1 0x10 /* WT region 1 */
#define CCR2_WPR1 0x10 /* Write-protect region 1 */
#define CCR2_BARB 0x20 /* Flushes write-back cache when entering
hold state. */
#define CCR2_BWRT 0x40 /* Enables burst write cycles */
#define CCR2_USE_SUSP 0x80 /* Enables suspend pins */
#define CCR3 0xc3
#define CCR3_SMILOCK 0x01 /* SMM register lock */
#define CCR3_NMI 0x02 /* Enables NMI during SMM */
#define CCR3_LINBRST 0x04 /* Linear address burst cycles */
#define CCR3_SMMMODE 0x08 /* SMM Mode */
#define CCR3_MAPEN0 0x10 /* Enables Map0 */
#define CCR3_MAPEN1 0x20 /* Enables Map1 */
#define CCR3_MAPEN2 0x40 /* Enables Map2 */
#define CCR3_MAPEN3 0x80 /* Enables Map3 */
#define CCR4 0xe8
#define CCR4_IOMASK 0x07
#define CCR4_MEM 0x08 /* Enables memory bypassing */
#define CCR4_DTE 0x10 /* Enables directory table entry cache */
#define CCR4_FASTFPE 0x20 /* Fast FPU exception */
#define CCR4_CPUID 0x80 /* Enables CPUID instruction */
#define CCR5 0xe9
#define CCR5_WT_ALLOC 0x01 /* Write-through allocate */
#define CCR5_SLOP 0x02 /* LOOP instruction slowed down */
#define CCR5_LBR1 0x10 /* Local bus region 1 */
#define CCR5_ARREN 0x20 /* Enables ARR region */
#define CCR6 0xea
#define CCR7 0xeb
/* Performance Control Register (5x86 only). */
#define PCR0 0x20
#define PCR0_RSTK 0x01 /* Enables return stack */
#define PCR0_BTB 0x02 /* Enables branch target buffer */
#define PCR0_LOOP 0x04 /* Enables loop */
#define PCR0_AIS 0x08 /* Enables all instructions stalled to
serialize pipe. */
#define PCR0_MLR 0x10 /* Enables reordering of misaligned loads */
#define PCR0_BTBRT 0x40 /* Enables BTB test register. */
#define PCR0_LSSER 0x80 /* Disable reorder */
/* Device Identification Registers */
#define DIR0 0xfe
#define DIR1 0xff
/*
* Machine Check register constants.
*/
#define MCG_CAP_COUNT 0x000000ff
#define MCG_CAP_CTL_P 0x00000100
#define MCG_CAP_EXT_P 0x00000200
#define MCG_CAP_CMCI_P 0x00000400
#define MCG_CAP_TES_P 0x00000800
#define MCG_CAP_EXT_CNT 0x00ff0000
#define MCG_CAP_SER_P 0x01000000
#define MCG_STATUS_RIPV 0x00000001
#define MCG_STATUS_EIPV 0x00000002
#define MCG_STATUS_MCIP 0x00000004
#define MCG_CTL_ENABLE 0xffffffffffffffff
#define MCG_CTL_DISABLE 0x0000000000000000
#define MSR_MC_CTL(x) (MSR_MC0_CTL + (x) * 4)
#define MSR_MC_STATUS(x) (MSR_MC0_STATUS + (x) * 4)
#define MSR_MC_ADDR(x) (MSR_MC0_ADDR + (x) * 4)
#define MSR_MC_MISC(x) (MSR_MC0_MISC + (x) * 4)
#define MSR_MC_CTL2(x) (MSR_MC0_CTL2 + (x)) /* If MCG_CAP_CMCI_P */
#define MC_STATUS_MCA_ERROR 0x000000000000ffff
#define MC_STATUS_MODEL_ERROR 0x00000000ffff0000
#define MC_STATUS_OTHER_INFO 0x01ffffff00000000
#define MC_STATUS_COR_COUNT 0x001fffc000000000 /* If MCG_CAP_CMCI_P */
#define MC_STATUS_TES_STATUS 0x0060000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_AR 0x0080000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_S 0x0100000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_PCC 0x0200000000000000
#define MC_STATUS_ADDRV 0x0400000000000000
#define MC_STATUS_MISCV 0x0800000000000000
#define MC_STATUS_EN 0x1000000000000000
#define MC_STATUS_UC 0x2000000000000000
#define MC_STATUS_OVER 0x4000000000000000
#define MC_STATUS_VAL 0x8000000000000000
#define MC_MISC_RA_LSB 0x000000000000003f /* If MCG_CAP_SER_P */
#define MC_MISC_ADDRESS_MODE 0x00000000000001c0 /* If MCG_CAP_SER_P */
#define MC_CTL2_THRESHOLD 0x0000000000007fff
#define MC_CTL2_CMCI_EN 0x0000000040000000
/*
* The following four 3-byte registers control the non-cacheable regions.
* These registers must be written as three separate bytes.
*
* NCRx+0: A31-A24 of starting address
* NCRx+1: A23-A16 of starting address
* NCRx+2: A15-A12 of starting address | NCR_SIZE_xx.
*
* The non-cacheable region's starting address must be aligned to the
* size indicated by the NCR_SIZE_xx field.
*/
#define NCR1 0xc4
#define NCR2 0xc7
#define NCR3 0xca
#define NCR4 0xcd
#define NCR_SIZE_0K 0
#define NCR_SIZE_4K 1
#define NCR_SIZE_8K 2
#define NCR_SIZE_16K 3
#define NCR_SIZE_32K 4
#define NCR_SIZE_64K 5
#define NCR_SIZE_128K 6
#define NCR_SIZE_256K 7
#define NCR_SIZE_512K 8
#define NCR_SIZE_1M 9
#define NCR_SIZE_2M 10
#define NCR_SIZE_4M 11
#define NCR_SIZE_8M 12
#define NCR_SIZE_16M 13
#define NCR_SIZE_32M 14
#define NCR_SIZE_4G 15
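/*
 * Encoding sketch (editorial, not part of the original header): following
 * the byte layout described above, a non-cacheable region starting at
 * 'base' (aligned to its size) with size code 'sz' is written as
 *
 *	NCRx + 0: (base >> 24) & 0xff
 *	NCRx + 1: (base >> 16) & 0xff
 *	NCRx + 2: ((base >> 8) & 0xf0) | sz
 */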
/*
* The address region registers are used to specify the location and
* size for the eight address regions.
*
* ARRx + 0: A31-A24 of start address
* ARRx + 1: A23-A16 of start address
* ARRx + 2: A15-A12 of start address | ARR_SIZE_xx
*/
#define ARR0 0xc4
#define ARR1 0xc7
#define ARR2 0xca
#define ARR3 0xcd
#define ARR4 0xd0
#define ARR5 0xd3
#define ARR6 0xd6
#define ARR7 0xd9
#define ARR_SIZE_0K 0
#define ARR_SIZE_4K 1
#define ARR_SIZE_8K 2
#define ARR_SIZE_16K 3
#define ARR_SIZE_32K 4
#define ARR_SIZE_64K 5
#define ARR_SIZE_128K 6
#define ARR_SIZE_256K 7
#define ARR_SIZE_512K 8
#define ARR_SIZE_1M 9
#define ARR_SIZE_2M 10
#define ARR_SIZE_4M 11
#define ARR_SIZE_8M 12
#define ARR_SIZE_16M 13
#define ARR_SIZE_32M 14
#define ARR_SIZE_4G 15
/*
* The region control registers specify the attributes associated with
* the ARRx address regions.
*/
#define RCR0 0xdc
#define RCR1 0xdd
#define RCR2 0xde
#define RCR3 0xdf
#define RCR4 0xe0
#define RCR5 0xe1
#define RCR6 0xe2
#define RCR7 0xe3
#define RCR_RCD 0x01 /* Disables caching for ARRx (x = 0-6). */
#define RCR_RCE 0x01 /* Enables caching for ARR7. */
#define RCR_WWO 0x02 /* Weak write ordering. */
#define RCR_WL 0x04 /* Weak locking. */
#define RCR_WG 0x08 /* Write gathering. */
#define RCR_WT 0x10 /* Write-through. */
#define RCR_NLB 0x20 /* LBA# pin is not asserted. */
/* AMD Write Allocate Top-Of-Memory and Control Register */
#define AMD_WT_ALLOC_TME 0x40000 /* top-of-memory enable */
#define AMD_WT_ALLOC_PRE 0x20000 /* programmable range enable */
#define AMD_WT_ALLOC_FRE 0x10000 /* fixed (A0000-FFFFF) range enable */
/* AMD64 MSR's */
#define MSR_EFER 0xc0000080 /* extended features */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target/cs/ss */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target rip */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target rip */
#define MSR_SF_MASK 0xc0000084 /* syscall flags mask */
#define MSR_FSBASE 0xc0000100 /* base address of the %fs "segment" */
#define MSR_GSBASE 0xc0000101 /* base address of the %gs "segment" */
#define MSR_KGSBASE 0xc0000102 /* base address of the kernel %gs */
#define MSR_PERFEVSEL0 0xc0010000
#define MSR_PERFEVSEL1 0xc0010001
#define MSR_PERFEVSEL2 0xc0010002
#define MSR_PERFEVSEL3 0xc0010003
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_PERFCTR1 0xc0010005
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_SYSCFG 0xc0010010
#define MSR_HWCR 0xc0010015
#define MSR_IORRBASE0 0xc0010016
#define MSR_IORRMASK0 0xc0010017
#define MSR_IORRBASE1 0xc0010018
#define MSR_IORRMASK1 0xc0010019
#define MSR_TOP_MEM 0xc001001a /* boundary for ram below 4G */
#define MSR_TOP_MEM2 0xc001001d /* boundary for ram above 4G */
#define MSR_NB_CFG1 0xc001001f /* NB configuration 1 */
#define MSR_P_STATE_LIMIT 0xc0010061 /* P-state Current Limit Register */
#define MSR_P_STATE_CONTROL 0xc0010062 /* P-state Control Register */
#define MSR_P_STATE_STATUS 0xc0010063 /* P-state Status Register */
#define MSR_P_STATE_CONFIG(n) (0xc0010064 + (n)) /* P-state Config */
#define MSR_SMM_ADDR 0xc0010112 /* SMM TSEG base address */
#define MSR_SMM_MASK 0xc0010113 /* SMM TSEG address mask */
#define MSR_IC_CFG 0xc0011021 /* Instruction Cache Configuration */
#define MSR_K8_UCODE_UPDATE 0xc0010020 /* update microcode */
#define MSR_MC0_CTL_MASK 0xc0010044
#define MSR_VM_CR 0xc0010114 /* SVM: feature control */
#define MSR_VM_HSAVE_PA 0xc0010117 /* SVM: host save area address */
/* MSR_VM_CR related */
#define VM_CR_SVMDIS 0x10 /* SVM: disabled by BIOS */
/* VIA ACE crypto featureset: for via_feature_rng */
#define VIA_HAS_RNG 1 /* cpu has RNG */
/* VIA ACE crypto featureset: for via_feature_xcrypt */
#define VIA_HAS_AES 1 /* cpu has AES */
#define VIA_HAS_SHA 2 /* cpu has SHA1 & SHA256 */
#define VIA_HAS_MM 4 /* cpu has RSA instructions */
#define VIA_HAS_AESCTR 8 /* cpu has AES-CTR instructions */
/* Centaur Extended Feature flags */
#define VIA_CPUID_HAS_RNG 0x000004
#define VIA_CPUID_DO_RNG 0x000008
#define VIA_CPUID_HAS_ACE 0x000040
#define VIA_CPUID_DO_ACE 0x000080
#define VIA_CPUID_HAS_ACE2 0x000100
#define VIA_CPUID_DO_ACE2 0x000200
#define VIA_CPUID_HAS_PHE 0x000400
#define VIA_CPUID_DO_PHE 0x000800
#define VIA_CPUID_HAS_PMM 0x001000
#define VIA_CPUID_DO_PMM 0x002000
/* VIA ACE xcrypt-* instruction context control options */
#define VIA_CRYPT_CWLO_ROUND_M 0x0000000f
#define VIA_CRYPT_CWLO_ALG_M 0x00000070
#define VIA_CRYPT_CWLO_ALG_AES 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_M 0x00000080
#define VIA_CRYPT_CWLO_KEYGEN_HW 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_SW 0x00000080
#define VIA_CRYPT_CWLO_NORMAL 0x00000000
#define VIA_CRYPT_CWLO_INTERMEDIATE 0x00000100
#define VIA_CRYPT_CWLO_ENCRYPT 0x00000000
#define VIA_CRYPT_CWLO_DECRYPT 0x00000200
#define VIA_CRYPT_CWLO_KEY128 0x0000000a /* 128bit, 10 rds */
#define VIA_CRYPT_CWLO_KEY192 0x0000040c /* 192bit, 12 rds */
#define VIA_CRYPT_CWLO_KEY256 0x0000080e /* 256bit, 14 rds */

View file

@ -0,0 +1,47 @@
/*-
* Copyright (C) 2005 TAKAHASHI Yoshihiro. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* The outputs of the three timers are connected as follows:
*
* timer 0 -> irq 0
* timer 1 -> dma chan 0 (for dram refresh)
* timer 2 -> speaker (via keyboard controller)
*
* Timer 0 is used to call hardclock.
* Timer 2 is used to generate console beeps.
*/
#pragma once
#include <xhyve/support/i8253reg.h>
#define IO_TIMER1 0x40 /* 8253 Timer #1 */
#define TIMER_CNTR0 (IO_TIMER1 + TIMER_REG_CNTR0)
#define TIMER_CNTR1 (IO_TIMER1 + TIMER_REG_CNTR1)
#define TIMER_CNTR2 (IO_TIMER1 + TIMER_REG_CNTR2)
#define TIMER_MODE (IO_TIMER1 + TIMER_REG_MODE)
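For example, the classic way a guest rings the console bell is to program timer 2 for square-wave mode through TIMER_MODE/TIMER_CNTR2 and then gate it to the speaker via port 0x61. A rough sketch, assuming the usual TIMER_SEL2, TIMER_SQWAVE, TIMER_16BIT and TIMER_DIV definitions from i8253reg.h and hypothetical outb()/inb() port helpers:

    #include <stdint.h>

    /* Hypothetical guest-side port I/O helpers. */
    void outb(uint16_t port, uint8_t val);
    uint8_t inb(uint16_t port);

    /* Start a roughly 1 kHz beep on the emulated PC speaker. */
    static void
    start_beep(void)
    {
            uint16_t div = TIMER_DIV(1000); /* 1193182 Hz input clock / 1000 Hz */

            outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
            outb(TIMER_CNTR2, div & 0xff);        /* low byte first */
            outb(TIMER_CNTR2, (div >> 8) & 0xff); /* then high byte */
            outb(0x61, inb(0x61) | 0x03);         /* gate timer 2 onto the speaker */
    }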

View file

@ -0,0 +1,751 @@
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD: src/sys/sys/tree.h,v 1.7 2007/12/28 07:03:26 jasone Exp $ */
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/*
* This file defines data structures for different types of trees:
* splay trees and red-black trees.
*
* A splay tree is a self-organizing data structure. Every operation
* on the tree causes a splay to happen. The splay moves the requested
* node to the root of the tree and partly rebalances it.
*
* This has the benefit that request locality causes faster lookups as
* the requested nodes move to the top of the tree. On the other hand,
* every lookup causes memory writes.
*
* The Balance Theorem bounds the total access time for m operations
* and n inserts on an initially empty tree as O((m + n)lg n). The
* amortized cost for a sequence of m accesses to a splay tree is O(lg n);
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
#define SPLAY_HEAD(name, type) \
struct name { \
struct type *sph_root; /* root of the tree */ \
}
#define SPLAY_INITIALIZER(root) \
{ NULL }
#define SPLAY_INIT(root) do { \
(root)->sph_root = NULL; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ENTRY(type) \
struct { \
struct type *spe_left; /* left element */ \
struct type *spe_right; /* right element */ \
}
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
#define SPLAY_ROOT(head) (head)->sph_root
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKLEFT(head, tmp, field) do { \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKRIGHT(head, tmp, field) do { \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
void name##_SPLAY(struct name *, struct type *); \
void name##_SPLAY_MINMAX(struct name *, int); \
struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
\
/* Finds the node with the same key as elm */ \
static __inline struct type * \
name##_SPLAY_FIND(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) \
return(NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) \
return (head->sph_root); \
return (NULL); \
} \
\
static __inline struct type * \
name##_SPLAY_NEXT(struct name *head, struct type *elm) \
{ \
name##_SPLAY(head, elm); \
if (SPLAY_RIGHT(elm, field) != NULL) { \
elm = SPLAY_RIGHT(elm, field); \
while (SPLAY_LEFT(elm, field) != NULL) { \
elm = SPLAY_LEFT(elm, field); \
} \
} else \
elm = NULL; \
return (elm); \
} \
\
static __inline struct type * \
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
* Moves node close to the key of elm to top
*/
#define SPLAY_GENERATE(name, type, field, cmp) \
struct type * \
name##_SPLAY_INSERT(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) { \
SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
} else { \
int __comp; \
name##_SPLAY(head, elm); \
__comp = (cmp)(elm, (head)->sph_root); \
if(__comp < 0) { \
SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
SPLAY_RIGHT(elm, field) = (head)->sph_root; \
SPLAY_LEFT((head)->sph_root, field) = NULL; \
} else if (__comp > 0) { \
SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
SPLAY_LEFT(elm, field) = (head)->sph_root; \
SPLAY_RIGHT((head)->sph_root, field) = NULL; \
} else \
return ((head)->sph_root); \
} \
(head)->sph_root = (elm); \
return (NULL); \
} \
\
struct type * \
name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *__tmp; \
if (SPLAY_EMPTY(head)) \
return (NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) { \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
} else { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
name##_SPLAY(head, elm); \
SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
} \
return (elm); \
} \
return (NULL); \
} \
\
void \
name##_SPLAY(struct name *head, struct type *elm) \
{ \
struct type __node, *__left, *__right, *__tmp; \
int __comp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
__left = __right = &__node; \
\
while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) > 0){ \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
} \
\
/* Splay with either the minimum or the maximum element \
* Used to find minimum or maximum element in tree. \
*/ \
void name##_SPLAY_MINMAX(struct name *head, int __comp) \
{ \
struct type __node, *__left, *__right, *__tmp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
__left = __right = &__node; \
\
while (1) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp > 0) { \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
}
#define SPLAY_NEGINF -1
#define SPLAY_INF 1
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_INF))
#define SPLAY_FOREACH(x, name, head) \
for ((x) = SPLAY_MIN(name, head); \
(x) != NULL; \
(x) = SPLAY_NEXT(name, head, x))
/* Macros that define a red-black tree */
#define RB_HEAD(name, type) \
struct name { \
struct type *rbh_root; /* root of the tree */ \
}
#define RB_INITIALIZER(root) \
{ NULL }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
} while (/*CONSTCOND*/ 0)
/*
* Undef for Linux
*/
#undef RB_BLACK
#undef RB_RED
#undef RB_ROOT
#define RB_BLACK 0
#define RB_RED 1
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left element */ \
struct type *rbe_right; /* right element */ \
struct type *rbe_parent; /* parent element */ \
int rbe_color; /* node color */ \
}
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET(elm, parent, field) do { \
RB_PARENT(elm, field) = parent; \
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
RB_COLOR(elm, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#define RB_SET_BLACKRED(black, red, field) do { \
RB_COLOR(black, field) = RB_BLACK; \
RB_COLOR(red, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#ifndef RB_AUGMENT
#define RB_AUGMENT(x) do {} while (0)
#endif
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
(tmp) = RB_RIGHT(elm, field); \
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_LEFT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
(tmp) = RB_LEFT(elm, field); \
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_RIGHT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *); \
attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
attr struct type *name##_RB_INSERT(struct name *, struct type *); \
attr struct type *name##_RB_FIND(struct name *, struct type *); \
attr struct type *name##_RB_NFIND(struct name *, struct type *); \
attr struct type *name##_RB_NEXT(struct type *); \
attr struct type *name##_RB_PREV(struct type *); \
attr struct type *name##_RB_MINMAX(struct name *, int) \
/* Main rb operation.
* Moves node close to the key of elm to top
*/
#define RB_GENERATE(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp,)
#define RB_GENERATE_STATIC(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
attr void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = RB_PARENT(elm, field)) != NULL && \
RB_COLOR(parent, field) == RB_RED) { \
gparent = RB_PARENT(parent, field); \
if (parent == RB_LEFT(gparent, field)) { \
tmp = RB_RIGHT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field);\
elm = gparent; \
continue; \
} \
if (RB_RIGHT(parent, field) == elm) { \
RB_ROTATE_LEFT(head, parent, tmp, field);\
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = RB_LEFT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field);\
elm = gparent; \
continue; \
} \
if (RB_LEFT(parent, field) == elm) { \
RB_ROTATE_RIGHT(head, parent, tmp, field);\
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
} \
\
attr void \
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
elm != RB_ROOT(head)) { \
if (RB_LEFT(parent, field) == elm) { \
tmp = RB_RIGHT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_LEFT(head, parent, tmp, field);\
tmp = RB_RIGHT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
struct type *oleft; \
if ((oleft = RB_LEFT(tmp, field)) \
!= NULL) \
RB_COLOR(oleft, field) = RB_BLACK;\
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_RIGHT(head, tmp, oleft, field);\
tmp = RB_RIGHT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_RIGHT(tmp, field)) \
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
RB_ROTATE_LEFT(head, parent, tmp, field);\
elm = RB_ROOT(head); \
break; \
} \
} else { \
tmp = RB_LEFT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_RIGHT(head, parent, tmp, field);\
tmp = RB_LEFT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
struct type *oright; \
if ((oright = RB_RIGHT(tmp, field)) \
!= NULL) \
RB_COLOR(oright, field) = RB_BLACK;\
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_LEFT(head, tmp, oright, field);\
tmp = RB_LEFT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_LEFT(tmp, field)) \
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
RB_ROTATE_RIGHT(head, parent, tmp, field);\
elm = RB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
RB_COLOR(elm, field) = RB_BLACK; \
} \
\
attr struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (RB_LEFT(elm, field) == NULL) \
child = RB_RIGHT(elm, field); \
else if (RB_RIGHT(elm, field) == NULL) \
child = RB_LEFT(elm, field); \
else { \
struct type *left; \
elm = RB_RIGHT(elm, field); \
while ((left = RB_LEFT(elm, field)) != NULL) \
elm = left; \
child = RB_RIGHT(elm, field); \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
if (RB_PARENT(elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (RB_PARENT(old, field)) { \
if (RB_LEFT(RB_PARENT(old, field), field) == old)\
RB_LEFT(RB_PARENT(old, field), field) = elm;\
else \
RB_RIGHT(RB_PARENT(old, field), field) = elm;\
RB_AUGMENT(RB_PARENT(old, field)); \
} else \
RB_ROOT(head) = elm; \
RB_PARENT(RB_LEFT(old, field), field) = elm; \
if (RB_RIGHT(old, field)) \
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
if (parent) { \
left = parent; \
do { \
RB_AUGMENT(left); \
} while ((left = RB_PARENT(left, field)) != NULL); \
} \
goto color; \
} \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
color: \
if (color == RB_BLACK) \
name##_RB_REMOVE_COLOR(head, parent, child); \
return (old); \
} \
\
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = RB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
RB_SET(elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
RB_LEFT(parent, field) = elm; \
else \
RB_RIGHT(parent, field) = elm; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = elm; \
name##_RB_INSERT_COLOR(head, elm); \
return (NULL); \
} \
\
/* Finds the node with the same key as elm */ \
attr struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
} \
\
/* Finds the first node greater than or equal to the search key */ \
attr struct type * \
name##_RB_NFIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *res = NULL; \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) { \
res = tmp; \
tmp = RB_LEFT(tmp, field); \
} \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (res); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
if (RB_RIGHT(elm, field)) { \
elm = RB_RIGHT(elm, field); \
while (RB_LEFT(elm, field)) \
elm = RB_LEFT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_PREV(struct type *elm) \
{ \
if (RB_LEFT(elm, field)) { \
elm = RB_LEFT(elm, field); \
while (RB_RIGHT(elm, field)) \
elm = RB_RIGHT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field)))\
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
attr struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = RB_LEFT(tmp, field); \
else \
tmp = RB_RIGHT(tmp, field); \
} \
return (parent); \
}
#define RB_NEGINF -1
#define RB_INF 1
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_PREV(name, x, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_FOREACH(x, name, head) \
for ((x) = RB_MIN(name, head); \
(x) != NULL; \
(x) = name##_RB_NEXT(x))
#define RB_FOREACH_REVERSE(x, name, head) \
for ((x) = RB_MAX(name, head); \
(x) != NULL; \
(x) = name##_RB_PREV(x))
#pragma clang diagnostic pop
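As a quick orientation to the macro API above (the node type and comparator here are illustrative, not taken from xhyve): embed an RB_ENTRY in your struct, declare a head, generate the functions once, and then use RB_INSERT/RB_FIND/RB_FOREACH. The SPLAY_* family earlier in the file is used the same way.

    #include <stdio.h>
    #include <stdlib.h>
    #include <xhyve/support/tree.h> /* the header above; include path assumed */

    struct node {
            int key;
            RB_ENTRY(node) link; /* embeds left/right/parent pointers and color */
    };

    static int
    node_cmp(struct node *a, struct node *b)
    {
            return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    RB_HEAD(node_tree, node) head = RB_INITIALIZER(&head);
    RB_GENERATE(node_tree, node, link, node_cmp)

    int
    main(void)
    {
            struct node *n, *it;

            for (int i = 0; i < 5; i++) {
                    n = calloc(1, sizeof(*n));
                    n->key = i * 10;
                    RB_INSERT(node_tree, &head, n); /* returns NULL on success */
            }
            RB_FOREACH(it, node_tree, &head)
                    printf("%d\n", it->key);
            return (0);
    }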

View file

@ -0,0 +1,153 @@
/*-
* Copyright (c) 2002,2005 Marcel Moolenaar
* Copyright (c) 2002 Hiten Mahesh Pandya
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#define _UUID_NODE_LEN 6
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct uuid {
uint32_t time_low;
uint16_t time_mid;
uint16_t time_hi_and_version;
uint8_t clock_seq_hi_and_reserved;
uint8_t clock_seq_low;
uint8_t node[_UUID_NODE_LEN];
};
#pragma clang diagnostic pop
typedef struct uuid uuid_internal_t;
/*
* This implementation mostly conforms to the DCE 1.1 specification.
* See Also:
* uuidgen(1), uuidgen(2), uuid(3)
*/
/* Status codes returned by the functions. */
#define uuid_s_ok 0
#define uuid_s_bad_version 1
#define uuid_s_invalid_string_uuid 2
#define uuid_s_no_memory 3
/*
* uuid_create_nil() - create a nil UUID.
* See also:
* http://www.opengroup.org/onlinepubs/009629399/uuid_create_nil.htm
*/
static inline void
uuid_create_nil(uuid_t *u, uint32_t *status)
{
if (status)
*status = uuid_s_ok;
bzero(u, sizeof(*u));
}
static inline void
uuid_enc_le(void *buf, const uuid_t *uuid)
{
uuid_internal_t *u = (uuid_internal_t *) ((void *) uuid);
uint8_t *p = buf;
int i;
memcpy(p, &u->time_low, 4);
memcpy(p + 4, &u->time_mid, 2);
memcpy(p + 6, &u->time_hi_and_version, 2);
p[8] = u->clock_seq_hi_and_reserved;
p[9] = u->clock_seq_low;
for (i = 0; i < _UUID_NODE_LEN; i++)
p[10 + i] = u->node[i];
}
/*
* uuid_from_string() - convert a string representation of an UUID into
* a binary representation.
* See also:
* http://www.opengroup.org/onlinepubs/009629399/uuid_from_string.htm
*
* NOTE: The sequence field is in big-endian, while the time fields are in
* native byte order.
*/
static inline void
uuid_from_string(const char *s, uuid_t *uuid, uint32_t *status)
{
uuid_internal_t *u = (uuid_internal_t *) ((void *) uuid);
int n;
/* Short-circuit 2 special cases: NULL pointer and empty string. */
if (s == NULL || *s == '\0') {
uuid_create_nil(((uuid_t *) u), status);
return;
}
/* Assume the worst. */
if (status != NULL)
*status = uuid_s_invalid_string_uuid;
/* The UUID string representation has a fixed length. */
if (strlen(s) != 36)
return;
/*
* We only work with "new" UUIDs. New UUIDs have the form:
* 01234567-89ab-cdef-0123-456789abcdef
* The so called "old" UUIDs, which we don't support, have the form:
* 0123456789ab.cd.ef.01.23.45.67.89.ab
*/
if (s[8] != '-')
return;
n = sscanf(s,
"%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
&u->time_low, &u->time_mid, &u->time_hi_and_version,
&u->clock_seq_hi_and_reserved, &u->clock_seq_low, &u->node[0],
&u->node[1], &u->node[2], &u->node[3], &u->node[4], &u->node[5]);
/* Make sure we have all conversions. */
if (n != 11)
return;
/* We have a successful scan. Check semantics... */
n = u->clock_seq_hi_and_reserved;
if ((n & 0x80) != 0x00 && /* variant 0? */
(n & 0xc0) != 0x80 && /* variant 1? */
(n & 0xe0) != 0xc0) { /* variant 2? */
if (status != NULL)
*status = uuid_s_bad_version;
} else {
if (status != NULL)
*status = uuid_s_ok;
}
}
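A short usage sketch for the helpers above, mirroring how an SMBIOS-style serial might be handled: parse the canonical 36-character form, then serialize it into a 16-byte little-endian buffer with uuid_enc_le(). The include paths and the uuid_t definition (taken from the system headers) are assumptions here:

    #include <stdint.h>
    #include <uuid/uuid.h>          /* system uuid_t (assumption) */
    #include <xhyve/support/uuid.h> /* the header above; include path assumed */

    int
    main(void)
    {
            uuid_t uuid;
            uint32_t status;
            uint8_t raw[16];

            uuid_from_string("de305d54-75b4-431b-adb2-eb6b9e546014", &uuid, &status);
            if (status != uuid_s_ok)
                    return (1);
            uuid_enc_le(raw, &uuid); /* raw[] now holds the wire-format UUID */
            return (0);
    }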

View file

@ -26,20 +26,17 @@
* $FreeBSD$
*/
#ifndef _UART_EMUL_H_
#define _UART_EMUL_H_
#pragma once
#define UART_IO_BAR_SIZE 8
#define UART_IO_BAR_SIZE 8
struct uart_softc;
typedef void (*uart_intr_func_t)(void *arg);
struct uart_softc *uart_init(uart_intr_func_t intr_assert,
uart_intr_func_t intr_deassert, void *arg);
uart_intr_func_t intr_deassert, void *arg);
int uart_legacy_alloc(int unit, int *ioaddr, int *irq);
uint8_t uart_read(struct uart_softc *sc, int offset);
void uart_write(struct uart_softc *sc, int offset, uint8_t value);
int uart_set_backend(struct uart_softc *sc, const char *opt);
#endif
int uart_legacy_alloc(int unit, int *ioaddr, int *irq);
uint8_t uart_read(struct uart_softc *sc, int offset);
void uart_write(struct uart_softc *sc, int offset, uint8_t value);
int uart_set_backend(struct uart_softc *sc, const char *opt);

View file

@ -26,8 +26,10 @@
* $FreeBSD$
*/
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
#pragma once
#include <stdint.h>
#include <pthread.h>
/*
* These are derived from several virtio specifications.
@ -125,35 +127,40 @@
#define VRING_DESC_F_WRITE (1 << 1)
#define VRING_DESC_F_INDIRECT (1 << 2)
struct virtio_desc { /* AKA vring_desc */
uint64_t vd_addr; /* guest physical address */
uint32_t vd_len; /* length of scatter/gather seg */
uint16_t vd_flags; /* VRING_F_DESC_* */
uint16_t vd_next; /* next desc if F_NEXT */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
struct virtio_desc { /* AKA vring_desc */
uint64_t vd_addr; /* guest physical address */
uint32_t vd_len; /* length of scatter/gather seg */
uint16_t vd_flags; /* VRING_F_DESC_* */
uint16_t vd_next; /* next desc if F_NEXT */
} __packed;
struct virtio_used { /* AKA vring_used_elem */
uint32_t vu_idx; /* head of used descriptor chain */
uint32_t vu_tlen; /* length written-to */
struct virtio_used { /* AKA vring_used_elem */
uint32_t vu_idx; /* head of used descriptor chain */
uint32_t vu_tlen; /* length written-to */
} __packed;
#define VRING_AVAIL_F_NO_INTERRUPT 1
struct vring_avail {
uint16_t va_flags; /* VRING_AVAIL_F_* */
uint16_t va_idx; /* counts to 65535, then cycles */
uint16_t va_ring[]; /* size N, reported in QNUM value */
/* uint16_t va_used_event; -- after N ring entries */
uint16_t va_flags; /* VRING_AVAIL_F_* */
uint16_t va_idx; /* counts to 65535, then cycles */
uint16_t va_ring[]; /* size N, reported in QNUM value */
/* uint16_t va_used_event; -- after N ring entries */
} __packed;
#define VRING_USED_F_NO_NOTIFY 1
struct vring_used {
uint16_t vu_flags; /* VRING_USED_F_* */
uint16_t vu_idx; /* counts to 65535, then cycles */
struct virtio_used vu_ring[]; /* size N */
/* uint16_t vu_avail_event; -- after N ring entries */
uint16_t vu_flags; /* VRING_USED_F_* */
uint16_t vu_idx; /* counts to 65535, then cycles */
struct virtio_used vu_ring[]; /* size N */
/* uint16_t vu_avail_event; -- after N ring entries */
} __packed;
#pragma clang diagnostic pop
/*
* The address of any given virtual queue is determined by a single
* Page Frame Number register. The guest writes the PFN into the
@ -269,16 +276,15 @@ vring_size(u_int qsz)
/* constant 3 below = va_flags, va_idx, va_used_event */
size = sizeof(struct virtio_desc) * qsz + sizeof(uint16_t) * (3 + qsz);
size = roundup2(size, VRING_ALIGN);
size = roundup2(size, ((size_t) VRING_ALIGN));
/* constant 3 below = vu_flags, vu_idx, vu_avail_event */
size += sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz;
size = roundup2(size, VRING_ALIGN);
size = roundup2(size, ((size_t) VRING_ALIGN));
return (size);
}
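As a worked example of this layout (assuming the conventional VRING_ALIGN of 4096): for qsz = 256 the descriptor table is 16 * 256 = 4096 bytes and the avail ring 2 * (3 + 256) = 518 bytes, which rounds up to 8192; the used ring then adds 6 + 8 * 256 = 2054 bytes, and the final round-up yields 12288 bytes, i.e. three 4 KiB pages for the whole queue.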
struct vmctx;
struct pci_devinst;
struct vqueue_info;
@ -316,45 +322,53 @@ struct vqueue_info;
#define VIRTIO_EVENT_IDX 0x02 /* use the event-index values */
#define VIRTIO_BROKED 0x08 /* ??? */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct virtio_softc {
struct virtio_consts *vs_vc; /* constants (see below) */
int vs_flags; /* VIRTIO_* flags from above */
pthread_mutex_t *vs_mtx; /* POSIX mutex, if any */
struct pci_devinst *vs_pi; /* PCI device instance */
uint32_t vs_negotiated_caps; /* negotiated capabilities */
struct vqueue_info *vs_queues; /* one per vc_nvq */
int vs_curq; /* current queue */
uint8_t vs_status; /* value from last status write */
uint8_t vs_isr; /* ISR flags, if not MSI-X */
uint16_t vs_msix_cfg_idx; /* MSI-X vector for config event */
struct virtio_consts *vs_vc; /* constants (see below) */
int vs_flags; /* VIRTIO_* flags from above */
pthread_mutex_t *vs_mtx; /* POSIX mutex, if any */
struct pci_devinst *vs_pi; /* PCI device instance */
uint32_t vs_negotiated_caps; /* negotiated capabilities */
struct vqueue_info *vs_queues; /* one per vc_nvq */
int vs_curq; /* current queue */
uint8_t vs_status; /* value from last status write */
uint8_t vs_isr; /* ISR flags, if not MSI-X */
uint16_t vs_msix_cfg_idx; /* MSI-X vector for config event */
};
#define VS_LOCK(vs) \
do { \
if (vs->vs_mtx) \
pthread_mutex_lock(vs->vs_mtx); \
#define VS_LOCK(vs) \
do { \
if (vs->vs_mtx) \
pthread_mutex_lock(vs->vs_mtx); \
} while (0)
#define VS_UNLOCK(vs) \
do { \
if (vs->vs_mtx) \
pthread_mutex_unlock(vs->vs_mtx); \
#define VS_UNLOCK(vs) \
do { \
if (vs->vs_mtx) \
pthread_mutex_unlock(vs->vs_mtx); \
} while (0)
struct virtio_consts {
const char *vc_name; /* name of driver (for diagnostics) */
int vc_nvq; /* number of virtual queues */
size_t vc_cfgsize; /* size of dev-specific config regs */
void (*vc_reset)(void *); /* called on virtual device reset */
void (*vc_qnotify)(void *, struct vqueue_info *);
/* called on QNOTIFY if no VQ notify */
int (*vc_cfgread)(void *, int, int, uint32_t *);
/* called to read config regs */
int (*vc_cfgwrite)(void *, int, int, uint32_t);
/* called to write config regs */
void (*vc_apply_features)(void *, uint64_t);
/* called to apply negotiated features */
uint64_t vc_hv_caps; /* hypervisor-provided capabilities */
/* name of driver (for diagnostics) */
const char *vc_name;
/* number of virtual queues */
int vc_nvq;
/* size of dev-specific config regs */
size_t vc_cfgsize;
/* called on virtual device reset */
void (*vc_reset)(void *);
/* called on QNOTIFY if no VQ notify */
void (*vc_qnotify)(void *, struct vqueue_info *);
/* called to read config regs */
int (*vc_cfgread)(void *, int, int, uint32_t *);
/* called to write config regs */
int (*vc_cfgwrite)(void *, int, int, uint32_t);
/* called to apply negotiated features */
void (*vc_apply_features)(void *, uint64_t);
/* hypervisor-provided capabilities */
uint64_t vc_hv_caps;
};
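A device model hooks into the framework by filling in one of these structures and handing it to vi_softc_linkup(). A hedged sketch for an imaginary single-queue device; every rnd_* name is hypothetical and the include path is assumed:

    #include <stdint.h>
    #include <xhyve/virtio.h> /* include path assumed */

    static void rnd_reset(void *sc) { (void) sc; }
    static void rnd_notify(void *sc, struct vqueue_info *vq) { (void) sc; (void) vq; }
    static int rnd_cfgread(void *sc, int off, int size, uint32_t *v)
    { (void) sc; (void) off; (void) size; *v = 0; return (0); }
    static int rnd_cfgwrite(void *sc, int off, int size, uint32_t v)
    { (void) sc; (void) off; (void) size; (void) v; return (0); }

    static struct virtio_consts rnd_vi_consts = {
            .vc_name = "example-rnd",
            .vc_nvq = 1,              /* one virtqueue */
            .vc_cfgsize = 0,          /* no device-specific config space */
            .vc_reset = rnd_reset,
            .vc_qnotify = rnd_notify,
            .vc_cfgread = rnd_cfgread,
            .vc_cfgwrite = rnd_cfgwrite,
            .vc_apply_features = NULL,
            .vc_hv_caps = 0,          /* no extra feature bits offered */
    };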
/*
@ -377,25 +391,34 @@ struct virtio_consts {
#define VQ_ALLOC 0x01 /* set once we have a pfn */
#define VQ_BROKED 0x02 /* ??? */
struct vqueue_info {
uint16_t vq_qsize; /* size of this queue (a power of 2) */
void (*vq_notify)(void *, struct vqueue_info *);
/* called instead of vc_notify, if not NULL */
struct virtio_softc *vq_vs; /* backpointer to softc */
uint16_t vq_num; /* we're the num'th queue in the softc */
uint16_t vq_flags; /* flags (see above) */
uint16_t vq_last_avail; /* a recent value of vq_avail->va_idx */
uint16_t vq_save_used; /* saved vq_used->vu_idx; see vq_endchains */
uint16_t vq_msix_idx; /* MSI-X index, or VIRTIO_MSI_NO_VECTOR */
uint32_t vq_pfn; /* PFN of virt queue (not shifted!) */
volatile struct virtio_desc *vq_desc; /* descriptor array */
volatile struct vring_avail *vq_avail; /* the "avail" ring */
volatile struct vring_used *vq_used; /* the "used" ring */
/* size of this queue (a power of 2) */
uint16_t vq_qsize;
/* called instead of vc_notify, if not NULL */
void (*vq_notify)(void *, struct vqueue_info *);
/* backpointer to softc */
struct virtio_softc *vq_vs;
/* we're the num'th queue in the softc */
uint16_t vq_num;
/* flags (see above) */
uint16_t vq_flags;
/* a recent value of vq_avail->va_idx */
uint16_t vq_last_avail;
/* saved vq_used->vu_idx; see vq_endchains */
uint16_t vq_save_used;
/* MSI-X index, or VIRTIO_MSI_NO_VECTOR */
uint16_t vq_msix_idx;
/* PFN of virt queue (not shifted!) */
uint32_t vq_pfn;
/* descriptor array */
volatile struct virtio_desc *vq_desc;
/* the "avail" ring */
volatile struct vring_avail *vq_avail;
/* the "used" ring */
volatile struct vring_used *vq_used;
};
#pragma clang diagnostic pop
/* as noted above, these are sort of backwards, name-wise */
#define VQ_AVAIL_EVENT_IDX(vq) \
(*(volatile uint16_t *)&(vq)->vq_used->vu_ring[(vq)->vq_qsize])
@ -408,7 +431,6 @@ struct vqueue_info {
static inline int
vq_ring_ready(struct vqueue_info *vq)
{
return (vq->vq_flags & VQ_ALLOC);
}
@ -419,7 +441,6 @@ vq_ring_ready(struct vqueue_info *vq)
static inline int
vq_has_descs(struct vqueue_info *vq)
{
return (vq_ring_ready(vq) && vq->vq_last_avail !=
vq->vq_avail->va_idx);
}
@ -431,7 +452,6 @@ vq_has_descs(struct vqueue_info *vq)
static inline void
vq_interrupt(struct virtio_softc *vs, struct vqueue_info *vq)
{
if (pci_msix_enabled(vs->vs_pi))
pci_generate_msix(vs->vs_pi, vq->vq_msix_idx);
else {
@ -444,21 +464,17 @@ vq_interrupt(struct virtio_softc *vs, struct vqueue_info *vq)
}
struct iovec;
void vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
void *dev_softc, struct pci_devinst *pi,
struct vqueue_info *queues);
int vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
void vi_reset_dev(struct virtio_softc *);
void vi_set_io_bar(struct virtio_softc *, int);
int vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags);
void vq_retchain(struct vqueue_info *vq);
void vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
void vq_endchains(struct vqueue_info *vq, int used_all_avail);
uint64_t vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int baridx, uint64_t offset, int size);
void vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int baridx, uint64_t offset, int size, uint64_t value);
#endif /* _VIRTIO_H_ */
void vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
void *dev_softc, struct pci_devinst *pi, struct vqueue_info *queues);
int vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
void vi_reset_dev(struct virtio_softc *);
void vi_set_io_bar(struct virtio_softc *, int);
int vq_getchain(struct vqueue_info *vq, uint16_t *pidx, struct iovec *iov,
int n_iov, uint16_t *flags);
void vq_retchain(struct vqueue_info *vq);
void vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
void vq_endchains(struct vqueue_info *vq, int used_all_avail);
uint64_t vi_pci_read(int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size);
void vi_pci_write(int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset,
int size, uint64_t value);

View file

@ -26,75 +26,62 @@
* $FreeBSD$
*/
#ifndef _VMCS_H_
#define _VMCS_H_
#pragma once
#ifdef _KERNEL
struct vmcs {
uint32_t identifier;
uint32_t abort_code;
char _impl_specific[PAGE_SIZE - sizeof(uint32_t) * 2];
};
CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);
#include <stdint.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <xhyve/vmm/vmm.h>
/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
uint32_t index;
uint32_t reserved;
uint64_t val;
int vmcs_getreg(int vcpuid, int ident, uint64_t *rv);
int vmcs_setreg(int vcpuid, int ident, uint64_t val);
int vmcs_getdesc(int vcpuid, int ident, struct seg_desc *desc);
int vmcs_setdesc(int vcpuid, int ident, struct seg_desc *desc);
};
int vmcs_set_msr_save(struct vmcs *vmcs, u_long g_area, u_int g_count);
int vmcs_init(struct vmcs *vmcs);
int vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv);
int vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val);
int vmcs_getdesc(struct vmcs *vmcs, int running, int ident,
struct seg_desc *desc);
int vmcs_setdesc(struct vmcs *vmcs, int running, int ident,
struct seg_desc *desc);
/*
* Avoid header pollution caused by inline use of 'vtophys()' in vmx_cpufunc.h
*/
#ifdef _VMX_CPUFUNC_H_
static __inline uint64_t
vmcs_read(uint32_t encoding)
vmcs_read(int vcpuid, uint32_t encoding)
{
int error;
uint64_t val;
error = vmread(encoding, &val);
KASSERT(error == 0, ("vmcs_read(%u) error %d", encoding, error));
hv_vmx_vcpu_read_vmcs(((hv_vcpuid_t) vcpuid), encoding, &val);
return (val);
}
static __inline void
vmcs_write(uint32_t encoding, uint64_t val)
vmcs_write(int vcpuid, uint32_t encoding, uint64_t val)
{
int error;
error = vmwrite(encoding, val);
KASSERT(error == 0, ("vmcs_write(%u) error %d", encoding, error));
if (encoding == 0x00004002) {
if (val == 0x0000000000000004) {
abort();
}
}
hv_vmx_vcpu_write_vmcs(((hv_vcpuid_t) vcpuid), encoding, val);
}
#endif /* _VMX_CPUFUNC_H_ */
#define vmexit_instruction_length() vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH)
#define vmcs_guest_rip() vmcs_read(VMCS_GUEST_RIP)
#define vmcs_instruction_error() vmcs_read(VMCS_INSTRUCTION_ERROR)
#define vmcs_exit_reason() (vmcs_read(VMCS_EXIT_REASON) & 0xffff)
#define vmcs_exit_qualification() vmcs_read(VMCS_EXIT_QUALIFICATION)
#define vmcs_guest_cr3() vmcs_read(VMCS_GUEST_CR3)
#define vmcs_gpa() vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS)
#define vmcs_gla() vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)
#define vmcs_idt_vectoring_info() vmcs_read(VMCS_IDT_VECTORING_INFO)
#define vmcs_idt_vectoring_err() vmcs_read(VMCS_IDT_VECTORING_ERROR)
#endif /* _KERNEL */
#define vmexit_instruction_length(vcpuid) \
vmcs_read(vcpuid, VMCS_EXIT_INSTRUCTION_LENGTH)
#define vmcs_guest_rip(vcpuid) \
vmcs_read(vcpuid, VMCS_GUEST_RIP)
#define vmcs_instruction_error(vcpuid) \
vmcs_read(vcpuid, VMCS_INSTRUCTION_ERROR)
#define vmcs_exit_reason(vcpuid) \
(vmcs_read(vcpuid, VMCS_EXIT_REASON) & 0xffff)
#define vmcs_exit_qualification(vcpuid) \
vmcs_read(vcpuid, VMCS_EXIT_QUALIFICATION)
#define vmcs_guest_cr3(vcpuid) \
vmcs_read(vcpuid, VMCS_GUEST_CR3)
#define vmcs_gpa(vcpuid) \
vmcs_read(vcpuid, VMCS_GUEST_PHYSICAL_ADDRESS)
#define vmcs_gla(vcpuid) \
vmcs_read(vcpuid, VMCS_GUEST_LINEAR_ADDRESS)
#define vmcs_idt_vectoring_info(vcpuid) \
vmcs_read(vcpuid, VMCS_IDT_VECTORING_INFO)
#define vmcs_idt_vectoring_err(vcpuid) \
vmcs_read(vcpuid, VMCS_IDT_VECTORING_ERROR)
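Since every accessor now takes the vcpu id instead of an implicit "current" VMCS, exit handling reads along these lines (a sketch only; the function name and include path are assumptions):

    #include <stdint.h>
    #include <stdio.h>
    #include <xhyve/vmm/intel/vmcs.h> /* include path assumed */

    /* Hedged sketch: gather the basic exit state for one vcpu. */
    static void
    dump_exit_state(int vcpuid)
    {
            uint32_t reason = (uint32_t) vmcs_exit_reason(vcpuid);
            uint64_t qual = vmcs_exit_qualification(vcpuid);
            uint64_t rip = vmcs_guest_rip(vcpuid);
            uint64_t len = vmexit_instruction_length(vcpuid);

            printf("exit %u qual 0x%llx rip 0x%llx len %llu\n", reason,
                (unsigned long long) qual, (unsigned long long) rip,
                (unsigned long long) len);
    }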
#define VMCS_INITIAL 0xffffffffffffffff
#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)
#define VMCS_IDENT(encoding) ((int) (((unsigned) (encoding)) | 0x80000000))
/*
* VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
*/
@ -342,33 +329,33 @@ vmcs_write(uint32_t encoding, uint64_t val)
*
* Applies to VM-exits due to hardware exception or EPT fault.
*/
#define EXIT_QUAL_NMIUDTI (1 << 12)
#define EXIT_QUAL_NMIUDTI (1U << 12)
/*
* VMCS interrupt information fields
*/
#define VMCS_INTR_VALID (1U << 31)
#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */
#define VMCS_INTR_T_HWINTR (0 << 8)
#define VMCS_INTR_T_NMI (2 << 8)
#define VMCS_INTR_T_HWEXCEPTION (3 << 8)
#define VMCS_INTR_T_SWINTR (4 << 8)
#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
#define VMCS_INTR_T_SWEXCEPTION (6 << 8)
#define VMCS_INTR_DEL_ERRCODE (1 << 11)
#define VMCS_INTR_T_MASK 0x700U /* Interruption-info type */
#define VMCS_INTR_T_HWINTR (0U << 8)
#define VMCS_INTR_T_NMI (2U << 8)
#define VMCS_INTR_T_HWEXCEPTION (3U << 8)
#define VMCS_INTR_T_SWINTR (4U << 8)
#define VMCS_INTR_T_PRIV_SWEXCEPTION (5U << 8)
#define VMCS_INTR_T_SWEXCEPTION (6U << 8)
#define VMCS_INTR_DEL_ERRCODE (1U << 11)
/*
* VMCS IDT-Vectoring information fields
*/
#define VMCS_IDT_VEC_VALID (1U << 31)
#define VMCS_IDT_VEC_ERRCODE_VALID (1 << 11)
#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)
/*
* VMCS Guest interruptibility field
*/
#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)
#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)
#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)
#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)
#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1U << 0)
#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1U << 1)
#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1U << 2)
#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1U << 3)
/*
* Exit qualification for EXIT_REASON_INVAL_VMCS
@ -397,5 +384,3 @@ vmcs_write(uint32_t encoding, uint64_t val)
* Exit qualification for APIC-write VM exit
*/
#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)
#endif

View file

@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,50 +27,12 @@
* $FreeBSD$
*/
#ifndef _VMX_H_
#define _VMX_H_
#pragma once
#include "vmcs.h"
struct pmap;
struct vmxctx {
register_t guest_rdi; /* Guest state */
register_t guest_rsi;
register_t guest_rdx;
register_t guest_rcx;
register_t guest_r8;
register_t guest_r9;
register_t guest_rax;
register_t guest_rbx;
register_t guest_rbp;
register_t guest_r10;
register_t guest_r11;
register_t guest_r12;
register_t guest_r13;
register_t guest_r14;
register_t guest_r15;
register_t guest_cr2;
register_t host_r15; /* Host state */
register_t host_r14;
register_t host_r13;
register_t host_r12;
register_t host_rbp;
register_t host_rsp;
register_t host_rbx;
/*
* XXX todo debug registers and fpu state
*/
int inst_fail_status;
/*
* The pmap needs to be deactivated in vmx_enter_guest()
* so keep a copy of the 'pmap' in each vmxctx.
*/
struct pmap *pmap;
};
#include <stddef.h>
#include <xhyve/support/misc.h>
#include <xhyve/vmm/vmm.h>
#include <xhyve/vmm/intel/vmcs.h>
struct vmxcap {
int set;
@ -77,16 +40,19 @@ struct vmxcap {
uint32_t proc_ctls2;
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct vmxstate {
uint64_t nextrip; /* next instruction to be executed by guest */
int lastcpu; /* host cpu that this 'vcpu' last ran on */
uint16_t vpid;
};
#pragma clang diagnostic pop
struct apic_page {
uint32_t reg[PAGE_SIZE / 4];
uint32_t reg[XHYVE_PAGE_SIZE / 4];
};
CTASSERT(sizeof(struct apic_page) == PAGE_SIZE);
CTASSERT(sizeof(struct apic_page) == XHYVE_PAGE_SIZE);
/* Posted Interrupt Descriptor (described in section 29.6 of the Intel SDM) */
struct pir_desc {
@ -109,32 +75,20 @@ enum {
/* virtual machine softc */
struct vmx {
struct vmcs vmcs[VM_MAXCPU]; /* one vmcs per virtual cpu */
struct apic_page apic_page[VM_MAXCPU]; /* one apic page per vcpu */
char msr_bitmap[PAGE_SIZE];
struct pir_desc pir_desc[VM_MAXCPU];
uint64_t guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
struct vmxctx ctx[VM_MAXCPU];
struct vmxcap cap[VM_MAXCPU];
struct vmxstate state[VM_MAXCPU];
uint64_t eptp;
struct vm *vm;
long eptgen[MAXCPU]; /* cached pmap->pm_eptgen */
struct apic_page apic_page[VM_MAXCPU]; /* one apic page per vcpu */
uint64_t guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
struct vmxcap cap[VM_MAXCPU];
struct vmxstate state[VM_MAXCPU];
struct vm *vm;
};
CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, pir_desc[0]) & 63) == 0);
#define VMX_GUEST_VMEXIT 0
#define VMX_VMRESUME_ERROR 1
#define VMX_VMLAUNCH_ERROR 2
#define VMX_INVEPT_ERROR 3
int vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
void vmx_call_isr(uintptr_t entry);
u_long vmx_fix_cr0(u_long cr0);
u_long vmx_fix_cr4(u_long cr4);
extern char vmx_exit_guest[];
#endif

View file

@ -0,0 +1,94 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
/* Pin-Based VM-Execution Controls */
#define PINBASED_EXTINT_EXITING (1u << 0)
#define PINBASED_NMI_EXITING (1u << 3)
#define PINBASED_VIRTUAL_NMI (1u << 5)
#define PINBASED_PREMPTION_TIMER (1u << 6)
#define PINBASED_POSTED_INTERRUPT (1u << 7)
/* Primary Processor-Based VM-Execution Controls */
#define PROCBASED_INT_WINDOW_EXITING (1u << 2)
#define PROCBASED_TSC_OFFSET (1u << 3)
#define PROCBASED_HLT_EXITING (1u << 7)
#define PROCBASED_INVLPG_EXITING (1u << 9)
#define PROCBASED_MWAIT_EXITING (1u << 10)
#define PROCBASED_RDPMC_EXITING (1u << 11)
#define PROCBASED_RDTSC_EXITING (1u << 12)
#define PROCBASED_CR3_LOAD_EXITING (1u << 15)
#define PROCBASED_CR3_STORE_EXITING (1u << 16)
#define PROCBASED_CR8_LOAD_EXITING (1u << 19)
#define PROCBASED_CR8_STORE_EXITING (1u << 20)
#define PROCBASED_USE_TPR_SHADOW (1u << 21)
#define PROCBASED_NMI_WINDOW_EXITING (1u << 22)
#define PROCBASED_MOV_DR_EXITING (1u << 23)
#define PROCBASED_IO_EXITING (1u << 24)
#define PROCBASED_IO_BITMAPS (1u << 25)
#define PROCBASED_MTF (1u << 27)
#define PROCBASED_MSR_BITMAPS (1u << 28)
#define PROCBASED_MONITOR_EXITING (1u << 29)
#define PROCBASED_PAUSE_EXITING (1u << 30)
#define PROCBASED_SECONDARY_CONTROLS (1U << 31)
/* Secondary Processor-Based VM-Execution Controls */
#define PROCBASED2_VIRTUALIZE_APIC_ACCESSES (1u << 0)
#define PROCBASED2_ENABLE_EPT (1u << 1)
#define PROCBASED2_DESC_TABLE_EXITING (1u << 2)
#define PROCBASED2_ENABLE_RDTSCP (1u << 3)
#define PROCBASED2_VIRTUALIZE_X2APIC_MODE (1u << 4)
#define PROCBASED2_ENABLE_VPID (1u << 5)
#define PROCBASED2_WBINVD_EXITING (1u << 6)
#define PROCBASED2_UNRESTRICTED_GUEST (1u << 7)
#define PROCBASED2_APIC_REGISTER_VIRTUALIZATION (1u << 8)
#define PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY (1u << 9)
#define PROCBASED2_PAUSE_LOOP_EXITING (1u << 10)
#define PROCBASED2_RDRAND_EXITING (1u << 11)
#define PROCBASED2_ENABLE_INVPCID (1u << 12)
/* VM Exit Controls */
#define VM_EXIT_SAVE_DEBUG_CONTROLS (1u << 2)
#define VM_EXIT_HOST_LMA (1u << 9)
#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL (1u << 12)
#define VM_EXIT_ACKNOWLEDGE_INTERRUPT (1u << 15)
#define VM_EXIT_SAVE_PAT (1u << 18)
#define VM_EXIT_LOAD_PAT (1u << 19)
#define VM_EXIT_SAVE_EFER (1u << 20)
#define VM_EXIT_LOAD_EFER (1u << 21)
#define VM_EXIT_SAVE_PREEMPTION_TIMER (1u << 22)
/* VM Entry Controls */
#define VM_ENTRY_LOAD_DEBUG_CONTROLS (1u << 2)
#define VM_ENTRY_GUEST_LMA (1u << 9)
#define VM_ENTRY_INTO_SMM (1u << 10)
#define VM_ENTRY_DEACTIVATE_DUAL_MONITOR (1u << 11)
#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL (1u << 13)
#define VM_ENTRY_LOAD_PAT (1u << 14)
#define VM_ENTRY_LOAD_EFER (1u << 15)

View file

@ -26,14 +26,20 @@
* $FreeBSD$
*/
#ifndef _EPT_H_
#define _EPT_H_
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <xhyve/support/misc.h>
struct vmx;
int ept_init(int ipinum);
void ept_invalidate_mappings(u_long eptp);
struct vmspace *ept_vmspace_alloc(vm_offset_t min, vm_offset_t max);
void ept_vmspace_free(struct vmspace *vmspace);
uint64_t eptp(uint64_t pml4);
#endif
void vmx_msr_init(void);
void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val);
int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val);
int vmx_set_ctlreg(hv_vmx_capability_t cap_field, uint32_t ones_mask,
uint32_t zeros_mask, uint32_t *retval);
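The hv_vmx_capability_t parameter suggests usage along the lines below: combine the VMX control bits you require (ones_mask) and forbid (zeros_mask), then let vmx_set_ctlreg() reconcile them against what Hypervisor.framework reports. A sketch only; the PROCBASED_* bits come from vmx_controls.h above, HV_VMX_CAP_PROCBASED is the framework's capability selector, and the include paths are assumptions:

    #include <stdint.h>
    #include <Hypervisor/hv_vmx.h>
    #include <xhyve/vmm/intel/vmx_controls.h> /* include path assumed */
    #include <xhyve/vmm/intel/vmx_msr.h>      /* include path assumed */

    /* Hedged sketch: derive a usable primary processor-based control value. */
    static int
    init_procbased_ctls(uint32_t *procbased_ctls)
    {
            uint32_t ones = PROCBASED_HLT_EXITING | PROCBASED_IO_EXITING |
                PROCBASED_MSR_BITMAPS | PROCBASED_SECONDARY_CONTROLS;
            uint32_t zeros = PROCBASED_CR3_LOAD_EXITING |
                PROCBASED_CR3_STORE_EXITING;

            return (vmx_set_ctlreg(HV_VMX_CAP_PROCBASED, ones, zeros,
                procbased_ctls));
    }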

View file

@ -26,15 +26,19 @@
* $FreeBSD$
*/
#ifndef _VATPIC_H_
#define _VATPIC_H_
#pragma once
#include <isa/isareg.h>
#include <stdint.h>
#include <stdbool.h>
#include <xhyve/vmm/vmm.h>
#define ICU_IMR_OFFSET 1
#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */
#define IO_ICU2 0x0a0 /* 8259A Interrupt Controller #2 */
#define IO_ELCR1 0x4d0
#define IO_ELCR2 0x4d1
#define ICU_IMR_OFFSET 1
#define IO_ELCR1 0x4d0
#define IO_ELCR2 0x4d1
struct vatpic *vatpic_init(struct vm *vm);
void vatpic_cleanup(struct vatpic *vatpic);
@ -53,5 +57,3 @@ int vatpic_set_irq_trigger(struct vm *vm, int irq, enum vm_intr_trigger trigger)
void vatpic_pending_intr(struct vm *vm, int *vecptr);
void vatpic_intr_accepted(struct vm *vm, int vector);
#endif /* _VATPIC_H_ */

View file

@ -27,19 +27,22 @@
* $FreeBSD$
*/
#ifndef _VATPIT_H_
#define _VATPIT_H_
#pragma once
#include <machine/timerreg.h>
#include <stdint.h>
#include <stdbool.h>
//#include <machine/timerreg.h>
#define NMISC_PORT 0x61
struct vm;
struct vatpit;
struct vatpit *vatpit_init(struct vm *vm);
void vatpit_cleanup(struct vatpit *vatpit);
int vatpit_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
uint32_t *eax);
uint32_t *eax);
int vatpit_nmisc_handler(struct vm *vm, int vcpuid, bool in, int port,
int bytes, uint32_t *eax);
#endif /* _VATPIT_H_ */
int bytes, uint32_t *eax);

View file

@ -27,18 +27,20 @@
* $FreeBSD$
*/
#ifndef _VHPET_H_
#define _VHPET_H_
#pragma once
#define VHPET_BASE 0xfed00000
#define VHPET_SIZE 1024
#include <stdint.h>
#define VHPET_BASE 0xfed00000
#define VHPET_SIZE 0x400
struct vm;
struct vhpet;
struct vhpet *vhpet_init(struct vm *vm);
void vhpet_cleanup(struct vhpet *vhpet);
int vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val,
int size, void *arg);
int vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *val,
int size, void *arg);
int vhpet_getcap(struct vm_hpet_cap *cap);
#endif /* _VHPET_H_ */
void vhpet_cleanup(struct vhpet *vhpet);
int vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val,
int size, void *arg);
int vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *val,
int size, void *arg);
int vhpet_getcap(uint32_t *cap);

View file

@ -27,24 +27,27 @@
* $FreeBSD$
*/
#ifndef _VIOAPIC_H_
#define _VIOAPIC_H_
#pragma once
#define VIOAPIC_BASE 0xFEC00000
#define VIOAPIC_SIZE 4096
#include <stdint.h>
#define VIOAPIC_BASE 0xfec00000
#define VIOAPIC_SIZE 0x1000
struct vm;
struct vioapic;
struct vioapic *vioapic_init(struct vm *vm);
void vioapic_cleanup(struct vioapic *vioapic);
void vioapic_cleanup(struct vioapic *vioapic);
int vioapic_assert_irq(struct vm *vm, int irq);
int vioapic_deassert_irq(struct vm *vm, int irq);
int vioapic_pulse_irq(struct vm *vm, int irq);
int vioapic_assert_irq(struct vm *vm, int irq);
int vioapic_deassert_irq(struct vm *vm, int irq);
int vioapic_pulse_irq(struct vm *vm, int irq);
int vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa,
uint64_t wval, int size, void *arg);
int vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa,
uint64_t *rval, int size, void *arg);
int vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa,
uint64_t wval, int size, void *arg);
int vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa,
uint64_t *rval, int size, void *arg);
int vioapic_pincount(struct vm *vm);
void vioapic_process_eoi(struct vm *vm, int vcpuid, int vector);
#endif
int vioapic_pincount(struct vm *vm);
void vioapic_process_eoi(struct vm *vm, int vcpuid, int vector);

View file

@ -26,11 +26,13 @@
* $FreeBSD$
*/
#ifndef _VLAPIC_H_
#define _VLAPIC_H_
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <xhyve/vmm/vmm.h>
struct vm;
enum x2apic_state;
int vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
uint64_t data, bool *retu);
@ -106,4 +108,3 @@ void vlapic_icrtmr_write_handler(struct vlapic *vlapic);
void vlapic_dcr_write_handler(struct vlapic *vlapic);
void vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset);
void vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val);
#endif /* _VLAPIC_H_ */

View file

@ -26,10 +26,13 @@
* $FreeBSD$
*/
#ifndef _VLAPIC_PRIV_H_
#define _VLAPIC_PRIV_H_
#pragma once
#include <x86/apicreg.h>
#include <stdint.h>
#include <stdbool.h>
#include <libkern/OSAtomic.h>
#include <xhyve/support/apicreg.h>
#include <xhyve/vmm/vmm_callout.h>
/*
* APIC Register: Offset Description
@ -95,32 +98,32 @@
#define VLAPIC_CTR3(vlapic, format, p1, p2, p3) \
VCPU_CTR3((vlapic)->vm, (vlapic)->vcpuid, format, p1, p2, p3)
#define VLAPIC_CTR_IRR(vlapic, msg) \
do { \
uint32_t *irrptr = &(vlapic)->apic_page->irr0; \
irrptr[0] = irrptr[0]; /* silence compiler */ \
VLAPIC_CTR1((vlapic), msg " irr0 0x%08x", irrptr[0 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr1 0x%08x", irrptr[1 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr2 0x%08x", irrptr[2 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr3 0x%08x", irrptr[3 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr4 0x%08x", irrptr[4 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr5 0x%08x", irrptr[5 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr6 0x%08x", irrptr[6 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr7 0x%08x", irrptr[7 << 2]); \
#define VLAPIC_CTR_IRR(vlapic, msg) \
do { \
uint32_t *x = &(vlapic)->apic_page->irr0; \
x[0] = x[0]; /* silence compiler */ \
VLAPIC_CTR1((vlapic), msg " irr0 0x%08x", x[0 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr1 0x%08x", x[1 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr2 0x%08x", x[2 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr3 0x%08x", x[3 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr4 0x%08x", x[4 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr5 0x%08x", x[5 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr6 0x%08x", x[6 << 2]); \
VLAPIC_CTR1((vlapic), msg " irr7 0x%08x", x[7 << 2]); \
} while (0)
#define VLAPIC_CTR_ISR(vlapic, msg) \
do { \
uint32_t *isrptr = &(vlapic)->apic_page->isr0; \
isrptr[0] = isrptr[0]; /* silence compiler */ \
VLAPIC_CTR1((vlapic), msg " isr0 0x%08x", isrptr[0 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr1 0x%08x", isrptr[1 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr2 0x%08x", isrptr[2 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr3 0x%08x", isrptr[3 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr4 0x%08x", isrptr[4 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr5 0x%08x", isrptr[5 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr6 0x%08x", isrptr[6 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr7 0x%08x", isrptr[7 << 2]); \
#define VLAPIC_CTR_ISR(vlapic, msg) \
do { \
uint32_t *x = &(vlapic)->apic_page->isr0; \
x[0] = x[0]; /* silence compiler */ \
VLAPIC_CTR1((vlapic), msg " isr0 0x%08x", x[0 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr1 0x%08x", x[1 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr2 0x%08x", x[2 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr3 0x%08x", x[3 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr4 0x%08x", x[4 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr5 0x%08x", x[5 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr6 0x%08x", x[6 << 2]); \
VLAPIC_CTR1((vlapic), msg " isr7 0x%08x", x[7 << 2]); \
} while (0)
enum boot_state {
@ -147,44 +150,40 @@ struct vlapic_ops {
void (*enable_x2apic_mode)(struct vlapic *vlapic);
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct vlapic {
struct vm *vm;
int vcpuid;
struct LAPIC *apic_page;
struct vlapic_ops ops;
uint32_t esr_pending;
int esr_firing;
struct callout callout; /* vlapic timer */
struct bintime timer_fire_bt; /* callout expiry time */
struct bintime timer_freq_bt; /* timer frequency */
struct bintime timer_period_bt; /* timer period */
struct mtx timer_mtx;
struct vm *vm;
int vcpuid;
struct LAPIC *apic_page;
struct vlapic_ops ops;
uint32_t esr_pending;
int esr_firing;
struct callout callout; /* vlapic timer */
struct bintime timer_fire_bt; /* callout expiry time */
struct bintime timer_freq_bt; /* timer frequency */
struct bintime timer_period_bt; /* timer period */
OSSpinLock timer_lock;
/*
* The 'isrvec_stk' is a stack of vectors injected by the local apic.
* A vector is popped from the stack when the processor does an EOI.
* The vector on the top of the stack is used to compute the
* Processor Priority in conjunction with the TPR.
*/
uint8_t isrvec_stk[ISRVEC_STK_SIZE];
int isrvec_stk_top;
uint64_t msr_apicbase;
uint8_t isrvec_stk[ISRVEC_STK_SIZE];
int isrvec_stk_top;
uint64_t msr_apicbase;
enum boot_state boot_state;
/*
* Copies of some registers in the virtual APIC page. We do this for
* a couple of different reasons:
* - to be able to detect what changed (e.g. svr_last)
* - to maintain a coherent snapshot of the register (e.g. lvt_last)
*/
uint32_t svr_last;
uint32_t lvt_last[VLAPIC_MAXLVT_INDEX + 1];
uint32_t svr_last;
uint32_t lvt_last[VLAPIC_MAXLVT_INDEX + 1];
};
#pragma clang diagnostic pop
void vlapic_init(struct vlapic *vlapic);
void vlapic_cleanup(struct vlapic *vlapic);
#endif /* _VLAPIC_PRIV_H_ */

View file

@ -26,17 +26,17 @@
* $FreeBSD$
*/
#ifndef _VPMTMR_H_
#define _VPMTMR_H_
#pragma once
#include <stdint.h>
#define IO_PMTMR 0x408
struct vm;
struct vpmtmr;
struct vpmtmr *vpmtmr_init(struct vm *vm);
void vpmtmr_cleanup(struct vpmtmr *pmtmr);
int vpmtmr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
uint32_t *val);
#endif
uint32_t *val);

View file

@ -26,11 +26,15 @@
* $FreeBSD$
*/
#ifndef _VRTC_H_
#define _VRTC_H_
#pragma once
#include <isa/isareg.h>
#include <stdint.h>
#include <stdbool.h>
#include <time.h>
#define IO_RTC 0x070 /* 4990A RTC */
struct vm;
struct vrtc;
struct vrtc *vrtc_init(struct vm *vm);
@ -43,8 +47,6 @@ int vrtc_nvram_write(struct vm *vm, int offset, uint8_t value);
int vrtc_nvram_read(struct vm *vm, int offset, uint8_t *retval);
int vrtc_addr_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
uint32_t *val);
uint32_t *val);
int vrtc_data_handler(struct vm *vm, int vcpuid, bool in, int port, int bytes,
uint32_t *val);
#endif
uint32_t *val);

314
include/xhyve/vmm/vmm.h Normal file
View file

@ -0,0 +1,314 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/cpuset.h>
#include <xhyve/support/segments.h>
#include <xhyve/vmm/vmm_common.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
#define VM_INTINFO_DEL_ERRCODE 0x800
#define VM_INTINFO_RSVD 0x7ffff000
#define VM_INTINFO_VALID 0x80000000
#define VM_INTINFO_TYPE 0x700
#define VM_INTINFO_HWINTR (0 << 8)
#define VM_INTINFO_NMI (2 << 8)
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
struct vm;
struct vm_exception;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;
typedef int (*vmm_init_func_t)(void);
typedef int (*vmm_cleanup_func_t)(void);
typedef void *(*vmi_vm_init_func_t)(struct vm *vm);
typedef int (*vmi_vcpu_init_func_t)(void *vmi, int vcpu);
typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
void *rendezvous_cookie, void *suspend_cookie);
typedef void (*vmi_vm_cleanup_func_t)(void *vmi);
typedef void (*vmi_vcpu_cleanup_func_t)(void *vmi, int vcpu);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
uint64_t val);
typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
struct seg_desc *desc);
typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
struct seg_desc *desc);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef void (*vmi_interrupt)(int vcpu);
struct vmm_ops {
vmm_init_func_t init; /* module wide initialization */
vmm_cleanup_func_t cleanup;
vmi_vm_init_func_t vm_init; /* vm-specific initialization */
vmi_vcpu_init_func_t vcpu_init;
vmi_run_func_t vmrun;
vmi_vm_cleanup_func_t vm_cleanup;
vmi_vcpu_cleanup_func_t vcpu_cleanup;
vmi_get_register_t vmgetreg;
vmi_set_register_t vmsetreg;
vmi_get_desc_t vmgetdesc;
vmi_set_desc_t vmsetdesc;
vmi_get_cap_t vmgetcap;
vmi_set_cap_t vmsetcap;
vmi_vlapic_init vlapic_init;
vmi_vlapic_cleanup vlapic_cleanup;
vmi_interrupt vcpu_interrupt;
};
extern struct vmm_ops vmm_ops_intel;
int vmm_init(void);
int vmm_cleanup(void);
int vm_create(struct vm **retvm);
int vcpu_create(struct vm *vm, int vcpu);
void vm_destroy(struct vm *vm);
void vcpu_destroy(struct vm *vm, int vcpu);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, uint64_t gpa, size_t len);
void *vm_gpa2hva(struct vm *vm, uint64_t gpa, uint64_t len);
int vm_gpabase2memseg(struct vm *vm, uint64_t gpabase,
struct vm_memory_segment *seg);
int vm_get_memobj(struct vm *vm, uint64_t gpa, size_t len, uint64_t *offset,
void **object);
bool vm_mem_allocated(struct vm *vm, uint64_t gpa);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc);
int vm_run(struct vm *vm, int vcpu, struct vm_exit *vm_exit);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
/*
* Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
* The rendezvous 'func(arg)' is not allowed to do anything that will
* cause the thread to be put to sleep.
*
* If the rendezvous is being initiated from a vcpu context then the
* 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
*
* The caller cannot hold any locks when initiating the rendezvous.
*
* The implementation of this API may cause vcpus other than those specified
* by 'dest' to be stalled. The caller should not rely on any vcpus making
* forward progress when the rendezvous is in progress.
*/
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
void vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
vm_rendezvous_func_t func, void *arg);
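A minimal usage sketch of the rendezvous API described above, assuming the FreeBSD-style CPU_ZERO/CPU_SET macros from <xhyve/support/cpuset.h>; the callback name and the per-vcpu work are invented for illustration and are not part of this commit.

    /* Hypothetical callback: runs on every vcpu named in 'dest'; must not sleep. */
    static void
    example_rendezvous_cb(struct vm *vm, int vcpuid, void *arg)
    {
        (void) vm; (void) arg;
        /* per-vcpu work for vcpu 'vcpuid' goes here */
        (void) vcpuid;
    }

    static void
    rendezvous_all(struct vm *vm, int ncpus)
    {
        cpuset_t dest;
        int i;

        CPU_ZERO(&dest);
        for (i = 0; i < ncpus; i++)
            CPU_SET(i, &dest);

        /* initiated from outside any vcpu context, hence vcpuid == -1 */
        vm_smp_rendezvous(vm, -1, dest, example_rendezvous_cb, NULL);
    }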
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
static __inline int
vcpu_rendezvous_pending(void *rendezvous_cookie)
{
return (*(uintptr_t *)rendezvous_cookie != 0);
}
static __inline int
vcpu_suspended(void *suspend_cookie)
{
return (*(int *)suspend_cookie);
}
enum vcpu_state {
VCPU_IDLE,
VCPU_FROZEN,
VCPU_RUNNING,
VCPU_SLEEPING,
};
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu);
static int __inline
vcpu_is_running(struct vm *vm, int vcpu)
{
return (vcpu_get_state(vm, vcpu) == VCPU_RUNNING);
}
void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);
/*
* Inject exception 'vector' into the guest vcpu. This function returns 0 on
* success and non-zero on failure.
*
* Wrapper functions like 'vm_inject_gp()' should be preferred to calling
* this function directly because they enforce the trap-like or fault-like
* behavior of an exception.
*
* This function should only be called in the context of the thread that is
* executing this vcpu.
*/
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
uint32_t errcode, int restart_instruction);
/*
* This function is called after a VM-exit that occurred during exception or
* interrupt delivery through the IDT. The format of 'intinfo' is described
* in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
*
* If a VM-exit handler completes the event delivery successfully then it
* should call vm_exit_intinfo() to extinguish the pending event. For example,
* if the task switch emulation is triggered via a task gate then it should
* call this function with 'intinfo=0' to indicate that the external event
* is not pending anymore.
*
* Return value is 0 on success and non-zero on failure.
*/
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
/*
* This function is called before every VM-entry to retrieve a pending
* event that should be injected into the guest. This function combines
* nested events into a double or triple fault.
*
* Returns 0 if there are no events that need to be injected into the guest
* and non-zero otherwise.
*/
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
enum vm_reg_name vm_segment_name(int seg_encoding);
struct vm_copyinfo {
uint64_t gpa;
size_t len;
void *hva;
};
/*
* Set up 'copyinfo[]' to copy to/from guest linear address space starting
* at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
* a copyin or PROT_WRITE for a copyout.
*
* retval is_fault Interpretation
* 0 0 Success
* 0 1 An exception was injected into the guest
* EFAULT N/A Unrecoverable error
*
* The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
* the return value is 0. The 'copyinfo[]' resources should be freed by calling
* 'vm_copy_teardown()' after the copy is done.
*/
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
struct vm_copyinfo *copyinfo, size_t len);
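A hedged sketch of the call sequence the comment above describes; PROT_READ and EFAULT come from <sys/mman.h> and <errno.h>, and the two-entry copyinfo array (enough for a copy crossing at most one page boundary) is an assumption, not something this header mandates.

    /* Sketch: copy 'len' bytes from guest linear address 'gla' into 'buf'. */
    static int
    copy_from_guest(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
        uint64_t gla, void *buf, size_t len)
    {
        struct vm_copyinfo copyinfo[2]; /* assumed: at most one page crossing */
        int error, fault;

        error = vm_copy_setup(vm, vcpuid, paging, gla, len, PROT_READ,
            copyinfo, 2, &fault);
        if (error)
            return (error);     /* unrecoverable (EFAULT) */
        if (fault)
            return (EFAULT);    /* an exception was already injected into the guest */

        vm_copyin(vm, vcpuid, copyinfo, buf, len);
        vm_copy_teardown(vm, vcpuid, copyinfo, 2);
        return (0);
    }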
int vcpu_trace_exceptions(void);
/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
int errcode);
static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}
static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}
static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}
static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}
void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
int vm_restart_instruction(void *vm, int vcpuid);
#pragma clang diagnostic pop

113
include/xhyve/vmm/vmm_api.h Normal file
View file

@ -0,0 +1,113 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <sys/time.h>
#include <xhyve/support/cpuset.h>
#include <xhyve/vmm/vmm_common.h>
struct iovec;
/*
* Different styles of mapping the memory assigned to a VM into the address
* space of the controlling process.
*/
enum vm_mmap_style {
VM_MMAP_NONE, /* no mapping */
VM_MMAP_ALL, /* fully and statically mapped */
VM_MMAP_SPARSE, /* mappings created on-demand */
};
int xh_vm_create(void);
void xh_vm_destroy(void);
int xh_vcpu_create(int vcpu);
void xh_vcpu_destroy(int vcpu);
int xh_vm_get_memory_seg(uint64_t gpa, size_t *ret_len);
int xh_vm_setup_memory(size_t len, enum vm_mmap_style vms);
void *xh_vm_map_gpa(uint64_t gpa, size_t len);
int xh_vm_gla2gpa(int vcpu, struct vm_guest_paging *paging, uint64_t gla,
int prot, uint64_t *gpa, int *fault);
uint32_t xh_vm_get_lowmem_limit(void);
void xh_vm_set_lowmem_limit(uint32_t limit);
void xh_vm_set_memflags(int flags);
size_t xh_vm_get_lowmem_size(void);
size_t xh_vm_get_highmem_size(void);
int xh_vm_set_desc(int vcpu, int reg, uint64_t base, uint32_t limit,
uint32_t access);
int xh_vm_get_desc(int vcpu, int reg, uint64_t *base, uint32_t *limit,
uint32_t *access);
int xh_vm_get_seg_desc(int vcpu, int reg, struct seg_desc *seg_desc);
int xh_vm_set_register(int vcpu, int reg, uint64_t val);
int xh_vm_get_register(int vcpu, int reg, uint64_t *retval);
int xh_vm_run(int vcpu, struct vm_exit *ret_vmexit);
int xh_vm_suspend(enum vm_suspend_how how);
int xh_vm_reinit(void);
int xh_vm_apicid2vcpu(int apicid);
int xh_vm_inject_exception(int vcpu, int vector, int errcode_valid,
uint32_t errcode, int restart_instruction);
int xh_vm_lapic_irq(int vcpu, int vector);
int xh_vm_lapic_local_irq(int vcpu, int vector);
int xh_vm_lapic_msi(uint64_t addr, uint64_t msg);
int xh_vm_ioapic_assert_irq(int irq);
int xh_vm_ioapic_deassert_irq(int irq);
int xh_vm_ioapic_pulse_irq(int irq);
int xh_vm_ioapic_pincount(int *pincount);
int xh_vm_isa_assert_irq(int atpic_irq, int ioapic_irq);
int xh_vm_isa_deassert_irq(int atpic_irq, int ioapic_irq);
int xh_vm_isa_pulse_irq(int atpic_irq, int ioapic_irq);
int xh_vm_isa_set_irq_trigger(int atpic_irq, enum vm_intr_trigger trigger);
int xh_vm_inject_nmi(int vcpu);
int xh_vm_capability_name2type(const char *capname);
const char *xh_vm_capability_type2name(int type);
int xh_vm_get_capability(int vcpu, enum vm_cap_type cap, int *retval);
int xh_vm_set_capability(int vcpu, enum vm_cap_type cap, int val);
int xh_vm_get_intinfo(int vcpu, uint64_t *i1, uint64_t *i2);
int xh_vm_set_intinfo(int vcpu, uint64_t exit_intinfo);
uint64_t *xh_vm_get_stats(int vcpu, struct timeval *ret_tv, int *ret_entries);
const char *xh_vm_get_stat_desc(int index);
int xh_vm_get_x2apic_state(int vcpu, enum x2apic_state *s);
int xh_vm_set_x2apic_state(int vcpu, enum x2apic_state s);
int xh_vm_get_hpet_capabilities(uint32_t *capabilities);
int xh_vm_copy_setup(int vcpu, struct vm_guest_paging *pg, uint64_t gla,
size_t len, int prot, struct iovec *iov, int iovcnt, int *fault);
void xh_vm_copyin(struct iovec *iov, void *dst, size_t len);
void xh_vm_copyout(const void *src, struct iovec *iov, size_t len);
int xh_vm_rtc_write(int offset, uint8_t value);
int xh_vm_rtc_read(int offset, uint8_t *retval);
int xh_vm_rtc_settime(time_t secs);
int xh_vm_rtc_gettime(time_t *secs);
int xh_vcpu_reset(int vcpu);
int xh_vm_active_cpus(cpuset_t *cpus);
int xh_vm_suspended_cpus(cpuset_t *cpus);
int xh_vm_activate_cpu(int vcpu);
int xh_vm_restart_instruction(int vcpu);
int xh_vm_emulate_instruction(int vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg);
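Read together, these entry points suggest a bring-up sequence roughly like the sketch below; the memory size, the register setup and the exit handling are placeholders rather than xhyve's actual boot path.

    static int
    run_single_vcpu_vm(void)
    {
        struct vm_exit vmexit;
        int error;

        if ((error = xh_vm_create()) != 0)
            return (error);
        if ((error = xh_vcpu_create(0)) != 0)
            return (error);
        /* 1 GiB of guest memory, fully mapped into this process */
        if ((error = xh_vm_setup_memory(1ULL << 30, VM_MMAP_ALL)) != 0)
            return (error);

        /* guest entry state would be loaded here, e.g. via
         * xh_vm_set_register(0, VM_REG_GUEST_RIP, entry); */

        for (;;) {
            if ((error = xh_vm_run(0, &vmexit)) != 0)
                return (error);
            switch (vmexit.exitcode) {
            case VM_EXITCODE_HLT:
                return (0);     /* guest halted */
            default:
                break;          /* dispatch to device emulation */
            }
        }
    }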

View file

@ -0,0 +1,117 @@
#pragma once
#include <stdint.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#define SBT_1S ((sbintime_t)1 << 32)
#define SBT_1M (SBT_1S * 60)
#define SBT_1MS (SBT_1S / 1000)
#define SBT_1US (SBT_1S / 1000000)
#define SBT_1NS (SBT_1S / 1000000000)
#define SBT_MAX 0x7fffffffffffffffLL
#define FREQ2BT(freq, bt) \
{ \
(bt)->sec = 0; \
(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \
}
#define BT2FREQ(bt) \
(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \
((bt)->frac >> 1))
struct bintime {
uint64_t sec;
uint64_t frac;
};
typedef int64_t sbintime_t;
static inline sbintime_t bttosbt(const struct bintime bt) {
return (sbintime_t) ((bt.sec << 32) + (bt.frac >> 32));
}
static inline void bintime_mul(struct bintime *bt, unsigned int x) {
uint64_t p1, p2;
p1 = (bt->frac & 0xffffffffull) * x;
p2 = (bt->frac >> 32) * x + (p1 >> 32);
bt->sec *= x;
bt->sec += (p2 >> 32);
bt->frac = (p2 << 32) | (p1 & 0xffffffffull);
}
static inline void bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac += _bt2->frac;
if (_u > _bt->frac)
_bt->sec++;
_bt->sec += _bt2->sec;
}
static inline void bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac -= _bt2->frac;
if (_u < _bt->frac)
_bt->sec--;
_bt->sec -= _bt2->sec;
}
#define bintime_cmp(a, b, cmp) \
(((a)->sec == (b)->sec) ? \
((a)->frac cmp (b)->frac) : \
((a)->sec cmp (b)->sec))
void binuptime(struct bintime *bt);
void getmicrotime(struct timeval *tv);
static inline sbintime_t sbinuptime(void) {
struct bintime _bt;
binuptime(&_bt);
return (bttosbt(_bt));
}
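A small worked example of the fixed-point conventions these helpers encode (struct bintime is a 64.64 value, sbintime_t is 32.32); the 1 kHz figure is arbitrary.

    static void
    bintime_example(void)
    {
        struct bintime period;
        sbintime_t period_sbt;

        FREQ2BT(1000, &period);       /* 1 kHz tick: sec == 0, frac ~ 2^64 / 1000 */
        period_sbt = bttosbt(period); /* ~ SBT_1MS, i.e. 2^32 / 1000 in 32.32 */
        (void) period_sbt;

        /* BT2FREQ(&period) recovers the original 1000 Hz (rounded) */
    }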
struct callout {
pthread_cond_t wait;
struct callout *prev;
struct callout *next;
uint64_t timeout;
void *argument;
void (*callout)(void *);
int flags;
int queued;
};
#define C_ABSOLUTE 0x0200 /* event time is absolute */
#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */
#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */
#define CALLOUT_MPSAFE 0x0008 /* callout handler is mp safe */
#define CALLOUT_RETURNUNLOCKED 0x0010 /* handler returns with mtx unlocked */
#define CALLOUT_COMPLETED 0x0020 /* callout thread finished */
#define CALLOUT_WAITING 0x0040 /* thread waiting for callout to finish */
//#define CALLOUT_QUEUED 0x0080
void callout_system_init(void);
void callout_init(struct callout *c, int mpsafe);
int callout_reset_sbt(struct callout *c, sbintime_t sbt,
sbintime_t precision, void (*ftn)(void *), void *arg,
int flags);
int callout_stop_safe(struct callout *c, int drain);
#define callout_active(c) ((c)->flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->flags &= ~CALLOUT_ACTIVE)
#define callout_pending(c) ((c)->flags & CALLOUT_PENDING)
#define callout_completed(c) ((c)->flags & CALLOUT_COMPLETED)
#define callout_drain(c) callout_stop_safe(c, 1)
#define callout_stop(c) callout_stop_safe(c, 0)
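A hedged sketch of how these pieces fit together for a periodic timer, loosely modelled on how bhyve's virtual timers drive the callout API; the handler, the period and the place where callout_system_init() is called are assumptions for illustration.

    static struct callout tick_callout;

    static void
    tick_handler(void *arg)
    {
        /* re-arm for one millisecond from now, as an absolute deadline */
        callout_reset_sbt(&tick_callout, sbinuptime() + SBT_1MS, 0,
            tick_handler, arg, C_ABSOLUTE);
    }

    static void
    tick_start(void)
    {
        callout_system_init();  /* assumed to be called once per process */
        callout_init(&tick_callout, 1 /* mpsafe */);
        callout_reset_sbt(&tick_callout, sbinuptime() + SBT_1MS, 0,
            tick_handler, NULL, C_ABSOLUTE);
    }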

View file

@ -0,0 +1,322 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#define VM_MAXCPU 16 /* maximum virtual cpus */
enum vm_suspend_how {
VM_SUSPEND_NONE,
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
VM_SUSPEND_TRIPLEFAULT,
VM_SUSPEND_LAST
};
enum vm_cap_type {
VM_CAP_HALT_EXIT,
VM_CAP_MTRAP_EXIT,
VM_CAP_PAUSE_EXIT,
VM_CAP_MAX
};
enum vm_intr_trigger {
EDGE_TRIGGER,
LEVEL_TRIGGER
};
enum x2apic_state {
X2APIC_DISABLED,
X2APIC_ENABLED,
X2APIC_STATE_LAST
};
enum vm_cpu_mode {
CPU_MODE_REAL,
CPU_MODE_PROTECTED,
CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
enum vm_paging_mode {
PAGING_MODE_FLAT,
PAGING_MODE_32,
PAGING_MODE_PAE,
PAGING_MODE_64,
};
struct seg_desc {
uint64_t base;
uint32_t limit;
uint32_t access;
};
#define SEG_DESC_TYPE(access) ((access) & 0x001f)
#define SEG_DESC_DPL(access) (((access) >> 5) & 0x3)
#define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0)
#define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0)
#define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 1 : 0)
#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0)
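For reference, a worked decode of a hypothetical access word (0xc093, a flat, present, ring-0, 4 KiB-granular 32-bit read/write data segment) using these macros; <assert.h> is pulled in only for the illustration.

    #include <assert.h>

    static void
    seg_desc_access_example(void)
    {
        uint32_t access = 0xc093;   /* hypothetical descriptor access word */

        assert(SEG_DESC_TYPE(access) == 0x13);      /* data, read/write, accessed */
        assert(SEG_DESC_DPL(access) == 0);
        assert(SEG_DESC_PRESENT(access) == 1);
        assert(SEG_DESC_DEF32(access) == 1);
        assert(SEG_DESC_GRANULARITY(access) == 1);
        assert(SEG_DESC_UNUSABLE(access) == 0);
    }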
struct vm_guest_paging {
uint64_t cr3;
int cpl;
enum vm_cpu_mode cpu_mode;
enum vm_paging_mode paging_mode;
};
enum vm_reg_name {
VM_REG_GUEST_RAX,
VM_REG_GUEST_RBX,
VM_REG_GUEST_RCX,
VM_REG_GUEST_RDX,
VM_REG_GUEST_RSI,
VM_REG_GUEST_RDI,
VM_REG_GUEST_RBP,
VM_REG_GUEST_R8,
VM_REG_GUEST_R9,
VM_REG_GUEST_R10,
VM_REG_GUEST_R11,
VM_REG_GUEST_R12,
VM_REG_GUEST_R13,
VM_REG_GUEST_R14,
VM_REG_GUEST_R15,
VM_REG_GUEST_CR0,
VM_REG_GUEST_CR3,
VM_REG_GUEST_CR4,
VM_REG_GUEST_DR7,
VM_REG_GUEST_RSP,
VM_REG_GUEST_RIP,
VM_REG_GUEST_RFLAGS,
VM_REG_GUEST_ES,
VM_REG_GUEST_CS,
VM_REG_GUEST_SS,
VM_REG_GUEST_DS,
VM_REG_GUEST_FS,
VM_REG_GUEST_GS,
VM_REG_GUEST_LDTR,
VM_REG_GUEST_TR,
VM_REG_GUEST_IDTR,
VM_REG_GUEST_GDTR,
VM_REG_GUEST_EFER,
VM_REG_GUEST_CR2,
VM_REG_GUEST_PDPTE0,
VM_REG_GUEST_PDPTE1,
VM_REG_GUEST_PDPTE2,
VM_REG_GUEST_PDPTE3,
VM_REG_GUEST_INTR_SHADOW,
VM_REG_LAST
};
enum vm_exitcode {
VM_EXITCODE_INOUT,
VM_EXITCODE_VMX,
VM_EXITCODE_BOGUS,
VM_EXITCODE_RDMSR,
VM_EXITCODE_WRMSR,
VM_EXITCODE_HLT,
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,
VM_EXITCODE_INST_EMUL,
VM_EXITCODE_SPINUP_AP,
VM_EXITCODE_DEPRECATED1, /* used to be SPINDOWN_CPU */
VM_EXITCODE_RENDEZVOUS,
VM_EXITCODE_IOAPIC_EOI,
VM_EXITCODE_SUSPENDED,
VM_EXITCODE_INOUT_STR,
VM_EXITCODE_TASK_SWITCH,
VM_EXITCODE_MONITOR,
VM_EXITCODE_MWAIT,
VM_EXITCODE_MAX
};
struct vm_inout {
uint16_t bytes:3; /* 1 or 2 or 4 */
uint16_t in:1;
uint16_t string:1;
uint16_t rep:1;
uint16_t port;
uint32_t eax; /* valid for out */
};
struct vm_inout_str {
struct vm_inout inout; /* must be the first element */
struct vm_guest_paging paging;
uint64_t rflags;
uint64_t cr0;
uint64_t index;
uint64_t count; /* rep=1 (%rcx), rep=0 (1) */
int addrsize;
enum vm_reg_name seg_name;
struct seg_desc seg_desc;
};
struct vie_op {
uint8_t op_byte; /* actual opcode byte */
uint8_t op_type; /* type of operation (e.g. MOV) */
uint16_t op_flags;
};
#define VIE_INST_SIZE 15
struct vie {
uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
uint8_t num_valid; /* size of the instruction */
uint8_t num_processed;
uint8_t addrsize:4, opsize:4; /* address and operand sizes */
uint8_t rex_w:1, /* REX prefix */
rex_r:1,
rex_x:1,
rex_b:1,
rex_present:1,
repz_present:1, /* REP/REPE/REPZ prefix */
repnz_present:1, /* REPNE/REPNZ prefix */
opsize_override:1, /* Operand size override */
addrsize_override:1, /* Address size override */
segment_override:1; /* Segment override */
uint8_t mod:2, /* ModRM byte */
reg:4,
rm:4;
uint8_t ss:2, /* SIB byte */
index:4,
base:4;
uint8_t disp_bytes;
uint8_t imm_bytes;
uint8_t scale;
int base_register; /* VM_REG_GUEST_xyz */
int index_register; /* VM_REG_GUEST_xyz */
int segment_register; /* VM_REG_GUEST_xyz */
int64_t displacement; /* optional addr displacement */
int64_t immediate; /* optional immediate operand */
uint8_t decoded; /* set to 1 if successfully decoded */
struct vie_op op; /* opcode description */
};
enum task_switch_reason {
TSR_CALL,
TSR_IRET,
TSR_JMP,
TSR_IDT_GATE /* task gate in IDT */
};
struct vm_task_switch {
uint16_t tsssel; /* new TSS selector */
int ext; /* task switch due to external event */
uint32_t errcode;
int errcode_valid; /* push 'errcode' on the new stack */
enum task_switch_reason reason;
struct vm_guest_paging paging;
};
struct vm_exit {
enum vm_exitcode exitcode;
int inst_length; /* 0 means unknown */
uint64_t rip;
union {
struct vm_inout inout;
struct vm_inout_str inout_str;
struct {
uint64_t gpa;
int fault_type;
} paging;
struct {
uint64_t gpa;
uint64_t gla;
uint64_t cs_base;
int cs_d; /* CS.D */
struct vm_guest_paging paging;
struct vie vie;
} inst_emul;
/*
* VMX specific payload. Used when there is no "better"
* exitcode to represent the VM-exit.
*/
struct {
int status; /* vmx inst status */
/*
* 'exit_reason' and 'exit_qualification' are valid
* only if 'status' is zero.
*/
uint32_t exit_reason;
uint64_t exit_qualification;
/*
* 'inst_error' and 'inst_type' are valid
* only if 'status' is non-zero.
*/
int inst_type;
int inst_error;
} vmx;
struct {
uint32_t code; /* ecx value */
uint64_t wval;
} msr;
struct {
int vcpu;
uint64_t rip;
} spinup_ap;
struct {
uint64_t rflags;
} hlt;
struct {
int vector;
} ioapic_eoi;
struct {
enum vm_suspend_how how;
} suspended;
struct vm_task_switch task_switch;
} u;
};
/* FIXME remove */
struct vm_memory_segment {
uint64_t gpa; /* in */
size_t len;
};
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
uint64_t *rval, int rsize, void *arg);
typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
uint64_t wval, int wsize, void *arg);
uint64_t vie_size2mask(int size);
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
uint64_t *gla);
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
uint64_t rflags, uint64_t gla);
#pragma clang diagnostic pop

View file

@ -26,9 +26,19 @@
* $FreeBSD$
*/
#ifndef _SPINUP_AP_H_
#define _SPINUP_AP_H_
#pragma once
int spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip);
#include <stdint.h>
#endif
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct xsave_limits {
int xsave_enabled;
uint64_t xcr0_allowed;
uint32_t xsave_max_size;
};
#pragma clang diagnostic pop
void vmm_host_state_init(void);
const struct xsave_limits *vmm_get_xsave_limits(void);

View file

@ -26,19 +26,10 @@
* $FreeBSD$
*/
#ifndef _VMM_INSTRUCTION_EMUL_H_
#define _VMM_INSTRUCTION_EMUL_H_
#pragma once
#include <sys/mman.h>
/*
* Callback functions to read and write memory regions.
*/
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
uint64_t *rval, int rsize, void *arg);
typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
uint64_t wval, int wsize, void *arg);
#include <stdint.h>
#include <xhyve/vmm/vmm.h>
/*
* Emulate the decoded 'vie' instruction.
@ -58,22 +49,9 @@ int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
uint64_t val, int size);
/*
* Returns 1 if an alignment check exception should be injected and 0 otherwise.
*/
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
uint64_t rflags, uint64_t gla);
/* Returns 1 if the 'gla' is not canonical and 0 otherwise. */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);
uint64_t vie_size2mask(int size);
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
uint64_t *gla);
#ifdef _KERNEL
/*
* APIs to fetch and decode the instruction from nested page fault handler.
*
@ -111,6 +89,3 @@ void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
#define VIE_INVALID_GLA (1UL << 63) /* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#endif /* _KERNEL */
#endif /* _VMM_INSTRUCTION_EMUL_H_ */

View file

@ -26,12 +26,15 @@
* $FreeBSD$
*/
#ifndef _VMM_IOPORT_H_
#define _VMM_IOPORT_H_
#pragma once
#include <stdint.h>
#include <stdbool.h>
struct vm;
struct vm_exit;
typedef int (*ioport_handler_func_t)(struct vm *vm, int vcpuid,
bool in, int port, int bytes, uint32_t *val);
int vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme, bool *retu);
#endif /* _VMM_IOPORT_H_ */

View file

@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,44 +27,46 @@
* $FreeBSD$
*/
#ifndef _VMM_KTR_H_
#define _VMM_KTR_H_
#pragma once
#include <sys/ktr.h>
#include <sys/pcpu.h>
#include <stdio.h>
#ifndef KTR_VMM
#define KTR_VMM KTR_GEN
#ifdef XHYVE_CONFIG_TRACE
#define vmmtrace printf
#else
#define vmmtrace if (0) printf
#endif
struct vm;
extern const char *vm_name(struct vm *vm);
#define VCPU_CTR0(vm, vcpuid, format) \
CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
vmmtrace("vm %s[%d]: " format "\n", vm_name((vm)), (vcpuid))
#define VCPU_CTR1(vm, vcpuid, format, p1) \
CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
vmmtrace("vm %s[%d]: " format "\n", vm_name((vm)), (vcpuid), (p1))
#define VCPU_CTR2(vm, vcpuid, format, p1, p2) \
CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
vmmtrace("vm %s[%d]: " format "\n", vm_name((vm)), (vcpuid), (p1), (p2))
#define VCPU_CTR3(vm, vcpuid, format, p1, p2, p3) \
CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2), (p3))
vmmtrace("vm %s[%d]: " format "\n", vm_name((vm)), (vcpuid), (p1), (p2), (p3))
#define VCPU_CTR4(vm, vcpuid, format, p1, p2, p3, p4) \
CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
vmmtrace("vm %s[%d]: " format "\n", vm_name((vm)), (vcpuid), \
(p1), (p2), (p3), (p4))
#define VM_CTR0(vm, format) \
CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
vmmtrace("vm %s: " format "\n", vm_name((vm)))
#define VM_CTR1(vm, format, p1) \
CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
vmmtrace("vm %s: " format "\n", vm_name((vm)), (p1))
#define VM_CTR2(vm, format, p1, p2) \
CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
vmmtrace("vm %s: " format "\n", vm_name((vm)), (p1), (p2))
#define VM_CTR3(vm, format, p1, p2, p3) \
CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
vmmtrace("vm %s: " format "\n", vm_name((vm)), (p1), (p2), (p3))
#define VM_CTR4(vm, format, p1, p2, p3, p4) \
CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
#endif
vmmtrace("vm %s: " format "\n", vm_name((vm)), (p1), (p2), (p3), (p4))

View file

@ -26,12 +26,15 @@
* $FreeBSD$
*/
#ifndef _VMM_LAPIC_H_
#define _VMM_LAPIC_H_
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <xhyve/support/misc.h>
struct vm;
boolean_t lapic_msr(u_int num);
bool lapic_msr(u_int num);
int lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval,
bool *retu);
int lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t wval,
@ -71,5 +74,3 @@ lapic_intr_edge(struct vm *vm, int cpu, int vector)
int lapic_set_local_intr(struct vm *vm, int cpu, int vector);
int lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);
#endif

View file

@ -26,18 +26,13 @@
* $FreeBSD$
*/
#ifndef _VMM_MEM_H_
#define _VMM_MEM_H_
#pragma once
#include <stdint.h>
#include <stdlib.h>
struct vmspace;
struct vm_object;
int vmm_mem_init(void);
struct vm_object *vmm_mem_alloc(struct vmspace *, vm_paddr_t gpa, size_t size);
struct vm_object *vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
vm_paddr_t hpa);
void vmm_mem_free(struct vmspace *, vm_paddr_t gpa, size_t size);
void vmm_mmio_free(struct vmspace *, vm_paddr_t gpa, size_t size);
vm_paddr_t vmm_mem_maxaddr(void);
#endif
int vmm_mem_init(void);
void *vmm_mem_alloc(uint64_t gpa, size_t size);
void vmm_mem_free(uint64_t gpa, size_t size, void *object);

View file

@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -29,8 +30,9 @@
* $FreeBSD$
*/
#ifndef _VMM_STAT_H_
#define _VMM_STAT_H_
#pragma once
#include <stdint.h>
struct vm;
@ -46,6 +48,8 @@ struct vmm_stat_type;
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
struct vmm_stat_type *stat);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct vmm_stat_type {
int index; /* position in the stats buffer */
int nelems; /* standalone or array */
@ -53,14 +57,16 @@ struct vmm_stat_type {
vmm_stat_func_t func;
enum vmm_stat_scope scope;
};
#pragma clang diagnostic pop
void vmm_stat_register(void *arg);
#define VMM_STAT_FDEFINE(type, nelems, desc, func, scope) \
struct vmm_stat_type type[1] = { \
{ -1, nelems, desc, func, scope } \
}; \
SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
}
//}; \
// SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
#define VMM_STAT_DEFINE(type, nelems, desc, scope) \
VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
@ -72,8 +78,6 @@ void vmm_stat_register(void *arg);
VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define VMM_STAT_INTEL(type, desc) \
VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define VMM_STAT_AMD(type, desc) \
VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)
#define VMM_STAT_FUNC(type, desc, func) \
VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
@ -95,13 +99,19 @@ static void __inline
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
int statidx, uint64_t x)
{
#ifdef VMM_KEEP_STATS
#ifdef XHYVE_CONFIG_STATS
uint64_t *stats;
stats = vcpu_stats(vm, vcpu);
if (vst->index >= 0 && statidx < vst->nelems)
stats[vst->index + statidx] += x;
#else
(void) vm;
(void) vcpu;
(void) vst;
(void) statidx;
(void) x;
#endif
}
@ -109,13 +119,19 @@ static void __inline
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
int statidx, uint64_t val)
{
#ifdef VMM_KEEP_STATS
#ifdef XHYVE_CONFIG_STATS
uint64_t *stats;
stats = vcpu_stats(vm, vcpu);
if (vst->index >= 0 && statidx < vst->nelems)
stats[vst->index + statidx] = val;
#else
(void) vm;
(void) vcpu;
(void) vst;
(void) statidx;
(void) val;
#endif
}
@ -123,8 +139,13 @@ static void __inline
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
{
#ifdef VMM_KEEP_STATS
#ifdef XHYVE_CONFIG_STATS
vmm_stat_array_incr(vm, vcpu, vst, 0, x);
#else
(void) vm;
(void) vcpu;
(void) vst;
(void) x;
#endif
}
@ -132,8 +153,13 @@ static void __inline
vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
{
#ifdef VMM_KEEP_STATS
#ifdef XHYVE_CONFIG_STATS
vmm_stat_array_set(vm, vcpu, vst, 0, val);
#else
(void) vm;
(void) vcpu;
(void) vst;
(void) val;
#endif
}
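A small sketch of how a counter would be declared and bumped with this machinery; VMEXIT_EXAMPLE is an invented statistic, and registration via vmm_stat_register() is assumed to happen elsewhere now that the SYSINIT hook is commented out.

    /* in a .c file: define a simple per-vcpu counter */
    VMM_STAT(VMEXIT_EXAMPLE, "example vm exits");

    static void
    count_example_exit(struct vm *vm, int vcpu)
    {
        /* no-op unless built with XHYVE_CONFIG_STATS */
        vmm_stat_incr(vm, vcpu, VMEXIT_EXAMPLE, 1);
    }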
@ -157,4 +183,3 @@ VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
#endif

View file

@ -26,15 +26,8 @@
* $FreeBSD$
*/
#ifndef _VMM_UTIL_H_
#define _VMM_UTIL_H_
#pragma once
struct trapframe;
boolean_t vmm_is_intel(void);
boolean_t vmm_is_amd(void);
boolean_t vmm_supports_1G_pages(void);
void dump_trapframe(struct trapframe *tf);
#endif
void dump_trapframe(struct trapframe *tf);

View file

@ -26,8 +26,9 @@
* $FreeBSD$
*/
#ifndef _X86_H_
#define _X86_H_
#pragma once
#include <stdint.h>
#define CPUID_0000_0000 (0x0)
#define CPUID_0000_0001 (0x1)
@ -61,18 +62,3 @@
int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
enum vm_cpuid_capability {
VCC_NONE,
VCC_NO_EXECUTE,
VCC_FFXSR,
VCC_TCE,
VCC_LAST
};
/*
* Return 'true' if the capability 'cap' is enabled in this virtual cpu
* and 'false' otherwise.
*/
bool vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability);
#endif

View file

@ -26,29 +26,55 @@
* $FreeBSD$
*/
#ifndef _IO_PPT_H_
#define _IO_PPT_H_
#pragma once
int ppt_unassign_all(struct vm *vm);
int ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec);
int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
int idx, uint64_t addr, uint64_t msg, uint32_t vector_control);
int ppt_assigned_devices(struct vm *vm);
boolean_t ppt_is_mmio(struct vm *vm, vm_paddr_t gpa);
#include <stdint.h>
#include <xhyve/support/segments.h>
/*
* Returns the number of devices sequestered by the ppt driver for assignment
* to virtual machines.
*/
int ppt_avail_devices(void);
/*
* The following functions should never be called directly.
* Use 'vm_assign_pptdev()' and 'vm_unassign_pptdev()' instead.
*/
int ppt_assign_device(struct vm *vm, int bus, int slot, int func);
int ppt_unassign_device(struct vm *vm, int bus, int slot, int func);
#ifndef CTASSERT /* Allow lint to override */
#define CTASSERT(x) _CTASSERT(x, __LINE__)
#define _CTASSERT(x, y) __CTASSERT(x, y)
#define __CTASSERT(x, y) typedef char __assert ## y[(x) ? 1 : -1]
#endif
#define VMEXIT_CONTINUE (0)
#define VMEXIT_ABORT (-1)
extern int guest_ncpus;
extern char *guest_uuid_str;
extern char *vmname;
void xh_vm_inject_fault(int vcpu, int vector, int errcode_valid,
uint32_t errcode);
static __inline void
vm_inject_ud(int vcpuid)
{
xh_vm_inject_fault(vcpuid, IDT_UD, 0, 0);
}
static __inline void
vm_inject_gp(int vcpuid)
{
xh_vm_inject_fault(vcpuid, IDT_GP, 1, 0);
}
static __inline void
vm_inject_ac(int vcpuid, uint32_t errcode)
{
xh_vm_inject_fault(vcpuid, IDT_AC, 1, errcode);
}
static __inline void
vm_inject_ss(int vcpuid, uint32_t errcode)
{
xh_vm_inject_fault(vcpuid, IDT_SS, 1, errcode);
}
void *paddr_guest2host(uintptr_t addr, size_t len);
void vcpu_set_capabilities(int cpu);
void vcpu_add(int fromcpu, int newcpu, uint64_t rip);
int fbsdrun_vmexit_on_hlt(void);
int fbsdrun_vmexit_on_pause(void);
int fbsdrun_virtio_msix(void);

View file

@ -26,11 +26,10 @@
* $FreeBSD$
*/
#ifndef _XMSR_H_
#define _XMSR_H_
#pragma once
#include <stdint.h>
int init_msr(void);
int emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val);
int emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t *val);
#endif
int emulate_wrmsr(int vcpu, uint32_t code, uint64_t val);
int emulate_rdmsr(int vcpu, uint32_t code, uint64_t *val);

View file

@ -1,13 +0,0 @@
# $FreeBSD$
LIB= vmmapi
SRCS= vmmapi.c vmmapi_freebsd.c
INCS= vmmapi.h
WARNS?= 2
LIBADD= util
CFLAGS+= -I${.CURDIR}
.include <bsd.lib.mk>

File diff suppressed because it is too large

View file

@ -1,173 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VMMAPI_H_
#define _VMMAPI_H_
#include <sys/param.h>
#include <sys/cpuset.h>
/*
* API version for out-of-tree consumers like grub-bhyve for making compile
* time decisions.
*/
#define VMMAPI_VERSION 0101 /* 2 digit major followed by 2 digit minor */
struct iovec;
struct vmctx;
enum x2apic_state;
/*
* Different styles of mapping the memory assigned to a VM into the address
* space of the controlling process.
*/
enum vm_mmap_style {
VM_MMAP_NONE, /* no mapping */
VM_MMAP_ALL, /* fully and statically mapped */
VM_MMAP_SPARSE, /* mappings created on-demand */
};
#define VM_MEM_F_INCORE 0x01 /* include guest memory in core file */
int vm_create(const char *name);
struct vmctx *vm_open(const char *name);
void vm_destroy(struct vmctx *ctx);
int vm_parse_memsize(const char *optarg, size_t *memsize);
int vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
int *wired);
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *fault);
uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
void vm_set_memflags(struct vmctx *ctx, int flags);
size_t vm_get_lowmem_size(struct vmctx *ctx);
size_t vm_get_highmem_size(struct vmctx *ctx);
int vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
uint64_t base, uint32_t limit, uint32_t access);
int vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
uint64_t *base, uint32_t *limit, uint32_t *access);
int vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg,
struct seg_desc *seg_desc);
int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val);
int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval);
int vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *ret_vmexit);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
int vm_reinit(struct vmctx *ctx);
int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
int vm_inject_exception(struct vmctx *ctx, int vcpu, int vector,
int errcode_valid, uint32_t errcode, int restart_instruction);
int vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector);
int vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector);
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
int vm_ioapic_assert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pulse_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
enum vm_intr_trigger trigger);
int vm_inject_nmi(struct vmctx *ctx, int vcpu);
int vm_capability_name2type(const char *capname);
const char *vm_capability_type2name(int type);
int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
int *retval);
int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
int val);
int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot,
int func, uint64_t addr, uint64_t msg, int numvec);
int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot,
int func, int idx, uint64_t addr, uint64_t msg,
uint32_t vector_control);
int vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *i1, uint64_t *i2);
int vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t exit_intinfo);
/*
* Return a pointer to the statistics buffer. Note that this is not MT-safe.
*/
uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
int *ret_entries);
const char *vm_get_stat_desc(struct vmctx *ctx, int index);
int vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *s);
int vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s);
int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
/*
* Translate the GLA range [gla,gla+len) into GPA segments in 'iov'.
* The 'iovcnt' should be big enough to accommodate all GPA segments.
*
* retval fault Interpretation
* 0 0 Success
* 0 1 An exception was injected into the guest
* EFAULT N/A Error
*/
int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg,
uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
int *fault);
void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov,
void *host_dst, size_t len);
void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src,
struct iovec *guest_iov, size_t len);
void vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov,
int iovcnt);
/* RTC */
int vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value);
int vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval);
int vm_rtc_settime(struct vmctx *ctx, time_t secs);
int vm_rtc_gettime(struct vmctx *ctx, time_t *secs);
/* Reset vcpu register state */
int vcpu_reset(struct vmctx *ctx, int vcpu);
int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_activate_cpu(struct vmctx *ctx, int vcpu);
/*
* FreeBSD specific APIs
*/
int vm_setup_freebsd_registers(struct vmctx *ctx, int vcpu,
uint64_t rip, uint64_t cr3, uint64_t gdtbase,
uint64_t rsp);
int vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu,
uint32_t eip, uint32_t gdtbase,
uint32_t esp);
void vm_setup_freebsd_gdt(uint64_t *gdtr);
#endif /* _VMMAPI_H_ */

View file

@ -1,345 +0,0 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/vmm.h>
#include <errno.h>
#include <string.h>
#include "vmmapi.h"
#define I386_TSS_SIZE 104
#define DESC_PRESENT 0x00000080
#define DESC_LONGMODE 0x00002000
#define DESC_DEF32 0x00004000
#define DESC_GRAN 0x00008000
#define DESC_UNUSABLE 0x00010000
#define GUEST_NULL_SEL 0
#define GUEST_CODE_SEL 1
#define GUEST_DATA_SEL 2
#define GUEST_TSS_SEL 3
#define GUEST_GDTR_LIMIT64 (3 * 8 - 1)
static struct segment_descriptor i386_gdt[] = {
{}, /* NULL */
{ .sd_lolimit = 0xffff, .sd_type = SDT_MEMER, /* CODE */
.sd_p = 1, .sd_hilimit = 0xf, .sd_def32 = 1, .sd_gran = 1 },
{ .sd_lolimit = 0xffff, .sd_type = SDT_MEMRW, /* DATA */
.sd_p = 1, .sd_hilimit = 0xf, .sd_def32 = 1, .sd_gran = 1 },
{ .sd_lolimit = I386_TSS_SIZE - 1, /* TSS */
.sd_type = SDT_SYS386TSS, .sd_p = 1 }
};
/*
* Setup the 'vcpu' register set such that it will begin execution at
* 'eip' in flat mode.
*/
int
vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu, uint32_t eip,
uint32_t gdtbase, uint32_t esp)
{
uint64_t cr0, rflags, desc_base;
uint32_t desc_access, desc_limit, tssbase;
uint16_t gsel;
struct segment_descriptor *gdt;
int error, tmp;
/* A 32-bit guest requires unrestricted mode. */
error = vm_get_capability(vmctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
if (error)
goto done;
error = vm_set_capability(vmctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
if (error)
goto done;
cr0 = CR0_PE | CR0_NE;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, 0)) != 0)
goto done;
/*
* Forcing EFER to 0 causes bhyve to clear the "IA-32e guest
* mode" entry control.
*/
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, 0)))
goto done;
gdt = vm_map_gpa(vmctx, gdtbase, 0x1000);
if (gdt == NULL)
return (EFAULT);
memcpy(gdt, i386_gdt, sizeof(i386_gdt));
desc_base = gdtbase;
desc_limit = sizeof(i386_gdt) - 1;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, 0);
if (error != 0)
goto done;
/* Place the TSS one page above the GDT. */
tssbase = gdtbase + 0x1000;
gdt[3].sd_lobase = tssbase;
rflags = 0x2;
error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
if (error)
goto done;
desc_base = 0;
desc_limit = 0xffffffff;
desc_access = DESC_GRAN | DESC_DEF32 | DESC_PRESENT | SDT_MEMERA;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
desc_access = DESC_GRAN | DESC_DEF32 | DESC_PRESENT | SDT_MEMRWA;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
desc_base = tssbase;
desc_limit = I386_TSS_SIZE - 1;
desc_access = DESC_PRESENT | SDT_SYS386BSY;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, 0, 0,
DESC_UNUSABLE);
if (error)
goto done;
gsel = GSEL(GUEST_CODE_SEL, SEL_KPL);
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_DATA_SEL, SEL_KPL);
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_TSS_SEL, SEL_KPL);
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, gsel)) != 0)
goto done;
/* LDTR is pointing to the null selector */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
goto done;
/* entry point */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, eip)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, esp)) != 0)
goto done;
error = 0;
done:
return (error);
}
void
vm_setup_freebsd_gdt(uint64_t *gdtr)
{
gdtr[GUEST_NULL_SEL] = 0;
gdtr[GUEST_CODE_SEL] = 0x0020980000000000;
gdtr[GUEST_DATA_SEL] = 0x0000900000000000;
}
/*
* Setup the 'vcpu' register set such that it will begin execution at
* 'rip' in long mode.
*/
int
vm_setup_freebsd_registers(struct vmctx *vmctx, int vcpu,
uint64_t rip, uint64_t cr3, uint64_t gdtbase,
uint64_t rsp)
{
int error;
uint64_t cr0, cr4, efer, rflags, desc_base;
uint32_t desc_access, desc_limit;
uint16_t gsel;
cr0 = CR0_PE | CR0_PG | CR0_NE;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
goto done;
cr4 = CR4_PAE;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
goto done;
efer = EFER_LME | EFER_LMA;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, efer)))
goto done;
rflags = 0x2;
error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
if (error)
goto done;
desc_base = 0;
desc_limit = 0;
desc_access = 0x0000209B;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
desc_access = 0x00000093;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
/*
* XXX TR is pointing to null selector even though we set the
* TSS segment to be usable with a base address and limit of 0.
*/
desc_access = 0x0000008b;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
if (error)
goto done;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, 0, 0,
DESC_UNUSABLE);
if (error)
goto done;
gsel = GSEL(GUEST_CODE_SEL, SEL_KPL);
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_DATA_SEL, SEL_KPL);
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, gsel)) != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, gsel)) != 0)
goto done;
/* XXX TR is pointing to the null selector */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, 0)) != 0)
goto done;
/* LDTR is pointing to the null selector */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
goto done;
/* entry point */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
goto done;
/* page table base */
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, cr3)) != 0)
goto done;
desc_base = gdtbase;
desc_limit = GUEST_GDTR_LIMIT64;
error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, 0);
if (error != 0)
goto done;
if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, rsp)) != 0)
goto done;
error = 0;
done:
return (error);
}
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -50,26 +51,20 @@
* DSDT -> 0xf2800 (variable - can go up to 0x100000)
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <paths.h>
#include <stdint.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include "bhyverun.h"
#include "acpi.h"
#include "pci_emul.h"
#include <paths.h>
#include <assert.h>
#include <errno.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <xhyve/support/misc.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/xhyve.h>
#include <xhyve/acpi.h>
#include <xhyve/pci_emul.h>
/*
* Define the base address of the ACPI tables, and the offsets to
@ -109,11 +104,14 @@ static FILE *dsdt_fp;
static int dsdt_indent_level;
static int dsdt_error;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct basl_fio {
int fd;
FILE *fp;
char f_name[MAXPATHLEN];
FILE *fp;
char f_name[MAXPATHLEN];
};
#pragma clang diagnostic pop
#define EFPRINTF(...) \
err = fprintf(__VA_ARGS__); if (err < 0) goto err_exit;
@ -613,7 +611,7 @@ basl_fwrite_mcfg(FILE *fp)
EFPRINTF(fp, "[0008]\t\tReserved : 0\n");
EFPRINTF(fp, "\n");
EFPRINTF(fp, "[0008]\t\tBase Address : %016lX\n", pci_ecfg_base());
EFPRINTF(fp, "[0008]\t\tBase Address : %016llx\n", pci_ecfg_base());
EFPRINTF(fp, "[0002]\t\tSegment Group: 0000\n");
EFPRINTF(fp, "[0001]\t\tStart Bus: 00\n");
EFPRINTF(fp, "[0001]\t\tEnd Bus: FF\n");
@ -657,6 +655,8 @@ err_exit:
return (errno);
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
/*
* Helper routines for writing to the DSDT from other modules.
*/
@ -666,6 +666,8 @@ dsdt_line(const char *fmt, ...)
va_list ap;
int err;
err = 0;
if (dsdt_error != 0)
return;
@ -683,6 +685,7 @@ dsdt_line(const char *fmt, ...)
err_exit:
dsdt_error = errno;
}
#pragma clang diagnostic pop
void
dsdt_indent(int levels)
@ -838,13 +841,12 @@ basl_start(struct basl_fio *in, struct basl_fio *out)
static void
basl_end(struct basl_fio *in, struct basl_fio *out)
{
basl_close(in);
basl_close(out);
}
static int
basl_load(struct vmctx *ctx, int fd, uint64_t off)
basl_load(int fd, uint64_t off)
{
struct stat sb;
void *gaddr;
@ -852,18 +854,20 @@ basl_load(struct vmctx *ctx, int fd, uint64_t off)
if (fstat(fd, &sb) < 0)
return (errno);
gaddr = paddr_guest2host(ctx, basl_acpi_base + off, sb.st_size);
gaddr = paddr_guest2host(basl_acpi_base + off, ((size_t) sb.st_size));
if (gaddr == NULL)
return (EFAULT);
if (read(fd, gaddr, sb.st_size) < 0)
if (read(fd, gaddr, ((size_t) sb.st_size)) < 0)
return (errno);
return (0);
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
static int
basl_compile(struct vmctx *ctx, int (*fwrite_section)(FILE *), uint64_t offset)
basl_compile(int (*fwrite_section)(FILE *), uint64_t offset)
{
struct basl_fio io[2];
static char iaslbuf[3*MAXPATHLEN + 10];
@ -897,7 +901,7 @@ basl_compile(struct vmctx *ctx, int (*fwrite_section)(FILE *), uint64_t offset)
* Copy the aml output file into guest
* memory at the specified location
*/
err = basl_load(ctx, io[1].fd, offset);
err = basl_load(io[1].fd, offset);
}
}
basl_end(&io[0], &io[1]);
@ -905,6 +909,7 @@ basl_compile(struct vmctx *ctx, int (*fwrite_section)(FILE *), uint64_t offset)
return (err);
}
#pragma clang diagnostic pop
static int
basl_make_templates(void)
@ -923,9 +928,9 @@ basl_make_templates(void)
tmpdir = _PATH_TMP;
}
len = strlen(tmpdir);
len = (int) strlen(tmpdir);
if ((len + sizeof(BHYVE_ASL_TEMPLATE) + 1) < MAXPATHLEN) {
if ((((unsigned long) len) + sizeof(BHYVE_ASL_TEMPLATE) + 1) < MAXPATHLEN) {
strcpy(basl_template, tmpdir);
while (len > 0 && basl_template[len - 1] == '/')
len--;
@ -938,12 +943,12 @@ basl_make_templates(void)
/*
* len has been initialized (and maybe adjusted) above
*/
if ((len + sizeof(BHYVE_ASL_TEMPLATE) + 1 +
if ((((unsigned long) len) + sizeof(BHYVE_ASL_TEMPLATE) + 1 +
sizeof(BHYVE_ASL_SUFFIX)) < MAXPATHLEN) {
strcpy(basl_stemplate, tmpdir);
basl_stemplate[len] = '/';
strcpy(&basl_stemplate[len + 1], BHYVE_ASL_TEMPLATE);
len = strlen(basl_stemplate);
len = (int) strlen(basl_stemplate);
strcpy(&basl_stemplate[len], BHYVE_ASL_SUFFIX);
} else
err = E2BIG;
@ -966,18 +971,18 @@ static struct {
{ basl_fwrite_mcfg, MCFG_OFFSET },
{ basl_fwrite_facs, FACS_OFFSET },
{ basl_fwrite_dsdt, DSDT_OFFSET },
{ NULL }
{ NULL , 0}
};
int
acpi_build(struct vmctx *ctx, int ncpu)
acpi_build(int ncpu)
{
int err;
int i;
basl_ncpu = ncpu;
err = vm_get_hpet_capabilities(ctx, &hpet_capabilities);
err = xh_vm_get_hpet_capabilities(&hpet_capabilities);
if (err != 0)
return (err);
@ -1003,7 +1008,7 @@ acpi_build(struct vmctx *ctx, int ncpu)
* copying them into guest memory
*/
while (!err && basl_ftables[i].wsect != NULL) {
err = basl_compile(ctx, basl_ftables[i].wsect,
err = basl_compile(basl_ftables[i].wsect,
basl_ftables[i].offset);
i++;
}
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -24,32 +25,23 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <xhyve/support/misc.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/inout.h>
#include <xhyve/pci_lpc.h>
#include "inout.h"
#include "pci_lpc.h"
#define KBD_DATA_PORT 0x60
#define KBD_STS_CTL_PORT 0x64
#define KBD_SYS_FLAG 0x4
#define KBDC_RESET 0xfe
#define KBD_DATA_PORT 0x60
#define KBD_STS_CTL_PORT 0x64
#define KBD_SYS_FLAG 0x4
#define KBDC_RESET 0xfe
static int
atkbdc_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
atkbdc_data_handler(UNUSED int vcpu, UNUSED int in, UNUSED int port, int bytes,
uint32_t *eax, UNUSED void *arg)
{
if (bytes != 1)
return (-1);
@ -60,8 +52,8 @@ atkbdc_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
}
static int
atkbdc_sts_ctl_handler(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg)
atkbdc_sts_ctl_handler(UNUSED int vcpu, int in, UNUSED int port, int bytes,
uint32_t *eax, UNUSED void *arg)
{
int error, retval;
@ -74,7 +66,7 @@ atkbdc_sts_ctl_handler(struct vmctx *ctx, int vcpu, int in, int port,
} else {
switch (*eax) {
case KBDC_RESET: /* Pulse "reset" line. */
error = vm_suspend(ctx, VM_SUSPEND_RESET);
error = xh_vm_suspend(VM_SUSPEND_RESET);
assert(error == 0 || errno == EALREADY);
break;
}
@ -85,6 +77,5 @@ atkbdc_sts_ctl_handler(struct vmctx *ctx, int vcpu, int in, int port,
INOUT_PORT(atkdbc, KBD_DATA_PORT, IOPORT_F_INOUT, atkbdc_data_handler);
SYSRES_IO(KBD_DATA_PORT, 1);
INOUT_PORT(atkbdc, KBD_STS_CTL_PORT, IOPORT_F_INOUT,
atkbdc_sts_ctl_handler);
INOUT_PORT(atkbdc, KBD_STS_CTL_PORT, IOPORT_F_INOUT, atkbdc_sts_ctl_handler);
SYSRES_IO(KBD_STS_CTL_PORT, 1);
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,9 +27,6 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/errno.h>
@ -42,20 +40,25 @@ __FBSDID("$FreeBSD$");
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <unistd.h>
#include <machine/atomic.h>
#include <xhyve/support/atomic.h>
#include <xhyve/xhyve.h>
#include <xhyve/mevent.h>
#include <xhyve/block_if.h>
#include "bhyverun.h"
#include "mevent.h"
#include "block_if.h"
#define BLOCKIF_SIG 0xb109b109
/* xhyve: FIXME
*
* // #define BLOCKIF_NUMTHR 8
*
* OS X does not support preadv/pwritev, so we need to serialize reads and writes
* for the time being until we find a better solution.
*/
#define BLOCKIF_NUMTHR 1
#define BLOCKIF_SIG 0xb109b109
#define BLOCKIF_NUMTHR 8
#define BLOCKIF_MAXREQ (64 + BLOCKIF_NUMTHR)
#define BLOCKIF_MAXREQ (64 + BLOCKIF_NUMTHR)
enum blockop {
BOP_READ,
@ -72,33 +75,34 @@ enum blockstat {
BST_DONE
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct blockif_elem {
TAILQ_ENTRY(blockif_elem) be_link;
struct blockif_req *be_req;
enum blockop be_op;
enum blockstat be_status;
pthread_t be_tid;
off_t be_block;
struct blockif_req *be_req;
enum blockop be_op;
enum blockstat be_status;
pthread_t be_tid;
off_t be_block;
};
struct blockif_ctxt {
int bc_magic;
int bc_fd;
int bc_ischr;
int bc_isgeom;
int bc_candelete;
int bc_rdonly;
off_t bc_size;
int bc_sectsz;
int bc_psectsz;
int bc_psectoff;
int bc_closing;
pthread_t bc_btid[BLOCKIF_NUMTHR];
pthread_mutex_t bc_mtx;
pthread_cond_t bc_cond;
int bc_magic;
int bc_fd;
int bc_ischr;
int bc_isgeom;
int bc_candelete;
int bc_rdonly;
off_t bc_size;
int bc_sectsz;
int bc_psectsz;
int bc_psectoff;
int bc_closing;
pthread_t bc_btid[BLOCKIF_NUMTHR];
pthread_mutex_t bc_mtx;
pthread_cond_t bc_cond;
/* Request elements and free/pending/busy queues */
TAILQ_HEAD(, blockif_elem) bc_freeq;
TAILQ_HEAD(, blockif_elem) bc_freeq;
TAILQ_HEAD(, blockif_elem) bc_pendq;
TAILQ_HEAD(, blockif_elem) bc_busyq;
struct blockif_elem bc_reqs[BLOCKIF_MAXREQ];
@ -107,14 +111,36 @@ struct blockif_ctxt {
static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;
struct blockif_sig_elem {
pthread_mutex_t bse_mtx;
pthread_cond_t bse_cond;
int bse_pending;
struct blockif_sig_elem *bse_next;
pthread_mutex_t bse_mtx;
pthread_cond_t bse_cond;
int bse_pending;
struct blockif_sig_elem *bse_next;
};
static struct blockif_sig_elem *blockif_bse_head;
#pragma clang diagnostic pop
static ssize_t
preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset)
{
off_t res;
res = lseek(fd, offset, SEEK_SET);
assert(res == offset);
return readv(fd, iov, iovcnt);
}
static ssize_t
pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset)
{
off_t res;
res = lseek(fd, offset, SEEK_SET);
assert(res == offset);
return writev(fd, iov, iovcnt);
}
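/*
 * Sketch only, not part of this commit: the lseek()+readv()/writev() pairs
 * above are not atomic on a shared descriptor, which is why BLOCKIF_NUMTHR
 * is pinned to 1 in the FIXME above. One possible interim way to let several
 * worker threads share the fd would be to serialize each pair with a lock;
 * the mutex and function name here are hypothetical.
 */
static pthread_mutex_t blockif_xfer_mtx = PTHREAD_MUTEX_INITIALIZER;

static ssize_t
preadv_locked(int fd, const struct iovec *iov, int iovcnt, off_t offset)
{
	ssize_t nr;

	pthread_mutex_lock(&blockif_xfer_mtx);
	if (lseek(fd, offset, SEEK_SET) == offset)
		nr = readv(fd, iov, iovcnt);
	else
		nr = -1;
	pthread_mutex_unlock(&blockif_xfer_mtx);
	return (nr);
}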
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
enum blockop op)
@ -137,7 +163,7 @@ blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
for (i = 0; i < breq->br_iovcnt; i++)
off += breq->br_iov[i].iov_len;
break;
default:
case BOP_FLUSH:
off = OFF_MAX;
}
be->be_block = off;
@ -202,7 +228,7 @@ static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
struct blockif_req *br;
off_t arg[2];
// off_t arg[2];
ssize_t clen, len, off, boff, voff;
int i, err;
@ -224,18 +250,18 @@ blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
off = voff = 0;
while (br->br_resid > 0) {
len = MIN(br->br_resid, MAXPHYS);
if (pread(bc->bc_fd, buf, len, br->br_offset +
off) < 0) {
if (pread(bc->bc_fd, buf, ((size_t) len), br->br_offset + off) < 0)
{
err = errno;
break;
}
boff = 0;
do {
clen = MIN(len - boff, br->br_iov[i].iov_len -
voff);
memcpy(br->br_iov[i].iov_base + voff,
buf + boff, clen);
if (clen < br->br_iov[i].iov_len - voff)
clen = MIN((len - boff),
(((ssize_t) br->br_iov[i].iov_len) - voff));
memcpy(((void *) (((uintptr_t) br->br_iov[i].iov_base) +
((size_t) voff))), buf + boff, clen);
if (clen < (((ssize_t) br->br_iov[i].iov_len) - voff))
voff += clen;
else {
i++;
@ -266,11 +292,12 @@ blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
len = MIN(br->br_resid, MAXPHYS);
boff = 0;
do {
clen = MIN(len - boff, br->br_iov[i].iov_len -
voff);
memcpy(buf + boff,
br->br_iov[i].iov_base + voff, clen);
if (clen < br->br_iov[i].iov_len - voff)
clen = MIN((len - boff),
(((ssize_t) br->br_iov[i].iov_len) - voff));
memcpy((buf + boff),
((void *) (((uintptr_t) br->br_iov[i].iov_base) +
((size_t) voff))), clen);
if (clen < (((ssize_t) br->br_iov[i].iov_len) - voff))
voff += clen;
else {
i++;
@ -278,7 +305,7 @@ blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
}
boff += clen;
} while (boff < len);
if (pwrite(bc->bc_fd, buf, len, br->br_offset +
if (pwrite(bc->bc_fd, buf, ((size_t) len), br->br_offset +
off) < 0) {
err = errno;
break;
@ -289,28 +316,27 @@ blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
break;
case BOP_FLUSH:
if (bc->bc_ischr) {
if (ioctl(bc->bc_fd, DIOCGFLUSH))
if (ioctl(bc->bc_fd, DKIOCSYNCHRONIZECACHE))
err = errno;
} else if (fsync(bc->bc_fd))
err = errno;
break;
case BOP_DELETE:
if (!bc->bc_candelete)
if (!bc->bc_candelete) {
err = EOPNOTSUPP;
else if (bc->bc_rdonly)
err = EROFS;
else if (bc->bc_ischr) {
arg[0] = br->br_offset;
arg[1] = br->br_resid;
if (ioctl(bc->bc_fd, DIOCGDELETE, arg))
err = errno;
else
br->br_resid = 0;
} else
// } else if (bc->bc_rdonly) {
// err = EROFS;
// } else if (bc->bc_ischr) {
// arg[0] = br->br_offset;
// arg[1] = br->br_resid;
// if (ioctl(bc->bc_fd, DIOCGDELETE, arg)) {
// err = errno;
// } else {
// br->br_resid = 0;
// }
} else {
err = EOPNOTSUPP;
break;
default:
err = EINVAL;
}
break;
}
@ -356,7 +382,8 @@ blockif_thr(void *arg)
}
static void
blockif_sigcont_handler(int signal, enum ev_type type, void *arg)
blockif_sigcont_handler(UNUSED int signal, UNUSED enum ev_type type,
UNUSED void *arg)
{
struct blockif_sig_elem *bse;
@ -388,14 +415,13 @@ blockif_init(void)
}
struct blockif_ctxt *
blockif_open(const char *optstr, const char *ident)
blockif_open(const char *optstr, UNUSED const char *ident)
{
char tname[MAXCOMLEN + 1];
char name[MAXPATHLEN];
// char name[MAXPATHLEN];
char *nopt, *xopts, *cp;
struct blockif_ctxt *bc;
struct stat sbuf;
struct diocgattr_arg arg;
// struct diocgattr_arg arg;
off_t size, psectsz, psectoff;
int extra, fd, i, sectsz;
int nocache, sync, ro, candelete, geom, ssopt, pssopt;
@ -408,6 +434,7 @@ blockif_open(const char *optstr, const char *ident)
sync = 0;
ro = 0;
pssopt = 0;
/*
* The first element in the optstring is always a pathname.
* Optional elements follow
@ -434,8 +461,11 @@ blockif_open(const char *optstr, const char *ident)
}
extra = 0;
if (nocache)
extra |= O_DIRECT;
if (nocache) {
perror("xhyve: nocache support unimplemented");
goto err;
// extra |= O_DIRECT;
}
if (sync)
extra |= O_SYNC;
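	/*
	 * Illustrative alternative, not what this commit does: OS X has no
	 * O_DIRECT, but once the descriptor is open the F_NOCACHE fcntl
	 * (see fcntl(2)) turns the buffer cache off for it, roughly:
	 *
	 *	if (nocache && fcntl(fd, F_NOCACHE, 1) < 0) {
	 *		perror("Could not disable caching");
	 *		goto err;
	 *	}
	 */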
@ -451,34 +481,37 @@ blockif_open(const char *optstr, const char *ident)
goto err;
}
if (fstat(fd, &sbuf) < 0) {
perror("Could not stat backing file");
if (fstat(fd, &sbuf) < 0) {
perror("Could not stat backing file");
goto err;
}
}
/*
/*
* Deal with raw devices
*/
size = sbuf.st_size;
size = sbuf.st_size;
sectsz = DEV_BSIZE;
psectsz = psectoff = 0;
candelete = geom = 0;
if (S_ISCHR(sbuf.st_mode)) {
if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
perror("Could not fetch dev blk/sector size");
goto err;
}
assert(size != 0);
assert(sectsz != 0);
if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
arg.len = sizeof(arg.value.i);
if (ioctl(fd, DIOCGATTR, &arg) == 0)
candelete = arg.value.i;
if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
geom = 1;
perror("xhyve: raw device support unimplemented");
goto err;
// if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
// ioctl(fd, DIOCGSECTORSIZE, &sectsz))
// {
// perror("Could not fetch dev blk/sector size");
// goto err;
// }
// assert(size != 0);
// assert(sectsz != 0);
// if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
// ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
// strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
// arg.len = sizeof(arg.value.i);
// if (ioctl(fd, DIOCGATTR, &arg) == 0)
// candelete = arg.value.i;
// if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
// geom = 1;
} else
psectsz = sbuf.st_blksize;
@ -490,21 +523,21 @@ blockif_open(const char *optstr, const char *ident)
goto err;
}
/*
* Some backend drivers (e.g. cd0, ada0) require that the I/O
* size be a multiple of the device's sector size.
*
* Validate that the emulated sector size complies with this
* requirement.
*/
if (S_ISCHR(sbuf.st_mode)) {
if (ssopt < sectsz || (ssopt % sectsz) != 0) {
fprintf(stderr, "Sector size %d incompatible "
"with underlying device sector size %d\n",
ssopt, sectsz);
goto err;
}
}
// /*
// * Some backend drivers (e.g. cd0, ada0) require that the I/O
// * size be a multiple of the device's sector size.
// *
// * Validate that the emulated sector size complies with this
// * requirement.
// */
// if (S_ISCHR(sbuf.st_mode)) {
// if (ssopt < sectsz || (ssopt % sectsz) != 0) {
// fprintf(stderr, "Sector size %d incompatible "
// "with underlying device sector size %d\n",
// ssopt, sectsz);
// goto err;
// }
// }
sectsz = ssopt;
psectsz = pssopt;
@ -517,7 +550,7 @@ blockif_open(const char *optstr, const char *ident)
goto err;
}
bc->bc_magic = BLOCKIF_SIG;
bc->bc_magic = (int) BLOCKIF_SIG;
bc->bc_fd = fd;
bc->bc_ischr = S_ISCHR(sbuf.st_mode);
bc->bc_isgeom = geom;
@ -525,8 +558,8 @@ blockif_open(const char *optstr, const char *ident)
bc->bc_rdonly = ro;
bc->bc_size = size;
bc->bc_sectsz = sectsz;
bc->bc_psectsz = psectsz;
bc->bc_psectoff = psectoff;
bc->bc_psectsz = (int) psectsz;
bc->bc_psectoff = (int) psectoff;
pthread_mutex_init(&bc->bc_mtx, NULL);
pthread_cond_init(&bc->bc_cond, NULL);
TAILQ_INIT(&bc->bc_freeq);
@ -539,8 +572,6 @@ blockif_open(const char *optstr, const char *ident)
for (i = 0; i < BLOCKIF_NUMTHR; i++) {
pthread_create(&bc->bc_btid[i], NULL, blockif_thr, bc);
snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
pthread_set_name_np(bc->bc_btid[i], tname);
}
return (bc);
@ -583,32 +614,28 @@ blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (blockif_request(bc, breq, BOP_READ));
}
int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (blockif_request(bc, breq, BOP_WRITE));
}
int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (blockif_request(bc, breq, BOP_FLUSH));
}
int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (blockif_request(bc, breq, BOP_DELETE));
}
@ -617,7 +644,7 @@ blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
struct blockif_elem *be;
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
pthread_mutex_lock(&bc->bc_mtx);
/*
@ -696,7 +723,7 @@ blockif_close(struct blockif_ctxt *bc)
err = 0;
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
/*
* Stop the block i/o thread
@ -732,22 +759,22 @@ blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
uint16_t secpt; /* sectors per track */
uint8_t heads;
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
sectors = bc->bc_size / bc->bc_sectsz;
/* Clamp the size to the largest possible with CHS */
if (sectors > 65535UL*16*255)
sectors = 65535UL*16*255;
if (sectors > 65535LL*16*255)
sectors = 65535LL*16*255;
if (sectors >= 65536UL*16*63) {
if (sectors >= 65536LL*16*63) {
secpt = 255;
heads = 16;
hcyl = sectors / secpt;
} else {
secpt = 17;
hcyl = sectors / secpt;
heads = (hcyl + 1023) / 1024;
heads = (uint8_t) ((hcyl + 1023) / 1024);
if (heads < 4)
heads = 4;
@ -764,9 +791,9 @@ blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
}
}
*c = hcyl / heads;
*c = (uint16_t) (hcyl / heads);
*h = heads;
*s = secpt;
*s = (uint8_t) secpt;
}
/*
@ -775,24 +802,21 @@ blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
off_t
blockif_size(struct blockif_ctxt *bc)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (bc->bc_size);
}
int
blockif_sectsz(struct blockif_ctxt *bc)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (bc->bc_sectsz);
}
void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
*size = bc->bc_psectsz;
*off = bc->bc_psectoff;
}
@ -800,23 +824,20 @@ blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
int
blockif_queuesz(struct blockif_ctxt *bc)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (BLOCKIF_MAXREQ - 1);
}
int
blockif_is_ro(struct blockif_ctxt *bc)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (bc->bc_rdonly);
}
int
blockif_candelete(struct blockif_ctxt *bc)
{
assert(bc->bc_magic == BLOCKIF_SIG);
assert(bc->bc_magic == ((int) BLOCKIF_SIG));
return (bc->bc_candelete);
}
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,23 +27,20 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/select.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/select.h>
#include <xhyve/support/misc.h>
#include <xhyve/inout.h>
#include <xhyve/pci_lpc.h>
#include "inout.h"
#include "pci_lpc.h"
#define BVM_CONSOLE_PORT 0x220
#define BVM_CONS_SIG ('b' << 8 | 'v')
#define BVM_CONSOLE_PORT 0x220
#define BVM_CONS_SIG ('b' << 8 | 'v')
static struct termios tio_orig, tio_new;
@ -66,14 +64,15 @@ ttyopen(void)
static bool
tty_char_available(void)
{
fd_set rfds;
struct timeval tv;
fd_set rfds;
struct timeval tv;
FD_ZERO(&rfds);
FD_SET(STDIN_FILENO, &rfds);
tv.tv_sec = 0;
tv.tv_usec = 0;
if (select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv) > 0) {
FD_ZERO(&rfds);
FD_SET(STDIN_FILENO, &rfds);
tv.tv_sec = 0;
tv.tv_usec = 0;
if (select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv) > 0) {
return (true);
} else {
return (false);
@ -100,8 +99,8 @@ ttywrite(unsigned char wb)
}
static int
console_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
console_handler(UNUSED int vcpu, int in, UNUSED int port, int bytes,
uint32_t *eax, UNUSED void *arg)
{
static int opened;
@ -128,9 +127,9 @@ console_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
}
if (in)
*eax = ttyread();
*eax = (uint32_t) ttyread();
else
ttywrite(*eax);
ttywrite((unsigned char) *eax);
return (0);
}
@ -142,12 +141,12 @@ static struct inout_port consport = {
BVM_CONSOLE_PORT,
1,
IOPORT_F_INOUT,
console_handler
console_handler,
NULL
};
void
init_bvmcons(void)
{
register_inout(&consport);
}
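/*
 * Illustrative sketch, not part of this commit: registering another port with
 * the same machinery. The designated field names follow bhyve's struct
 * inout_port and are assumptions here (only register_inout() and the handler
 * signature are visible above); the port number 0x228 is made up.
 */
static uint8_t scratch_latch;

static int
scratch_handler(int vcpu, int in, int port, int bytes, uint32_t *eax,
    void *arg)
{
	(void) vcpu; (void) port; (void) arg;

	if (bytes != 1)
		return (-1);
	if (in)
		*eax = scratch_latch;		/* IN reads back the last value */
	else
		scratch_latch = (uint8_t) *eax;	/* OUT latches a value */
	return (0);
}

static struct inout_port scratchport = {
	.name = "scratch",
	.port = 0x228,
	.size = 1,
	.flags = IOPORT_F_INOUT,
	.handler = scratch_handler,
	.arg = NULL
};

/* during device init: register_inout(&scratchport); */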
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,34 +27,32 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <xhyve/support/misc.h>
#include <xhyve/inout.h>
#include <xhyve/dbgport.h>
#include <xhyve/pci_lpc.h>
#include "inout.h"
#include "dbgport.h"
#include "pci_lpc.h"
#define BVM_DBG_PORT 0x224
#define BVM_DBG_SIG ('B' << 8 | 'V')
#define BVM_DBG_PORT 0x224
#define BVM_DBG_SIG ('B' << 8 | 'V')
static int listen_fd, conn_fd;
static struct sockaddr_in sin;
static int
dbg_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
dbg_handler(UNUSED int vcpu, int in, UNUSED int port, int bytes, uint32_t *eax,
UNUSED void *arg)
{
char ch;
int nwritten, nread, printonce;
@ -81,19 +80,19 @@ again:
}
if (in) {
nread = read(conn_fd, &ch, 1);
nread = (int) read(conn_fd, &ch, 1);
if (nread == -1 && errno == EAGAIN)
*eax = -1;
*eax = (uint32_t) (-1);
else if (nread == 1)
*eax = ch;
*eax = (uint32_t) ch;
else {
close(conn_fd);
conn_fd = -1;
goto again;
}
} else {
ch = *eax;
nwritten = write(conn_fd, &ch, 1);
ch = (char) *eax;
nwritten = (int) write(conn_fd, &ch, 1);
if (nwritten != 1) {
close(conn_fd);
conn_fd = -1;
@ -108,7 +107,8 @@ static struct inout_port dbgport = {
BVM_DBG_PORT,
1,
IOPORT_F_INOUT,
dbg_handler
dbg_handler,
NULL
};
SYSRES_IO(BVM_DBG_PORT, 4);
262
src/firmware/kexec.c Normal file
View file
@ -0,0 +1,262 @@
/*-
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY ???, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/firmware/kexec.h>
#ifndef ALIGNUP
#define ALIGNUP(x, a) (((x - 1) & ~(a - 1)) + a)
#endif
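/*
 * Quick check of the rounding behaviour above (illustrative only; assumes the
 * alignment is a power of two): aligned values are returned unchanged, others
 * are rounded up to the next multiple.
 */
_Static_assert(ALIGNUP(0x100000ull, 0x1000ull) == 0x100000ull, "already aligned");
_Static_assert(ALIGNUP(0x100001ull, 0x1000ull) == 0x101000ull, "rounded up");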
#define BASE_GDT 0x2000ull
#define BASE_ZEROPAGE 0x3000ull
#define BASE_CMDLINE 0x4000ull
#define BASE_KERNEL 0x100000ull
#define HDRS 0x53726448 /* SrdH */
static struct {
uintptr_t base;
size_t size;
} memory, kernel, ramdisk;
static struct {
char *kernel;
char *initrd;
char *cmdline;
} config;
static int
kexec_load_kernel(char *path, char *cmdline) {
uint64_t kernel_offset, kernel_size, kernel_init_size, kernel_start, mem_k;
size_t sz, cmdline_len;
volatile struct zero_page *zp;
FILE *f;
if ((memory.size < (BASE_ZEROPAGE + sizeof(struct zero_page))) ||
((BASE_ZEROPAGE + sizeof(struct zero_page)) > BASE_CMDLINE))
{
return -1;
}
zp = ((struct zero_page *) (memory.base + ((off_t) BASE_ZEROPAGE)));
memset(((void *) ((uintptr_t) zp)), 0, sizeof(struct zero_page));
if (!(f = fopen(path, "r"))) {
return -1;
}
fseek(f, 0L, SEEK_END);
sz = (size_t) ftell(f);
if (sz < (0x01f1 + sizeof(struct setup_header))) {
fclose(f);
return -1;
}
fseek(f, 0x01f1, SEEK_SET);
if (!fread(((void *) ((uintptr_t) &zp->setup_header)), 1,
sizeof(zp->setup_header), f))
{
fclose(f);
return -1;
}
if ((zp->setup_header.setup_sects == 0) || /* way way too old */
(zp->setup_header.boot_flag != 0xaa55) || /* no boot magic */
(zp->setup_header.header != HDRS) || /* way too old */
(zp->setup_header.version < 0x020c) || /* too old */
(!(zp->setup_header.loadflags & 1)) || /* no bzImage */
(sz < (((zp->setup_header.setup_sects + 1) * 512) +
(zp->setup_header.syssize * 16)))) /* too small */
{
/* we can't boot this kernel */
fclose(f);
return -1;
}
kernel_offset = ((zp->setup_header.setup_sects + 1) * 512);
kernel_size = (sz - kernel_offset);
kernel_init_size = ALIGNUP(zp->setup_header.init_size, 0x1000ull);
kernel_start = (zp->setup_header.relocatable_kernel) ?
ALIGNUP(BASE_KERNEL, zp->setup_header.kernel_alignment) :
zp->setup_header.pref_address;
if ((kernel_start < BASE_KERNEL) ||
(kernel_size > kernel_init_size) || /* XXX: always true? */
((kernel_start + kernel_init_size) > memory.size)) /* oom */
{
fclose(f);
return -1;
}
/* copy kernel */
fseek(f, ((long) kernel_offset), SEEK_SET);
if (!fread(((void *) (memory.base + kernel_start)), 1, kernel_size, f)) {
fclose(f);
return -1;
}
fclose(f);
/* copy cmdline */
cmdline_len = strlen(cmdline);
if (((cmdline_len + 1) > zp->setup_header.cmdline_size) ||
((BASE_CMDLINE + (cmdline_len + 1)) > kernel_start))
{
return -1;
}
memcpy(((void *) (memory.base + BASE_CMDLINE)), cmdline, cmdline_len);
memset(((void *) (memory.base + BASE_CMDLINE + cmdline_len)), '\0', 1);
zp->setup_header.cmd_line_ptr = ((uint32_t) BASE_CMDLINE);
zp->ext_cmd_line_ptr = ((uint32_t) (BASE_CMDLINE >> 32));
zp->setup_header.hardware_subarch = 0; /* PC */
zp->setup_header.type_of_loader = 0xd; /* kexec */
mem_k = (memory.size - 0x100000) >> 10; /* assume memory base is at 0 */
zp->alt_mem_k = (mem_k > 0xffffffff) ? 0xffffffff : ((uint32_t) mem_k);
zp->e820_map[0].addr = 0x0000000000000000;
zp->e820_map[0].size = 0x000000000009fc00;
zp->e820_map[0].type = 1;
zp->e820_map[1].addr = 0x0000000000100000;
zp->e820_map[1].size = (memory.size - 0x0000000000100000);
zp->e820_map[1].type = 1;
zp->e820_entries = 2;
kernel.base = kernel_start;
kernel.size = kernel_init_size;
return 0;
}
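/*
 * Standalone sketch, not part of this commit: the same sanity checks as
 * above, applied to a file by raw Linux x86 boot-protocol offsets
 * (0x1f1 setup_sects, 0x1fe boot_flag, 0x202 "HdrS" magic, 0x206 version,
 * 0x211 loadflags). Useful for vetting a candidate bzImage up front; the
 * function name is made up.
 */
static int
kexec_is_usable_bzimage(const char *path)
{
	uint8_t hdr[0x212];
	FILE *f;
	int ok;

	if (!(f = fopen(path, "r")))
		return 0;
	ok = (fread(hdr, 1, sizeof(hdr), f) == sizeof(hdr)) &&
	    (hdr[0x1f1] != 0) &&                            /* setup_sects */
	    (hdr[0x1fe] == 0x55) && (hdr[0x1ff] == 0xaa) && /* boot_flag */
	    (memcmp(&hdr[0x202], "HdrS", 4) == 0) &&        /* header magic */
	    ((uint16_t) ((hdr[0x207] << 8) | hdr[0x206]) >= 0x020c) && /* version */
	    ((hdr[0x211] & 1) != 0);                        /* bzImage (LOADED_HIGH) */
	fclose(f);
	return ok;
}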
static int
kexec_load_ramdisk(char *path) {
uint64_t ramdisk_start;
volatile struct zero_page *zp;
size_t sz;
FILE *f;
zp = ((struct zero_page *) (memory.base + BASE_ZEROPAGE));
if (!(f = fopen(path, "r"))) {
return -1;
}
fseek(f, 0L, SEEK_END);
sz = (size_t) ftell(f);
fseek(f, 0, SEEK_SET);
ramdisk_start = ALIGNUP((kernel.base + kernel.size), 0x1000ull);
if ((ramdisk_start + sz) > memory.size) {
/* not enough memory */
fclose(f);
return -1;
}
/* copy ramdisk */
if (!fread(((void *) (memory.base + ramdisk_start)), 1, sz, f)) {
fclose(f);
return -1;
}
fclose(f);
zp->setup_header.ramdisk_image = ((uint32_t) ramdisk_start);
zp->ext_ramdisk_image = ((uint32_t) (ramdisk_start >> 32));
zp->setup_header.ramdisk_size = ((uint32_t) sz);
zp->ext_ramdisk_size = ((uint32_t) (sz >> 32));
ramdisk.base = ramdisk_start;
ramdisk.size = sz;
return 0;
}
void
kexec_init(char *kernel_path, char *initrd_path, char *cmdline) {
config.kernel = kernel_path;
config.initrd = initrd_path;
config.cmdline = cmdline;
}
uint64_t
kexec(void)
{
uint64_t *gdt_entry;
void *gpa_map;
gpa_map = xh_vm_map_gpa(0, xh_vm_get_lowmem_size());
memory.base = (uintptr_t) gpa_map;
memory.size = xh_vm_get_lowmem_size();
if (kexec_load_kernel(config.kernel,
config.cmdline ? config.cmdline : "auto"))
{
fprintf(stderr, "kexec: failed to load kernel %s\n", config.kernel);
abort();
}
if (config.initrd && kexec_load_ramdisk(config.initrd)) {
fprintf(stderr, "kexec: failed to load initrd %s\n", config.initrd);
abort();
}
gdt_entry = ((uint64_t *) (memory.base + BASE_GDT));
gdt_entry[0] = 0x0000000000000000; /* null */
gdt_entry[1] = 0x0000000000000000; /* null */
gdt_entry[2] = 0x00cf9a000000ffff; /* code */
gdt_entry[3] = 0x00cf92000000ffff; /* data */
xh_vcpu_reset(0);
xh_vm_set_desc(0, VM_REG_GUEST_GDTR, BASE_GDT, 0x1f, 0);
xh_vm_set_desc(0, VM_REG_GUEST_CS, 0, 0xffffffff, 0xc09b);
xh_vm_set_desc(0, VM_REG_GUEST_DS, 0, 0xffffffff, 0xc093);
xh_vm_set_desc(0, VM_REG_GUEST_ES, 0, 0xffffffff, 0xc093);
xh_vm_set_desc(0, VM_REG_GUEST_SS, 0, 0xffffffff, 0xc093);
xh_vm_set_register(0, VM_REG_GUEST_CS, 0x10);
xh_vm_set_register(0, VM_REG_GUEST_DS, 0x18);
xh_vm_set_register(0, VM_REG_GUEST_ES, 0x18);
xh_vm_set_register(0, VM_REG_GUEST_SS, 0x18);
xh_vm_set_register(0, VM_REG_GUEST_CR0, 0x21); /* enable protected mode */
xh_vm_set_register(0, VM_REG_GUEST_RBP, 0);
xh_vm_set_register(0, VM_REG_GUEST_RDI, 0);
xh_vm_set_register(0, VM_REG_GUEST_RBX, 0);
xh_vm_set_register(0, VM_REG_GUEST_RFLAGS, 0x2);
xh_vm_set_register(0, VM_REG_GUEST_RSI, BASE_ZEROPAGE);
xh_vm_set_register(0, VM_REG_GUEST_RIP, kernel.base);
return kernel.base;
}
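/*
 * Explanatory sketch, not from the sources: 0x00cf9a000000ffff above is a
 * flat 4 GiB ring-0 code descriptor (base 0, limit 0xfffff, G=1, D/B=1,
 * access 0x9a) and 0x00cf92000000ffff the matching writable data descriptor
 * (access 0x92). The selectors are byte offsets into the GDT at BASE_GDT, so
 * CS=0x10 picks entry 2 and DS/ES/SS=0x18 entry 3. CR0=0x21 is PE|NE
 * (protected mode, paging off), and per the 32-bit Linux boot protocol %esi
 * carries the zero page and %eip the kernel's 32-bit entry point, which is
 * exactly what the RSI/RIP writes above provide.
 */
_Static_assert(2 * sizeof(uint64_t) == 0x10, "CS selector 0x10 is GDT entry 2");
_Static_assert(3 * sizeof(uint64_t) == 0x18, "DS selector 0x18 is GDT entry 3");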
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,27 +27,18 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/_iovec.h>
#include <sys/mman.h>
#include <x86/psl.h>
#include <x86/segments.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include "bhyverun.h"
#include "inout.h"
#include <sys/uio.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/linker_set.h>
#include <xhyve/support/psl.h>
#include <xhyve/support/segments.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/xhyve.h>
#include <xhyve/inout.h>
SET_DECLARE(inout_port_set, struct inout_port);
@ -55,32 +47,35 @@ SET_DECLARE(inout_port_set, struct inout_port);
#define VERIFY_IOPORT(port, size) \
assert((port) >= 0 && (size) > 0 && ((port) + (size)) <= MAX_IOPORTS)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
static struct {
const char *name;
int flags;
inout_func_t handler;
void *arg;
const char *name;
int flags;
inout_func_t handler;
void *arg;
} inout_handlers[MAX_IOPORTS];
#pragma clang diagnostic pop
static int
default_inout(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
default_inout(UNUSED int vcpu, int in, UNUSED int port, int bytes,
uint32_t *eax, UNUSED void *arg)
{
if (in) {
switch (bytes) {
case 4:
*eax = 0xffffffff;
break;
case 2:
*eax = 0xffff;
break;
case 1:
*eax = 0xff;
break;
}
}
return (0);
if (in) {
switch (bytes) {
case 4:
*eax = 0xffffffff;
break;
case 2:
*eax = 0xffff;
break;
case 1:
*eax = 0xff;
break;
}
}
return (0);
}
static void
@ -94,14 +89,42 @@ register_default_iohandler(int start, int size)
iop.name = "default";
iop.port = start;
iop.size = size;
iop.flags = IOPORT_F_INOUT | IOPORT_F_DEFAULT;
iop.flags = (int) (IOPORT_F_INOUT | IOPORT_F_DEFAULT);
iop.handler = default_inout;
register_inout(&iop);
}
static int
update_register(int vcpuid, enum vm_reg_name reg,
uint64_t val, int size)
{
int error;
uint64_t origval;
switch (size) {
case 1:
case 2:
error = xh_vm_get_register(vcpuid, reg, &origval);
if (error)
return (error);
val &= vie_size2mask(size);
val |= origval & ~vie_size2mask(size);
break;
case 4:
val &= 0xffffffffUL;
break;
case 8:
break;
default:
return (EINVAL);
}
return xh_vm_set_register(vcpuid, reg, val);
}
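/*
 * Worked example of the masking above (illustrative): with size 2,
 * origval 0x1122334455667788 and val 0xaaaabbbb, only the low word of the
 * destination changes, while a 4-byte access is zero-extended, matching the
 * semantics x86 applies to sub-register writes.
 */
_Static_assert(((0xaaaabbbbULL & 0xffff) | (0x1122334455667788ULL & ~0xffffULL)) ==
    0x112233445566bbbbULL, "a 2-byte update preserves the upper six bytes");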
int
emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
emulate_inout(int vcpu, struct vm_exit *vmexit, int strict)
{
int addrsize, bytes, flags, in, port, prot, rep;
uint32_t eax, val;
@ -141,7 +164,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
vis = &vmexit->u.inout_str;
rep = vis->inout.rep;
addrsize = vis->addrsize;
prot = in ? PROT_WRITE : PROT_READ;
prot = in ? XHYVE_PROT_WRITE : XHYVE_PROT_READ;
assert(addrsize == 2 || addrsize == 4 || addrsize == 8);
/* Index register */
@ -152,18 +175,18 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
count = vis->count & vie_size2mask(addrsize);
/* Limit number of back-to-back in/out emulations to 16 */
iterations = MIN(count, 16);
iterations = min(count, 16);
while (iterations > 0) {
assert(retval == 0);
if (vie_calculate_gla(vis->paging.cpu_mode,
vis->seg_name, &vis->seg_desc, index, bytes,
addrsize, prot, &gla)) {
vm_inject_gp(ctx, vcpu);
vm_inject_gp(vcpu);
break;
}
error = vm_copy_setup(ctx, vcpu, &vis->paging, gla,
bytes, prot, iov, nitems(iov), &fault);
error = xh_vm_copy_setup(vcpu, &vis->paging, gla,
((size_t) bytes), prot, iov, nitems(iov), &fault);
if (error) {
retval = -1; /* Unrecoverable error */
break;
@ -174,33 +197,33 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
if (vie_alignment_check(vis->paging.cpl, bytes,
vis->cr0, vis->rflags, gla)) {
vm_inject_ac(ctx, vcpu, 0);
vm_inject_ac(vcpu, 0);
break;
}
val = 0;
if (!in)
vm_copyin(ctx, vcpu, iov, &val, bytes);
xh_vm_copyin(iov, &val, ((size_t) bytes));
retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
retval = handler(vcpu, in, port, bytes, &val, arg);
if (retval != 0)
break;
if (in)
vm_copyout(ctx, vcpu, &val, iov, bytes);
xh_vm_copyout(&val, iov, ((size_t) bytes));
/* Update index */
if (vis->rflags & PSL_D)
index -= bytes;
index -= ((uint64_t) bytes);
else
index += bytes;
index += ((uint64_t) bytes);
count--;
iterations--;
}
/* Update index register */
error = vie_update_register(ctx, vcpu, idxreg, index, addrsize);
error = update_register(vcpu, idxreg, index, addrsize);
assert(error == 0);
/*
@ -208,25 +231,23 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
* prefix.
*/
if (rep) {
error = vie_update_register(ctx, vcpu, VM_REG_GUEST_RCX,
count, addrsize);
error = update_register(vcpu, VM_REG_GUEST_RCX, count, addrsize);
assert(error == 0);
}
/* Restart the instruction if more iterations remain */
if (retval == 0 && count != 0) {
error = vm_restart_instruction(ctx, vcpu);
error = xh_vm_restart_instruction(vcpu);
assert(error == 0);
}
} else {
eax = vmexit->u.inout.eax;
val = eax & vie_size2mask(bytes);
retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
retval = handler(vcpu, in, port, bytes, &val, arg);
if (retval == 0 && in) {
eax &= ~vie_size2mask(bytes);
eax |= val & vie_size2mask(bytes);
error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX,
eax);
error = xh_vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
assert(error == 0);
}
}
@ -267,9 +288,9 @@ register_inout(struct inout_port *iop)
* Verify that the new registration is not overwriting an already
* allocated i/o range.
*/
if ((iop->flags & IOPORT_F_DEFAULT) == 0) {
if ((((unsigned) iop->flags) & IOPORT_F_DEFAULT) == 0) {
for (i = iop->port; i < iop->port + iop->size; i++) {
if ((inout_handlers[i].flags & IOPORT_F_DEFAULT) == 0)
if ((((unsigned) inout_handlers[i].flags) & IOPORT_F_DEFAULT) == 0)
return (-1);
}
}
View file
@ -1,6 +1,7 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -25,15 +26,8 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include "ioapic.h"
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/ioapic.h>
/*
* Assign PCI INTx interrupts to I/O APIC pins in a round-robin
@ -47,10 +41,10 @@ __FBSDID("$FreeBSD$");
static int pci_pins;
void
ioapic_init(struct vmctx *ctx)
ioapic_init(void)
{
if (vm_ioapic_pincount(ctx, &pci_pins) < 0) {
if (xh_vm_ioapic_pincount(&pci_pins) < 0) {
pci_pins = 0;
return;
}
284
src/md5c.c Normal file
View file
@ -0,0 +1,284 @@
/*-
* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*
* This code is the same as the code published by RSA Inc. It has been
* edited for clarity and style only.
*/
#include <stdint.h>
#include <string.h>
#include <xhyve/support/md5.h>
static void MD5Transform(u_int32_t [4], const unsigned char [64]);
static unsigned char PADDING[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* F, G, H and I are basic MD5 functions. */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits. */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/*
* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
* Rotation is separate from addition to prevent recomputation.
*/
#define FF(a, b, c, d, x, s, ac) { \
(a) += F ((b), (c), (d)) + (x) + (u_int32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) { \
(a) += G ((b), (c), (d)) + (x) + (u_int32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) { \
(a) += H ((b), (c), (d)) + (x) + (u_int32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) { \
(a) += I ((b), (c), (d)) + (x) + (u_int32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/* MD5 initialization. Begins an MD5 operation, writing a new context. */
void
MD5Init(context)
MD5_CTX *context;
{
context->count[0] = context->count[1] = 0;
/* Load magic initialization constants. */
context->state[0] = 0x67452301;
context->state[1] = 0xefcdab89;
context->state[2] = 0x98badcfe;
context->state[3] = 0x10325476;
}
/*
* MD5 block update operation. Continues an MD5 message-digest
* operation, processing another message block, and updating the
* context.
*/
void
MD5Update(context, in, inputLen)
MD5_CTX *context;
const void *in;
unsigned int inputLen;
{
unsigned int i, index, partLen;
const unsigned char *input = in;
/* Compute number of bytes mod 64 */
index = (unsigned int)((context->count[0] >> 3) & 0x3F);
/* Update number of bits */
if ((context->count[0] += ((u_int32_t)inputLen << 3))
< ((u_int32_t)inputLen << 3))
context->count[1]++;
context->count[1] += ((u_int32_t)inputLen >> 29);
partLen = 64 - index;
/* Transform as many times as possible. */
if (inputLen >= partLen) {
memcpy((void *)&context->buffer[index], (const void *)input,
partLen);
MD5Transform(context->state, context->buffer);
for (i = partLen; i + 63 < inputLen; i += 64)
MD5Transform(context->state, &input[i]);
index = 0;
}
else
i = 0;
/* Buffer remaining input */
memcpy((void *)&context->buffer[index], (const void *)&input[i],
inputLen-i);
}
/*
* MD5 padding. Adds padding followed by original length.
*/
static void
MD5Pad(MD5_CTX *context)
{
unsigned char bits[8];
unsigned int index, padLen;
/* Save number of bits */
memcpy(bits, context->count, 8);
/* Pad out to 56 mod 64. */
index = (unsigned int)((context->count[0] >> 3) & 0x3f);
padLen = (index < 56) ? (56 - index) : (120 - index);
MD5Update(context, PADDING, padLen);
/* Append length (before padding) */
MD5Update(context, bits, 8);
}
/*
* MD5 finalization. Ends an MD5 message-digest operation, writing the
* message digest and zeroizing the context.
*/
void
MD5Final (digest, context)
unsigned char digest[16];
MD5_CTX *context;
{
/* Do padding. */
MD5Pad(context);
/* Store state in digest */
memcpy(digest, context->state, 16);
/* Zeroize sensitive information. */
memset((void *)context, 0, sizeof (*context));
}
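/*
 * Usage sketch (illustrative, not part of the original file): computing a
 * digest with the three-call API above and formatting it as hex. Assumes
 * <stdio.h> for snprintf(); the function name is made up.
 */
static void
md5_hex(const void *buf, unsigned int len, char out[33])
{
	MD5_CTX ctx;
	unsigned char digest[16];
	int i;

	MD5Init(&ctx);
	MD5Update(&ctx, buf, len);
	MD5Final(digest, &ctx);
	for (i = 0; i < 16; i++)
		snprintf(&out[i * 2], 3, "%02x", digest[i]);
}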
/* MD5 basic transformation. Transforms state based on block. */
static void
MD5Transform (state, block)
u_int32_t state[4];
const unsigned char block[64];
{
u_int32_t a = state[0], b = state[1], c = state[2], d = state[3], x[16];
memcpy(x, block, 64);
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */
FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */
FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */
FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */
FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */
FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */
FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */
FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */
FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */
FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */
FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */
FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */
FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */
FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */
FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */
FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */
GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */
HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
/* Zeroize sensitive information. */
memset((void *)x, 0, sizeof (x));
}
View file
@ -1,5 +1,6 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -32,33 +33,30 @@
* so it can be searched within the range.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>
#include <assert.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/tree.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/mem.h>
#include "mem.h"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct mmio_rb_range {
RB_ENTRY(mmio_rb_range) mr_link; /* RB tree links */
struct mem_range mr_param;
uint64_t mr_base;
uint64_t mr_end;
RB_ENTRY(mmio_rb_range) mr_link; /* RB tree links */
struct mem_range mr_param;
uint64_t mr_base;
uint64_t mr_end;
};
#pragma clang diagnostic pop
struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
/*
* Per-vCPU cache. Since most accesses from a vCPU will be to
@ -132,38 +130,40 @@ mmio_rb_dump(struct mmio_rb_tree *rbt)
}
#endif
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare)
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
mem_read(UNUSED void *unused, int vcpu, uint64_t gpa, uint64_t *rval, int size,
void *arg)
{
int error;
struct mem_range *mr = arg;
error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
rval, mr->arg1, mr->arg2);
error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
mr->arg2);
return (error);
}
static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
mem_write(UNUSED void* unused, int vcpu, uint64_t gpa, uint64_t wval, int size,
void *arg)
{
int error;
struct mem_range *mr = arg;
error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
&wval, mr->arg1, mr->arg2);
error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
mr->arg2);
return (error);
}
int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging)
emulate_mem(int vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging)
{
struct mmio_rb_range *entry;
int err, immutable;
pthread_rwlock_rdlock(&mmio_rwlock);
/*
* First check the per-vCPU cache
@ -202,8 +202,8 @@ emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
if (immutable)
pthread_rwlock_unlock(&mmio_rwlock);
err = vmm_emulate_instruction(ctx, vcpu, paddr, vie, paging,
mem_read, mem_write, &entry->mr_param);
err = xh_vm_emulate_instruction(vcpu, paddr, vie, paging, mem_read,
mem_write, &entry->mr_param);
if (!immutable)
pthread_rwlock_unlock(&mmio_rwlock);


@ -31,24 +31,18 @@
* using kqueue, and having events be persistent by default.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <pthread.h>
#include <pthread_np.h>
#include "mevent.h"
#include <xhyve/support/misc.h>
#include <xhyve/mevent.h>
#define MEVENT_MAX 64
@ -64,18 +58,21 @@ static int mevent_timid = 43;
static int mevent_pipefd[2];
static pthread_mutex_t mevent_lmutex = PTHREAD_MUTEX_INITIALIZER;
struct mevent {
void (*me_func)(int, enum ev_type, void *);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct mevent {
void (*me_func)(int, enum ev_type, void *);
#define me_msecs me_fd
int me_fd;
int me_timid;
int me_fd;
int me_timid;
enum ev_type me_type;
void *me_param;
int me_cq;
int me_state;
int me_closefd;
LIST_ENTRY(mevent) me_list;
void *me_param;
int me_cq;
int me_state;
int me_closefd;
LIST_ENTRY(mevent) me_list;
};
#pragma clang diagnostic pop
static LIST_HEAD(listhead, mevent) global_head, change_head;
@ -92,10 +89,10 @@ mevent_qunlock(void)
}
static void
mevent_pipe_read(int fd, enum ev_type type, void *param)
mevent_pipe_read(int fd, UNUSED enum ev_type type, UNUSED void *param)
{
char buf[MEVENT_MAX];
int status;
ssize_t status;
/*
* Drain the pipe read side. The fd is non-blocking so this is
@ -169,14 +166,14 @@ mevent_kq_flags(struct mevent *mevp)
}
static int
mevent_kq_fflags(struct mevent *mevp)
mevent_kq_fflags(UNUSED struct mevent *mevp)
{
/* XXX nothing yet, perhaps EV_EOF for reads ? */
return (0);
}
static int
mevent_build(int mfd, struct kevent *kev)
mevent_build(UNUSED int mfd, struct kevent *kev)
{
struct mevent *mevp, *tmpp;
int i;
@ -194,15 +191,15 @@ mevent_build(int mfd, struct kevent *kev)
close(mevp->me_fd);
} else {
if (mevp->me_type == EVF_TIMER) {
kev[i].ident = mevp->me_timid;
kev[i].ident = (uintptr_t) mevp->me_timid;
kev[i].data = mevp->me_msecs;
} else {
kev[i].ident = mevp->me_fd;
kev[i].ident = (uintptr_t) mevp->me_fd;
kev[i].data = 0;
}
kev[i].filter = mevent_kq_filter(mevp);
kev[i].flags = mevent_kq_flags(mevp);
kev[i].fflags = mevent_kq_fflags(mevp);
kev[i].filter = (int16_t) mevent_kq_filter(mevp);
kev[i].flags = (uint16_t) mevent_kq_flags(mevp);
kev[i].fflags = (uint32_t) mevent_kq_fflags(mevp);
kev[i].udata = mevp;
i++;
}
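The casts added in this hunk exist because, on OS X, struct kevent declares ident as uintptr_t, filter as int16_t, flags as uint16_t and fflags as uint32_t, so the int-typed mevent fields need explicit conversions. As a standalone illustration of the kqueue pattern mevent builds on (not xhyve code; hypothetical values), registering and waiting for a 100 ms EVFILT_TIMER looks like this:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int kq = kqueue();
        struct kevent kev, out;

        /* periodic timer, fires every 100 ms, identified by an arbitrary id */
        EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 100, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);   /* register the change */

        kevent(kq, NULL, 0, &out, 1, NULL);   /* block until the timer fires */
        printf("timer %lu fired\n", (unsigned long) out.ident);
        close(kq);
        return (0);
    }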
@ -388,11 +385,9 @@ mevent_delete_close(struct mevent *evp)
static void
mevent_set_name(void)
{
pthread_set_name_np(mevent_tid, "mevent");
}
void
__attribute__ ((noreturn)) void
mevent_dispatch(void)
{
struct kevent changelist[MEVENT_MAX];


@ -33,19 +33,18 @@
* cc mevent_test.c mevent.c -lpthread
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <machine/cpufunc.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include "mevent.h"
#include <xhyve/mevent.h>
#define TEST_PORT 4321
@ -63,6 +62,15 @@ char *vmname = "test vm";
#define TEVSZ 4096
uint64_t tevbuf[TEVSZ];
static __inline uint64_t rdtsc(void)
{
unsigned a, d;
__asm__ __volatile__ ("cpuid");
__asm__ __volatile__ ("rdtsc" : "=a" (a), "=d" (d));
return (((uint64_t) a) | (((uint64_t) d) << 32));
}
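The rdtsc() helper added above issues cpuid purely as a serializing barrier before reading the time-stamp counter. A hypothetical usage sketch for timing a code region in TSC cycles (not part of the diff):

    uint64_t t0, t1;

    t0 = rdtsc();
    /* ... code being measured ... */
    t1 = rdtsc();
    printf("elapsed: %llu cycles\n", (unsigned long long) (t1 - t0));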
static void
timer_print(void)
{
@ -87,7 +95,7 @@ timer_print(void)
max = diff;
}
printf("timers done: usecs, min %ld, max %ld, mean %ld\n", min, max,
printf("timers done: usecs, min %llu, max %llu, mean %llu\n", min, max,
sum/(TEVSZ - 1));
}
@ -246,11 +254,14 @@ acceptor(void *param)
return (NULL);
}
main()
int
main(void)
{
pthread_t tid;
pthread_create(&tid, NULL, acceptor, NULL);
mevent_dispatch();
return (0);
}


@ -1,5 +1,6 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,20 +27,19 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/errno.h>
#include <x86/mptable.h>
// #include <x86/mptable.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "acpi.h"
#include "bhyverun.h"
#include "mptbl.h"
#include "pci_emul.h"
#include <sys/types.h>
#include <sys/errno.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/mptable.h>
#include <xhyve/acpi.h>
#include <xhyve/xhyve.h>
#include <xhyve/mptbl.h>
#include <xhyve/pci_emul.h>
#define MPTABLE_BASE 0xF0000
@ -95,16 +95,15 @@ mpt_compute_checksum(void *base, size_t len)
sum += *bytes++;
}
return (256 - sum);
return ((uint8_t) (256 - sum));
}
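The added cast keeps mpt_compute_checksum() returning a uint8_t; the value is chosen so that all bytes of the structure, including the checksum byte itself, sum to zero modulo 256, as the MP specification requires. A small hypothetical verification helper (not in the tree):

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t
    mpt_verify_checksum(const uint8_t *base, size_t len)
    {
        uint8_t sum = 0;

        while (len--)
            sum += *base++;   /* uint8_t arithmetic wraps modulo 256 */
        return (sum);         /* 0 for a correctly checksummed structure */
    }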
static void
mpt_build_mpfp(mpfps_t mpfp, vm_paddr_t gpa)
mpt_build_mpfp(mpfps_t mpfp, uint64_t gpa)
{
memset(mpfp, 0, sizeof(*mpfp));
memcpy(mpfp->signature, MPFP_SIG, 4);
mpfp->pap = gpa + sizeof(*mpfp);
mpfp->pap = (uint32_t) (gpa + sizeof(*mpfp));
mpfp->length = 1;
mpfp->spec_rev = MP_SPECREV;
mpfp->checksum = mpt_compute_checksum(mpfp, sizeof(*mpfp));
@ -130,7 +129,7 @@ mpt_build_proc_entries(proc_entry_ptr mpep, int ncpu)
for (i = 0; i < ncpu; i++) {
memset(mpep, 0, sizeof(*mpep));
mpep->type = MPCT_ENTRY_PROCESSOR;
mpep->apic_id = i; // XXX
mpep->apic_id = (uint8_t) i; // XXX
mpep->apic_version = LAPIC_VERSION;
mpep->cpu_flags = PROCENTRY_FLAG_EN;
if (i == 0)
@ -187,7 +186,7 @@ mpt_build_ioapic_entries(io_apic_entry_ptr mpei, int id)
memset(mpei, 0, sizeof(*mpei));
mpei->type = MPCT_ENTRY_IOAPIC;
mpei->apic_id = id;
mpei->apic_id = (uint8_t) id;
mpei->apic_version = IOAPIC_VERSION;
mpei->apic_flags = IOAPICENTRY_FLAG_EN;
mpei->apic_address = IOAPIC_PADDR;
@ -210,8 +209,8 @@ mpt_count_ioint_entries(void)
}
static void
mpt_generate_pci_int(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
void *arg)
mpt_generate_pci_int(int bus, int slot, int pin, UNUSED int pirq_pin,
int ioapic_irq, void *arg)
{
int_entry_ptr *mpiep, mpie;
@ -225,10 +224,10 @@ mpt_generate_pci_int(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
*/
mpie->type = MPCT_ENTRY_INT;
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_id = bus;
mpie->src_bus_irq = slot << 2 | (pin - 1);
mpie->src_bus_id = (uint8_t) bus;
mpie->src_bus_irq = (uint8_t) (slot << 2 | (pin - 1));
mpie->dst_apic_id = mpie[-1].dst_apic_id;
mpie->dst_apic_int = ioapic_irq;
mpie->dst_apic_int = (uint8_t) ioapic_irq;
*mpiep = mpie + 1;
}
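In the hunk above, src_bus_irq packs the PCI slot into the upper bits and the zero-based interrupt pin into the low two bits, which is how the MP table identifies a PCI interrupt source; the new casts only make the narrowing to uint8_t explicit. A worked example with hypothetical values:

    int slot = 3, pin = 1;   /* INTA# of a device in slot 3 */
    uint8_t src_bus_irq = (uint8_t) (slot << 2 | (pin - 1));   /* == 0x0c */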
@ -249,13 +248,13 @@ mpt_build_ioint_entries(int_entry_ptr mpie, int id)
memset(mpie, 0, sizeof(*mpie));
mpie->type = MPCT_ENTRY_INT;
mpie->src_bus_id = 1;
mpie->dst_apic_id = id;
mpie->dst_apic_id = (uint8_t) id;
/*
* All default configs route IRQs from bus 0 to the first 16
* pins of the first I/O APIC with an APIC ID of 2.
*/
mpie->dst_apic_int = pin;
mpie->dst_apic_int = (uint8_t) pin;
switch (pin) {
case 0:
/* Pin 0 is an ExtINT pin. */
@ -276,7 +275,7 @@ mpt_build_ioint_entries(int_entry_ptr mpie, int id)
default:
/* All other pins are identity mapped. */
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_irq = pin;
mpie->src_bus_irq = (uint8_t) pin;
break;
}
mpie++;
@ -296,7 +295,7 @@ mptable_add_oemtbl(void *tbl, int tblsz)
}
int
mptable_build(struct vmctx *ctx, int ncpu)
mptable_build(int ncpu)
{
mpcth_t mpch;
bus_entry_ptr mpeb;
@ -308,7 +307,7 @@ mptable_build(struct vmctx *ctx, int ncpu)
char *curraddr;
char *startaddr;
startaddr = paddr_guest2host(ctx, MPTABLE_BASE, MPTABLE_MAX_LENGTH);
startaddr = paddr_guest2host(MPTABLE_BASE, MPTABLE_MAX_LENGTH);
if (startaddr == NULL) {
fprintf(stderr, "mptable requires mapped mem\n");
return (ENOMEM);
@ -340,7 +339,7 @@ mptable_build(struct vmctx *ctx, int ncpu)
mpep = (proc_entry_ptr)curraddr;
mpt_build_proc_entries(mpep, ncpu);
curraddr += sizeof(*mpep) * ncpu;
curraddr += sizeof(*mpep) * ((uint64_t) ncpu);
mpch->entry_count += ncpu;
mpeb = (bus_entry_ptr) curraddr;
@ -356,7 +355,7 @@ mptable_build(struct vmctx *ctx, int ncpu)
mpie = (int_entry_ptr) curraddr;
ioints = mpt_count_ioint_entries();
mpt_build_ioint_entries(mpie, 0);
curraddr += sizeof(*mpie) * ioints;
curraddr += sizeof(*mpie) * ((uint64_t) ioints);
mpch->entry_count += ioints;
mpie = (int_entry_ptr)curraddr;
@ -365,12 +364,13 @@ mptable_build(struct vmctx *ctx, int ncpu)
mpch->entry_count += MPEII_NUM_LOCAL_IRQ;
if (oem_tbl_start) {
mpch->oem_table_pointer = curraddr - startaddr + MPTABLE_BASE;
mpch->oem_table_size = oem_tbl_size;
mpch->oem_table_pointer =
(uint32_t) (curraddr - startaddr + MPTABLE_BASE);
mpch->oem_table_size = (uint16_t) oem_tbl_size;
memcpy(curraddr, oem_tbl_start, oem_tbl_size);
}
mpch->base_table_length = curraddr - (char *)mpch;
mpch->base_table_length = (uint16_t) (curraddr - (char *)mpch);
mpch->checksum = mpt_compute_checksum(mpch, mpch->base_table_length);
return (0);


@ -1,5 +1,6 @@
/*-
* Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,36 +27,32 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/ata.h>
#include <sys/endian.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <inttypes.h>
#include <md5.h>
#include "bhyverun.h"
#include "pci_emul.h"
#include "ahci.h"
#include "block_if.h"
#include <sys/queue.h>
// #include <sys/endian.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/ata.h>
#include <xhyve/support/linker_set.h>
#include <xhyve/support/md5.h>
#include <xhyve/xhyve.h>
#include <xhyve/pci_emul.h>
#include <xhyve/block_if.h>
#include <xhyve/ahci.h>
#define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
@ -83,13 +80,13 @@ enum sata_fis_type {
#define PREVENT_ALLOW 0x1E
#define READ_CAPACITY 0x25
#define READ_10 0x28
#define POSITION_TO_ELEMENT 0x2B
// #define POSITION_TO_ELEMENT 0x2B
#define READ_TOC 0x43
#define GET_EVENT_STATUS_NOTIFICATION 0x4A
#define MODE_SENSE_10 0x5A
#define REPORT_LUNS 0xA0
#define READ_12 0xA8
#define READ_CD 0xBE
// #define READ_CD 0xBE
/*
* SCSI mode page codes
@ -102,18 +99,25 @@ enum sata_fis_type {
*/
#define ATA_SF_ENAB_SATA_SF 0x10
#define ATA_SATA_SF_AN 0x05
#define ATA_SF_DIS_SATA_SF 0x90
// #define ATA_SF_DIS_SATA_SF 0x90
/*
* Debug printf
*/
#ifdef AHCI_DEBUG
static FILE *dbg;
#define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
#define DPRINTF(format, ...) \
do { \
fprintf(dbg, format, __VA_ARGS__); \
fflush(dbg); \
} while(0)
#else
#define DPRINTF(format, arg...)
#define DPRINTF(format, ...)
#endif
#define WPRINTF(format, arg...) printf(format, ##arg)
#define WPRINTF(format, ...) printf(format, __VA_ARGS__)
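The debug macros above are rewritten from the GNU-specific `arg...`/`##arg` form to standard C99 variadic macros. One behavioral note: with plain __VA_ARGS__, strict C99 expects at least one argument after the format string, whereas the GNU `##` extension also removed the trailing comma when none was passed. A minimal sketch:

    #include <stdio.h>

    /* C99 form, as used above */
    #define DBG(fmt, ...) printf(fmt, __VA_ARGS__)

    int
    main(void)
    {
        DBG("%s: slot %d\n", "ahci", 3);   /* printf("%s: slot %d\n", "ahci", 3) */
        return (0);
    }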
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct ahci_ioreq {
struct blockif_req io_req;
@ -204,14 +208,15 @@ struct pci_ahci_softc {
uint32_t lintr;
struct ahci_port port[MAX_PORTS];
};
#define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
#pragma clang diagnostic pop
static void ahci_handle_port(struct ahci_port *p);
static inline void lba_to_msf(uint8_t *buf, int lba)
{
lba += 150;
buf[0] = (lba / 75) / 60;
buf[0] = (uint8_t) ((lba / 75) / 60);
buf[1] = (lba / 75) % 60;
buf[2] = lba % 75;
}
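lba_to_msf() follows the CD convention that logical block 0 corresponds to 00:02:00, i.e. it sits after the 150-frame lead-in (2 seconds at 75 frames per second); the new cast only narrows the minutes byte. A worked example, assuming the helper as defined above:

    uint8_t msf[3];

    lba_to_msf(msf, 16);
    /* msf[0] == 0 (minutes), msf[1] == 2 (seconds), msf[2] == 16 (frames) */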
@ -285,7 +290,11 @@ ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
len = 20;
irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
break;
default:
case FIS_TYPE_REGH2D:
case FIS_TYPE_DMAACT:
case FIS_TYPE_DMASETUP:
case FIS_TYPE_DATA:
case FIS_TYPE_BIST:
WPRINTF("unsupported fis type %d\n", ft);
return;
}
@ -295,7 +304,7 @@ ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
}
memcpy(p->rfis + offset, fis, len);
if (irq) {
p->is |= irq;
p->is |= ((unsigned) irq);
ahci_generate_intr(p->pr_sc);
}
}
@ -321,18 +330,18 @@ ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
memset(fis, 0, sizeof(fis));
fis[0] = FIS_TYPE_SETDEVBITS;
fis[1] = (1 << 6);
fis[2] = tfd;
fis[2] = (uint8_t) tfd;
fis[3] = error;
if (fis[2] & ATA_S_ERROR) {
p->err_cfis[0] = slot;
p->err_cfis[2] = tfd;
p->err_cfis[0] = (uint8_t) slot;
p->err_cfis[2] = (uint8_t) tfd;
p->err_cfis[3] = error;
memcpy(&p->err_cfis[4], cfis + 4, 16);
} else {
*(uint32_t *)(fis + 4) = (1 << slot);
*(uint32_t *)((void *) (fis + 4)) = (1 << slot);
p->sact &= ~(1 << slot);
}
p->tfd &= ~0x77;
p->tfd &= ~((unsigned) 0x77);
p->tfd |= tfd;
ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
}
@ -378,9 +387,9 @@ ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
p->tfd = ATA_S_READY | ATA_S_DSC;
memset(fis, 0, sizeof(fis));
fis[0] = FIS_TYPE_REGD2H;
fis[1] = 0; /* No interrupt */
fis[2] = p->tfd; /* Status */
fis[3] = 0; /* No error */
fis[1] = 0; /* No interrupt */
fis[2] = (uint8_t) p->tfd; /* Status */
fis[3] = 0; /* No error */
p->ci &= ~(1 << slot);
ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
}
@ -413,7 +422,7 @@ ahci_check_stopped(struct ahci_port *p)
if (!(p->cmd & AHCI_P_CMD_ST)) {
if (p->pending == 0) {
p->ccs = 0;
p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
p->cmd &= ~((unsigned) (AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK));
p->ci = 0;
p->sact = 0;
p->waitforclear = 0;
@ -430,7 +439,7 @@ ahci_port_stop(struct ahci_port *p)
int ncq;
int error;
assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
ncq = 0;
TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
/*
@ -445,7 +454,9 @@ ahci_port_stop(struct ahci_port *p)
if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
cfis[2] == ATA_READ_FPDMA_QUEUED ||
cfis[2] == ATA_SEND_FPDMA_QUEUED)
{
ncq = 1;
}
if (ncq)
p->sact &= ~(1 << slot);
@ -530,7 +541,7 @@ ata_string(uint8_t *dest, const char *src, int len)
for (i = 0; i < len; i++) {
if (*src)
dest[i ^ 1] = *src++;
dest[i ^ 1] = (uint8_t) *src++;
else
dest[i ^ 1] = ' ';
}
@ -543,7 +554,7 @@ atapi_string(uint8_t *dest, const char *src, int len)
for (i = 0; i < len; i++) {
if (*src)
dest[i] = *src++;
dest[i] = (uint8_t) *src++;
else
dest[i] = ' ';
}
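ata_string() fills ATA IDENTIFY string fields in the swapped-pair, space-padded layout the spec calls for (each 16-bit word carries two characters with the bytes exchanged); atapi_string() is the straight, unswapped variant. The added casts only silence char-to-uint8_t narrowing warnings. A worked example, assuming ata_string() as shown:

    uint8_t id[6];

    ata_string(id, "xhyve", 6);
    /* id[] == { 'h', 'x', 'v', 'y', ' ', 'e' } */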
@ -561,22 +572,23 @@ ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
uint32_t dbcsz;
/* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
skip = aior->done;
left = aior->len - aior->done;
skip = (int) aior->done;
left = (int) (aior->len - aior->done);
todo = 0;
for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
i++, prdt++) {
dbcsz = (prdt->dbc & DBCMASK) + 1;
/* Skip already done part of the PRDT */
if (dbcsz <= skip) {
if (dbcsz <= ((uint32_t) skip)) {
skip -= dbcsz;
continue;
}
dbcsz -= skip;
if (dbcsz > left)
dbcsz = left;
breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
prdt->dba + skip, dbcsz);
dbcsz -= ((unsigned) skip);
if (dbcsz > ((uint32_t) left)) {
dbcsz = ((uint32_t) left);
}
breq->br_iov[j].iov_base =
paddr_guest2host((prdt->dba + ((uint64_t) skip)), dbcsz);
breq->br_iov[j].iov_len = dbcsz;
todo += dbcsz;
left -= dbcsz;
@ -590,8 +602,8 @@ ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
todo -= extra;
assert(todo > 0);
while (extra > 0) {
if (breq->br_iov[j - 1].iov_len > extra) {
breq->br_iov[j - 1].iov_len -= extra;
if (breq->br_iov[j - 1].iov_len > ((size_t) extra)) {
breq->br_iov[j - 1].iov_len -= ((size_t) extra);
break;
}
extra -= breq->br_iov[j - 1].iov_len;
@ -601,7 +613,7 @@ ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
breq->br_iovcnt = j;
breq->br_resid = todo;
aior->done += todo;
aior->done += ((unsigned) todo);
aior->more = (aior->done < aior->len && i < prdtl);
}
@ -616,8 +628,8 @@ ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
uint32_t len;
int err, first, ncq, readop;
prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
ncq = 0;
readop = 1;
first = (done == 0);
@ -636,7 +648,7 @@ ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
((uint64_t)cfis[6] << 16) |
((uint64_t)cfis[5] << 8) |
cfis[4];
len = cfis[11] << 8 | cfis[3];
len = (uint32_t) (cfis[11] << 8 | cfis[3]);
if (!len)
len = 65536;
ncq = 1;
@ -649,18 +661,18 @@ ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
((uint64_t)cfis[6] << 16) |
((uint64_t)cfis[5] << 8) |
cfis[4];
len = cfis[13] << 8 | cfis[12];
len = (uint32_t) (cfis[13] << 8 | cfis[12]);
if (!len)
len = 65536;
} else {
lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
(cfis[5] << 8) | cfis[4];
lba = (uint64_t) (((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
(cfis[5] << 8) | cfis[4]);
len = cfis[12];
if (!len)
len = 256;
}
lba *= blockif_sectsz(p->bctx);
len *= blockif_sectsz(p->bctx);
lba *= (uint64_t) blockif_sectsz(p->bctx);
len *= (uint32_t) blockif_sectsz(p->bctx);
/* Pull request off free list */
aior = STAILQ_FIRST(&p->iofhd);
@ -672,7 +684,7 @@ ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
aior->len = len;
aior->done = done;
breq = &aior->io_req;
breq->br_offset = lba + done;
breq->br_offset = (off_t) (lba + done);
ahci_build_iov(p, aior, prdt, hdr->prdtl);
/* Mark this command in-flight. */
@ -734,21 +746,21 @@ read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
void *to;
int i, len;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
len = size;
to = buf;
prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
for (i = 0; i < hdr->prdtl && len; i++) {
uint8_t *ptr;
uint32_t dbcsz;
int sublen;
dbcsz = (prdt->dbc & DBCMASK) + 1;
ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
sublen = len < dbcsz ? len : dbcsz;
ptr = paddr_guest2host(prdt->dba, dbcsz);
sublen = ((len < ((int) dbcsz)) ? len : ((int) dbcsz));
memcpy(to, ptr, sublen);
len -= sublen;
to += sublen;
to = (uint8_t *) (((uintptr_t) to) + ((uintptr_t) sublen));
prdt++;
}
}
@ -766,11 +778,11 @@ ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done
first = (done == 0);
if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
len = (uint16_t)cfis[13] << 8 | cfis[12];
len = (uint32_t) ((((uint16_t) cfis[13]) << 8) | cfis[12]);
len *= 512;
ncq = 0;
} else { /* ATA_SEND_FPDMA_QUEUED */
len = (uint16_t)cfis[11] << 8 | cfis[3];
len = (uint32_t) ((((uint16_t) cfis[11]) << 8) | cfis[3]);
len *= 512;
ncq = 1;
}
@ -784,7 +796,7 @@ next:
((uint64_t)entry[2] << 16) |
((uint64_t)entry[1] << 8) |
entry[0];
elen = (uint16_t)entry[7] << 8 | entry[6];
elen = (uint32_t) ((((uint16_t) entry[7]) << 8) | entry[6]);
done += 8;
if (elen == 0) {
if (done >= len) {
@ -811,8 +823,8 @@ next:
aior->more = (len != done);
breq = &aior->io_req;
breq->br_offset = elba * blockif_sectsz(p->bctx);
breq->br_resid = elen * blockif_sectsz(p->bctx);
breq->br_offset = (off_t) (elba * ((uint64_t) blockif_sectsz(p->bctx)));
breq->br_resid = elen * ((unsigned) blockif_sectsz(p->bctx));
/*
* Mark this command in-flight.
@ -840,24 +852,24 @@ write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
void *from;
int i, len;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
len = size;
from = buf;
prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
for (i = 0; i < hdr->prdtl && len; i++) {
uint8_t *ptr;
uint32_t dbcsz;
int sublen;
dbcsz = (prdt->dbc & DBCMASK) + 1;
ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
sublen = len < dbcsz ? len : dbcsz;
ptr = paddr_guest2host(prdt->dba, dbcsz);
sublen = (len < ((int) dbcsz)) ? len : ((int) dbcsz);
memcpy(ptr, from, sublen);
len -= sublen;
from += sublen;
from = (void *) (((uintptr_t) from) + ((uintptr_t) sublen));
prdt++;
}
hdr->prdbc = size - len;
hdr->prdbc = (uint32_t) (size - len);
}
static void
@ -868,7 +880,7 @@ ahci_checksum(uint8_t *buf, int size)
for (i = 0; i < size - 1; i++)
sum += buf[i];
buf[size - 1] = 0x100 - sum;
buf[size - 1] = (uint8_t) (0x100 - sum);
}
static void
@ -877,7 +889,7 @@ ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
struct ahci_cmd_hdr *hdr;
uint8_t buf[512];
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
ahci_write_fis_d2h(p, slot, cfis,
@ -900,7 +912,7 @@ handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
{
struct ahci_cmd_hdr *hdr;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
if (p->atapi || hdr->prdtl == 0) {
ahci_write_fis_d2h(p, slot, cfis,
(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
@ -914,7 +926,7 @@ handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
ro = blockif_is_ro(p->bctx);
candelete = blockif_candelete(p->bctx);
sectsz = blockif_sectsz(p->bctx);
sectors = blockif_size(p->bctx) / sectsz;
sectors = (uint64_t) (blockif_size(p->bctx) / sectsz);
blockif_chs(p->bctx, &cyl, &heads, &sech);
blockif_psectsz(p->bctx, &psectsz, &psectoff);
memset(buf, 0, sizeof(buf));
@ -931,10 +943,10 @@ handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
buf[50] = (1 << 14);
buf[53] = (1 << 1 | 1 << 2);
if (p->mult_sectors)
buf[59] = (0x100 | p->mult_sectors);
buf[59] = (uint16_t) (0x100 | p->mult_sectors);
if (sectors <= 0x0fffffff) {
buf[60] = sectors;
buf[61] = (sectors >> 16);
buf[60] = (uint16_t) sectors;
buf[61] = (uint16_t)(sectors >> 16);
} else {
buf[60] = 0xffff;
buf[61] = 0x0fff;
@ -968,9 +980,9 @@ handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
buf[88] = 0x7f;
if (p->xfermode & ATA_UDMA0)
buf[88] |= (1 << ((p->xfermode & 7) + 8));
buf[100] = sectors;
buf[101] = (sectors >> 16);
buf[102] = (sectors >> 32);
buf[100] = (uint16_t) sectors;
buf[101] = (uint16_t) (sectors >> 16);
buf[102] = (uint16_t) (sectors >> 32);
buf[103] = (sectors >> 48);
if (candelete && !ro) {
buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
@ -986,8 +998,8 @@ handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
}
if (sectsz > 512) {
buf[106] |= 0x1000;
buf[117] = sectsz / 2;
buf[118] = ((sectsz / 2) >> 16);
buf[117] = (uint16_t) (sectsz / 2);
buf[118] = (uint16_t) ((sectsz / 2) >> 16);
}
buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
@ -1070,7 +1082,7 @@ atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
} else {
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x24;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
ahci_write_fis_d2h(p, slot, cfis, tfd);
return;
@ -1097,14 +1109,52 @@ atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
}
static __inline void
be16enc(void *pp, uint16_t u)
{
unsigned char *p = (unsigned char *)pp;
p[0] = (u >> 8) & 0xff;
p[1] = u & 0xff;
}
static __inline uint16_t
be16dec(const void *pp)
{
unsigned char const *p = (unsigned char const *)pp;
return ((uint16_t) ((((uint32_t) p[0]) << 8) | ((uint32_t) p[1])));
}
static __inline void
be32enc(void *pp, uint32_t u)
{
unsigned char *p = (unsigned char *)pp;
p[0] = (u >> 24) & 0xff;
p[1] = (u >> 16) & 0xff;
p[2] = (u >> 8) & 0xff;
p[3] = u & 0xff;
}
static __inline uint32_t
be32dec(const void *pp)
{
unsigned char const *p = (unsigned char const *)pp;
return (uint32_t) ((((uint64_t) p[0]) << 24) |
(((uint64_t) p[1]) << 16) | (((uint64_t) p[2]) << 8) |
((uint64_t) p[3]));
}
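The be16enc/be16dec/be32enc/be32dec helpers added above stand in for <sys/endian.h>, which is commented out earlier because OS X does not ship it. A quick round-trip sketch with a hypothetical value:

    uint8_t buf[4];
    uint32_t v;

    be32enc(buf, 0x12345678);
    /* buf[] == { 0x12, 0x34, 0x56, 0x78 } -- most significant byte first */
    v = be32dec(buf);   /* v == 0x12345678 */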
static void
atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
{
uint8_t buf[8];
uint64_t sectors;
sectors = blockif_size(p->bctx) / 2048;
be32enc(buf, sectors - 1);
sectors = (uint64_t) (blockif_size(p->bctx) / 2048);
be32enc(buf, ((uint32_t) (sectors - 1)));
be32enc(buf + 4, 2048);
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
write_prdt(p, slot, cfis, buf, sizeof(buf));
@ -1135,7 +1185,7 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
uint32_t tfd;
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x24;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
ahci_write_fis_d2h(p, slot, cfis, tfd);
return;
@ -1163,18 +1213,18 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
*bp++ = 0x14;
*bp++ = 0xaa;
*bp++ = 0;
sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
sectors = (uint64_t) (blockif_size(p->bctx) / blockif_sectsz(p->bctx));
sectors >>= 2;
if (msf) {
*bp++ = 0;
lba_to_msf(bp, sectors);
lba_to_msf(bp, ((int) sectors));
bp += 3;
} else {
be32enc(bp, sectors);
be32enc(bp, ((uint32_t) sectors));
bp += 4;
}
size = bp - buf;
be16enc(buf, size - 2);
size = (int) (bp - buf);
be16enc(buf, ((uint16_t) (size - 2)));
if (len > size)
len = size;
write_prdt(p, slot, cfis, buf, len);
@ -1190,7 +1240,7 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
buf[1] = 0xa;
buf[2] = 0x1;
buf[3] = 0x1;
if (len > sizeof(buf))
if (((size_t) len) > sizeof(buf))
len = sizeof(buf);
write_prdt(p, slot, cfis, buf, len);
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
@ -1240,14 +1290,14 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
*bp++ = 0;
*bp++ = 0;
*bp++ = 0;
sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
sectors = (uint64_t) (blockif_size(p->bctx) / blockif_sectsz(p->bctx));
sectors >>= 2;
if (msf) {
*bp++ = 0;
lba_to_msf(bp, sectors);
lba_to_msf(bp, ((int) sectors));
bp += 3;
} else {
be32enc(bp, sectors);
be32enc(bp, ((uint32_t) sectors));
bp += 4;
}
@ -1269,8 +1319,8 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
*bp++ = 0;
}
size = bp - buf;
be16enc(buf, size - 2);
size = (int) (bp - buf);
be16enc(buf, ((uint16_t) (size - 2)));
if (len > size)
len = size;
write_prdt(p, slot, cfis, buf, len);
@ -1284,7 +1334,7 @@ atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x24;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
ahci_write_fis_d2h(p, slot, cfis, tfd);
break;
@ -1320,8 +1370,8 @@ atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
sc = p->pr_sc;
acmd = cfis + 0x40;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
lba = be32dec(acmd + 2);
if (acmd[0] == READ_10)
@ -1346,7 +1396,7 @@ atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
aior->len = len;
aior->done = done;
breq = &aior->io_req;
breq->br_offset = lba + done;
breq->br_offset = (off_t) (lba + ((uint64_t) done));
ahci_build_iov(p, aior, prdt, hdr->prdtl);
/* Mark this command in-flight. */
@ -1368,7 +1418,7 @@ atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
acmd = cfis + 0x40;
len = acmd[4];
if (len > sizeof(buf))
if (((size_t) len) > sizeof(buf))
len = sizeof(buf);
memset(buf, 0, len);
buf[0] = 0x70 | (1 << 7);
@ -1386,6 +1436,8 @@ atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
uint8_t *acmd = cfis + 0x40;
uint32_t tfd;
tfd = 0;
switch (acmd[4] & 3) {
case 0:
case 1:
@ -1398,7 +1450,7 @@ atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x53;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
break;
}
ahci_write_fis_d2h(p, slot, cfis, tfd);
@ -1412,6 +1464,8 @@ atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
uint8_t pc, code;
int len;
tfd = 0;
acmd = cfis + 0x40;
len = be16dec(acmd + 7);
pc = acmd[2] >> 6;
@ -1424,8 +1478,9 @@ atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
{
uint8_t buf[16];
if (len > sizeof(buf))
if (((size_t) len) > sizeof(buf)) {
len = sizeof(buf);
}
memset(buf, 0, sizeof(buf));
be16enc(buf, 16 - 2);
@ -1441,8 +1496,9 @@ atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
{
uint8_t buf[30];
if (len > sizeof(buf))
if (((size_t) len) > sizeof(buf)) {
len = sizeof(buf);
}
memset(buf, 0, sizeof(buf));
be16enc(buf, 30 - 2);
@ -1459,20 +1515,19 @@ atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
}
default:
goto error;
break;
}
break;
case 3:
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x39;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
break;
error:
case 1:
case 2:
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x24;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
break;
}
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
@ -1492,14 +1547,15 @@ atapi_get_event_status_notification(struct ahci_port *p, int slot,
if (!(acmd[1] & 1)) {
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x24;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
} else {
uint8_t buf[8];
int len;
len = be16dec(acmd + 7);
if (len > sizeof(buf))
if (((size_t) len) > sizeof(buf)) {
len = sizeof(buf);
}
memset(buf, 0, sizeof(buf));
be16enc(buf, 8 - 2);
@ -1572,8 +1628,8 @@ handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x20;
ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
ATA_S_READY | ATA_S_ERROR);
ahci_write_fis_d2h(p, slot, cfis, ((uint32_t) (p->sense_key << 12)) |
((uint32_t) (ATA_S_READY | ATA_S_ERROR)));
break;
}
}
@ -1722,11 +1778,11 @@ ahci_handle_slot(struct ahci_port *p, int slot)
int cfl;
sc = p->pr_sc;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
cfl = (hdr->flags & 0x1f) * 4;
cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
cfis = paddr_guest2host(hdr->ctba,
0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
#ifdef AHCI_DEBUG
DPRINTF("\ncfis:");
@ -1778,9 +1834,9 @@ ahci_handle_port(struct ahci_port *p)
if (p->waitforclear)
break;
if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
p->cmd &= ~AHCI_P_CMD_CCS_MASK;
p->cmd &= ~((unsigned) AHCI_P_CMD_CCS_MASK);
p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
ahci_handle_slot(p, p->ccs);
ahci_handle_slot(p, ((int) p->ccs));
}
}
}
@ -1808,7 +1864,7 @@ ata_ioreq_cb(struct blockif_req *br, int err)
cfis = aior->cfis;
slot = aior->slot;
sc = p->pr_sc;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
cfis[2] == ATA_READ_FPDMA_QUEUED ||
@ -1881,7 +1937,8 @@ atapi_ioreq_cb(struct blockif_req *br, int err)
cfis = aior->cfis;
slot = aior->slot;
sc = p->pr_sc;
hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
hdr = (struct ahci_cmd_hdr *)
((void *) (p->cmd_lst + aior->slot * AHCI_CL_SIZE));
pthread_mutex_lock(&sc->mtx);
@ -1908,7 +1965,7 @@ atapi_ioreq_cb(struct blockif_req *br, int err)
} else {
p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
p->asc = 0x21;
tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
}
cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
ahci_write_fis_d2h(p, slot, cfis, tfd);
@ -1932,7 +1989,7 @@ pci_ahci_ioreq_init(struct ahci_port *pr)
int i;
pr->ioqsz = blockif_queuesz(pr->bctx);
pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
pr->ioreq = calloc(((size_t) pr->ioqsz), sizeof(struct ahci_ioreq));
STAILQ_INIT(&pr->iofhd);
/*
@ -1955,7 +2012,7 @@ pci_ahci_ioreq_init(struct ahci_port *pr)
static void
pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
{
int port = (offset - AHCI_OFFSET) / AHCI_STEP;
int port = (int) ((offset - AHCI_OFFSET) / AHCI_STEP);
offset = (offset - AHCI_OFFSET) % AHCI_STEP;
struct ahci_port *p = &sc->port[port];
@ -1964,16 +2021,16 @@ pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
switch (offset) {
case AHCI_P_CLB:
p->clb = value;
p->clb = (uint32_t) value;
break;
case AHCI_P_CLBU:
p->clbu = value;
p->clbu = (uint32_t) value;
break;
case AHCI_P_FB:
p->fb = value;
p->fb = (uint32_t) value;
break;
case AHCI_P_FBU:
p->fbu = value;
p->fbu = (uint32_t) value;
break;
case AHCI_P_IS:
p->is &= ~value;
@ -2000,8 +2057,7 @@ pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
p->cmd |= AHCI_P_CMD_CR;
clb = (uint64_t)p->clbu << 32 | p->clb;
p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
AHCI_CL_SIZE * AHCI_MAX_SLOTS);
p->cmd_lst = paddr_guest2host(clb, AHCI_CL_SIZE * AHCI_MAX_SLOTS);
}
if (value & AHCI_P_CMD_FRE) {
@ -2010,14 +2066,14 @@ pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
p->cmd |= AHCI_P_CMD_FR;
fb = (uint64_t)p->fbu << 32 | p->fb;
/* we don't support FBSCP, so rfis size is 256Bytes */
p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
p->rfis = paddr_guest2host(fb, 256);
} else {
p->cmd &= ~AHCI_P_CMD_FR;
p->cmd &= ~((unsigned) AHCI_P_CMD_FR);
}
if (value & AHCI_P_CMD_CLO) {
p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
p->cmd &= ~AHCI_P_CMD_CLO;
p->tfd &= ~((unsigned) (ATA_S_BUSY | ATA_S_DRQ));
p->cmd &= ~((unsigned) AHCI_P_CMD_CLO);
}
if (value & AHCI_P_CMD_ICC_MASK) {
@ -2033,7 +2089,7 @@ pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
break;
case AHCI_P_SCTL:
p->sctl = value;
p->sctl = (uint32_t) value;
if (!(p->cmd & AHCI_P_CMD_ST)) {
if (value & ATA_SC_DET_RESET)
ahci_port_reset(p);
@ -2087,8 +2143,8 @@ pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
}
static void
pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int baridx, uint64_t offset, int size, uint64_t value)
pci_ahci_write(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value)
{
struct pci_ahci_softc *sc = pi->pi_arg;
@ -2099,7 +2155,7 @@ pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
if (offset < AHCI_OFFSET)
pci_ahci_host_write(sc, offset, value);
else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
else if (offset < ((uint64_t) (AHCI_OFFSET + (sc->ports * AHCI_STEP))))
pci_ahci_port_write(sc, offset, value);
else
WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
@ -2143,7 +2199,7 @@ static uint64_t
pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
{
uint32_t value;
int port = (offset - AHCI_OFFSET) / AHCI_STEP;
int port = (int) ((offset - AHCI_OFFSET) / AHCI_STEP);
offset = (offset - AHCI_OFFSET) % AHCI_STEP;
switch (offset) {
@ -2181,8 +2237,8 @@ pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
}
static uint64_t
pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t regoff, int size)
pci_ahci_read(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
uint64_t regoff, int size)
{
struct pci_ahci_softc *sc = pi->pi_arg;
uint64_t offset;
@ -2190,15 +2246,16 @@ pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
assert(baridx == 5);
assert(size == 1 || size == 2 || size == 4);
assert((regoff & (size - 1)) == 0);
assert((regoff & ((uint64_t) (size - 1))) == 0);
pthread_mutex_lock(&sc->mtx);
offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
/* round down to a multiple of 4 bytes */
offset = regoff & ~((uint64_t) 0x3);
if (offset < AHCI_OFFSET)
value = pci_ahci_host_read(sc, offset);
else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
value = pci_ahci_port_read(sc, offset);
value = (uint32_t) pci_ahci_host_read(sc, offset);
else if (offset < ((uint64_t) (AHCI_OFFSET + (sc->ports * AHCI_STEP))))
value = (uint32_t) pci_ahci_port_read(sc, offset);
else {
value = 0;
WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
@ -2212,7 +2269,7 @@ pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
}
static int
pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
pci_ahci_init(struct pci_devinst *pi, char *opts, int atapi)
{
char bident[sizeof("XX:X:X")];
struct blockif_ctxt *bctxt;
@ -2261,7 +2318,7 @@ pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
* md5 sum of the filename
*/
MD5Init(&mdctx);
MD5Update(&mdctx, opts, strlen(opts));
MD5Update(&mdctx, opts, ((unsigned int) strlen(opts)));
MD5Final(digest, &mdctx);
sprintf(sc->port[0].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
@ -2283,7 +2340,8 @@ pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
(slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
(((unsigned) slots) << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS |
(((unsigned) sc->ports) - 1);
/* Only port 0 implemented */
sc->pi = 1;
@ -2298,7 +2356,7 @@ pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
pci_emul_add_msicap(pi, 1);
pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
AHCI_OFFSET + sc->ports * AHCI_STEP);
((uint64_t) (AHCI_OFFSET + sc->ports * AHCI_STEP)));
pci_lintr_request(pi);
@ -2313,23 +2371,21 @@ open_fail:
}
static int
pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
pci_ahci_hd_init(struct pci_devinst *pi, char *opts)
{
return (pci_ahci_init(ctx, pi, opts, 0));
return (pci_ahci_init(pi, opts, 0));
}
static int
pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
pci_ahci_atapi_init(struct pci_devinst *pi, char *opts)
{
return (pci_ahci_init(ctx, pi, opts, 1));
return (pci_ahci_init(pi, opts, 1));
}
/*
* Use separate emulation names to distinguish drive and atapi devices
*/
struct pci_devemu pci_de_ahci_hd = {
static struct pci_devemu pci_de_ahci_hd = {
.pe_emu = "ahci-hd",
.pe_init = pci_ahci_hd_init,
.pe_barwrite = pci_ahci_write,
@ -2337,7 +2393,7 @@ struct pci_devemu pci_de_ahci_hd = {
};
PCI_EMUL_SET(pci_de_ahci_hd);
struct pci_devemu pci_de_ahci_cd = {
static struct pci_devemu pci_de_ahci_cd = {
.pe_emu = "ahci-cd",
.pe_init = pci_ahci_atapi_init,
.pe_barwrite = pci_ahci_write,


@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,53 +27,47 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/errno.h>
#include <ctype.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <errno.h>
#include <assert.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include <xhyve/support/linker_set.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/acpi.h>
#include <xhyve/xhyve.h>
#include <xhyve/inout.h>
#include <xhyve/ioapic.h>
#include <xhyve/mem.h>
#include <xhyve/pci_emul.h>
#include <xhyve/pci_irq.h>
#include <xhyve/pci_lpc.h>
#include "acpi.h"
#include "bhyverun.h"
#include "inout.h"
#include "ioapic.h"
#include "mem.h"
#include "pci_emul.h"
#include "pci_irq.h"
#include "pci_lpc.h"
#define CONF1_ADDR_PORT 0x0cf8
#define CONF1_DATA_PORT0 0x0cfc
#define CONF1_DATA_PORT1 0x0cfd
#define CONF1_DATA_PORT2 0x0cfe
#define CONF1_DATA_PORT3 0x0cff
#define CONF1_ADDR_PORT 0x0cf8
#define CONF1_DATA_PORT 0x0cfc
#define CONF1_ENABLE 0x80000000ul
#define CONF1_ENABLE 0x80000000ul
#define MAXBUSES (PCI_BUSMAX + 1)
#define MAXSLOTS (PCI_SLOTMAX + 1)
#define MAXFUNCS (PCI_FUNCMAX + 1)
#define MAXBUSES (PCI_BUSMAX + 1)
#define MAXSLOTS (PCI_SLOTMAX + 1)
#define MAXFUNCS (PCI_FUNCMAX + 1)
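The CONF1_ADDR_PORT/CONF1_DATA_PORT constants implement PCI configuration mechanism #1: the guest writes an address word with an enable bit plus bus/slot/function/register to 0xCF8 and then accesses the selected dword through 0xCFC. A sketch of how such an address word is typically composed (illustrative helper, not a function from this file):

    #include <stdint.h>

    static uint32_t
    cf8_address(uint32_t bus, uint32_t slot, uint32_t func, uint32_t reg)
    {
        return (0x80000000u |            /* CONF1_ENABLE */
            ((bus  & 0xff) << 16) |
            ((slot & 0x1f) << 11) |
            ((func & 0x07) <<  8) |
            (reg   & 0xfc));             /* dword-aligned register offset */
    }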
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct funcinfo {
char *fi_name;
char *fi_param;
char *fi_name;
char *fi_param;
struct pci_devinst *fi_devi;
};
struct intxinfo {
int ii_count;
int ii_pirq_pin;
int ii_ioapic_irq;
int ii_count;
int ii_pirq_pin;
int ii_ioapic_irq;
};
struct slotinfo {
@ -81,11 +76,12 @@ struct slotinfo {
};
struct businfo {
uint16_t iobase, iolimit; /* I/O window */
uint32_t membase32, memlimit32; /* mmio window below 4GB */
uint64_t membase64, memlimit64; /* mmio window above 4GB */
uint16_t iobase, iolimit; /* I/O window */
uint32_t membase32, memlimit32; /* mmio window below 4GB */
uint64_t membase64, memlimit64; /* mmio window above 4GB */
struct slotinfo slotinfo[MAXSLOTS];
};
#pragma clang diagnostic pop
static struct businfo *pci_businfo[MAXBUSES];
@ -110,17 +106,17 @@ SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE);
static struct pci_devemu *pci_emul_finddev(char *name);
static void pci_lintr_route(struct pci_devinst *pi);
static void pci_lintr_update(struct pci_devinst *pi);
static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot,
int func, int coff, int bytes, uint32_t *val);
static void pci_cfgrw(int vcpu, int in, int bus, int slot, int func, int coff,
int bytes, uint32_t *val);
static __inline void
CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes)
{
if (bytes == 1)
pci_set_cfgdata8(pi, coff, val);
pci_set_cfgdata8(pi, coff, ((uint8_t) val));
else if (bytes == 2)
pci_set_cfgdata16(pi, coff, val);
pci_set_cfgdata16(pi, coff, ((uint16_t) val));
else
pci_set_cfgdata32(pi, coff, val);
}
@ -159,7 +155,6 @@ CFGREAD(struct pci_devinst *pi, int coff, int bytes)
static void
pci_parse_slot_usage(char *aopt)
{
fprintf(stderr, "Invalid PCI slot info field \"%s\"\n", aopt);
}
@ -242,7 +237,7 @@ pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset)
if (offset < pi->pi_msix.pba_offset)
return (0);
if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) {
if (offset >= pi->pi_msix.pba_offset + ((unsigned) pi->pi_msix.pba_size)) {
return (0);
}
@ -264,7 +259,7 @@ pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
/*
* Return if table index is beyond what device supports
*/
tab_index = offset / MSIX_TABLE_ENTRY_SIZE;
tab_index = (int) (offset / MSIX_TABLE_ENTRY_SIZE);
if (tab_index >= pi->pi_msix.table_count)
return (-1);
@ -278,9 +273,9 @@ pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
dest += msix_entry_offset;
if (size == 4)
*((uint32_t *)dest) = value;
*((uint32_t *)((void *) dest)) = (uint32_t) value;
else
*((uint64_t *)dest) = value;
*((uint64_t *)((void *) dest)) = value;
return (0);
}
@ -291,7 +286,7 @@ pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size)
char *dest;
int msix_entry_offset;
int tab_index;
uint64_t retval = ~0;
uint64_t retval = ~((uint64_t) 0);
/*
* The PCI standard only allows 4 and 8 byte accesses to the MSI-X
@ -308,7 +303,7 @@ pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size)
return (retval);
}
tab_index = offset / MSIX_TABLE_ENTRY_SIZE;
tab_index = (int) (offset / MSIX_TABLE_ENTRY_SIZE);
if (tab_index < pi->pi_msix.table_count) {
/* valid MSI-X Table access */
@ -316,11 +311,11 @@ pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size)
dest += msix_entry_offset;
if (size == 1)
retval = *((uint8_t *)dest);
retval = *((uint8_t *)((void *) dest));
else if (size == 4)
retval = *((uint32_t *)dest);
retval = *((uint32_t *)((void *) dest));
else
retval = *((uint64_t *)dest);
retval = *((uint64_t *)((void *) dest));
} else if (pci_valid_pba_offset(pi, offset)) {
/* return 0 for PBA access */
retval = 0;
@ -350,8 +345,8 @@ pci_msix_pba_bar(struct pci_devinst *pi)
}
static int
pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
pci_emul_io_handler(int vcpu, int in, int port, int bytes, uint32_t *eax,
void *arg)
{
struct pci_devinst *pdi = arg;
struct pci_devemu *pe = pdi->pi_d;
@ -359,16 +354,17 @@ pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
int i;
for (i = 0; i <= PCI_BARMAX; i++) {
if (pdi->pi_bar[i].type == PCIBAR_IO &&
port >= pdi->pi_bar[i].addr &&
port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) {
offset = port - pdi->pi_bar[i].addr;
if ((pdi->pi_bar[i].type == PCIBAR_IO) &&
(((uint64_t) port) >= pdi->pi_bar[i].addr) &&
(((uint64_t) (port + bytes)) <=
(pdi->pi_bar[i].addr + pdi->pi_bar[i].size)))
{
offset = ((uint64_t) port) - pdi->pi_bar[i].addr;
if (in)
*eax = (*pe->pe_barread)(ctx, vcpu, pdi, i,
offset, bytes);
*eax = (uint32_t) (*pe->pe_barread)(vcpu, pdi, i, offset,
bytes);
else
(*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset,
bytes, *eax);
(*pe->pe_barwrite)(vcpu, pdi, i, offset, bytes, *eax);
return (0);
}
}
@ -376,7 +372,7 @@ pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
}
static int
pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
pci_emul_mem_handler(int vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2)
{
struct pci_devinst *pdi = arg1;
@ -385,32 +381,27 @@ pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
int bidx = (int) arg2;
assert(bidx <= PCI_BARMAX);
assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 ||
pdi->pi_bar[bidx].type == PCIBAR_MEM64);
assert(addr >= pdi->pi_bar[bidx].addr &&
addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size);
assert((pdi->pi_bar[bidx].type == PCIBAR_MEM32) ||
(pdi->pi_bar[bidx].type == PCIBAR_MEM64));
assert((addr >= pdi->pi_bar[bidx].addr) &&
((addr + ((uint64_t) size)) <=
(pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size)));
offset = addr - pdi->pi_bar[bidx].addr;
if (dir == MEM_F_WRITE) {
if (size == 8) {
(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset,
4, *val & 0xffffffff);
(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset + 4,
4, *val >> 32);
(*pe->pe_barwrite)(vcpu, pdi, bidx, offset, 4, *val & 0xffffffff);
(*pe->pe_barwrite)(vcpu, pdi, bidx, offset + 4, 4, *val >> 32);
} else {
(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset,
size, *val);
(*pe->pe_barwrite)(vcpu, pdi, bidx, offset, size, *val);
}
} else {
if (size == 8) {
*val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
offset, 4);
*val |= (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
offset + 4, 4) << 32;
*val = (*pe->pe_barread)(vcpu, pdi, bidx, offset, 4);
*val |= (*pe->pe_barread)(vcpu, pdi, bidx, offset + 4, 4) << 32;
} else {
*val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
offset, size);
*val = (*pe->pe_barread)(vcpu, pdi, bidx, offset, size);
}
}
@ -459,8 +450,8 @@ modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
case PCIBAR_IO:
bzero(&iop, sizeof(struct inout_port));
iop.name = pi->pi_name;
iop.port = pi->pi_bar[idx].addr;
iop.size = pi->pi_bar[idx].size;
iop.port = (int) pi->pi_bar[idx].addr;
iop.size = (int)pi->pi_bar[idx].size;
if (registration) {
iop.flags = IOPORT_F_INOUT;
iop.handler = pci_emul_io_handler;
@ -484,7 +475,8 @@ modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
} else
error = unregister_mem(&mr);
break;
default:
case PCIBAR_NONE:
case PCIBAR_MEMHI64:
error = EINVAL;
break;
}
@ -576,8 +568,11 @@ pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase,
assert(idx >= 0 && idx <= PCI_BARMAX);
addr = 0;
limit = 0;
if ((size & (size - 1)) != 0)
size = 1UL << flsl(size); /* round up to a power of 2 */
size = 1UL << flsl((long) size); /* round up to a power of 2 */
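The rounding above only runs when size is not already a power of two; flsl() (from <strings.h> on BSD-derived systems) returns the 1-based index of the highest set bit, so shifting 1 left by that index yields the next power of two. A worked example with a hypothetical BAR size:

    #include <strings.h>   /* flsl() */

    uint64_t size = 0x3000;                  /* 12 KiB, not a power of two */

    if ((size & (size - 1)) != 0)
        size = 1UL << flsl((long) size);     /* flsl(0x3000) == 14 -> 0x4000 (16 KiB) */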
/* Enforce minimum BAR sizes required by the PCI standard */
if (type == PCIBAR_IO) {
@ -633,7 +628,7 @@ pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase,
mask = PCIM_BAR_MEM_BASE;
lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
break;
default:
case PCIBAR_MEMHI64:
printf("pci_emul_alloc_base: invalid bar type %d\n", type);
assert(0);
}
@ -650,7 +645,7 @@ pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase,
/* Initialize the BAR register in config space */
bar = (addr & mask) | lobits;
pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar);
pci_set_cfgdata32(pdi, PCIR_BAR(idx), ((uint32_t) bar));
if (type == PCIBAR_MEM64) {
assert(idx + 1 <= PCI_BARMAX);
@ -686,10 +681,10 @@ pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
/* Set the previous capability pointer */
if ((sts & PCIM_STATUS_CAPPRESENT) == 0) {
pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff);
pci_set_cfgdata8(pi, PCIR_CAP_PTR, ((uint8_t) capoff));
pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT);
} else
pci_set_cfgdata8(pi, pi->pi_prevcap + 1, capoff);
pci_set_cfgdata8(pi, pi->pi_prevcap + 1, ((uint8_t) capoff));
/* Copy the capability */
for (i = 0; i < caplen; i++)
@ -719,7 +714,7 @@ pci_emul_finddev(char *name)
}
static int
pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot,
pci_emul_init(struct pci_devemu *pde, int bus, int slot,
int func, struct funcinfo *fi)
{
struct pci_devinst *pdi;
@ -727,10 +722,9 @@ pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot,
pdi = calloc(1, sizeof(struct pci_devinst));
pdi->pi_vmctx = ctx;
pdi->pi_bus = bus;
pdi->pi_slot = slot;
pdi->pi_func = func;
pdi->pi_bus = (uint8_t) bus;
pdi->pi_slot = (uint8_t) slot;
pdi->pi_func = (uint8_t) func;
pthread_mutex_init(&pdi->pi_lintr.lock, NULL);
pdi->pi_lintr.pin = 0;
pdi->pi_lintr.state = IDLE;
@ -746,7 +740,7 @@ pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot,
pci_set_cfgdata8(pdi, PCIR_COMMAND,
PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
err = (*pde->pe_init)(ctx, pdi, fi->fi_param);
err = (*pde->pe_init)(pdi, fi->fi_param);
if (err == 0)
fi->fi_devi = pdi;
else
@ -768,8 +762,8 @@ pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr)
bzero(msicap, sizeof(struct msicap));
msicap->capid = PCIY_MSI;
msicap->nextptr = nextptr;
msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1);
msicap->nextptr = (uint8_t) nextptr;
msicap->msgctrl = (uint16_t) (PCIM_MSICTRL_64BIT | (mmc << 1));
}
int
@ -798,7 +792,7 @@ pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum,
* zero except for the Table Size.
* Note: Table size N is encoded as N-1
*/
msixcap->msgctrl = msgnum - 1;
msixcap->msgctrl = (uint16_t) (msgnum - 1);
/*
* MSI-X BAR setup:
@ -818,7 +812,7 @@ pci_msix_table_init(struct pci_devinst *pi, int table_entries)
assert(table_entries <= MAX_MSIX_TABLE_ENTRIES);
table_size = table_entries * MSIX_TABLE_ENTRY_SIZE;
pi->pi_msix.table = calloc(1, table_size);
pi->pi_msix.table = calloc(1, ((size_t) table_size));
/* set mask bit of vector control register */
for (i = 0; i < table_entries; i++)
@ -834,10 +828,10 @@ pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum)
assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES);
assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0);
tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE;
tab_size = (uint32_t) (msgnum * MSIX_TABLE_ENTRY_SIZE);
/* Align table size to nearest 4K */
tab_size = roundup2(tab_size, 4096);
tab_size = roundup2(tab_size, 4096u);
pi->pi_msix.table_bar = barnum;
pi->pi_msix.pba_bar = barnum;
@ -852,7 +846,7 @@ pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum)
/* allocate memory for MSI-X Table and PBA */
pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32,
tab_size + pi->pi_msix.pba_size);
(tab_size + ((uint32_t) pi->pi_msix.pba_size)));
return (pci_emul_add_capability(pi, (u_char *)&msixcap,
sizeof(msixcap)));
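Two arithmetic details in the MSI-X hunks above are easy to miss: the capability's Table Size field stores the vector count minus one, and the table allocation is padded up to a 4 KiB boundary. A worked sketch, assuming the FreeBSD-style roundup2(x, y) = ((x) + (y) - 1) & ~((y) - 1) for power-of-two y and 16-byte table entries:

    int      msgnum   = 8;                                /* 8 MSI-X vectors */
    uint16_t msgctrl  = (uint16_t) (msgnum - 1);          /* Table Size field == 7 */
    uint32_t tab_size = (uint32_t) (msgnum * 16);         /* 128 bytes of table */

    tab_size = (tab_size + 4096u - 1) & ~(4096u - 1);     /* rounds up to 4096 */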
@ -922,8 +916,8 @@ msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
CFGWRITE(pi, offset, val, bytes);
}
void
pciecap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
static void
pciecap_cfgwrite(struct pci_devinst *pi, UNUSED int capoff, int offset,
int bytes, uint32_t val)
{
@ -1026,8 +1020,8 @@ pci_emul_iscap(struct pci_devinst *pi, int offset)
}
static int
pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2)
pci_emul_fallback_handler(UNUSED int vcpu, int dir, UNUSED uint64_t addr,
UNUSED int size, uint64_t *val, UNUSED void *arg1, UNUSED long arg2)
{
/*
* Ignore writes; return 0xff's for reads. The mem read code
@ -1041,8 +1035,8 @@ pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
}
static int
pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
int bytes, uint64_t *val, void *arg1, long arg2)
pci_emul_ecfg_handler(int vcpu, int dir, uint64_t addr, int bytes,
uint64_t *val, UNUSED void *arg1, UNUSED long arg2)
{
int bus, slot, func, coff, in;
@ -1053,7 +1047,7 @@ pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
in = (dir == MEM_F_READ);
if (in)
*val = ~0UL;
pci_cfgrw(ctx, vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val);
pci_cfgrw(vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val);
return (0);
}
@ -1068,7 +1062,7 @@ pci_ecfg_base(void)
#define BUSMEM_ROUNDUP (1024 * 1024)
int
init_pci(struct vmctx *ctx)
init_pci(void)
{
struct mem_range mr;
struct pci_devemu *pde;
@ -1080,7 +1074,7 @@ init_pci(struct vmctx *ctx)
int error;
pci_emul_iobase = PCI_EMUL_IOBASE;
pci_emul_membase32 = vm_get_lowmem_limit(ctx);
pci_emul_membase32 = xh_vm_get_lowmem_limit();
pci_emul_membase64 = PCI_EMUL_MEMBASE64;
for (bus = 0; bus < MAXBUSES; bus++) {
@ -1090,9 +1084,9 @@ init_pci(struct vmctx *ctx)
* Keep track of the i/o and memory resources allocated to
* this bus.
*/
bi->iobase = pci_emul_iobase;
bi->membase32 = pci_emul_membase32;
bi->membase64 = pci_emul_membase64;
bi->iobase = (uint16_t) pci_emul_iobase;
bi->membase32 = (uint32_t) pci_emul_membase32;
bi->membase64 = (uint32_t) pci_emul_membase64;
for (slot = 0; slot < MAXSLOTS; slot++) {
si = &bi->slotinfo[slot];
@ -1102,8 +1096,7 @@ init_pci(struct vmctx *ctx)
continue;
pde = pci_emul_finddev(fi->fi_name);
assert(pde != NULL);
error = pci_emul_init(ctx, pde, bus, slot,
func, fi);
error = pci_emul_init(pde, bus, slot, func, fi);
if (error)
return (error);
}
@ -1115,17 +1108,17 @@ init_pci(struct vmctx *ctx)
* reprogram the BARs.
*/
pci_emul_iobase += BUSIO_ROUNDUP;
pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP);
bi->iolimit = pci_emul_iobase;
pci_emul_iobase = roundup2(pci_emul_iobase, ((uint32_t) BUSIO_ROUNDUP));
bi->iolimit = (uint16_t) pci_emul_iobase;
pci_emul_membase32 += BUSMEM_ROUNDUP;
pci_emul_membase32 = roundup2(pci_emul_membase32,
BUSMEM_ROUNDUP);
bi->memlimit32 = pci_emul_membase32;
((uint64_t) BUSMEM_ROUNDUP));
bi->memlimit32 = (uint32_t) pci_emul_membase32;
pci_emul_membase64 += BUSMEM_ROUNDUP;
pci_emul_membase64 = roundup2(pci_emul_membase64,
BUSMEM_ROUNDUP);
((uint64_t) BUSMEM_ROUNDUP));
bi->memlimit64 = pci_emul_membase64;
}
@ -1164,7 +1157,7 @@ init_pci(struct vmctx *ctx)
* Accesses to memory addresses that are not allocated to system
* memory or PCI devices return 0xff's.
*/
lowmem = vm_get_lowmem_size(ctx);
lowmem = xh_vm_get_lowmem_size();
bzero(&mr, sizeof(struct mem_range));
mr.name = "PCI hole";
mr.flags = MEM_F_RW | MEM_F_IMMUTABLE;
@ -1188,8 +1181,8 @@ init_pci(struct vmctx *ctx)
}
static void
pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
void *arg)
pci_apic_prt_entry(UNUSED int bus, int slot, int pin, UNUSED int pirq_pin,
int ioapic_irq, UNUSED void *arg)
{
dsdt_line(" Package ()");
@@ -1202,8 +1195,8 @@ pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
}
static void
pci_pirq_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
void *arg)
pci_pirq_prt_entry(UNUSED int bus, int slot, int pin, int pirq_pin,
UNUSED int ioapic_irq, UNUSED void *arg)
{
char *name;
@@ -1406,7 +1399,7 @@ pci_msi_enabled(struct pci_devinst *pi)
return (pi->pi_msi.enabled);
}
int
static int
pci_msi_maxmsgnum(struct pci_devinst *pi)
{
if (pi->pi_msi.enabled)
@@ -1439,7 +1432,7 @@ pci_generate_msix(struct pci_devinst *pi, int index)
mte = &pi->pi_msix.table[index];
if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
/* XXX Set PBA bit if interrupt is disabled */
vm_lapic_msi(pi->pi_vmctx, mte->addr, mte->msg_data);
xh_vm_lapic_msi(mte->addr, mte->msg_data);
}
}
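pci_generate_msix now posts the message through xh_vm_lapic_msi(addr, data), the context-free wrapper over the in-process VMM. The address/data pair follows the standard x86 MSI encoding; a sketch of composing a basic fixed-delivery, edge-triggered message (the helper name is made up and the field layout comes from the architecture manuals, not this file):

    #include <stdint.h>

    /* x86 MSI: address 0xFEE00000 | (dest APIC ID << 12) selects the LAPIC,
     * data carries the vector; leaving the mode/trigger bits zero gives
     * fixed delivery with edge trigger. */
    void
    msi_compose(uint8_t dest_apic_id, uint8_t vector,
        uint64_t *addr, uint64_t *data)
    {
        *addr = 0xFEE00000u | ((uint64_t)dest_apic_id << 12);
        *data = vector;
    }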
@@ -1448,8 +1441,8 @@ pci_generate_msi(struct pci_devinst *pi, int index)
{
if (pci_msi_enabled(pi) && index < pci_msi_maxmsgnum(pi)) {
vm_lapic_msi(pi->pi_vmctx, pi->pi_msi.addr,
pi->pi_msi.msg_data + index);
xh_vm_lapic_msi(pi->pi_msi.addr, pi->pi_msi.msg_data +
((uint64_t) index));
}
}
@@ -1488,8 +1481,8 @@ pci_lintr_request(struct pci_devinst *pi)
}
si->si_intpins[bestpin].ii_count++;
pi->pi_lintr.pin = bestpin + 1;
pci_set_cfgdata8(pi, PCIR_INTPIN, bestpin + 1);
pi->pi_lintr.pin = (int8_t) (bestpin + 1);
pci_set_cfgdata8(pi, PCIR_INTPIN, ((uint8_t) (bestpin + 1)));
}
static void
@@ -1518,12 +1511,12 @@ pci_lintr_route(struct pci_devinst *pi)
* not yet assigned.
*/
if (ii->ii_pirq_pin == 0)
ii->ii_pirq_pin = pirq_alloc_pin(pi->pi_vmctx);
ii->ii_pirq_pin = pirq_alloc_pin();
assert(ii->ii_pirq_pin > 0);
pi->pi_lintr.ioapic_irq = ii->ii_ioapic_irq;
pi->pi_lintr.pirq_pin = ii->ii_pirq_pin;
pci_set_cfgdata8(pi, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin));
pci_set_cfgdata8(pi, PCIR_INTLINE, ((uint8_t) pirq_irq(ii->ii_pirq_pin)));
}
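The narrowing casts here are safe because INTPIN and INTLINE are single-byte config registers: INTPIN records which pin the function uses (1 = INTA# through 4 = INTD#, 0 = none) and INTLINE the legacy IRQ that pin was routed to. A sketch of that mapping, with illustrative names:

    #include <stdint.h>

    /* PCI config header: INTLINE at 0x3C, INTPIN at 0x3D, one byte each. */
    struct intx_regs {
        uint8_t intline;  /* routed legacy IRQ number */
        uint8_t intpin;   /* 0 = none, 1 = INTA# ... 4 = INTD# */
    };

    void
    intx_assign(struct intx_regs *r, int bestpin /* 0-based */, int irq)
    {
        r->intpin = (uint8_t)(bestpin + 1);
        r->intline = (uint8_t)irq;
    }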
void
@@ -1651,13 +1644,13 @@ pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv)
switch (bytes) {
case 1:
case 2:
*rv &= ~PCIM_MFDEV;
*rv &= ~((uint32_t) PCIM_MFDEV);
if (mfdev) {
*rv |= PCIM_MFDEV;
}
break;
case 4:
*rv &= ~(PCIM_MFDEV << 16);
*rv &= ~((uint32_t) (PCIM_MFDEV << 16));
if (mfdev) {
*rv |= (PCIM_MFDEV << 16);
}
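The extra (uint32_t) casts on PCIM_MFDEV are not cosmetic: the constant is a plain int, ~ is applied after integer promotion, and folding the resulting negative int into a uint32_t is exactly what warnings in the -Wconversion/-Wsign-conversion family flag. Casting the operand first keeps the expression unsigned end to end; a self-contained illustration with a stand-in constant:

    #include <stdint.h>

    #define MFDEV_BIT 0x80   /* stand-in for PCIM_MFDEV; value illustrative */

    void
    clear_mfdev(uint32_t *rv)
    {
        /* Without the cast, ~MFDEV_BIT is a negative int and the &= would
         * implicitly convert it back to unsigned, drawing a warning under
         * strict flags. Casting the operand keeps everything uint32_t. */
        *rv &= ~((uint32_t)MFDEV_BIT);
    }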
@@ -1719,9 +1712,7 @@ pci_emul_cmdsts_write(struct pci_devinst *pi, int coff, uint32_t new, int bytes)
else
unregister_bar(pi, i);
}
break;
default:
assert(0);
break;
}
}
@@ -1733,8 +1724,8 @@ pci_emul_cmdsts_write(struct pci_devinst *pi, int coff, uint32_t new, int bytes)
}
static void
pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
int coff, int bytes, uint32_t *eax)
pci_cfgrw(int vcpu, int in, int bus, int slot, int func, int coff, int bytes,
uint32_t *eax)
{
struct businfo *bi;
struct slotinfo *si;
@@ -1787,7 +1778,7 @@ pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
if (in) {
/* Let the device emulation override the default handler */
if (pe->pe_cfgread != NULL) {
needcfg = pe->pe_cfgread(ctx, vcpu, pi, coff, bytes,
needcfg = pe->pe_cfgread(vcpu, pi, coff, bytes,
eax);
} else {
needcfg = 1;
@@ -1800,7 +1791,7 @@ pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
} else {
/* Let the device emulation override the default handler */
if (pe->pe_cfgwrite != NULL &&
(*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0)
(*pe->pe_cfgwrite)(vcpu, pi, coff, bytes, *eax) == 0)
return;
/*
@@ -1857,10 +1848,8 @@ pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
PCIBAR_MEMHI64);
}
break;
default:
assert(0);
}
pci_set_cfgdata32(pi, coff, bar);
pci_set_cfgdata32(pi, coff, ((uint32_t) bar));
} else if (pci_emul_iscap(pi, coff)) {
pci_emul_capwrite(pi, coff, bytes, *eax);
@@ -1875,8 +1864,8 @@ pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff;
static int
pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
pci_emul_cfgaddr(UNUSED int vcpu, int in, UNUSED int port, int bytes,
uint32_t *eax, UNUSED void *arg)
{
uint32_t x;
@@ -1887,7 +1876,9 @@ pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
}
if (in) {
x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff;
x = (uint32_t) ((cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) |
cfgoff);
if (cfgenable)
x |= CONF1_ENABLE;
*eax = x;
@@ -1905,16 +1896,16 @@ pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr);
static int
pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
pci_emul_cfgdata(int vcpu, int in, int port, int bytes, uint32_t *eax,
UNUSED void *arg)
{
int coff;
assert(bytes == 1 || bytes == 2 || bytes == 4);
coff = cfgoff + (port - CONF1_DATA_PORT);
coff = cfgoff + (port - CONF1_DATA_PORT0);
if (cfgenable) {
pci_cfgrw(ctx, vcpu, in, cfgbus, cfgslot, cfgfunc, coff, bytes,
pci_cfgrw(vcpu, in, cfgbus, cfgslot, cfgfunc, coff, bytes,
eax);
} else {
/* Ignore accesses to cfgdata if not enabled by cfgaddr */
@@ -1924,28 +1915,27 @@ pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
return (0);
}
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT0, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT1, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT2, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT3, IOPORT_F_INOUT, pci_emul_cfgdata);
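The cfgaddr/cfgdata pair implements PCI configuration mechanism #1: the guest writes a 32-bit address with the enable bit set to port 0xCF8 (CONF1_ADDR_PORT) and then accesses the selected register through 0xCFC-0xCFF, which the port now spells CONF1_DATA_PORT0..3 instead of computing CONF1_DATA_PORT+n. A sketch of the address encoding that pci_emul_cfgaddr packs and unpacks (the helper itself is illustrative):

    #include <stdint.h>

    #define CONF1_ENABLE 0x80000000u  /* bit 31: configuration cycles enabled */

    /* bus[23:16], device[15:11], function[10:8], dword-aligned register[7:2] */
    uint32_t
    conf1_addr(int bus, int slot, int func, int reg)
    {
        return CONF1_ENABLE |
            ((uint32_t)bus << 16) |
            ((uint32_t)slot << 11) |
            ((uint32_t)func << 8) |
            ((uint32_t)reg & 0xfcu);
    }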
#define PCI_EMUL_TEST
#ifdef PCI_EMUL_TEST
/*
* Define a dummy test device
*/
#define DIOSZ 8
#define DMEMSZ 4096
#define DIOSZ 8
#define DMEMSZ 4096
struct pci_emul_dsoftc {
uint8_t ioregs[DIOSZ];
uint8_t memregs[2][DMEMSZ];
};
#define PCI_EMUL_MSI_MSGS 4
#define PCI_EMUL_MSIX_MSGS 16
#define PCI_EMUL_MSI_MSGS 4
static int
pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
pci_emul_dinit(struct pci_devinst *pi, UNUSED char *opts)
{
int error;
struct pci_emul_dsoftc *sc;
@@ -1974,15 +1964,15 @@ pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
}
static void
pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value)
pci_emul_diow(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value)
{
int i;
struct pci_emul_dsoftc *sc = pi->pi_arg;
if (baridx == 0) {
if (offset + size > DIOSZ) {
printf("diow: iow too large, offset %ld size %d\n",
if (offset + ((uint64_t) size) > DIOSZ) {
printf("diow: iow too large, offset %llu size %d\n",
offset, size);
return;
}
@@ -1990,9 +1980,9 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
if (size == 1) {
sc->ioregs[offset] = value & 0xff;
} else if (size == 2) {
*(uint16_t *)&sc->ioregs[offset] = value & 0xffff;
*(uint16_t *)((void *) &sc->ioregs[offset]) = value & 0xffff;
} else if (size == 4) {
*(uint32_t *)&sc->ioregs[offset] = value;
*(uint32_t *)((void *) &sc->ioregs[offset]) = (uint32_t) value;
} else {
printf("diow: iow unknown size %d\n", size);
}
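The stores above gain a detour through (void *) because casting &sc->ioregs[offset], a uint8_t pointer, straight to uint16_t* or uint32_t* trips Clang's cast-alignment warning; routing through void* silences it without changing the generated access. An alternative that avoids the pointer cast altogether is memcpy, shown here only as a sketch of the trade-off, not as what the port does:

    #include <stdint.h>
    #include <string.h>

    /* Write a 1/2/4/8-byte value into a byte-addressed register file in host
     * byte order, with no pointer-alignment casts at all. */
    void
    store_reg(uint8_t *regs, uint64_t offset, int size, uint64_t value)
    {
        switch (size) {
        case 1: { uint8_t  v = (uint8_t)value;  memcpy(regs + offset, &v, 1); break; }
        case 2: { uint16_t v = (uint16_t)value; memcpy(regs + offset, &v, 2); break; }
        case 4: { uint32_t v = (uint32_t)value; memcpy(regs + offset, &v, 4); break; }
        case 8: memcpy(regs + offset, &value, 8); break;
        }
    }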
@@ -2001,7 +1991,8 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
* Special magic value to generate an interrupt
*/
if (offset == 4 && size == 4 && pci_msi_enabled(pi))
pci_generate_msi(pi, value % pci_msi_maxmsgnum(pi));
pci_generate_msi(pi, ((int) (value %
((uint64_t) pci_msi_maxmsgnum(pi)))));
if (value == 0xabcdef) {
for (i = 0; i < pci_msi_maxmsgnum(pi); i++)
@@ -2010,8 +2001,8 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
}
if (baridx == 1 || baridx == 2) {
if (offset + size > DMEMSZ) {
printf("diow: memw too large, offset %ld size %d\n",
if (offset + ((uint16_t) size) > DMEMSZ) {
printf("diow: memw too large, offset %llu size %d\n",
offset, size);
return;
}
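The format-string changes in these printf calls come from the ABI difference between the two platforms: on FreeBSD/amd64 uint64_t is an unsigned long, so %ld slid by, while on OS X it is an unsigned long long and wants %llu. The PRIu64 macro from <inttypes.h> expresses the same thing portably; a sketch of that alternative, not the approach the diff takes:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t offset = 4096;
        int size = 4;

        /* PRIu64 expands to the right length modifier on either ABI. */
        printf("memw too large, offset %" PRIu64 " size %d\n", offset, size);
        return 0;
    }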
@@ -2019,13 +2010,14 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
i = baridx - 1; /* 'memregs' index */
if (size == 1) {
sc->memregs[i][offset] = value;
sc->memregs[i][offset] = (uint8_t) value;
} else if (size == 2) {
*(uint16_t *)&sc->memregs[i][offset] = value;
*(uint16_t *)((void *) &sc->memregs[i][offset]) = (uint16_t) value;
} else if (size == 4) {
*(uint32_t *)&sc->memregs[i][offset] = value;
*(uint32_t *)((void *) &sc->memregs[i][offset]) = (uint32_t) value;
} else if (size == 8) {
*(uint64_t *)&sc->memregs[i][offset] = value;
*(uint64_t *)((void *) &sc->memregs[i][offset]) = value;
} else {
printf("diow: memw unknown size %d\n", size);
}
@@ -2041,16 +2033,18 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
}
static uint64_t
pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size)
pci_emul_dior(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size)
{
struct pci_emul_dsoftc *sc = pi->pi_arg;
uint32_t value;
int i;
value = 0;
if (baridx == 0) {
if (offset + size > DIOSZ) {
printf("dior: ior too large, offset %ld size %d\n",
if (offset + ((uint64_t) size) > DIOSZ) {
printf("dior: ior too large, offset %llu size %d\n",
offset, size);
return (0);
}
@@ -2058,17 +2052,17 @@ pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
if (size == 1) {
value = sc->ioregs[offset];
} else if (size == 2) {
value = *(uint16_t *) &sc->ioregs[offset];
value = *(uint16_t *)((void *) &sc->ioregs[offset]);
} else if (size == 4) {
value = *(uint32_t *) &sc->ioregs[offset];
value = *(uint32_t *)((void *) &sc->ioregs[offset]);
} else {
printf("dior: ior unknown size %d\n", size);
}
}
if (baridx == 1 || baridx == 2) {
if (offset + size > DMEMSZ) {
printf("dior: memr too large, offset %ld size %d\n",
if (offset + ((uint64_t) size) > DMEMSZ) {
printf("dior: memr too large, offset %llu size %d\n",
offset, size);
return (0);
}
@@ -2078,11 +2072,11 @@ pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
if (size == 1) {
value = sc->memregs[i][offset];
} else if (size == 2) {
value = *(uint16_t *) &sc->memregs[i][offset];
value = *(uint16_t *) ((void *) &sc->memregs[i][offset]);
} else if (size == 4) {
value = *(uint32_t *) &sc->memregs[i][offset];
value = *(uint32_t *) ((void *) &sc->memregs[i][offset]);
} else if (size == 8) {
value = *(uint64_t *) &sc->memregs[i][offset];
value = (uint32_t) *(uint64_t *) ((void *) &sc->memregs[i][offset]);
} else {
printf("dior: ior unknown size %d\n", size);
}
@@ -2097,7 +2091,7 @@ pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
return (value);
}
struct pci_devemu pci_dummy = {
static struct pci_devemu pci_dummy = {
.pe_emu = "dummy",
.pe_init = pci_emul_dinit,
.pe_barwrite = pci_emul_diow,


@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,15 +27,12 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "pci_emul.h"
#include <xhyve/support/misc.h>
#include <xhyve/pci_emul.h>
static int
pci_hostbridge_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
pci_hostbridge_init(struct pci_devinst *pi, UNUSED char *opts)
{
/* config space */
pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1275); /* NetApp */
pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1275); /* NetApp */
@@ -48,22 +46,22 @@ pci_hostbridge_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
}
static int
pci_amd_hostbridge_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
pci_amd_hostbridge_init(struct pci_devinst *pi, char *opts)
{
(void) pci_hostbridge_init(ctx, pi, opts);
(void) pci_hostbridge_init(pi, opts);
pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1022); /* AMD */
pci_set_cfgdata16(pi, PCIR_DEVICE, 0x7432); /* made up */
return (0);
}
struct pci_devemu pci_de_amd_hostbridge = {
static struct pci_devemu pci_de_amd_hostbridge = {
.pe_emu = "amd_hostbridge",
.pe_init = pci_amd_hostbridge_init,
};
PCI_EMUL_SET(pci_de_amd_hostbridge);
struct pci_devemu pci_de_hostbridge = {
static struct pci_devemu pci_de_hostbridge = {
.pe_emu = "hostbridge",
.pe_init = pci_hostbridge_init,
};
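Making the pci_devemu tables static works because they are never referenced by name from other files: PCI_EMUL_SET drops each one into a set that pci_emul_finddev walks, matching the string a slot was configured with against pe_emu. A hedged sketch of what a trivial emulation looks like under the new, context-free callback signatures; the device name, the IDs, and the assumption that these headers pull in the PCI register constants are all illustrative:

    #include <xhyve/support/misc.h>
    #include <xhyve/pci_emul.h>

    /* Not a real xhyve device: a placeholder showing the post-port shape. */
    static int
    mydev_init(struct pci_devinst *pi, UNUSED char *opts)
    {
        pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1234);  /* made-up IDs */
        pci_set_cfgdata16(pi, PCIR_DEVICE, 0x5678);
        pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_OTHER);
        return (0);
    }

    static struct pci_devemu pci_de_mydev = {
        .pe_emu  = "mydev",      /* name matched by pci_emul_finddev */
        .pe_init = mydev_init,
    };
    PCI_EMUL_SET(pci_de_mydev);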

Some files were not shown because too many files have changed in this diff.