Mirror of https://github.com/hermitcore/libhermit.git (synced 2025-03-09 00:00:03 +01:00)

Commit ed505cb926: Merge branch 'devel' of github.com:RWTH-OS/HermitCore into devel
43 changed files with 1844 additions and 550 deletions
.gitmodules (vendored, 14 changed lines)

@@ -1,28 +1,28 @@
 [submodule "linux"]
 path = linux
-url = git@github.com:RWTH-OS/linux.git
+url = https://github.com/RWTH-OS/linux.git
 branch = hermit
 [submodule "hermit/usr/gcc"]
 path = hermit/usr/gcc
-url = git@github.com:RWTH-OS/gcc.git
+url = https://github.com/RWTH-OS/gcc.git
 branch = hermit
 [submodule "hermit/usr/binutils"]
 path = hermit/usr/binutils
-url = git@github.com:RWTH-OS/binutils.git
+url = https://github.com/RWTH-OS/binutils.git
 branch = hermit
 [submodule "hermit/usr/pte"]
 path = hermit/usr/pte
-url = git@github.com:RWTH-OS/pthread-embeded.git
+url = https://github.com/RWTH-OS/pthread-embeded.git
 branch = hermit
 [submodule "hermit/usr/newlib"]
 path = hermit/usr/newlib
-url = git@github.com:RWTH-OS/newlib.git
+url = https://github.com/RWTH-OS/newlib.git
 branch = hermit
 [submodule "hermit/lwip"]
 path = hermit/lwip
-url = git@github.com:RWTH-OS/LwIP.git
+url = https://github.com/RWTH-OS/LwIP.git
 branch = hermit
 [submodule "hermit/usr/libomp"]
 path = hermit/usr/libomp
-url = git@github.com:RWTH-OS/libomp_oss.git
+url = https://github.com/RWTH-OS/libomp_oss.git
 branch = hermit
.travis.yml

@@ -7,7 +7,6 @@ compiler: gcc
 before_install:
 - sudo apt-get -qq update
 - sudo apt-get install -y qemu-system-x86 nasm texinfo libmpfr-dev libmpc-dev libgmp-dev libisl-dev flex bison
-- sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules
 - git submodule update --init hermit/lwip hermit/usr/gcc hermit/usr/pte hermit/usr/libomp
 script:
 - ./configure

@@ -23,6 +22,6 @@ env:
 - PROXY_STR=":hermit:M:7:\\x42::`pwd`/RWTH-OS/HermitCore/hermit/tools/proxy"
 - HERMIT_ISLE=qemu
 - HERMIT_CPUS=1
-- HERMIT_MEM="128M"
+- HERMIT_MEM="512M"
 - HERMIT_KVM="0"
-#- HERMIT_VERBOSE="1"
+- HERMIT_VERBOSE="1"
README.md (101 changed lines)

@@ -1,19 +1,19 @@
 # HermitCore - A lightweight unikernel for a scalable and predictable runtime behavior

-The project [HermitCore](http://www.hermitcore.org) is new [unikernel](http://unikernel.org) targeting high-performance computing.
-HermitCore extends on the multi-kernel approach with unikernel features to provide better programmability and scalability for hierarchical systems.
-By starting HermitCore applications, cores will be split off from the Linux system and the applications run bare-metal on these cores.
-This approach achieves a lower OS jitter and a better scalability.
-HermitCore applications and the Linux system can communicate via an IP interface (e.g. inter-kernel communication).
+The project [HermitCore](http://www.hermitcore.org) is a new [unikernel](http://unikernel.org) targeting high-performance computing.
+HermitCore extends the multi-kernel approach (like [McKernel](http://www-sys-aics.riken.jp/ResearchTopics/os/mckernel.html)) with unikernel features for better programmability and scalability on hierarchical systems.
+On startup of HermitCore applications, cores are isolated from the Linux system, enabling the applications to run bare-metal on these cores.
+This approach achieves lower OS jitter and better scalability compared to full-weight kernels.
+Inter-kernel communication between HermitCore applications and the Linux system is realized by means of an IP interface.

-HermitCore can be used as classical standalone unikernel as well.
+In addition to the multi-kernel approach described above, HermitCore can be used as a classical standalone unikernel as well.
 This reduces the demand on resources and improves the boot time.
 It is the result of a research project at RWTH Aachen University and is currently an experimental approach, i.e., not production ready.
 Please use it with caution.

 ## Requirements

-Following software packets are required to build HermitCore on a Linux system:
+The build process currently works only on **x86-based Linux** systems. The following software packages are required to build HermitCore:

 * Netwide Assembler (NASM)
 * GNU Make, GNU Binutils

@@ -21,7 +21,7 @@ Following software packets are required to build HermitCore on a Linux system:
 * texinfo
 * Qemu

-On Ubuntu the packets could be installed with following command:
+On Debian-based systems the packages can be installed by executing:
 ```
 sudo apt-get install qemu-system-x86 nasm texinfo libmpfr-dev libmpc-dev libgmp-dev libisl-dev flex bison
 ```

@@ -29,25 +29,46 @@ On Ubuntu the packets could be installed with following command:
 ## Building and testing HermitCore within a virtual machine

-0. Please make sure that you cloned this repository and all its submodules.
-1. The build process works currently only on x86-based Linux systems.
-2. To configure the system, run the *configure* script in the directory, which contains this *README*. Fine tuning of the installation directories e.g. with the flag `--prefix` is currently not supported. HermitCore, the cross-compiler and the demo applications will be installed in subdirectories of this repository. Please find in section *Tips* (at the end of this *README*) hints to enable optimization for the target processor.
-3. The command `make` build the Linux kernel, the HermitCore kernel, the corss-compiler and the demo applications.
-4. To start a virtual machine and to boot a small Linux version use the command `make qemu`. Per default, the virtual machine has 10 cores, 2 NUMA nodes and 8 GByte RAM. To increase or to decrease the machine size, the label `qemu` in the Makefile has to be modified.
-5. Inside the VM runs a small Linux system, which already includes the patches for HermitCore. For each NUMA node (= HermitCore isle) is a directory called `isleX` in `/sys/hermit` , where `X` represents the number of the NUMA node. The demo applications are located in the directories `/hermit/usr/{tests,benchmarks}`. A HermitCore loader is already registered. By starting a HermitCore application, a proxy will be started on the Linux system, while the HermitCore binary will be started on isle 0 with cpu 1. To change the default behavior, the environment variable `HERMIT_ISLE` is used to specify the (memory) location of the isle, while the environment variable `HERMIT_CPUS` is used to specify the cores. For instance, `HERMIT_ISLE=1 HERMIT_CPUS="3-5" /hermit/usr/tests/hello` starts a HelloWorld demo on the HermitCore isle 1, which used the cores 3 to 5. The output messages are forwarded to the Linux proxy and printed on the Linux system.
-6. HermitCore's kernel messages of `isleX` are available via `cat /sys/hermit/isleX/log`, where `X` represents the isle.
-7. It exists an virtual IP devices between HermitCore isles and the Linux system (see output of `ifconfig`). Per default, the Linux system has the IP address `192.168.28.1`. The HermitCore isles starts with the IP address `192.168.28.2` for isle 0 and is increased by one for every isle.
-8. More HermitCore applications are available at `/hermit/usr/{tests,benchmarks}` which is a shared directory between the host and QEmu.
+1. To configure the system, run the *configure* script in the directory, which contains this *README*.
+Fine tuning of the installation directories, e.g., with the flag `--prefix`, is currently not supported.
+HermitCore, the cross-compiler and the demo applications will be installed in subdirectories of this repository.
+At the end of this *README* in section *Tips* you find hints to enable optimization for the target.
+2. The command `make` builds the Linux kernel, the HermitCore kernel, the cross-compiler, and the demo applications.
+3. To start a virtual machine and to boot a small Linux version use the command `make qemu`.
+Per default, the virtual machine has 10 cores, 2 NUMA nodes, and 8 GiB RAM.
+To increase or to decrease the machine size, the label `qemu` in the Makefile has to be modified accordingly.
+4. Inside the VM runs a small Linux system, which already includes the patches for HermitCore.
+Per NUMA node (= HermitCore isle) there is a directory called `isleX` under `/sys/hermit`, where `X` represents the NUMA node ID.
+The demo applications are located in the directories `/hermit/usr/{tests,benchmarks}`.
+A HermitCore loader is already registered.
+By starting a HermitCore application, a proxy will be executed on the Linux system, while the HermitCore binary will be started on isle 0 with cpu 1.
+To change the default behavior, the environment variable `HERMIT_ISLE` is used to specify the (memory) location of the isle, while the environment variable `HERMIT_CPUS` is used to specify the cores.
+For instance, `HERMIT_ISLE=1 HERMIT_CPUS="3-5" /hermit/usr/tests/hello` starts a HelloWorld demo on the HermitCore isle 1, which uses the cores 3 to 5.
+The output messages are forwarded to the Linux proxy and printed on the Linux system.
+5. HermitCore's kernel messages of `isleX` are available via `cat /sys/hermit/isleX/log`.
+6. There is a virtual IP device for the communication between the HermitCore isles and the Linux system (see output of `ifconfig`).
+Per default, the Linux system has the IP address `192.168.28.1`.
+The HermitCore isles start with the IP address `192.168.28.2` for isle 0, which is increased by one for every further isle.
+7. More HermitCore applications are available at `/hermit/usr/{tests,benchmarks}`, which is a shared directory between the host and QEMU.

 ## Building and testing HermitCore on a real machine

-Note, to launch HermitCore applications, root privileges are required.
+*Note*: to launch HermitCore applications, root privileges are required.

-1. In principle you have to follow the tutorial above. After the configuration (step 2 in the above tutorial) go to the subdirectory `linux`, which contains the source code of the Linux kernel. Configure the kernel with `make menuconfig` for your system. Be sure, that the option `CONFIG_HERMIT_CORE` in `Processor type and features` is enabled.
-2. Go back to the root directory of this repository and build with `make` the Linux kernel, the HermitCore kernel, the cross-compiler and the demo applications.
-3. Install the Linux kernel and its initial ramdisk on your system (see descriptions of your Linux distribution). We recommend to disable Linux NO_HZ feature by setting the kernel parameter `no_hz=off`. This feature reduces partly the OS noise, but noise sensitive applications should now run on HermitCore.
-4. Register the HermitCore loader to your system with following command: `echo ":hermit:M:7:\\x42::/path2proyxy/proxy:" > /proc/sys/fs/binfmt_misc/register`, in which `path2proxy` defines the path to the loader. You find the loader `proxy` after building the HermiCore sources in the subdirectory `hermit/tools` of the directory, which contains this *README*.
-5. The IP device between HermitCore and Linux does currently not support IPv6. Consequently, disable IPv6 by adding following line to `/etc/sysctl.conf`: `net.ipv6.conf.mmnif.disable_ipv6 = 1`.
-6. Per default, the IP device uses a static IP address range. Linux has to use `162.168.28.1`, where HermitCore isles start with `192.168.28.2` (isle 0). The network manager must be configured accordingly and consequently the file `/etc/sysconfig/network-scripts/ifcfg-mmnif` must be created with following contents:
+1. In principle you have to follow the tutorial above.
+After the configuration (Step 2 in the [above tutorial](#building-and-testing-hermitcore-within-a-virtual-machine)) go to the subdirectory `linux`, which contains the source code of the Linux kernel.
+Configure the kernel with `make menuconfig` for your system.
+Be sure that the option `CONFIG_HERMIT_CORE` in `Processor type and features` is enabled.
+2. Go back to the root directory of this repository and build with `make` the Linux kernel, the HermitCore kernel, the cross-compiler, and the demo applications.
+3. Install the Linux kernel and its initial ramdisk on your system (see descriptions of your Linux distribution).
+We recommend disabling the Linux NO_HZ feature by setting the kernel parameter `no_hz=off`.
+4. Register the HermitCore loader on your system with the following command: `echo ":hermit:M:7:\\x42::/path2proxy/proxy:" > /proc/sys/fs/binfmt_misc/register`, in which `path2proxy` defines the path to the loader.
+You find the loader `proxy` after building the HermitCore sources in the subdirectory `hermit/tools` of the directory, which contains this *README*.
+5. The IP device between HermitCore and Linux currently does not support IPv6.
+Consequently, disable IPv6 by adding the following line to `/etc/sysctl.conf`: `net.ipv6.conf.mmnif.disable_ipv6 = 1`.
+6. Per default, the IP device uses a static IP address range.
+Linux has to use `192.168.28.1`, while the HermitCore isles start with `192.168.28.2` (isle 0).
+The network manager must be configured accordingly and therefore the file `/etc/sysconfig/network-scripts/ifcfg-mmnif` must be created with the following content:
 ```
 DEVICE=mmnif
 BOOTPROTO=none

@@ -57,29 +78,30 @@ NETMASK=255.255.255.0
 IPADDR=192.168.28.1
 NM_CONTROLLED=yes
 ```
-Finally, boot your system with the new Linux kernel and follow the above tutorial (*Building and testing HermitCore within a virtual machine*) from point 5.
+Finally, boot your system with the new Linux kernel and follow the [above tutorial](#building-and-testing-hermitcore-within-a-virtual-machine) from Step 5.

-The demo applications are stored in their subdirectories `hermit/usr/{tests,benchmarks}` of the directory, which contains this *README*.
+The demo applications are located in their subdirectories `hermit/usr/{tests,benchmarks}`.

 ## HermitCore as classical standalone unikernel

 HermitCore applications can be directly started as standalone kernel within a virtual machine.
-In this case, iRCCE isn't supported.
-Please register the loader like the multi-kernel version of HermitCore (see bullet 4 in section *Building and testing HermitCore on a real machine*).
+In this case, [iRCCE](http://www.lfbs.rwth-aachen.de/publications/files/iRCCE.pdf) is not supported.
+Please register the loader the same way as done for the multi-kernel version of HermitCore (see Step 4 in [*Building and testing HermitCore on a real machine*](#building-and-testing-hermitcore-on-a-real-machine)).
 If the environment variable `HERMIT_ISLE` is set to `qemu`, the application will be started within a VM.
-Please note that the loader requires Qemu **with** KVM support and expects that the executable name is *qemu-system-x86_64*.
-With the environment variable `HERMIT_QEMU`, the executable name could be adapted for your system.
+Please note that the loader requires QEMU and uses *KVM* per default.
+Furthermore, it expects that the executable is called `qemu-system-x86_64`.
+You can adapt the name by setting the environment variable `HERMIT_QEMU`.

 In this context, the environment variable `HERMIT_CPUS` specifies the number of cpus (and no longer a range of core ids).
 Furthermore, the variable `HERMIT_MEM` defines the memory size of the virtual machine.
-The suffix of *M* or *G* can be used to signify a value in megabytes or gigabytes respectively.
-Per default, the loader initializes a system with one core and 2 GByte RAM.
+The suffix of *M* or *G* can be used to specify a value in megabytes or gigabytes respectively.
+Per default, the loader initializes a system with one core and 2 GiB RAM.

 The virtual machine opens two TCP/IP ports.
 One is used for the communication between HermitCore application and its proxy.
-The other port is used to create a connection via telnet to Qemu's system monitor.
-With the environment variable `HERMIT_PORT`, the default port (18766) can be changed between HermitCore application and its proxy.
-The connection to the system monitor used automatically `HERMIT_PORT+1` (or per default 18767).
+The second port is used to create a connection via telnet to QEMU's system monitor.
+With the environment variable `HERMIT_PORT`, the default port (18766) can be changed for the communication between the HermitCore application and its proxy.
+The connection to the system monitor automatically uses `HERMIT_PORT+1`, i.e., the default port is 18767.

 The following example starts the stream benchmark in a virtual machine, which has 4 cores and 6GB memory.
 ```

@@ -88,6 +110,11 @@ HERMIT_ISLE=qemu HERMIT_CPUS=4 HERMIT_MEM=6G hermit/usr/benchmarks/stream

 ## Tips

-1. The configuration flag `--with-mtune=name` specifies the name of the target processor for which GCC should tune the performance of the code. You could use any architecture name, which is supported by the GCC. For instance, `--with-mtune=native` optimzes the code for the host system. Please note, if the applications will be started within a VM, the hypervisor has to support the specified architecture name. If KVM is started by our proxy, per default the host architecture will be used as target processor.
-2. If KVM is started by our proxy and the environment variable `HERMIT_KVM` is set to `0`, the virtual machine will be not accelerated by KVM. In this case, the configuration flag `--with-mtune=name` should be avoided.
-3. By setting the environment variable `HERMIT_VERBOSE` to `1`, the proxy prints at termination the kernel log messages on the screen.
+1. The configuration flag `--with-mtune=name` specifies the name of the target processor for which GCC should tune the performance of the code.
+You can use any architecture name which is supported by GCC.
+For instance, `--with-mtune=native` optimizes the code for the host system.
+Please note that if the application is started within a VM, the hypervisor has to support the specified architecture name.
+If KVM is started by our proxy, per default the host architecture will be used as target processor.
+2. If KVM is started by our proxy and the environment variable `HERMIT_KVM` is set to `0`, the virtual machine will not be accelerated by KVM.
+In this case, the configuration flag `--with-mtune=name` should be avoided.
+3. By setting the environment variable `HERMIT_VERBOSE` to `1`, the proxy prints the kernel log messages onto the screen at termination.
hermit/.gitignore (vendored, 2 changed lines)
|
@ -16,7 +16,9 @@ usr/tests/hello++
|
|||
usr/tests/hellof
|
||||
usr/tests/jacobi
|
||||
usr/tests/thr_hello
|
||||
usr/tests/pi
|
||||
usr/tests/RCCE_minimum
|
||||
usr/tests/signals
|
||||
usr/benchmarks/RCCE_pingping
|
||||
usr/benchmarks/RCCE_pingpong
|
||||
usr/benchmarks/stream
|
||||
|
|
|
@ -16,6 +16,7 @@ STACKPROT = -fno-stack-protector
|
|||
|
||||
FC_FOR_TARGET = $(CROSSCOMPREFIX)-gfortran
|
||||
CC_FOR_TARGET = $(CROSSCOMPREFIX)-gcc
|
||||
GO_FOR_TARGET = $(CROSSCOMPREFIX)-gccgo
|
||||
CXX_FOR_TARGET = $(CROSSCOMPREFIX)-g++
|
||||
GCC_FOR_TARGET = $(CROSSCOMPREFIX)-gcc
|
||||
CPP_FOR_TARGET = $(CROSSCOMPREFIX)-cpp
|
||||
|
@ -55,6 +56,7 @@ ifdef PROFILING
|
|||
endif
|
||||
|
||||
CFLAGS_FOR_NEWLIB = -m64 -mtls-direct-seg-refs -O3 -ftree-vectorize $(ARCH_OPT) #$(STACKPROT)
|
||||
GOFLAGS_FOR_NEWLIB = -m64 -mtls-direct-seg-refs -O3 -ftree-vectorize $(ARCH_OPT)
|
||||
FCFLAGS_FOR_NEWLIB = -m64 -mtls-direct-seg-refs -O3 -ftree-vectorize $(ARCH_OPT)
|
||||
FFLAGS_FOR_NEWLIB = -m64 -mtls-direct-seg-refs -O3 -ftree-vectorize $(ARCH_OPT)
|
||||
CXXFLAGS_FOR_NEWLIB = -m64 -mtls-direct-seg-refs -O3 -ftree-vectorize $(ARCH_OPT)
|
||||
|
@ -78,6 +80,7 @@ all: arch/x86/kernel/boot.h bootstrap $(NAME) toolchain tools loader
|
|||
toolchain:
|
||||
$Q$(MAKE) ARCH=$(ARCH) \
|
||||
LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_NEWLIB)" \
|
||||
GOFLAGS_FOR_TARGET="$(GOFLAGS_FOR_NEWLIB)" \
|
||||
CFLAGS_FOR_TARGET="$(CFLAGS_FOR_NEWLIB)" \
|
||||
FFLAGS_FOR_TARGET="$(FFLAGS_FOR_NEWLIB)" \
|
||||
FCFLAGS_FOR_TARGET="$(FCFLAGS_FOR_NEWLIB)" \
|
||||
|
@ -85,6 +88,7 @@ toolchain:
|
|||
CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_NEWLIB)" \
|
||||
NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" \
|
||||
CC_FOR_TARGET=$(CC_FOR_TARGET) \
|
||||
GO_FOR_TARGET=$(GO_FOR_TARGET) \
|
||||
FC_FOR_TARGET=$(FC_FOR_TARGET) \
|
||||
CXX_FOR_TARGET=$(CXX_FOR_TARGET) \
|
||||
GCC_FOR_TARGET=$(GCC_FOR_TARGET) \
|
||||
|
|
|
@ -204,6 +204,7 @@ int apic_is_enabled(void);
|
|||
int apic_enable_timer(void);
|
||||
int apic_disable_timer(void);
|
||||
int apic_timer_deadline(uint32_t);
|
||||
int apic_timer_is_running(void);
|
||||
int apic_send_ipi(uint64_t dest, uint8_t irq);
|
||||
int ioapic_inton(uint8_t irq, uint8_t apicid);
|
||||
int ioapic_intoff(uint8_t irq, uint8_t apicid);
|
||||
|
|
|
@ -41,54 +41,12 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
/** @brief Disable IRQs
|
||||
*
|
||||
* This inline function just clears out the interrupt bit
|
||||
*/
|
||||
inline static void irq_disable(void) {
|
||||
asm volatile("cli" ::: "memory");
|
||||
}
|
||||
|
||||
/** @brief Disable IRQs (nested)
|
||||
*
|
||||
* Disable IRQs when unsure if IRQs were enabled at all.\n
|
||||
* This function together with irq_nested_enable can be used
|
||||
* in situations when interrupts shouldn't be activated if they
|
||||
* were not activated before calling this function.
|
||||
*
|
||||
* @return The set of flags which have been set until now
|
||||
*/
|
||||
inline static uint8_t irq_nested_disable(void) {
|
||||
size_t flags;
|
||||
asm volatile("pushf; cli; pop %0": "=r"(flags) : : "memory");
|
||||
if (flags & (1 << 9))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** @brief Enable IRQs */
|
||||
inline static void irq_enable(void) {
|
||||
asm volatile("sti" ::: "memory");
|
||||
}
|
||||
|
||||
/** @brief Enable IRQs (nested)
|
||||
*
|
||||
* If called after calling irq_nested_disable, this function will
|
||||
* not activate IRQs if they were not active before.
|
||||
*
|
||||
* @param flags Flags to set. Could be the old ones you got from irq_nested_disable.
|
||||
*/
|
||||
inline static void irq_nested_enable(uint8_t flags) {
|
||||
if (flags)
|
||||
irq_enable();
|
||||
}
|
||||
|
||||
/** @brief Determines, if the interrupt flags (IF) is set
|
||||
*
|
||||
* @return
|
||||
* - 1 interrupt flag is set
|
||||
* - 0 interrupt flag is cleared
|
||||
 */
|
||||
inline static uint8_t is_irq_enabled(void)
|
||||
{
|
||||
size_t flags;
|
||||
|
@ -98,6 +56,49 @@ inline static uint8_t is_irq_enabled(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/** @brief Disable IRQs
|
||||
*
|
||||
* This inline function just clears out the interrupt bit
|
||||
*/
|
||||
inline static void irq_disable(void) {
|
||||
asm volatile("cli" ::: "memory");
|
||||
}
|
||||
|
||||
/** @brief Enable IRQs
|
||||
*
|
||||
* This inline function just sets the interrupt bit
|
||||
*/
|
||||
inline static void irq_enable(void) {
|
||||
asm volatile("sti" ::: "memory");
|
||||
}
|
||||
|
||||
/** @brief Disable IRQs (nested)
|
||||
*
|
||||
* Disable IRQs when unsure if IRQs were enabled at all.
|
||||
* This function together with irq_nested_enable can be used
|
||||
* in situations when interrupts shouldn't be activated if they
|
||||
* were not activated before calling this function.
|
||||
*
|
||||
* @return Whether IRQs had been enabled or not before disabling
|
||||
*/
|
||||
inline static uint8_t irq_nested_disable(void) {
|
||||
uint8_t was_enabled = is_irq_enabled();
|
||||
irq_disable();
|
||||
return was_enabled;
|
||||
}
|
||||
|
||||
/** @brief Enable IRQs (nested)
|
||||
*
|
||||
* Can be used in conjunction with irq_nested_disable() to only enable
|
||||
* interrupts again if they were enabled before.
|
||||
*
|
||||
* @param was_enabled Whether IRQs should be enabled or not
|
||||
*/
|
||||
inline static void irq_nested_enable(uint8_t was_enabled) {
|
||||
if (was_enabled)
|
||||
irq_enable();
|
||||
}
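
Taken together, irq_nested_disable() and irq_nested_enable() implement the usual save-and-restore pattern for short critical sections. A minimal usage sketch (the critical section itself is illustrative):

```
static void update_counter(volatile uint64_t* counter)
{
	uint8_t flags = irq_nested_disable();	/* remember whether IRQs were enabled */

	(*counter)++;				/* critical section, runs with IRQs off */

	irq_nested_enable(flags);		/* re-enable IRQs only if they were on before */
}
```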
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -425,6 +425,11 @@ inline static uint64_t rdtscp(uint32_t* cpu_id)
|
|||
return ((uint64_t)hi << 32ULL | (uint64_t)lo);
|
||||
}
|
||||
|
||||
inline static uint64_t get_rdtsc()
|
||||
{
|
||||
return has_rdtscp() ? rdtscp(NULL) : rdtsc();
|
||||
}
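
get_rdtsc() simply prefers the serializing RDTSCP variant whenever the CPU supports it. A small timing sketch; the conversion assumes, as in timer.c, that get_cpu_frequency() reports MHz:

```
static uint64_t measure_us(void (*work)(void))
{
	const uint64_t start = get_rdtsc();
	work();
	const uint64_t end = get_rdtsc();

	/* TSC cycles divided by cycles per microsecond (MHz) yields microseconds */
	return (end - start) / (uint64_t) get_cpu_frequency();
}
```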
|
||||
|
||||
/** @brief Read MSR
|
||||
*
|
||||
* The asm instruction rdmsr which stands for "Read from model specific register"
|
||||
|
|
|
@ -114,7 +114,41 @@ typedef unsigned short wchar_t;
|
|||
typedef wchar_t wint_t;
|
||||
#endif
|
||||
|
||||
/// This defines what the stack looks like after the task context is saved.
|
||||
/// This defines registers, which are saved for a "user-level" context switch
|
||||
typedef struct mregs {
|
||||
/// R15 register
|
||||
uint64_t r15;
|
||||
/// R14 register
|
||||
uint64_t r14;
|
||||
/// R13 register
|
||||
uint64_t r13;
|
||||
/// R12 register
|
||||
uint64_t r12;
|
||||
/// R9 register
|
||||
uint64_t r9;
|
||||
/// R8 register
|
||||
uint64_t r8;
|
||||
/// RDI register
|
||||
uint64_t rdi;
|
||||
/// RSI register
|
||||
uint64_t rsi;
|
||||
/// RBP register
|
||||
uint64_t rbp;
|
||||
/// RBX register
|
||||
uint64_t rbx;
|
||||
/// RDX register
|
||||
uint64_t rdx;
|
||||
/// RCX register
|
||||
uint64_t rcx;
|
||||
/// RSP register
|
||||
uint64_t rsp;
|
||||
/// RIP
|
||||
uint64_t rip;
|
||||
/// MSXCSR
|
||||
uint32_t mxcsr;
|
||||
} mregs_t;
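
The hand-written getcontext/setcontext routines in entry.asm (also part of this commit) store and load the saved registers at fixed byte offsets, so they must stay in sync with this structure. A consistency-check sketch; the offsets are the ones used by that assembly:

```
#include <stddef.h>	/* offsetof */

_Static_assert(offsetof(struct mregs, r15)   == 0x00, "r15 must be the first field");
_Static_assert(offsetof(struct mregs, rcx)   == 0x58, "rcx offset used by entry.asm");
_Static_assert(offsetof(struct mregs, rsp)   == 0x60, "rsp offset used by entry.asm");
_Static_assert(offsetof(struct mregs, rip)   == 0x68, "rip offset used by entry.asm");
_Static_assert(offsetof(struct mregs, mxcsr) == 0x70, "mxcsr offset used by stmxcsr/ldmxcsr");
```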
|
||||
|
||||
/// This defines what the stack looks like after the task context is saved
|
||||
struct state {
|
||||
/// GS register
|
||||
uint64_t gs;
|
||||
|
@ -165,6 +199,12 @@ struct state {
|
|||
uint64_t ss;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
void *ss_sp; /* Stack base or pointer. */
|
||||
int ss_flags; /* Flags. */
|
||||
size_t ss_size; /* Stack size. */
|
||||
} stack_t;
|
||||
|
||||
const int32_t is_single_kernel(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -56,6 +56,8 @@ typedef struct {
|
|||
uint32_t status;
|
||||
} i387_fsave_t;
|
||||
|
||||
#define FPU_STATE_INIT { {0, 0, 0, 0, 0, 0, 0, { [0 ... 19] = 0 }, 0} }
|
||||
|
||||
typedef struct {
|
||||
uint16_t cwd;
|
||||
uint16_t swd;
|
||||
|
@ -106,6 +108,22 @@ union fpu_state {
|
|||
xsave_t xsave;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
uint16_t control_word;
|
||||
uint16_t unused1;
|
||||
uint16_t status_word;
|
||||
uint16_t unused2;
|
||||
uint16_t tags;
|
||||
uint16_t unused3;
|
||||
uint32_t eip;
|
||||
uint16_t cs_selector;
|
||||
uint32_t opcode:11;
|
||||
uint32_t unused4:5;
|
||||
uint32_t data_offset;
|
||||
uint16_t data_selector;
|
||||
uint16_t unused5;
|
||||
} fenv_t;
|
||||
|
||||
typedef void (*handle_fpu_state)(union fpu_state* state);
|
||||
|
||||
extern handle_fpu_state save_fpu_state;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
C_source := irq.c idt.c isrs.c gdt.c processor.c timer.c tasks.c apic.c pci.c vga.c uart.c
|
||||
C_source := irq.c idt.c isrs.c gdt.c processor.c timer.c tasks.c apic.c pci.c vga.c uart.c syscall.c
|
||||
ASM_source := entry.asm string.asm
|
||||
MODULE := arch_x86_kernel
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ static volatile ioapic_t* ioapic = NULL;
|
|||
static uint32_t icr = 0;
|
||||
static uint32_t ncores = 1;
|
||||
static uint8_t irq_redirect[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF};
|
||||
static uint8_t initialized = 0;
|
||||
static uint8_t apic_initialized = 0;
|
||||
static uint8_t online[MAX_APIC_CORES] = {[0 ... MAX_APIC_CORES-1] = 0};
|
||||
|
||||
spinlock_t bootlock = SPINLOCK_INIT;
|
||||
|
@ -141,7 +141,7 @@ static inline uint32_t ioapic_version(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline uint32_t ioapic_max_redirection_entry(void)
|
||||
static inline uint8_t ioapic_max_redirection_entry(void)
|
||||
{
|
||||
if (ioapic)
|
||||
return (ioapic_read(IOAPIC_REG_VER) >> 16) & 0xFF;
|
||||
|
@ -151,7 +151,29 @@ static inline uint32_t ioapic_max_redirection_entry(void)
|
|||
|
||||
int apic_is_enabled(void)
|
||||
{
|
||||
return (lapic && initialized);
|
||||
return (lapic && apic_initialized);
|
||||
}
|
||||
|
||||
static inline void lapic_timer_set_counter(uint32_t counter)
|
||||
{
|
||||
// set counter decrements to 1
|
||||
lapic_write(APIC_DCR, 0xB);
|
||||
lapic_write(APIC_ICR, counter);
|
||||
}
|
||||
|
||||
static inline void lapic_timer_disable(void)
|
||||
{
|
||||
lapic_write(APIC_LVT_TSR, 0x10000);
|
||||
}
|
||||
|
||||
static inline void lapic_timer_oneshot(void)
|
||||
{
|
||||
lapic_write(APIC_LVT_T, 0x7B);
|
||||
}
|
||||
|
||||
static inline void lapic_timer_periodic(void)
|
||||
{
|
||||
lapic_write(APIC_LVT_T, 0x2007B);
|
||||
}
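
The magic values written by these helpers follow the local APIC register layout (a reference sketch based on the Intel SDM; the macro names are illustrative, the kernel writes the raw values):

```
#define LVT_TIMER_VECTOR_123	0x0007B	/* one-shot mode, delivers IDT vector 123 */
#define LVT_TIMER_PERIODIC	0x2007B	/* bit 17 set: periodic mode, same vector */
#define LVT_TIMER_MASKED	0x10000	/* bit 16 set: timer interrupt masked (disabled) */
#define DCR_DIVIDE_BY_1		0xB	/* divide configuration: count the bus clock / 1 */
```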
|
||||
|
||||
extern uint32_t disable_x2apic;
|
||||
|
@ -281,15 +303,22 @@ static inline void set_ipi_dest(uint32_t cpu_id) {
|
|||
lapic_write(APIC_ICR2, tmp);
|
||||
}
|
||||
|
||||
int apic_timer_deadline(uint32_t t)
|
||||
int apic_timer_is_running(void)
|
||||
{
|
||||
if (BUILTIN_EXPECT(apic_is_enabled(), 1)) {
|
||||
return lapic_read(APIC_CCR) != 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int apic_timer_deadline(uint32_t ticks)
|
||||
{
|
||||
if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
|
||||
//kprintf("timer oneshot %ld\n", t);
|
||||
|
||||
// create one shot interrup
|
||||
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
|
||||
lapic_write(APIC_LVT_T, 0x7B); // connects the timer to 123 and enables it
|
||||
lapic_write(APIC_ICR, icr*t);
|
||||
lapic_timer_oneshot();
|
||||
lapic_timer_set_counter(ticks * icr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -302,7 +331,7 @@ int apic_disable_timer(void)
|
|||
if (BUILTIN_EXPECT(!apic_is_enabled(), 0))
|
||||
return -EINVAL;
|
||||
|
||||
lapic_write(APIC_LVT_T, 0x10000); // disable timer interrupt
|
||||
lapic_timer_disable();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -310,9 +339,9 @@ int apic_disable_timer(void)
|
|||
int apic_enable_timer(void)
|
||||
{
|
||||
if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
|
||||
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
|
||||
lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
|
||||
lapic_write(APIC_ICR, icr);
|
||||
|
||||
lapic_timer_periodic();
|
||||
lapic_timer_set_counter(icr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -376,12 +405,15 @@ static int lapic_reset(void)
|
|||
|
||||
lapic_write(APIC_SVR, 0x17F); // enable the apic and connect to the idt entry 127
|
||||
lapic_write(APIC_TPR, 0x00); // allow all interrupts
|
||||
#ifdef DYNAMIC_TICKS
|
||||
lapic_timer_disable();
|
||||
#else
|
||||
if (icr) {
|
||||
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
|
||||
lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
|
||||
lapic_write(APIC_ICR, icr);
|
||||
lapic_timer_periodic();
|
||||
lapic_timer_set_counter(icr);
|
||||
} else
|
||||
lapic_write(APIC_LVT_T, 0x10000); // disable timer interrupt
|
||||
lapic_timer_disable();
|
||||
#endif
|
||||
if (max_lvt >= 4)
|
||||
lapic_write(APIC_LVT_TSR, 0x10000); // disable thermal sensor interrupt
|
||||
if (max_lvt >= 5)
|
||||
|
@ -522,87 +554,67 @@ int smp_init(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
// How many ticks are used to calibrate the APIC timer
|
||||
#define APIC_TIMER_CALIBRATION_TICKS (3)
|
||||
|
||||
/*
|
||||
* detects the timer frequency of the APIC and restart
|
||||
* detects the timer frequency of the APIC and restarts
|
||||
* the APIC timer with the correct period
|
||||
*/
|
||||
int apic_calibration(void)
|
||||
{
|
||||
uint32_t flags;
|
||||
uint64_t ticks, old;
|
||||
uint8_t flags;
|
||||
uint64_t cycles, old, diff;
|
||||
|
||||
if (BUILTIN_EXPECT(!lapic, 0))
|
||||
return -ENXIO;
|
||||
|
||||
if (!is_single_kernel()) {
|
||||
uint64_t diff, wait = (uint64_t)cpu_freq * 3000000ULL / (uint64_t)TIMER_FREQ;
|
||||
const uint64_t cpu_freq_hz = (uint64_t) get_cpu_frequency() * 1000000ULL;
|
||||
const uint64_t cycles_per_tick = cpu_freq_hz / (uint64_t) TIMER_FREQ;
|
||||
const uint64_t wait_cycles = cycles_per_tick * APIC_TIMER_CALIBRATION_TICKS;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
|
||||
lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
|
||||
lapic_write(APIC_ICR, 0xFFFFFFFFUL);
|
||||
irq_nested_enable(flags);
|
||||
// disable interrupts to increase calibration accuracy
|
||||
flags = irq_nested_disable();
|
||||
|
||||
// start timer with max. counter value
|
||||
const uint32_t initial_counter = 0xFFFFFFFF;
|
||||
|
||||
lapic_timer_oneshot();
|
||||
lapic_timer_set_counter(initial_counter);
|
||||
|
||||
rmb();
|
||||
old = get_rdtsc();
|
||||
|
||||
do {
|
||||
rmb();
|
||||
old = rdtsc();
|
||||
cycles = get_rdtsc();
|
||||
diff = cycles > old ? cycles - old : old - cycles;
|
||||
} while(diff < wait_cycles);
|
||||
|
||||
do {
|
||||
rmb();
|
||||
ticks = rdtsc();
|
||||
diff = ticks > old ? ticks - old : old - ticks;
|
||||
} while(diff < wait);
|
||||
// Calculate timer increments for desired tick frequency
|
||||
icr = (initial_counter - lapic_read(APIC_CCR)) / APIC_TIMER_CALIBRATION_TICKS;
|
||||
irq_nested_enable(flags);
|
||||
|
||||
icr = (0xFFFFFFFFUL - lapic_read(APIC_CCR)) / 3;
|
||||
kprintf("APIC calibration determined already an ICR of 0x%x\n", icr);
|
||||
lapic_reset();
|
||||
|
||||
flags = irq_nested_disable();
|
||||
lapic_reset();
|
||||
initialized = 1;
|
||||
irq_nested_enable(flags);
|
||||
|
||||
atomic_int32_inc(&cpu_online);
|
||||
|
||||
return 0;
|
||||
}
|
||||
kprintf("APIC calibration determined an ICR of 0x%x\n", icr);
|
||||
|
||||
apic_initialized = 1;
|
||||
atomic_int32_inc(&cpu_online);
|
||||
|
||||
old = get_clock_tick();
|
||||
if(is_single_kernel()) {
|
||||
// Now, HermitCore is able to use the APIC => Therefore, we disable the PIC
|
||||
outportb(0xA1, 0xFF);
|
||||
outportb(0x21, 0xFF);
|
||||
}
|
||||
|
||||
/* wait for the next time slice */
|
||||
while ((ticks = get_clock_tick()) - old == 0)
|
||||
PAUSE;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
|
||||
lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
|
||||
lapic_write(APIC_ICR, 0xFFFFFFFFUL);
|
||||
irq_nested_enable(flags);
|
||||
|
||||
/* wait 3 time slices to determine a ICR */
|
||||
while (get_clock_tick() - ticks < 3)
|
||||
PAUSE;
|
||||
|
||||
icr = (0xFFFFFFFFUL - lapic_read(APIC_CCR)) / 3;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
lapic_reset();
|
||||
irq_nested_enable(flags);
|
||||
|
||||
// Now, HermitCore is able to use the APIC => Therefore, we disable the PIC
|
||||
outportb(0xA1, 0xFF);
|
||||
outportb(0x21, 0xFF);
|
||||
|
||||
kprintf("APIC calibration determines an ICR of 0x%x\n", icr);
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
// only the single-kernel maintain the IOAPIC
|
||||
if (ioapic) {
|
||||
uint32_t max_entry = ioapic_max_redirection_entry();
|
||||
// only the single-kernel maintains the IOAPIC
|
||||
if (ioapic && is_single_kernel()) {
|
||||
uint8_t max_entry = ioapic_max_redirection_entry();
|
||||
|
||||
// now lets turn everything else on
|
||||
for(uint32_t i=0; i<=max_entry; i++) {
|
||||
for(uint8_t i = 0; i <= max_entry; i++) {
|
||||
if (i != 2)
|
||||
ioapic_inton(i, apic_processors[boot_processor]->id);
|
||||
}
|
||||
|
@ -611,11 +623,9 @@ int apic_calibration(void)
|
|||
ioapic_intoff(2, apic_processors[boot_processor]->id);
|
||||
}
|
||||
|
||||
initialized = 1;
|
||||
#if MAX_CORES > 1
|
||||
smp_init();
|
||||
#endif
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return 0;
|
||||
}
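
To make the calibration arithmetic concrete, here is a worked example with assumed numbers (they are not taken from the patch):

```
/* Assume get_cpu_frequency() = 2000 (MHz) and TIMER_FREQ = 100 Hz.
 *
 *   cpu_freq_hz     = 2000 * 1,000,000    = 2,000,000,000 cycles/s
 *   cycles_per_tick = 2,000,000,000 / 100 =    20,000,000 cycles
 *   wait_cycles     = 20,000,000 * 3      =    60,000,000 cycles (3 ticks)
 *
 * While the code busy-waits for wait_cycles TSC cycles, the APIC timer counts
 * down from 0xFFFFFFFF; the consumed count divided by the 3 calibration ticks
 * becomes icr, i.e. the APIC counter value corresponding to one tick, which is
 * later reloaded by apic_enable_timer() and apic_timer_deadline(). */
```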
|
||||
|
|
|
@ -354,13 +354,27 @@ isrstub_pseudo_error 9
|
|||
%assign i i+1
|
||||
%endrep
|
||||
|
||||
; Create entries for the interrupts 80 to 81
|
||||
; Create entries for the interrupts 80 to 82
|
||||
%assign i 80
|
||||
%rep 2
|
||||
%rep 3
|
||||
irqstub i
|
||||
%assign i i+1
|
||||
%endrep
|
||||
|
||||
global wakeup
|
||||
align 64
|
||||
wakeup:
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 121
|
||||
jmp common_stub
|
||||
|
||||
global mmnif_irq
|
||||
align 64
|
||||
mmnif_irq:
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 122
|
||||
jmp common_stub
|
||||
|
||||
global apic_timer
|
||||
align 64
|
||||
apic_timer:
|
||||
|
@ -396,20 +410,6 @@ apic_svr:
|
|||
push byte 127
|
||||
jmp common_stub
|
||||
|
||||
global wakeup
|
||||
align 64
|
||||
wakeup:
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 121
|
||||
jmp common_stub
|
||||
|
||||
global mmnif_irq
|
||||
align 64
|
||||
mmnif_irq:
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 122
|
||||
jmp common_stub
|
||||
|
||||
extern irq_handler
|
||||
extern get_current_stack
|
||||
extern finish_task_switch
|
||||
|
@ -493,6 +493,78 @@ isrsyscall:
|
|||
o64 sysret
|
||||
%endif
|
||||
|
||||
global getcontext
|
||||
align 64
|
||||
getcontext:
|
||||
cli
|
||||
; save general purpose registers
|
||||
mov QWORD [rdi + 0x00], r15
|
||||
mov QWORD [rdi + 0x08], r14
|
||||
mov QWORD [rdi + 0x10], r13
|
||||
mov QWORD [rdi + 0x18], r12
|
||||
mov QWORD [rdi + 0x20], r9
|
||||
mov QWORD [rdi + 0x28], r8
|
||||
mov QWORD [rdi + 0x30], rdi
|
||||
mov QWORD [rdi + 0x38], rsi
|
||||
mov QWORD [rdi + 0x40], rbp
|
||||
mov QWORD [rdi + 0x48], rbx
|
||||
mov QWORD [rdi + 0x50], rdx
|
||||
mov QWORD [rdi + 0x58], rcx
|
||||
lea rax, [rsp + 0x08]
|
||||
mov QWORD [rdi + 0x60], rax
|
||||
mov rax, QWORD [rsp]
|
||||
mov QWORD [rdi + 0x68], rax
|
||||
; save FPU state
|
||||
fnstenv [rdi + 0x74]
|
||||
fldenv [rdi + 0x74]
|
||||
lea rax, [rdi + 0x70]
|
||||
stmxcsr [rax]
|
||||
xor rax, rax
|
||||
sti
|
||||
ret
|
||||
|
||||
global setcontext
|
||||
align 64
|
||||
setcontext:
|
||||
cli
|
||||
; restore FPU state
|
||||
fldenv [rdi + 0x74]
|
||||
lea rax, [rdi + 0x70]
|
||||
ldmxcsr [rax]
|
||||
; restore general purpose registers
|
||||
mov r15, QWORD [rdi + 0x00]
|
||||
mov r14, QWORD [rdi + 0x08]
|
||||
mov r13, QWORD [rdi + 0x10]
|
||||
mov r12, QWORD [rdi + 0x18]
|
||||
mov r9, QWORD [rdi + 0x20]
|
||||
mov r8, QWORD [rdi + 0x28]
|
||||
mov rdi, QWORD [rdi + 0x30]
|
||||
mov rsi, QWORD [rdi + 0x38]
|
||||
mov rbp, QWORD [rdi + 0x40]
|
||||
mov rbx, QWORD [rdi + 0x48]
|
||||
mov rdx, QWORD [rdi + 0x50]
|
||||
mov rcx, QWORD [rdi + 0x58]
|
||||
mov rsp, QWORD [rdi + 0x60]
|
||||
push QWORD [rdi + 0x68]
|
||||
xor rax, rax
|
||||
sti
|
||||
ret
|
||||
|
||||
global __startcontext
|
||||
align 64
|
||||
__startcontext:
|
||||
mov rsp, rbx
|
||||
pop rdi
|
||||
cmp rdi, 0
|
||||
je Lno_context
|
||||
|
||||
call setcontext
|
||||
|
||||
Lno_context:
|
||||
extern exit
|
||||
call exit
|
||||
jmp $
|
||||
|
||||
global switch_context
|
||||
align 64
|
||||
switch_context:
|
||||
|
@ -506,8 +578,8 @@ switch_context:
|
|||
pushfq ; RFLAGS
|
||||
push QWORD 0x08 ; CS
|
||||
push QWORD rollback ; RIP
|
||||
push QWORD 0x00 ; Interrupt number
|
||||
push QWORD 0x00edbabe ; Error code
|
||||
push QWORD 0x00 ; Interrupt number
|
||||
push rax
|
||||
push rcx
|
||||
push rdx
|
||||
|
@ -674,6 +746,34 @@ is_single_kernel:
|
|||
mov eax, DWORD [single_kernel]
|
||||
ret
|
||||
|
||||
|
||||
global sighandler_epilog
|
||||
sighandler_epilog:
|
||||
; restore only those registers that might have changed between returning
|
||||
; from IRQ and execution of signal handler
|
||||
add rsp, 2 * 8 ; ignore fs, gs
|
||||
pop r15
|
||||
pop r14
|
||||
pop r13
|
||||
pop r12
|
||||
pop r11
|
||||
pop r10
|
||||
pop r9
|
||||
pop r8
|
||||
pop rdi
|
||||
pop rsi
|
||||
pop rbp
|
||||
add rsp, 8 ; ignore rsp
|
||||
pop rbx
|
||||
pop rdx
|
||||
pop rcx
|
||||
pop rax
|
||||
add rsp, 4 * 8 ; ignore int_no, error, rip, cs
|
||||
popfq
|
||||
add rsp, 2 * 8 ; ignore userrsp, ss
|
||||
|
||||
jmp [rsp - 5 * 8] ; jump to rip from saved state
|
||||
|
||||
SECTION .data
|
||||
|
||||
align 4096
|
||||
|
|
|
@ -74,6 +74,7 @@ extern void irq22(void);
|
|||
extern void irq23(void);
|
||||
extern void irq80(void);
|
||||
extern void irq81(void);
|
||||
extern void irq82(void);
|
||||
extern void apic_timer(void);
|
||||
extern void apic_lint0(void);
|
||||
extern void apic_lint1(void);
|
||||
|
@ -90,7 +91,7 @@ extern void mmnif_irq(void);
|
|||
* This array is actually an array of function pointers. We use
|
||||
* this to handle custom IRQ handlers for a given IRQ
|
||||
*/
|
||||
static void* irq_routines[MAX_HANDLERS] = {[0 ... MAX_HANDLERS-1] = NULL};
|
||||
static irq_handler_t irq_routines[MAX_HANDLERS] = {[0 ... MAX_HANDLERS-1] = NULL};
|
||||
static uint64_t irq_counter[MAX_CORES][MAX_HANDLERS] = {[0 ... MAX_CORES-1][0 ... MAX_HANDLERS-1] = 0};
|
||||
#ifdef MEASURE_IRQ
|
||||
static int go = 0;
|
||||
|
@ -230,6 +231,8 @@ static int irq_install(void)
|
|||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP, 1);
|
||||
idt_set_gate(113, (size_t)irq81, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP, 1);
|
||||
idt_set_gate(114, (size_t)irq82, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP, 1);
|
||||
|
||||
idt_set_gate(121, (size_t)wakeup, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP, 1);
|
||||
|
@ -278,40 +281,41 @@ int irq_init(void)
|
|||
*/
|
||||
size_t** irq_handler(struct state *s)
|
||||
{
|
||||
size_t** ret = NULL;
|
||||
#ifdef MEASURE_IRQ
|
||||
uint64_t diff = 0;
|
||||
#endif
|
||||
|
||||
/* This is a blank function pointer */
|
||||
void (*handler) (struct state * s);
|
||||
|
||||
#ifdef MEASURE_IRQ
|
||||
if (go)
|
||||
diff = rdtsc();
|
||||
#endif
|
||||
|
||||
size_t** ret = NULL;
|
||||
|
||||
if(BUILTIN_EXPECT(s->int_no >= MAX_HANDLERS, 0)) {
|
||||
kprintf("[%d] Invalid IRQ number %d\n", CORE_ID, s->int_no);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
irq_counter[CORE_ID][s->int_no]++;
|
||||
|
||||
check_workqueues_in_irqhandler(s->int_no);
|
||||
|
||||
/*
|
||||
* Find out if we have a custom handler to run for this
|
||||
* IRQ and then finally, run it
|
||||
*/
|
||||
if (BUILTIN_EXPECT(s->int_no < MAX_HANDLERS, 1)) {
|
||||
handler = irq_routines[s->int_no];
|
||||
if (handler)
|
||||
handler(s);
|
||||
else
|
||||
kprintf("Unhandle IRQ %d\n", s->int_no);
|
||||
} else kprintf("Invalid interrupt number %d\n", s->int_no);
|
||||
// Find out if we have a custom handler to run for this IRQ and run it
|
||||
irq_handler_t handler = irq_routines[s->int_no];
|
||||
|
||||
// timer interrupt?
|
||||
if ((s->int_no == 32) || (s->int_no == 123))
|
||||
ret = scheduler(); // switch to a new task
|
||||
else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
|
||||
if (handler) {
|
||||
handler(s);
|
||||
} else {
|
||||
kprintf("[%d] Unhandled IRQ %d\n", CORE_ID, s->int_no);
|
||||
}
|
||||
|
||||
// Check if timers have expired that would unblock tasks
|
||||
check_workqueues_in_irqhandler((int) s->int_no);
|
||||
|
||||
if ((s->int_no == 32) || (s->int_no == 123)) {
|
||||
// a timer interrupt may have caused unblocking of tasks
|
||||
ret = scheduler();
|
||||
} else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio)) {
|
||||
// there's a ready task with higher priority
|
||||
ret = scheduler();
|
||||
}
|
||||
|
||||
apic_eoi(s->int_no);
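
Custom handlers stored in irq_routines[] are registered with irq_install_handler(), as timer.c does for the wakeup interrupt. A registration sketch; the device handler and its vector are illustrative:

```
static void my_device_handler(struct state* s)
{
	/* runs with the register snapshot of the interrupted context */
	kprintf("[%d] device interrupt %d\n", CORE_ID, s->int_no);
}

static int my_device_init(void)
{
	/* assumed vector 40; mirrors irq_install_handler(121, wakeup_handler) in timer.c */
	irq_install_handler(40, my_device_handler);
	return 0;
}
```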
|
||||
|
||||
|
|
hermit/arch/x86/kernel/syscall.c (new file, 98 lines)
|
@ -0,0 +1,98 @@
|
|||
/*
|
||||
* Copyright (c) 2016, Stefan Lankes, RWTH Aachen University
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* * Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this
|
||||
* software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <hermit/stddef.h>
|
||||
#include <hermit/stdio.h>
|
||||
#include <hermit/tasks.h>
|
||||
#include <hermit/errno.h>
|
||||
#include <hermit/syscall.h>
|
||||
|
||||
void __startcontext(void);
|
||||
|
||||
void makecontext(ucontext_t *ucp, void (*func)(), int argc, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
if (BUILTIN_EXPECT(!ucp, 0))
|
||||
return;
|
||||
|
||||
//kprintf("sys_makecontext %p, func %p, stack 0x%zx, task %d\n", ucp, func, ucp->uc_stack.ss_sp, per_core(current_task)->id);
|
||||
|
||||
size_t* stack = (size_t*) (ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size);
|
||||
stack -= (argc > 6 ? argc - 6 : 0) + 1;
|
||||
uint32_t idx = (argc > 6 ? argc - 6 : 0) + 1;
|
||||
|
||||
/* Align stack and reserve space for trampoline address. */
|
||||
stack = (size_t*) ((((size_t) stack) & ~0xFULL) - 0x8);
|
||||
|
||||
/* Setup context */
|
||||
ucp->uc_mregs.rip = (size_t) func;
|
||||
ucp->uc_mregs.rbx = (size_t) &stack[idx];
|
||||
ucp->uc_mregs.rsp = (size_t) stack;
|
||||
|
||||
stack[0] = (size_t) &__startcontext;
|
||||
stack[idx] = (size_t) ucp->uc_link; // link to the next context
|
||||
|
||||
va_start(ap, argc);
|
||||
for (int i = 0; i < argc; i++)
|
||||
{
|
||||
switch (i)
|
||||
{
|
||||
case 0:
|
||||
ucp->uc_mregs.rdi = va_arg(ap, size_t);
|
||||
break;
|
||||
case 1:
|
||||
ucp->uc_mregs.rsi = va_arg(ap, size_t);
|
||||
break;
|
||||
case 2:
|
||||
ucp->uc_mregs.rdx = va_arg(ap, size_t);
|
||||
break;
|
||||
case 3:
|
||||
ucp->uc_mregs.rcx = va_arg(ap, size_t);
|
||||
break;
|
||||
case 4:
|
||||
ucp->uc_mregs.r8 = va_arg(ap, size_t);
|
||||
break;
|
||||
case 5:
|
||||
ucp->uc_mregs.r9 = va_arg(ap, size_t);
|
||||
break;
|
||||
default:
|
||||
/* copy value on stack */
|
||||
stack[i - 5] = va_arg(ap, size_t);
|
||||
break;
|
||||
}
|
||||
}
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
int swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
|
||||
{
|
||||
//TODO: implementation is missing
|
||||
|
||||
kprintf("WARNING: sys_swapcontext is currently not implemented: %p <=> %p\n", oucp, ucp);
|
||||
return -ENOSYS;
|
||||
}
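
A usage sketch for the new context API; the stack size, the function names, and the assumption that `ucontext_t`, `getcontext()` and `setcontext()` are declared in `<hermit/syscall.h>` are illustrative:

```
#include <hermit/stdio.h>
#include <hermit/syscall.h>

static char demo_stack[8192];
static ucontext_t demo_ctx;

static void demo_entry(void)
{
	kprintf("running inside the new context\n");
	/* on return, __startcontext finds uc_link == NULL and calls exit() */
}

void context_demo(void)
{
	getcontext(&demo_ctx);			/* capture a valid register/FPU snapshot */
	demo_ctx.uc_stack.ss_sp   = demo_stack;	/* private stack for the new context */
	demo_ctx.uc_stack.ss_size = sizeof(demo_stack);
	demo_ctx.uc_link          = NULL;	/* no successor context */
	makecontext(&demo_ctx, demo_entry, 0);	/* let it start in demo_entry() */
	setcontext(&demo_ctx);			/* switch; swapcontext() is not implemented yet */
}
```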
|
|
@ -46,6 +46,7 @@ extern int32_t boot_processor;
|
|||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
DEFINE_PER_CORE(uint64_t, last_rdtsc, 0);
|
||||
uint64_t boot_tsc = 0;
|
||||
|
||||
void check_ticks(void)
|
||||
{
|
||||
|
@ -53,28 +54,17 @@ void check_ticks(void)
|
|||
if (!cpu_freq)
|
||||
return;
|
||||
|
||||
if (has_rdtscp()){
|
||||
uint64_t curr_rdtsc = rdtscp(NULL);
|
||||
uint64_t diff;
|
||||
const uint64_t curr_rdtsc = has_rdtscp() ? rdtscp(NULL) : rdtsc();
|
||||
rmb();
|
||||
|
||||
rmb();
|
||||
diff = ((curr_rdtsc - per_core(last_rdtsc)) * (uint64_t)TIMER_FREQ) / (1000000ULL*(uint64_t)get_cpu_frequency());
|
||||
if (diff > 0) {
|
||||
set_per_core(timer_ticks, per_core(timer_ticks) + diff);
|
||||
set_per_core(last_rdtsc, curr_rdtsc);
|
||||
rmb();
|
||||
}
|
||||
} else {
|
||||
uint64_t curr_rdtsc = rdtsc();
|
||||
uint64_t diff;
|
||||
const uint64_t diff_cycles = curr_rdtsc - per_core(last_rdtsc);
|
||||
const uint64_t cpu_freq_hz = 1000000ULL * (uint64_t) get_cpu_frequency();
|
||||
const uint64_t diff_ticks = (diff_cycles * (uint64_t) TIMER_FREQ) / cpu_freq_hz;
|
||||
|
||||
if (diff_ticks > 0) {
|
||||
set_per_core(timer_ticks, per_core(timer_ticks) + diff_ticks);
|
||||
set_per_core(last_rdtsc, curr_rdtsc);
|
||||
rmb();
|
||||
diff = ((curr_rdtsc - per_core(last_rdtsc)) * (uint64_t)TIMER_FREQ) / (1000000ULL*(uint64_t)get_cpu_frequency());
|
||||
if (diff > 0) {
|
||||
set_per_core(timer_ticks, per_core(timer_ticks) + diff);
|
||||
set_per_core(last_rdtsc, curr_rdtsc);
|
||||
rmb();
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
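
The tick bookkeeping in check_ticks() converts elapsed TSC cycles into timer ticks. A worked example with assumed numbers:

```
/* Assume get_cpu_frequency() = 2400 (MHz) and TIMER_FREQ = 100 Hz.
 *
 *   cpu_freq_hz = 2400 * 1,000,000 = 2,400,000,000 cycles/s
 *   48,000,000 elapsed cycles  =>  48,000,000 * 100 / 2,400,000,000 = 2 ticks
 *
 * timer_ticks is then advanced by 2 and last_rdtsc is set to the current
 * TSC value before the next comparison. */
```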
|
||||
|
@ -182,6 +172,14 @@ static int pit_init(void)
|
|||
*/
|
||||
int timer_init(void)
|
||||
{
|
||||
#ifdef DYNAMIC_TICKS
|
||||
if (boot_tsc)
|
||||
{
|
||||
set_per_core(last_rdtsc, boot_tsc);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Installs 'timer_handler' for the PIC and APIC timer,
|
||||
* only one handler will be later used.
|
||||
|
@ -191,10 +189,8 @@ int timer_init(void)
|
|||
irq_install_handler(121, wakeup_handler);
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
if (has_rdtscp())
|
||||
last_rdtsc = rdtscp(NULL);
|
||||
else
|
||||
last_rdtsc = rdtsc();
|
||||
boot_tsc = has_rdtscp() ? rdtscp(NULL) : rdtsc();
|
||||
set_per_core(last_rdtsc, boot_tsc);
|
||||
#endif
|
||||
|
||||
if (cpu_freq) // do we need to configure the timer?
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
*/
|
||||
|
||||
#define UART_RX 0 /* In: Receive buffer */
|
||||
#define UART_IIR 2 /* In: Interrupt ID Register */
|
||||
#define UART_IIR 2 /* In: Interrupt ID Register */
|
||||
#define UART_TX 0 /* Out: Transmit buffer */
|
||||
#define UART_IER 1 /* Out: Interrupt Enable Register */
|
||||
#define UART_FCR 2 /* Out: FIFO Control Register */
|
||||
|
@ -58,26 +58,26 @@
|
|||
#define UART_LCR 3 /* Out: Line Control Register */
|
||||
#define UART_LSR 5 /* Line Status Register */
|
||||
|
||||
#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */
|
||||
#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */
|
||||
#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */
|
||||
#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */
|
||||
#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */
|
||||
#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */
|
||||
#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */
|
||||
#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */
|
||||
|
||||
#define UART_IIR_NO_INT 0x01 /* No interrupts pending */
|
||||
#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
|
||||
#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
|
||||
#define UART_IIR_MSI 0x00 /* Modem status interrupt */
|
||||
#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */
|
||||
#define UART_IIR_RDI 0x04 /* Receiver data interrupt */
|
||||
#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
|
||||
|
||||
#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
|
||||
#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
|
||||
#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
|
||||
#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
|
||||
#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
|
||||
#define UART_FCR_TRIGGER_MASK 0xC0 /* Mask for the FIFO trigger range */
|
||||
#define UART_FCR_TRIGGER_1 0x00 /* Trigger RDI at FIFO level 1 byte */
|
||||
#define UART_FCR_TRIGGER_4 0x40 /* Trigger RDI at FIFO level 4 byte */
|
||||
#define UART_FCR_TRIGGER_8 0x80 /* Trigger RDI at FIFO level 8 byte */
|
||||
#define UART_FCR_TRIGGER_14 0xc0 /* Trigger RDI at FIFO level 14 byte*/
|
||||
#define UART_FCR_TRIGGER_1 0x00 /* Trigger RDI at FIFO level 1 byte */
|
||||
#define UART_FCR_TRIGGER_4 0x40 /* Trigger RDI at FIFO level 4 byte */
|
||||
#define UART_FCR_TRIGGER_8 0x80 /* Trigger RDI at FIFO level 8 byte */
|
||||
#define UART_FCR_TRIGGER_14 0xc0 /* Trigger RDI at FIFO level 14 byte*/
|
||||
|
||||
|
||||
#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
|
||||
|
@ -103,11 +103,11 @@ static size_t iobase = 0;
|
|||
|
||||
static inline unsigned char read_from_uart(uint32_t off)
|
||||
{
|
||||
uint8_t c;
|
||||
uint8_t c = 0;
|
||||
|
||||
if (mmio)
|
||||
c = *((const volatile unsigned char*) (iobase + off));
|
||||
else
|
||||
else if (iobase)
|
||||
c = inportb(iobase + off);
|
||||
|
||||
return c;
|
||||
|
@ -117,7 +117,7 @@ static void write_to_uart(uint32_t off, unsigned char c)
|
|||
{
|
||||
if (mmio)
|
||||
*((volatile unsigned char*) (iobase + off)) = c;
|
||||
else
|
||||
else if (iobase)
|
||||
outportb(iobase + off, c);
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ static unsigned char uart_getchar(void)
|
|||
/* Puts a single character on a serial device */
|
||||
int uart_putchar(unsigned char c)
|
||||
{
|
||||
if (!iobase)
|
||||
if (!iobase && !mmio)
|
||||
return 0;
|
||||
|
||||
write_to_uart(UART_TX, c);
|
||||
|
@ -144,7 +144,7 @@ int uart_puts(const char *text)
|
|||
{
|
||||
size_t i, len = strlen(text);
|
||||
|
||||
if (!iobase)
|
||||
if (!iobase && !mmio)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
|
|
|
@ -98,6 +98,14 @@ size_t virt_to_phys(size_t addr)
|
|||
return phy | off;
|
||||
}
|
||||
|
||||
/*
|
||||
* get memory page size
|
||||
*/
|
||||
int getpagesize(void)
|
||||
{
|
||||
return PAGE_SIZE;
|
||||
}
|
||||
|
||||
//TODO: code is missing
|
||||
int page_set_flags(size_t viraddr, uint32_t npages, int flags)
|
||||
{
|
||||
|
@ -173,7 +181,8 @@ out:
|
|||
|
||||
int page_unmap(size_t viraddr, size_t npages)
|
||||
{
|
||||
uint8_t ipi = 0;
|
||||
if (BUILTIN_EXPECT(!npages, 0))
|
||||
return 0;
|
||||
|
||||
spinlock_irqsave_lock(&page_lock);
|
||||
|
||||
|
@ -183,11 +192,9 @@ int page_unmap(size_t viraddr, size_t npages)
|
|||
for (vpn=start; vpn<start+npages; vpn++) {
|
||||
self[0][vpn] = 0;
|
||||
tlb_flush_one_page(vpn << PAGE_BITS, 0);
|
||||
ipi = 1;
|
||||
}
|
||||
|
||||
if (ipi)
|
||||
ipi_tlb_flush();
|
||||
ipi_tlb_flush();
|
||||
|
||||
spinlock_irqsave_unlock(&page_lock);
|
||||
|
||||
|
|
hermit/include/hermit/dequeue.h (new file, 122 lines)
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Copyright (c) 2016, Daniel Krebs, RWTH Aachen University
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* * Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this
|
||||
* software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @author Daniel Krebs
|
||||
* @file include/hermit/dequeue.h
|
||||
* @brief Double-ended queue implementation
|
||||
*/
|
||||
|
||||
#include <hermit/stddef.h>
|
||||
#include <hermit/spinlock.h>
|
||||
#include <hermit/errno.h>
|
||||
#include <string.h>
|
||||
|
||||
#ifndef __DEQUEUE_H__
|
||||
#define __DEQUEUE_H__
|
||||
|
||||
#define NOT_NULL(dequeue) do { \
|
||||
if(BUILTIN_EXPECT(!dequeue, 0)) { \
|
||||
return -EINVAL; \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
typedef struct _dequeue_t {
|
||||
size_t front; ///< point to first used entry
|
||||
size_t back; ///< point to first unused entry
|
||||
spinlock_t lock; ///< make dequeue thread safe
|
||||
char* buffer; ///< pointer to buffer that holds elements
|
||||
size_t buffer_length; ///< number of elements buffer can hold
|
||||
size_t element_size; ///< size of one element in buffer
|
||||
} dequeue_t;
|
||||
|
||||
static inline int
|
||||
dequeue_init(dequeue_t* dequeue, void* buffer, size_t buffer_length, size_t element_size)
|
||||
{
|
||||
NOT_NULL(dequeue);
|
||||
|
||||
dequeue->front = 0;
|
||||
dequeue->back = 0;
|
||||
|
||||
dequeue->buffer = buffer;
|
||||
dequeue->buffer_length = buffer_length;
|
||||
dequeue->element_size = element_size;
|
||||
|
||||
spinlock_init(&dequeue->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
dequeue_push(dequeue_t* dequeue, void* v)
|
||||
{
|
||||
NOT_NULL(dequeue);
|
||||
|
||||
spinlock_lock(&dequeue->lock);
|
||||
|
||||
size_t new_back = (dequeue->back + 1) % dequeue->buffer_length;
|
||||
if(new_back == dequeue->front) {
|
||||
spinlock_unlock(&dequeue->lock);
|
||||
return -EOVERFLOW;
|
||||
}
|
||||
|
||||
memcpy(&dequeue->buffer[dequeue->back * dequeue->element_size],
|
||||
v,
|
||||
dequeue->element_size);
|
||||
|
||||
dequeue->back = new_back;
|
||||
|
||||
spinlock_unlock(&dequeue->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
dequeue_pop(dequeue_t* dequeue, void* out)
|
||||
{
|
||||
|
||||
NOT_NULL(dequeue);
|
||||
NOT_NULL(out);
|
||||
|
||||
spinlock_lock(&dequeue->lock);
|
||||
|
||||
if(dequeue->front == dequeue->back) {
|
||||
spinlock_unlock(&dequeue->lock);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
memcpy(out,
|
||||
&dequeue->buffer[dequeue->front * dequeue->element_size],
|
||||
dequeue->element_size);
|
||||
|
||||
dequeue->front = (dequeue->front + 1) % dequeue->buffer_length;
|
||||
|
||||
spinlock_unlock(&dequeue->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // __DEQUEUE_H__
|
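Despite the "double-ended queue" name, the structure above behaves as a fixed-capacity FIFO ring buffer protected by a spinlock: `dequeue_push()` returns `-EOVERFLOW` once only one free slot is left (a buffer of length N therefore holds at most N-1 elements), and `dequeue_pop()` returns `-ENOENT` when the queue is empty. A minimal usage sketch based only on the header above; the element type, queue length and demo function are made up for illustration:

```c
#include <hermit/dequeue.h>
#include <hermit/stdio.h>

#define EVENT_QUEUE_LEN 8          /* hypothetical capacity: holds up to 7 events */

typedef struct {                   /* hypothetical element type */
	int code;
	int payload;
} event_t;

static dequeue_t event_queue;
static event_t   event_buffer[EVENT_QUEUE_LEN];

static void event_queue_demo(void)
{
	/* bind the statically allocated buffer to the queue */
	dequeue_init(&event_queue, event_buffer, EVENT_QUEUE_LEN, sizeof(event_t));

	event_t in = { .code = 1, .payload = 42 };
	if (dequeue_push(&event_queue, &in) < 0)
		kprintf("queue full, event dropped\n");

	event_t out;
	while (dequeue_pop(&event_queue, &out) == 0)
		kprintf("got event %d/%d\n", out.code, out.payload);
}
```

signal.c below uses exactly this pattern, with `sig_t` elements and one statically allocated queue per core.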
76 hermit/include/hermit/signal.h (new file)

@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2016, Daniel Krebs, RWTH Aachen University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University nor the names of its contributors
 *      may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @author Daniel Krebs
 * @file include/hermit/signal.h
 * @brief Signal related functions
 */

#ifndef __SIGNAL_H__
#define __SIGNAL_H__

#ifdef __cplusplus
extern "C" {
#endif

#include <hermit/stddef.h>
#include <hermit/semaphore_types.h>

#define MAX_SIGNALS 32

typedef void (*signal_handler_t)(int);

// This is used in dequeue.h (HACK)
typedef struct _sig {
	tid_t	dest;
	int	signum;
} sig_t;

/** @brief Send signal to kernel task
 *
 * @param dest Send signal to this task
 * @param signum Signal number
 * @return
 * - 0 on success
 * - -ENOENT (-2) if task not found
 */
int hermit_kill(tid_t dest, int signum);

/** @brief Register signal handler
 *
 * @param handler Signal handler
 * @return
 * - 0 on success
 */
int hermit_signal(signal_handler_t handler);

#ifdef __cplusplus
}
#endif

#endif // __SIGNAL_H__
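Together, the two entry points give tasks a minimal, POSIX-flavoured signalling facility: a task registers a single handler for all signal numbers with `hermit_signal()`, and any task can then be targeted by its task ID with `hermit_kill()`. A usage sketch under stated assumptions: the worker signature, the `NORMAL_PRIO` constant and the busy-wait loop are illustrative and not part of this header.

```c
#include <hermit/signal.h>
#include <hermit/tasks.h>
#include <hermit/stdio.h>

static volatile int got_signal = 0;

/* one handler receives every signal number sent to this task */
static void my_handler(int signum)
{
	kprintf("received signal %d\n", signum);
	got_signal = signum;
}

static int worker(void* arg)
{
	hermit_signal(my_handler);      /* register handler for the current task */

	while (!got_signal)
		reschedule();               /* wait; the handler is injected asynchronously */

	return 0;
}

static void signal_demo(void)
{
	tid_t id;

	create_kernel_task(&id, worker, NULL, NORMAL_PRIO);

	/* deliver signal 7 to the worker; returns -ENOENT if the task is gone */
	if (hermit_kill(id, 7) < 0)
		kprintf("no such task\n");
}
```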
|
@@ -173,13 +173,13 @@ inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
	return 0;
}

/** @brief Unlock an irqsave spinlock on exit of critical section
/** @brief Lock spinlock on entry of critical section and disable interrupts
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
	uint32_t flags;
	uint8_t flags;
	int32_t ticket;

	if (BUILTIN_EXPECT(!s, 0))

@@ -191,14 +191,10 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
		return 0;
	}

#if 1
	ticket = atomic_int32_inc(&s->queue);
	while (atomic_int32_read(&s->dequeue) != ticket) {
		PAUSE;
	}
#else
	while( atomic_int32_test_and_set(&s->dequeue,0) );
#endif

	s->coreid = CORE_ID;
	s->flags = flags;

@@ -207,13 +203,13 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
	return 0;
}

/** @brief Unlock irqsave spinlock on exit of critical section and re-enable interrupts
/** @brief Unlock spinlock on exit of critical section and re-enable interrupts
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
	uint32_t flags;
	uint8_t flags;

	if (BUILTIN_EXPECT(!s, 0))
		return -EINVAL;

@@ -223,11 +219,9 @@ inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
	flags = s->flags;
	s->coreid = (uint32_t) -1;
	s->flags = 0;
#if 1

	atomic_int32_inc(&s->dequeue);
#else
	atomic_int32_set(&s->dequeue,1);
#endif

	irq_nested_enable(flags);
}
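The retained branch of the `#if` above is a ticket lock: `queue` hands out tickets, `dequeue` announces whose turn it is, and unlock simply advances `dequeue`, which gives FIFO fairness between contending cores instead of the unfair test-and-set variant that is being dropped. A standalone sketch of the same scheme, using C11 atomics in place of HermitCore's `atomic_int32_t` helpers; interrupt masking and the owner bookkeeping of the real lock are omitted:

```c
#include <stdatomic.h>

typedef struct {
	atomic_int queue;    /* next ticket to hand out */
	atomic_int dequeue;  /* ticket currently being served */
} ticketlock_t;

#define TICKETLOCK_INIT { ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(1) }

static inline void ticket_lock(ticketlock_t* l)
{
	/* take a ticket, then spin until it is our turn */
	int ticket = atomic_fetch_add(&l->queue, 1) + 1;
	while (atomic_load(&l->dequeue) != ticket)
		/* PAUSE / cpu_relax() would go here */;
}

static inline void ticket_unlock(ticketlock_t* l)
{
	/* serve the next waiter in FIFO order */
	atomic_fetch_add(&l->dequeue, 1);
}
```

As in the kernel version, `dequeue` starts one ahead of `queue`, so the very first ticket taken is served immediately.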
@@ -57,6 +57,8 @@ extern "C" {

struct sem;
typedef struct sem sem_t;

typedef void (*signal_handler_t)(int);

/*
 * HermitCore is a libOS.
 * => classical system calls are realized as normal function

@@ -88,6 +90,20 @@ int sys_rcce_init(int session_id);

size_t sys_rcce_malloc(int session_id, int ue);
int sys_rcce_fini(int session_id);
void sys_yield(void);
int sys_kill(tid_t dest, int signum);
int sys_signal(signal_handler_t handler);

typedef struct ucontext {
	mregs_t		uc_mregs;
	fenv_t		uc_fenv;
	struct ucontext	*uc_link;
	stack_t		uc_stack;
} ucontext_t;

void makecontext(ucontext_t *ucp, void (*func)(), int argc, ...);
int swapcontext(ucontext_t *oucp, const ucontext_t *ucp);
int getcontext(ucontext_t *ucp);
int setcontext(ucontext_t *ucp);

#define __NR_exit 0
#define __NR_write 1
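The new `ucontext_t` mirrors the classical `<ucontext.h>` interface: saved general-purpose registers, the FPU environment, a link context and a stack. Assuming these functions follow the usual POSIX semantics (which the signatures suggest but this header does not guarantee), a coroutine-style round trip would look roughly like the sketch below; the stack size, the `stack_t` field names and the demo functions are assumptions:

```c
#include <hermit/syscall.h>    /* assumed location of the declarations above */
#include <hermit/stdio.h>

#define CO_STACK_SIZE 8192

static ucontext_t main_ctx, co_ctx;
static char co_stack[CO_STACK_SIZE];

static void coroutine(void)
{
	kprintf("hello from the coroutine\n");
	/* returning resumes uc_link, i.e. main_ctx */
}

static void context_demo(void)
{
	getcontext(&co_ctx);                         /* start from the current state */
	co_ctx.uc_stack.ss_sp   = co_stack;          /* give it its own stack */
	co_ctx.uc_stack.ss_size = CO_STACK_SIZE;
	co_ctx.uc_link          = &main_ctx;         /* where to go when it returns */

	makecontext(&co_ctx, coroutine, 0);          /* no extra int arguments */

	swapcontext(&main_ctx, &co_ctx);             /* run coroutine, then come back */
	kprintf("back in the caller\n");
}
```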
|
@ -47,6 +47,7 @@ extern "C" {
|
|||
/** @brief System call to terminate a user level process */
|
||||
void NORETURN sys_exit(int);
|
||||
|
||||
|
||||
/** @brief Task switcher
|
||||
*
|
||||
* Timer-interrupted use of this function for task switching
|
||||
|
@ -57,6 +58,7 @@ void NORETURN sys_exit(int);
|
|||
*/
|
||||
size_t** scheduler(void);
|
||||
|
||||
|
||||
/** @brief Initialize the multitasking subsystem
|
||||
*
|
||||
* This procedure sets the current task to the
|
||||
|
@ -68,6 +70,7 @@ size_t** scheduler(void);
|
|||
*/
|
||||
int multitasking_init(void);
|
||||
|
||||
|
||||
/** @brief Clone current task with a specific entry point
|
||||
*
|
||||
* @todo Don't acquire table_lock for the whole task creation.
|
||||
|
@ -84,6 +87,7 @@ int multitasking_init(void);
|
|||
*/
|
||||
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
|
||||
|
||||
|
||||
/** @brief Create a task with a specific entry point
|
||||
*
|
||||
* @todo Don't acquire table_lock for the whole task creation.
|
||||
|
@ -100,6 +104,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
|
|||
*/
|
||||
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id);
|
||||
|
||||
|
||||
/** @brief create a kernel-level task on the current core.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -113,6 +118,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
|
|||
*/
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio);
|
||||
|
||||
|
||||
/** @brief create a kernel-level task.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -127,6 +133,7 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio);
|
|||
*/
|
||||
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id);
|
||||
|
||||
|
||||
/** @brief Create a user level task.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -150,12 +157,14 @@ int create_user_task_on_core(tid_t* id, const char* fame, char** argv, uint8_t p
|
|||
*/
|
||||
int init_tls(void);
|
||||
|
||||
|
||||
/** @brief Cleanup function for the task termination
|
||||
*
|
||||
* On termination, the task calls this function to clean up its address space.
|
||||
*/
|
||||
void finish_task_switch(void);
|
||||
|
||||
|
||||
/** @brief determine the highest priority of all tasks, which are ready
|
||||
*
|
||||
* @return
|
||||
|
@ -164,12 +173,14 @@ void finish_task_switch(void);
|
|||
*/
|
||||
uint32_t get_highest_priority(void);
|
||||
|
||||
|
||||
/** @brief Call to rescheduling
|
||||
*
|
||||
* This is a purely assembled procedure for rescheduling
|
||||
*/
|
||||
void reschedule(void);
|
||||
|
||||
|
||||
/** @brief Wake up a blocked task
|
||||
*
|
||||
* The task's status will be changed to TASK_READY
|
||||
|
@ -180,6 +191,7 @@ void reschedule(void);
|
|||
*/
|
||||
int wakeup_task(tid_t);
|
||||
|
||||
|
||||
/** @brief Block current task
|
||||
*
|
||||
* The current task's status will be changed to TASK_BLOCKED
|
||||
|
@ -190,6 +202,20 @@ int wakeup_task(tid_t);
|
|||
*/
|
||||
int block_current_task(void);
|
||||
|
||||
|
||||
/** @brief Get a process control block
|
||||
*
|
||||
* @param id ID of the task to retrieve
|
||||
* @param task Location to store pointer to task
|
||||
* @return
|
||||
* - 0 on success
|
||||
* - -ENOMEM (-12) if @p task is NULL
|
||||
* - -ENOENT ( -2) if @p id not in task table
|
||||
* - -EINVAL (-22) if there's no valid task with @p id
|
||||
*/
|
||||
int get_task(tid_t id, task_t** task);
|
||||
|
||||
|
||||
/** @brief Block current task until timer expires
|
||||
*
|
||||
* @param deadline Clock tick, when the timer expires
|
||||
|
@ -199,17 +225,21 @@ int block_current_task(void);
|
|||
*/
|
||||
int set_timer(uint64_t deadline);
|
||||
|
||||
|
||||
* @brief check if a timer has expired
|
||||
*
|
||||
*/
|
||||
void check_timers(void);
|
||||
|
||||
|
||||
/** @brief Abort current task */
|
||||
void NORETURN do_abort(void);
|
||||
|
||||
|
||||
/** @brief This function shall be called by leaving kernel-level tasks */
|
||||
void NORETURN leave_kernel_task(void);
|
||||
|
||||
|
||||
/** @brief If a task with a higher priority exists, HermitCore switches to it.
|
||||
*/
|
||||
void check_scheduling(void);
|
||||
|
@ -218,23 +248,27 @@ void check_scheduling(void);
|
|||
*/
|
||||
int network_shutdown(void);
|
||||
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
/** @brief check, if the tick counter has to be updated
|
||||
*/
|
||||
void check_ticks(void);
|
||||
#endif
|
||||
|
||||
extern volatile uint32_t go_down;
|
||||
|
||||
/** @brief shutdown the whole system
|
||||
*/
|
||||
void shutdown_system(void);
|
||||
|
||||
|
||||
extern volatile uint32_t go_down;
|
||||
static inline void check_workqueues_in_irqhandler(int irq)
|
||||
{
|
||||
#ifdef DYNAMIC_TICKS
|
||||
// Increment ticks
|
||||
check_ticks();
|
||||
#endif
|
||||
|
||||
check_timers();
|
||||
|
||||
if (irq < 0) {
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include <hermit/stddef.h>
|
||||
#include <hermit/spinlock_types.h>
|
||||
#include <hermit/vma.h>
|
||||
#include <hermit/signal.h>
|
||||
#include <asm/tasks_types.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
|
@ -104,6 +105,8 @@ typedef struct task {
|
|||
size_t tls_size;
|
||||
/// LwIP error code
|
||||
int lwip_err;
|
||||
/// Handler for (POSIX) Signals
|
||||
signal_handler_t signal_handler;
|
||||
/// FPU state
|
||||
union fpu_state fpu;
|
||||
} task_t;
|
||||
|
@ -133,6 +136,66 @@ typedef struct {
|
|||
spinlock_irqsave_t lock;
|
||||
} readyqueues_t;
|
||||
|
||||
|
||||
static inline void task_list_remove_task(task_list_t* list, task_t* task)
|
||||
{
|
||||
if (task->prev)
|
||||
task->prev->next = task->next;
|
||||
|
||||
if (task->next)
|
||||
task->next->prev = task->prev;
|
||||
|
||||
if (list->last == task)
|
||||
list->last = task->prev;
|
||||
|
||||
if (list->first == task)
|
||||
list->first = task->next;
|
||||
}
|
||||
|
||||
|
||||
static inline void task_list_push_back(task_list_t* list, task_t* task)
|
||||
{
|
||||
if(BUILTIN_EXPECT((task == NULL) || (list == NULL), 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (list->last) {
|
||||
task->prev = list->last;
|
||||
task->next = NULL;
|
||||
list->last->next = task;
|
||||
list->last = task;
|
||||
} else {
|
||||
list->last = list->first = task;
|
||||
task->next = task->prev = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline task_t* task_list_pop_front(task_list_t* list)
|
||||
{
|
||||
if(BUILTIN_EXPECT((list == NULL), 0)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
task_t* task = list->first;
|
||||
|
||||
if(list->first) {
|
||||
// advance list
|
||||
list->first = list->first->next;
|
||||
|
||||
if(list->first) {
|
||||
// first element has no previous element
|
||||
list->first->prev = NULL;
|
||||
} else {
|
||||
// no first element => no last element either
|
||||
list->last = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
task->next = task->prev = NULL;
|
||||
return task;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
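The three inline helpers above implement an intrusive, doubly linked task list: the `prev`/`next` pointers live in `task_t` itself, so push, pop and removal are O(1) and allocation-free, at the price that a task can sit on only one such list at a time. A small sketch of the intended call pattern; the standalone wait list and the task pointers are placeholders, and in the kernel these operations happen under the owning ready-queue lock:

```c
#include <hermit/tasks_types.h>
#include <hermit/tasks.h>

/* hypothetical standalone list, e.g. a wait queue */
static task_list_t wait_list = { .first = NULL, .last = NULL };

static void waitqueue_demo(task_t* a, task_t* b)
{
	/* enqueue two tasks in FIFO order */
	task_list_push_back(&wait_list, a);
	task_list_push_back(&wait_list, b);

	/* remove a specific task, wherever it sits in the list */
	task_list_remove_task(&wait_list, a);

	/* pop the oldest remaining task (here: b) for wake-up */
	task_t* next = task_list_pop_front(&wait_list);
	if (next)
		wakeup_task(next->id);
}
```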
|
|
@ -83,6 +83,10 @@ static inline void sleep(unsigned int sec) { timer_wait(sec*TIMER_FREQ); }
|
|||
|
||||
static inline int timer_deadline(uint32_t t) { return apic_timer_deadline(t); }
|
||||
|
||||
static inline void timer_disable(void) { apic_disable_timer(); }
|
||||
|
||||
static inline int timer_is_running(void) { return apic_timer_is_running(); }
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
C_source := main.c tasks.c syscall.c timer.c
|
||||
C_source := main.c tasks.c syscall.c timer.c signal.c
|
||||
MODULE := kernel
|
||||
|
||||
include $(TOPDIR)/Makefile.inc
|
||||
|
|
|
@ -98,6 +98,8 @@ uint32_t idle_poll = 1;
|
|||
islelock_t* rcce_lock = NULL;
|
||||
rcce_mpb_t* rcce_mpb = NULL;
|
||||
|
||||
extern void signal_init();
|
||||
|
||||
#if 0
|
||||
static int foo(void* arg)
|
||||
{
|
||||
|
@ -130,6 +132,8 @@ static int hermit_init(void)
|
|||
timer_init();
|
||||
multitasking_init();
|
||||
memory_init();
|
||||
signal_init();
|
||||
|
||||
#ifndef CONFIG_VGA
|
||||
uart_init();
|
||||
#endif
|
||||
|
@ -270,8 +274,11 @@ int network_shutdown(void)
|
|||
{
|
||||
kputs("Shutdown LwIP\n");
|
||||
|
||||
if (libc_sd > 0)
|
||||
lwip_close(libc_sd);
|
||||
if (libc_sd >= 0) {
|
||||
int s = libc_sd;
|
||||
libc_sd = -1;
|
||||
lwip_close(s);
|
||||
}
|
||||
|
||||
mmnif_shutdown();
|
||||
//stats_display();
|
||||
|
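The `network_shutdown()` fix above follows a common idiom for descriptors shared between contexts: claim the descriptor first (copy it into a local and mark the shared variable invalid), and only then issue the potentially blocking close, so a concurrent path that tests `libc_sd >= 0` can no longer race into a double close or a use-after-close. A generic sketch of the pattern with placeholder names; on a single core or under an external lock the plain assignment is enough, otherwise an atomic exchange would be needed:

```c
#include <unistd.h>   /* placeholder: in the kernel this would be lwip_close() */

static volatile int shared_fd = -1;   /* -1 means "no connection" */

static void shutdown_connection(void)
{
	/* claim the descriptor before doing anything that can block */
	int fd = shared_fd;
	if (fd >= 0) {
		shared_fd = -1;   /* other paths now see "no connection" */
		close(fd);        /* safe: nobody can reach fd via shared_fd anymore */
	}
}
```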
@ -282,6 +289,7 @@ int network_shutdown(void)
|
|||
#if MAX_CORES > 1
|
||||
int smp_main(void)
|
||||
{
|
||||
timer_init();
|
||||
#ifdef DYNAMIC_TICKS
|
||||
enable_dynticks();
|
||||
#endif
|
||||
|
|
203 hermit/kernel/signal.c (new file)
|
@ -0,0 +1,203 @@
|
|||
#include <hermit/signal.h>
|
||||
#include <hermit/stddef.h>
|
||||
#include <hermit/spinlock.h>
|
||||
#include <hermit/stdio.h>
|
||||
#include <hermit/tasks.h>
|
||||
#include <hermit/dequeue.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#define ENABLE_DEBUG 0
|
||||
#if !ENABLE_DEBUG
|
||||
#define kprintf(...)
|
||||
#endif
|
||||
|
||||
#define SIGNAL_IRQ (32 + 82)
|
||||
#define SIGNAL_BUFFER_SIZE (16)
|
||||
|
||||
// Per-core signal queue and buffer
|
||||
static dequeue_t signal_queue[MAX_CORES];
|
||||
static sig_t signal_buffer[MAX_CORES][SIGNAL_BUFFER_SIZE];
|
||||
|
||||
static void _signal_irq_handler(struct state* s)
|
||||
{
|
||||
kprintf("Enter _signal_irq_handler() on core %d\n", CORE_ID);
|
||||
|
||||
sig_t signal;
|
||||
task_t* dest_task;
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
||||
while(dequeue_pop(&signal_queue[CORE_ID], &signal) == 0) {
|
||||
kprintf(" Deliver signal %d\n", signal.signum);
|
||||
|
||||
if(get_task(signal.dest, &dest_task) == 0) {
|
||||
kprintf(" Found valid task with ID %d\n", dest_task->id);
|
||||
|
||||
// only service signals for tasks on this core
|
||||
if(dest_task->last_core != CORE_ID) {
|
||||
kprintf(" Signal dispatched to wrong CPU! Dropping it ...\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if(dest_task->signal_handler) {
|
||||
kprintf(" Has signal handler (%p)\n", dest_task->signal_handler);
|
||||
|
||||
/* We will inject the signal handler into the control flow when
|
||||
* the task continues its execution the next time. There are
|
||||
* three ways in which the task may have been interrupted:
|
||||
*
|
||||
* 1. a call to reschedule() of the task's own accord
|
||||
* 2. a timer interrupt led to rescheduling to another task
|
||||
* 3. this IRQ interrupted the task
|
||||
*
|
||||
* Depending on those cases, the state of the task can either be
|
||||
* saved to its own stack (1.), its interrupt stack (IST, 2.)
|
||||
* or the stack of this interrupt handler (3.).
|
||||
*
|
||||
* When the signal handler finishes its execution, we need to
|
||||
* restore the task state, so we make the signal handler return
|
||||
* first to sighandler_epilog() which then restores the original
|
||||
* state.
|
||||
*
|
||||
* For cases 2+3, when task was interrupted by an IRQ, we modify
|
||||
* the existing state on the interrupt stack to execute the
|
||||
* signal handler, whereas in case 1, we craft a new state and
|
||||
* place it on top of the task stack.
|
||||
*
|
||||
* The task stack will have the following layout:
|
||||
*
|
||||
* | ... | <- task's rsp before interruption
|
||||
* |----------------------|
|
||||
* | saved state |
|
||||
* |----------------------|
|
||||
* | &sighandler_epilog() | <- rsp after IRQ
|
||||
* |----------------------|
|
||||
* |----------------------| Only for case 1:
|
||||
* | signal handler state | Craft signal handler state, so it
|
||||
* |----------------------| executes before task is continued
|
||||
*/
|
||||
|
||||
size_t* task_stackptr;
|
||||
struct state *task_state, *sighandler_state;
|
||||
|
||||
const int task_is_running = dest_task == curr_task;
|
||||
kprintf(" Task is%s running\n", task_is_running ? "" : " not");
|
||||
|
||||
// location of task state depends of type of interruption
|
||||
task_state = (!task_is_running) ?
|
||||
/* case 1+2: */ (struct state*) dest_task->last_stack_pointer :
|
||||
/* case 3: */ s;
|
||||
|
||||
// pseudo state pushed by reschedule() has INT no. 0
|
||||
const int state_on_task_stack = task_state->int_no == 0;
|
||||
|
||||
if(state_on_task_stack) {
|
||||
kprintf(" State is already on task stack\n");
|
||||
// stack pointer was saved by switch_context() after saving
|
||||
// task state to task stack
|
||||
task_stackptr = dest_task->last_stack_pointer;
|
||||
} else {
|
||||
// stack pointer is last rsp, since task state is saved to
|
||||
// interrupt stack
|
||||
task_stackptr = (size_t*) task_state->rsp;
|
||||
|
||||
kprintf(" Copy state to task stack\n");
|
||||
task_stackptr -= sizeof(struct state) / sizeof(size_t);
|
||||
memcpy(task_stackptr, task_state, sizeof(struct state));
|
||||
}
|
||||
|
||||
// signal handler will return to this function to restore
|
||||
// register state
|
||||
extern void sighandler_epilog();
|
||||
*(--task_stackptr) = (uint64_t) &sighandler_epilog;
|
||||
size_t* sighandler_rsp = task_stackptr;
|
||||
|
||||
if(state_on_task_stack) {
|
||||
kprintf(" Craft state for signal handler on task stack\n");
|
||||
|
||||
// we actually only care for ss, rflags, cs, fs and gs
|
||||
task_stackptr -= sizeof(struct state) / sizeof(size_t);
|
||||
sighandler_state = (struct state*) task_stackptr;
|
||||
memcpy(sighandler_state, task_state, sizeof(struct state));
|
||||
|
||||
// advance stack pointer so signal handler state will be
|
||||
// restored first
|
||||
dest_task->last_stack_pointer = (size_t*) sighandler_state;
|
||||
} else {
|
||||
kprintf(" Reuse state on IST for signal handler\n");
|
||||
sighandler_state = task_state;
|
||||
}
|
||||
|
||||
// update rsp so that sighandler_epilog() will be executed
|
||||
// after signal handler
|
||||
sighandler_state->rsp = (uint64_t) sighandler_rsp;
|
||||
sighandler_state->userrsp = sighandler_state->rsp;
|
||||
|
||||
// call signal handler instead of continuing task's execution
|
||||
sighandler_state->rdi = (uint64_t) signal.signum;
|
||||
sighandler_state->rip = (uint64_t) dest_task->signal_handler;
|
||||
} else {
|
||||
kprintf(" No signal handler installed\n");
|
||||
}
|
||||
} else {
|
||||
kprintf(" Task %d has already died\n", signal.dest);
|
||||
}
|
||||
}
|
||||
kprintf("Leave _signal_irq_handler() on core %d\n", CORE_ID);
|
||||
}
|
||||
|
||||
int hermit_signal(signal_handler_t handler)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
curr_task->signal_handler = handler;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hermit_kill(tid_t dest, int signum)
|
||||
{
|
||||
task_t* task;
|
||||
if(BUILTIN_EXPECT(get_task(dest, &task), 0)) {
|
||||
kprintf("Trying to send signal %d to invalid task %d\n", signum, dest);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
const tid_t dest_core = task->last_core;
|
||||
|
||||
kprintf("Send signal %d from task %d (core %d) to task %d (core %d)\n",
|
||||
signum, per_core(current_task)->id, CORE_ID, dest, dest_core);
|
||||
|
||||
if(task == per_core(current_task)) {
|
||||
kprintf(" Deliver signal to itself, call handler immediately\n");
|
||||
|
||||
if(task->signal_handler) {
|
||||
task->signal_handler(signum);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
sig_t signal = {dest, signum};
|
||||
if(dequeue_push(&signal_queue[dest_core], &signal)) {
|
||||
kprintf(" Cannot push signal to task's signal queue, dropping it\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
// send IPI to destination core
|
||||
kprintf(" Send signal IPI (%d) to core %d\n", SIGNAL_IRQ, dest_core);
|
||||
apic_send_ipi(dest_core, SIGNAL_IRQ);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void signal_init()
|
||||
{
|
||||
// initialize per-core signal queue
|
||||
for(int i = 0; i < MAX_CORES; i++) {
|
||||
dequeue_init(&signal_queue[i], signal_buffer[i],
|
||||
SIGNAL_BUFFER_SIZE, sizeof(sig_t));
|
||||
}
|
||||
|
||||
irq_install_handler(SIGNAL_IRQ, _signal_irq_handler);
|
||||
}
|
||||
|
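The mechanism documented in the long comment inside `_signal_irq_handler()` is a generic trick for injecting a function call into a stopped task: push the address of an epilog routine onto the target stack so the handler's return lands there, then point the saved instruction pointer at the handler and put its argument into the first argument register. A simplified sketch of that manipulation; the frame layout is illustrative and not HermitCore's real `struct state`:

```c
#include <stdint.h>

/* illustrative subset of a saved x86-64 register frame */
struct saved_frame {
	uint64_t rdi;   /* first integer argument */
	uint64_t rip;   /* where execution resumes */
	uint64_t rsp;   /* stack pointer at resume time */
};

extern void sighandler_epilog(void);   /* restores the original frame afterwards */

static void inject_handler(struct saved_frame* f, void (*handler)(int), int signum)
{
	uint64_t* sp = (uint64_t*) f->rsp;

	/* push the epilog as the handler's return address */
	*(--sp) = (uint64_t) &sighandler_epilog;

	/* resume in the handler instead of the interrupted code */
	f->rsp = (uint64_t) sp;
	f->rdi = (uint64_t) signum;
	f->rip = (uint64_t) handler;
}
```

Depending on how the task was interrupted, the kernel applies this either to the state saved on the interrupt stack or to a freshly crafted state on the task's own stack, as the comment above describes.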
|
@ -35,6 +35,7 @@
|
|||
#include <hermit/time.h>
|
||||
#include <hermit/rcce.h>
|
||||
#include <hermit/memory.h>
|
||||
#include <hermit/signal.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/poll.h>
|
||||
|
||||
|
@ -43,7 +44,7 @@
|
|||
#include <lwip/stats.h>
|
||||
|
||||
//TODO: don't use one big kernel lock to communicate with all proxies
|
||||
static spinlock_t lwip_lock = SPINLOCK_INIT;
|
||||
static spinlock_irqsave_t lwip_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
extern spinlock_irqsave_t stdio_lock;
|
||||
extern int32_t isle;
|
||||
|
@ -84,7 +85,7 @@ void NORETURN sys_exit(int arg)
|
|||
{
|
||||
sys_exit_t sysargs = {__NR_exit, arg};
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd >= 0)
|
||||
{
|
||||
int s = libc_sd;
|
||||
|
@ -92,7 +93,7 @@ void NORETURN sys_exit(int arg)
|
|||
lwip_write(s, &sysargs, sizeof(sysargs));
|
||||
libc_sd = -1;
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
// switch to LwIP thread
|
||||
reschedule();
|
||||
|
@ -101,7 +102,7 @@ void NORETURN sys_exit(int arg)
|
|||
idle_poll = 0;
|
||||
} else {
|
||||
idle_poll = 0;
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
}
|
||||
|
||||
do_exit(arg);
|
||||
|
@ -128,9 +129,9 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
|
@ -146,7 +147,7 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
{
|
||||
ret = lwip_read(s, buf+i, j-i);
|
||||
if (ret < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -154,7 +155,7 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return j;
|
||||
}
|
||||
|
@ -188,10 +189,10 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0)
|
||||
{
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
spinlock_irqsave_lock(&stdio_lock);
|
||||
for(i=0; i<len; i++)
|
||||
|
@ -209,7 +210,7 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
{
|
||||
ret = lwip_write(s, (char*)buf+i, len-i);
|
||||
if (ret < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -222,7 +223,7 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
i = ret;
|
||||
} else i = len;
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -265,7 +266,7 @@ int sys_open(const char* name, int flags, int mode)
|
|||
int s, i, ret, sysnr = __NR_open;
|
||||
size_t len;
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
|
@ -308,7 +309,7 @@ int sys_open(const char* name, int flags, int mode)
|
|||
lwip_read(s, &ret, sizeof(ret));
|
||||
|
||||
out:
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -332,7 +333,7 @@ int sys_close(int fd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
|
@ -345,7 +346,7 @@ int sys_close(int fd)
|
|||
lwip_read(s, &ret, sizeof(ret));
|
||||
|
||||
out:
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -442,10 +443,10 @@ off_t sys_lseek(int fd, off_t offset, int whence)
|
|||
sys_lseek_t sysargs = {__NR_lseek, fd, offset, whence};
|
||||
int s;
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
|
||||
if (libc_sd < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
|
@ -453,7 +454,7 @@ off_t sys_lseek(int fd, off_t offset, int whence)
|
|||
lwip_write(s, &sysargs, sizeof(sysargs));
|
||||
lwip_read(s, &off, sizeof(off));
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return off;
|
||||
}
|
||||
|
@ -630,6 +631,19 @@ void sys_yield(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
int sys_kill(tid_t dest, int signum)
|
||||
{
|
||||
if(signum < 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
return hermit_kill(dest, signum);
|
||||
}
|
||||
|
||||
int sys_signal(signal_handler_t handler)
|
||||
{
|
||||
return hermit_signal(handler);
|
||||
}
|
||||
|
||||
static int default_handler(void)
|
||||
{
|
||||
#if 1
|
||||
|
|
|
@ -54,8 +54,8 @@ extern atomic_int32_t cpu_online;
|
|||
* A task's id will be its position in this array.
|
||||
*/
|
||||
static task_t task_table[MAX_TASKS] = { \
|
||||
[0] = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0}, \
|
||||
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0}};
|
||||
[0] = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}, \
|
||||
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}};
|
||||
|
||||
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
|
@ -68,28 +68,143 @@ static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, 0, {[0 .
|
|||
|
||||
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
|
||||
DEFINE_PER_CORE(char*, kernel_stack, NULL);
|
||||
|
||||
#if MAX_CORES > 1
|
||||
DEFINE_PER_CORE(uint32_t, __core_id, 0);
|
||||
#endif
|
||||
|
||||
extern const void boot_stack;
|
||||
extern const void boot_ist;
|
||||
|
||||
/** @brief helper function for the assembly code to determine the current task
|
||||
* @return Pointer to the task_t structure of current task
|
||||
*/
|
||||
task_t* get_current_task(void)
|
||||
|
||||
static void update_timer(task_t* first)
|
||||
{
|
||||
return per_core(current_task);
|
||||
if(first) {
|
||||
if(first->timeout > get_clock_tick()) {
|
||||
timer_deadline((uint32_t) (first->timeout - get_clock_tick()));
|
||||
} else {
|
||||
// workaround: start timer so new head will be serviced
|
||||
timer_deadline(1);
|
||||
}
|
||||
} else {
|
||||
// prevent spurious interrupts
|
||||
timer_disable();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void timer_queue_remove(uint32_t core_id, task_t* task)
|
||||
{
|
||||
if(BUILTIN_EXPECT(!task, 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
task_list_t* timer_queue = &readyqueues[core_id].timers;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
// if task is first in timer queue, we need to update the oneshot
|
||||
// timer for the next task
|
||||
if(timer_queue->first == task) {
|
||||
update_timer(task->next);
|
||||
}
|
||||
#endif
|
||||
|
||||
task_list_remove_task(timer_queue, task);
|
||||
}
|
||||
|
||||
|
||||
static void timer_queue_push(uint32_t core_id, task_t* task)
|
||||
{
|
||||
task_list_t* timer_queue = &readyqueues[core_id].timers;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
task_t* first = timer_queue->first;
|
||||
|
||||
if(!first) {
|
||||
timer_queue->first = timer_queue->last = task;
|
||||
task->next = task->prev = NULL;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
update_timer(task);
|
||||
#endif
|
||||
} else {
|
||||
// lookup position where to insert task
|
||||
task_t* tmp = first;
|
||||
while(tmp && (task->timeout >= tmp->timeout))
|
||||
tmp = tmp->next;
|
||||
|
||||
if(!tmp) {
|
||||
// insert at the end of queue
|
||||
task->next = NULL;
|
||||
task->prev = timer_queue->last;
|
||||
|
||||
// there has to be a last element because there is also a first one
|
||||
timer_queue->last->next = task;
|
||||
timer_queue->last = task;
|
||||
} else {
|
||||
task->next = tmp;
|
||||
task->prev = tmp->prev;
|
||||
tmp->prev = task;
|
||||
|
||||
if(task->prev)
|
||||
task->prev->next = task;
|
||||
|
||||
if(timer_queue->first == tmp) {
|
||||
timer_queue->first = task;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
update_timer(task);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
}
|
||||
|
||||
|
||||
static void readyqueues_push_back(uint32_t core_id, task_t* task)
|
||||
{
|
||||
// idle task (prio=0) doesn't have a queue
|
||||
task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];
|
||||
|
||||
task_list_push_back(readyqueue, task);
|
||||
|
||||
// update priority bitmap
|
||||
readyqueues[core_id].prio_bitmap |= (1 << task->prio);
|
||||
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
}
|
||||
|
||||
|
||||
static void readyqueues_remove(uint32_t core_id, task_t* task)
|
||||
{
|
||||
// idle task (prio=0) doesn't have a queue
|
||||
task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];
|
||||
|
||||
task_list_remove_task(readyqueue, task);
|
||||
|
||||
// no valid task in queue => update priority bitmap
|
||||
if (readyqueue->first == NULL)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << task->prio);
|
||||
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
}
|
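Both helpers keep `prio_bitmap` consistent with the per-priority ready queues, which is what makes the highest-priority lookup cheap: every set bit marks a non-empty queue, and `msb()` returns the highest set bit, i.e. the best runnable priority, in O(1). A tiny standalone illustration of the bookkeeping, using a GCC builtin in place of HermitCore's `msb()` and an illustrative sentinel for the empty case:

```c
#include <stdint.h>

#define NO_PRIO 0   /* illustrative "nothing runnable" value */

static uint32_t prio_bitmap = 0;

static void mark_ready(uint32_t prio)   { prio_bitmap |=  (1u << prio); }
static void mark_empty(uint32_t prio)   { prio_bitmap &= ~(1u << prio); }

/* highest set bit == highest priority with at least one ready task */
static uint32_t highest_priority(void)
{
	if (prio_bitmap == 0)
		return NO_PRIO;
	return 31u - (uint32_t) __builtin_clz(prio_bitmap);
}
```

After `mark_ready(8)` and `mark_ready(3)`, `highest_priority()` yields 8; once queue 8 drains and `mark_empty(8)` runs, it yields 3.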
||||
|
||||
|
||||
void check_scheduling(void)
|
||||
{
|
||||
if (!is_irq_enabled())
|
||||
return;
|
||||
if (msb(readyqueues[CORE_ID].prio_bitmap) > per_core(current_task)->prio)
|
||||
|
||||
if (get_highest_priority() > per_core(current_task)->prio)
|
||||
reschedule();
|
||||
}
|
||||
|
||||
|
||||
uint32_t get_highest_priority(void)
|
||||
{
|
||||
uint32_t prio = msb(readyqueues[CORE_ID].prio_bitmap);
|
||||
|
@ -99,6 +214,7 @@ uint32_t get_highest_priority(void)
|
|||
return prio;
|
||||
}
|
||||
|
||||
|
||||
int multitasking_init(void)
|
||||
{
|
||||
uint32_t core_id = CORE_ID;
|
||||
|
@ -120,9 +236,12 @@ int multitasking_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* interrupt handler to save / restore the FPU context */
|
||||
void fpu_handler(struct state *s)
|
||||
{
|
||||
(void) s;
|
||||
|
||||
task_t* task = per_core(current_task);
|
||||
uint32_t core_id = CORE_ID;
|
||||
|
||||
|
@ -150,6 +269,7 @@ void fpu_handler(struct state *s)
|
|||
restore_fpu_state(&task->fpu);
|
||||
}
|
||||
|
||||
|
||||
int set_idle_task(void)
|
||||
{
|
||||
uint32_t i, core_id = CORE_ID;
|
||||
|
@ -170,6 +290,7 @@ int set_idle_task(void)
|
|||
task_table[i].heap = NULL;
|
||||
readyqueues[core_id].idle = task_table+i;
|
||||
set_per_core(current_task, readyqueues[core_id].idle);
|
||||
set_tss((size_t) task_table[i].stack + KERNEL_STACK_SIZE - 0x10, (size_t) task_table[i].ist_addr + KERNEL_STACK_SIZE - 0x10);
|
||||
ret = 0;
|
||||
|
||||
break;
|
||||
|
@ -181,6 +302,7 @@ int set_idle_task(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int init_tls(void)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
@ -208,15 +330,17 @@ int init_tls(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void finish_task_switch(void)
|
||||
{
|
||||
task_t* old;
|
||||
uint8_t prio;
|
||||
const uint32_t core_id = CORE_ID;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
if ((old = readyqueues[core_id].old_task) != NULL) {
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
|
||||
if (old->status == TASK_FINISHED) {
|
||||
/* cleanup task */
|
||||
if (old->stack) {
|
||||
|
@ -236,7 +360,6 @@ void finish_task_switch(void)
|
|||
}
|
||||
|
||||
old->last_stack_pointer = NULL;
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
|
||||
if (readyqueues[core_id].fpu_owner == old->id)
|
||||
readyqueues[core_id].fpu_owner = 0;
|
||||
|
@ -244,26 +367,15 @@ void finish_task_switch(void)
|
|||
/* signals that this task can be reused */
|
||||
old->status = TASK_INVALID;
|
||||
} else {
|
||||
prio = old->prio;
|
||||
if (!readyqueues[core_id].queue[prio-1].first) {
|
||||
old->next = old->prev = NULL;
|
||||
readyqueues[core_id].queue[prio-1].first = readyqueues[core_id].queue[prio-1].last = old;
|
||||
} else {
|
||||
old->next = NULL;
|
||||
old->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
readyqueues[core_id].queue[prio-1].last->next = old;
|
||||
readyqueues[core_id].queue[prio-1].last = old;
|
||||
}
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
// re-enqueue old task
|
||||
readyqueues_push_back(core_id, old);
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
}
|
||||
|
||||
/** @brief A procedure to be called by
|
||||
* procedures which are called by exiting tasks. */
|
||||
|
||||
void NORETURN do_exit(int arg)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
@ -297,7 +409,7 @@ void NORETURN do_exit(int arg)
|
|||
}
|
||||
}
|
||||
|
||||
/** @brief A procedure to be called by kernel tasks */
|
||||
|
||||
void NORETURN leave_kernel_task(void) {
|
||||
int result;
|
||||
|
||||
|
@ -305,11 +417,12 @@ void NORETURN leave_kernel_task(void) {
|
|||
do_exit(result);
|
||||
}
|
||||
|
||||
/** @brief Aborting a task is like exiting it with result -1 */
|
||||
|
||||
void NORETURN do_abort(void) {
|
||||
do_exit(-1);
|
||||
}
|
||||
|
||||
|
||||
static uint32_t get_next_core_id(void)
|
||||
{
|
||||
uint32_t i;
|
||||
|
@ -333,6 +446,7 @@ static uint32_t get_next_core_id(void)
|
|||
return core_id;
|
||||
}
|
||||
|
||||
|
||||
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
@ -375,7 +489,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
|
|||
if (task_table[i].status == TASK_INVALID) {
|
||||
task_table[i].id = i;
|
||||
task_table[i].status = TASK_READY;
|
||||
task_table[i].last_core = 0;
|
||||
task_table[i].last_core = core_id;
|
||||
task_table[i].last_stack_pointer = NULL;
|
||||
task_table[i].stack = stack;
|
||||
task_table[i].prio = prio;
|
||||
|
@ -386,6 +500,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
|
|||
task_table[i].tls_size = curr_task->tls_size;
|
||||
task_table[i].ist_addr = ist;
|
||||
task_table[i].lwip_err = 0;
|
||||
task_table[i].signal_handler = NULL;
|
||||
|
||||
if (id)
|
||||
*id = i;
|
||||
|
@ -432,6 +547,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
|
||||
{
|
||||
int ret = -ENOMEM;
|
||||
|
@ -475,7 +591,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
|
|||
if (task_table[i].status == TASK_INVALID) {
|
||||
task_table[i].id = i;
|
||||
task_table[i].status = TASK_READY;
|
||||
task_table[i].last_core = 0;
|
||||
task_table[i].last_core = core_id;
|
||||
task_table[i].last_stack_pointer = NULL;
|
||||
task_table[i].stack = stack;
|
||||
task_table[i].prio = prio;
|
||||
|
@ -486,6 +602,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
|
|||
task_table[i].tls_addr = 0;
|
||||
task_table[i].tls_size = 0;
|
||||
task_table[i].lwip_err = 0;
|
||||
task_table[i].signal_handler = NULL;
|
||||
|
||||
if (id)
|
||||
*id = i;
|
||||
|
@ -534,6 +651,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id)
|
||||
{
|
||||
if (prio > MAX_PRIO)
|
||||
|
@ -542,6 +660,7 @@ int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t
|
|||
return create_task(id, ep, args, prio, core_id);
|
||||
}
|
||||
|
||||
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
|
||||
{
|
||||
if (prio > MAX_PRIO)
|
||||
|
@ -550,23 +669,17 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
|
|||
return create_task(id, ep, args, prio, CORE_ID);
|
||||
}
|
||||
|
||||
/** @brief Wakeup a blocked task
|
||||
* @param id The task's tid_t structure
|
||||
* @return
|
||||
* - 0 on success
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
|
||||
int wakeup_task(tid_t id)
|
||||
{
|
||||
task_t* task;
|
||||
uint32_t core_id, prio;
|
||||
uint32_t core_id;
|
||||
int ret = -EINVAL;
|
||||
uint8_t flags;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
task = task_table + id;
|
||||
prio = task->prio;
|
||||
task = &task_table[id];
|
||||
core_id = task->last_core;
|
||||
|
||||
if (task->status == TASK_BLOCKED) {
|
||||
|
@ -574,40 +687,18 @@ int wakeup_task(tid_t id)
|
|||
ret = 0;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
|
||||
// do we need to remove from timer queue?
|
||||
// if task is in timer queue, remove it
|
||||
if (task->flags & TASK_TIMER) {
|
||||
task->flags &= ~TASK_TIMER;
|
||||
if (task->prev)
|
||||
task->prev->next = task->next;
|
||||
if (task->next)
|
||||
task->next->prev = task->prev;
|
||||
if (readyqueues[core_id].timers.first == task)
|
||||
readyqueues[core_id].timers.first = task->next;
|
||||
if (readyqueues[core_id].timers.last == task)
|
||||
readyqueues[core_id].timers.last = task->prev;
|
||||
|
||||
timer_queue_remove(core_id, task);
|
||||
}
|
||||
|
||||
// add task to the runqueue
|
||||
if (!readyqueues[core_id].queue[prio-1].last) {
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task;
|
||||
task->next = task->prev = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
} else {
|
||||
task->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
task->next = NULL;
|
||||
readyqueues[core_id].queue[prio-1].last->next = task;
|
||||
readyqueues[core_id].queue[prio-1].last = task;
|
||||
}
|
||||
// add task to the ready queue
|
||||
readyqueues_push_back(core_id, task);
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
|
||||
#if 0 //def DYNAMIC_TICKS
|
||||
// send IPI to be sure that the scheduler recognizes the new task
|
||||
if (core_id != CORE_ID)
|
||||
apic_send_ipi(core_id, 121);
|
||||
#endif
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
@ -615,54 +706,30 @@ int wakeup_task(tid_t id)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/** @brief Block current task
|
||||
*
|
||||
* The current task's status will be changed to TASK_BLOCKED
|
||||
*
|
||||
* @return
|
||||
* - 0 on success
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
int block_current_task(void)
|
||||
|
||||
int block_task(tid_t id)
|
||||
{
|
||||
task_t* curr_task;
|
||||
tid_t id;
|
||||
uint32_t prio, core_id;
|
||||
task_t* task;
|
||||
uint32_t core_id;
|
||||
int ret = -EINVAL;
|
||||
uint8_t flags;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
curr_task = per_core(current_task);
|
||||
id = curr_task->id;
|
||||
prio = curr_task->prio;
|
||||
core_id = CORE_ID;
|
||||
task = &task_table[id];
|
||||
core_id = task->last_core;
|
||||
|
||||
if (task_table[id].status == TASK_RUNNING) {
|
||||
task_table[id].status = TASK_BLOCKED;
|
||||
ret = 0;
|
||||
if (task->status == TASK_RUNNING) {
|
||||
task->status = TASK_BLOCKED;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
|
||||
// remove task from queue
|
||||
if (task_table[id].prev)
|
||||
task_table[id].prev->next = task_table[id].next;
|
||||
if (task_table[id].next)
|
||||
task_table[id].next->prev = task_table[id].prev;
|
||||
if (readyqueues[core_id].queue[prio-1].first == task_table+id)
|
||||
readyqueues[core_id].queue[prio-1].first = task_table[id].next;
|
||||
if (readyqueues[core_id].queue[prio-1].last == task_table+id) {
|
||||
readyqueues[core_id].queue[prio-1].last = task_table[id].prev;
|
||||
if (!readyqueues[core_id].queue[prio-1].last)
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first;
|
||||
}
|
||||
// remove task from ready queue
|
||||
readyqueues_remove(core_id, task);
|
||||
|
||||
// No valid task in queue => update prio_bitmap
|
||||
if (!readyqueues[core_id].queue[prio-1].first)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
@ -670,148 +737,71 @@ int block_current_task(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int block_current_task(void)
|
||||
{
|
||||
return block_task(per_core(current_task)->id);
|
||||
}
|
||||
|
||||
|
||||
int set_timer(uint64_t deadline)
|
||||
{
|
||||
task_t* curr_task;
|
||||
task_t* tmp;
|
||||
uint32_t core_id, prio;
|
||||
uint32_t flags;
|
||||
uint32_t core_id;
|
||||
uint8_t flags;
|
||||
int ret = -EINVAL;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
curr_task = per_core(current_task);
|
||||
prio = curr_task->prio;
|
||||
core_id = CORE_ID;
|
||||
|
||||
if (curr_task->status == TASK_RUNNING) {
|
||||
curr_task->status = TASK_BLOCKED;
|
||||
curr_task->timeout = deadline;
|
||||
// blocks task and removes from ready queue
|
||||
block_task(curr_task->id);
|
||||
|
||||
curr_task->flags |= TASK_TIMER;
|
||||
curr_task->timeout = deadline;
|
||||
|
||||
timer_queue_push(core_id, curr_task);
|
||||
|
||||
ret = 0;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
|
||||
// remove task from queue
|
||||
if (curr_task->prev)
|
||||
curr_task->prev->next = curr_task->next;
|
||||
if (curr_task->next)
|
||||
curr_task->next->prev = curr_task->prev;
|
||||
if (readyqueues[core_id].queue[prio-1].first == curr_task)
|
||||
readyqueues[core_id].queue[prio-1].first = curr_task->next;
|
||||
if (readyqueues[core_id].queue[prio-1].last == curr_task) {
|
||||
readyqueues[core_id].queue[prio-1].last = curr_task->prev;
|
||||
if (!readyqueues[core_id].queue[prio-1].last)
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first;
|
||||
}
|
||||
|
||||
// No valid task in queue => update prio_bitmap
|
||||
if (!readyqueues[core_id].queue[prio-1].first)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
|
||||
// add task to the timer queue
|
||||
tmp = readyqueues[core_id].timers.first;
|
||||
if (!tmp) {
|
||||
readyqueues[core_id].timers.first = readyqueues[core_id].timers.last = curr_task;
|
||||
curr_task->prev = curr_task->next = NULL;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
timer_deadline(deadline-get_clock_tick());
|
||||
#endif
|
||||
} else {
|
||||
while(tmp && (deadline >= tmp->timeout))
|
||||
tmp = tmp->next;
|
||||
|
||||
if (!tmp) {
|
||||
curr_task->next = NULL;
|
||||
curr_task->prev = readyqueues[core_id].timers.last;
|
||||
if (readyqueues[core_id].timers.last)
|
||||
readyqueues[core_id].timers.last->next = curr_task;
|
||||
readyqueues[core_id].timers.last = curr_task;
|
||||
// obsolete lines...
|
||||
//if (!readyqueues[core_id].timers.first)
|
||||
// readyqueues[core_id].timers.first = curr_task;
|
||||
} else {
|
||||
curr_task->prev = tmp->prev;
|
||||
curr_task->next = tmp;
|
||||
tmp->prev = curr_task;
|
||||
if (curr_task->prev)
|
||||
curr_task->prev->next = curr_task;
|
||||
if (readyqueues[core_id].timers.first == tmp) {
|
||||
readyqueues[core_id].timers.first = curr_task;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
timer_deadline(deadline-get_clock_tick());
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
} else kprintf("Task is already blocked. No timer will be set!\n");
|
||||
} else {
|
||||
kprintf("Task is already blocked. No timer will be set!\n");
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
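`set_timer()` is the building block for sleeping: it blocks the calling task via `block_task()`, stamps it with an absolute deadline in clock ticks, and files it into the per-core timer queue that `check_timers()` later drains. A plausible sleep helper on top of it; the millisecond conversion via `TIMER_FREQ` and the include paths are assumptions, and this is a sketch rather than the kernel's actual `timer_wait()`:

```c
#include <hermit/tasks.h>
#include <hermit/time.h>

/* sleep roughly msec milliseconds, assuming TIMER_FREQ ticks per second */
static void sleep_msec(unsigned int msec)
{
	uint64_t deadline = get_clock_tick() + ((uint64_t) msec * TIMER_FREQ) / 1000;

	if (set_timer(deadline) == 0)
		reschedule();   /* give up the CPU; check_timers() wakes us up later */
}
```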
||||
|
||||
void check_timers(void)
|
||||
{
|
||||
uint32_t core_id = CORE_ID;
|
||||
uint32_t prio;
|
||||
uint64_t current_tick;
|
||||
readyqueues_t* readyqueue = &readyqueues[CORE_ID];
|
||||
spinlock_irqsave_lock(&readyqueue->lock);
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// since IRQs are disabled, get_clock_tick() won't increase here
|
||||
const uint64_t current_tick = get_clock_tick();
|
||||
|
||||
// check timers
|
||||
current_tick = get_clock_tick();
|
||||
while (readyqueues[core_id].timers.first && readyqueues[core_id].timers.first->timeout <= current_tick)
|
||||
// wakeup tasks whose deadline has expired
|
||||
task_t* task;
|
||||
while ((task = readyqueue->timers.first) && (task->timeout <= current_tick))
|
||||
{
|
||||
task_t* task = readyqueues[core_id].timers.first;
|
||||
|
||||
// remove timer from queue
|
||||
readyqueues[core_id].timers.first = readyqueues[core_id].timers.first->next;
|
||||
if (readyqueues[core_id].timers.first) {
|
||||
readyqueues[core_id].timers.first->prev = NULL;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
if (readyqueues[core_id].timers.first->timeout > get_clock_tick())
|
||||
timer_deadline(readyqueues[core_id].timers.first->timeout-current_tick);
|
||||
#endif
|
||||
} else readyqueues[core_id].timers.last = NULL;
|
||||
task->flags &= ~TASK_TIMER;
|
||||
|
||||
// wakeup task
|
||||
if (task->status == TASK_BLOCKED) {
|
||||
task->status = TASK_READY;
|
||||
prio = task->prio;
|
||||
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
|
||||
// add task to the runqueue
|
||||
if (!readyqueues[core_id].queue[prio-1].first) {
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task;
|
||||
task->next = task->prev = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
} else {
|
||||
task->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
task->next = NULL;
|
||||
readyqueues[core_id].queue[prio-1].last->next = task;
|
||||
readyqueues[core_id].queue[prio-1].last = task;
|
||||
}
|
||||
}
|
||||
// pops task from timer queue, so next iteration has new first element
|
||||
wakeup_task(task->id);
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
spinlock_irqsave_unlock(&readyqueue->lock);
|
||||
}
|
||||
|
||||
|
||||
size_t** scheduler(void)
|
||||
{
|
||||
task_t* orig_task;
|
||||
task_t* curr_task;
|
||||
const int32_t core_id = CORE_ID;
|
||||
uint32_t prio;
|
||||
const uint32_t core_id = CORE_ID;
|
||||
uint64_t prio;
|
||||
|
||||
orig_task = curr_task = per_core(current_task);
|
||||
curr_task->last_core = core_id;
|
||||
|
@ -831,8 +821,12 @@ size_t** scheduler(void)
|
|||
set_per_core(current_task, curr_task);
|
||||
}
|
||||
|
||||
prio = msb(readyqueues[core_id].prio_bitmap); // determines highest priority
|
||||
if (prio > MAX_PRIO) {
|
||||
// determine highest priority
|
||||
prio = msb(readyqueues[core_id].prio_bitmap);
|
||||
|
||||
const int readyqueue_empty = prio > MAX_PRIO;
|
||||
if (readyqueue_empty) {
|
||||
|
||||
if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
|
||||
goto get_task_out;
|
||||
curr_task = readyqueues[core_id].idle;
|
||||
|
@ -842,26 +836,33 @@ size_t** scheduler(void)
|
|||
if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
|
||||
goto get_task_out;
|
||||
|
||||
// mark current task for later cleanup by finish_task_switch()
|
||||
if (curr_task->status == TASK_RUNNING) {
|
||||
curr_task->status = TASK_READY;
|
||||
readyqueues[core_id].old_task = curr_task;
|
||||
}
|
||||
|
||||
curr_task = readyqueues[core_id].queue[prio-1].first;
|
||||
set_per_core(current_task, curr_task);
|
||||
if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
|
||||
kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
|
||||
}
|
||||
curr_task->status = TASK_RUNNING;
|
||||
// get new task from its ready queue
|
||||
curr_task = task_list_pop_front(&readyqueues[core_id].queue[prio-1]);
|
||||
|
||||
// remove new task from queue
|
||||
// by the way, priority 0 is only used by the idle task and doesn't need own queue
|
||||
readyqueues[core_id].queue[prio-1].first = curr_task->next;
|
||||
if (!curr_task->next) {
|
||||
readyqueues[core_id].queue[prio-1].last = NULL;
|
||||
if(BUILTIN_EXPECT(curr_task == NULL, 0)) {
|
||||
kprintf("Kernel panic: No task in readyqueue\n");
|
||||
while(1);
|
||||
}
|
||||
if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
|
||||
kprintf("Kernel panic: Got invalid task %d, orig task %d\n",
|
||||
curr_task->id, orig_task->id);
|
||||
while(1);
|
||||
}
|
||||
|
||||
// if we removed the last task from queue, update priority bitmap
|
||||
if(readyqueues[core_id].queue[prio-1].first == NULL) {
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
}
|
||||
curr_task->next = curr_task->prev = NULL;
|
||||
|
||||
// finally make it the new current task
|
||||
curr_task->status = TASK_RUNNING;
|
||||
set_per_core(current_task, curr_task);
|
||||
}
|
||||
|
||||
get_task_out:
|
||||
|
@ -876,6 +877,27 @@ get_task_out:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
int get_task(tid_t id, task_t** task)
|
||||
{
|
||||
if (BUILTIN_EXPECT(task == NULL, 0)) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0)) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*task = &task_table[id];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
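`get_task()` is now the sanctioned way to turn a task ID into its control block, and callers have to check the return code because the ID may be out of range, never used, or already recycled. A short usage sketch that follows the error contract documented in `tasks.h`; the demo function is illustrative:

```c
#include <hermit/tasks.h>
#include <hermit/stdio.h>

static void show_task_prio(tid_t id)
{
	task_t* task;
	int err = get_task(id, &task);

	if (err) {
		/* -ENOMEM: task pointer was NULL, -ENOENT: id out of range,
		 * -EINVAL: slot exists but holds no valid task */
		kprintf("task %u not available (%d)\n", id, err);
		return;
	}

	kprintf("task %u has priority %u\n", task->id, task->prio);
}
```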
||||
|
||||
void reschedule(void)
|
||||
{
|
||||
size_t** stack;
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
/// A linked list for each binary size exponent
|
||||
static buddy_t* buddy_lists[BUDDY_LISTS] = { [0 ... BUDDY_LISTS-1] = NULL };
|
||||
/// Lock for the buddy lists
|
||||
static spinlock_t buddy_lock = SPINLOCK_INIT;
|
||||
static spinlock_irqsave_t buddy_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
/** @brief Check if larger free buddies are available */
|
||||
static inline int buddy_large_avail(uint8_t exp)
|
||||
|
@ -66,7 +66,7 @@ static inline int buddy_exp(size_t sz)
|
|||
/** @brief Get a free buddy by potentially splitting a larger one */
|
||||
static buddy_t* buddy_get(int exp)
|
||||
{
|
||||
spinlock_lock(&buddy_lock);
|
||||
spinlock_irqsave_lock(&buddy_lock);
|
||||
buddy_t** list = &buddy_lists[exp-BUDDY_MIN];
|
||||
buddy_t* buddy = *list;
|
||||
buddy_t* split;
|
||||
|
@ -92,7 +92,7 @@ static buddy_t* buddy_get(int exp)
|
|||
}
|
||||
|
||||
out:
|
||||
spinlock_unlock(&buddy_lock);
|
||||
spinlock_irqsave_unlock(&buddy_lock);
|
||||
|
||||
return buddy;
|
||||
}
|
||||
|
@ -103,11 +103,11 @@ out:
|
|||
*/
|
||||
static void buddy_put(buddy_t* buddy)
|
||||
{
|
||||
spinlock_lock(&buddy_lock);
|
||||
spinlock_irqsave_lock(&buddy_lock);
|
||||
buddy_t** list = &buddy_lists[buddy->prefix.exponent-BUDDY_MIN];
|
||||
buddy->next = *list;
|
||||
*list = buddy;
|
||||
spinlock_unlock(&buddy_lock);
|
||||
spinlock_irqsave_unlock(&buddy_lock);
|
||||
}
|
||||
|
||||
void buddy_dump(void)
|
||||
|
|
|
@@ -48,7 +48,7 @@ extern const void kernel_end;
*/
static vma_t vma_boot = { VMA_MIN, VMA_MIN, VMA_HEAP };
static vma_t* vma_list = &vma_boot;
static spinlock_t vma_lock = SPINLOCK_INIT;
static spinlock_irqsave_t vma_lock = SPINLOCK_IRQSAVE_INIT;

// TODO: we might move the architecture specific VMA regions to a
// separate function arch_vma_init()
@@ -86,7 +86,7 @@ out:

size_t vma_alloc(size_t size, uint32_t flags)
{
spinlock_t* lock = &vma_lock;
spinlock_irqsave_t* lock = &vma_lock;
vma_t** list = &vma_list;

//kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags);
@@ -98,7 +98,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
size_t base = VMA_MIN;
size_t limit = VMA_MAX;

spinlock_lock(lock);
spinlock_irqsave_lock(lock);

// first fit search for free memory area
vma_t* pred = NULL; // vma before current gap
@@ -115,7 +115,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
} while (pred || succ);

fail:
spinlock_unlock(lock); // we were unlucky to find a free gap
spinlock_irqsave_unlock(lock); // we were unlucky to find a free gap

return 0;

@@ -143,14 +143,14 @@ found:
*list = new;
}

spinlock_unlock(lock);
spinlock_irqsave_unlock(lock);

return start;
}

int vma_free(size_t start, size_t end)
{
spinlock_t* lock = &vma_lock;
spinlock_irqsave_t* lock = &vma_lock;
vma_t* vma;
vma_t** list = &vma_list;

@@ -159,7 +159,7 @@ int vma_free(size_t start, size_t end)
if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;

spinlock_lock(lock);
spinlock_irqsave_lock(lock);

// search vma
vma = *list;
@@ -169,7 +169,7 @@ int vma_free(size_t start, size_t end)
}

if (BUILTIN_EXPECT(!vma, 0)) {
spinlock_unlock(lock);
spinlock_irqsave_unlock(lock);
return -EINVAL;
}

@@ -190,7 +190,7 @@ int vma_free(size_t start, size_t end)
else {
vma_t* new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
spinlock_unlock(lock);
spinlock_irqsave_unlock(lock);
return -ENOMEM;
}

@@ -205,14 +205,14 @@ int vma_free(size_t start, size_t end)
new->prev = vma;
}

spinlock_unlock(lock);
spinlock_irqsave_unlock(lock);

return 0;
}

int vma_add(size_t start, size_t end, uint32_t flags)
{
spinlock_t* lock = &vma_lock;
spinlock_irqsave_t* lock = &vma_lock;
vma_t** list = &vma_list;
int ret = 0;

@@ -221,7 +221,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)

//kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags);

spinlock_lock(lock);
spinlock_irqsave_lock(lock);

// search gap
vma_t* pred = NULL;
@@ -267,7 +267,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)
}

fail:
spinlock_unlock(lock);
spinlock_irqsave_unlock(lock);

return ret;
}
@@ -286,7 +286,7 @@ void vma_dump(void)
}

kputs("VMAs:\n");
spinlock_lock(&vma_lock);
spinlock_irqsave_lock(&vma_lock);
print_vma(&vma_boot);
spinlock_unlock(&vma_lock);
spinlock_irqsave_unlock(&vma_lock);
}
@@ -221,7 +221,7 @@ static int init_qemu(char *path)
char monitor_str[MAX_PATH];
char chardev_file[MAX_PATH];
char* qemu_str = "qemu-system-x86_64";
char* qemu_argv[] = {qemu_str, "-s", "-nographic", "-smp", "1", "-m", "2G", "-net", "nic,model=rtl8139", "-net", hostfwd, "-chardev", chardev_file, "-device", "pci-serial,chardev=gnc0", "-monitor", monitor_str, "-kernel", loader_path, "-initrd", path, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
char* qemu_argv[] = {qemu_str, "-nographic", "-smp", "1", "-m", "2G", "-net", "nic,model=rtl8139", "-net", hostfwd, "-chardev", chardev_file, "-device", "pci-serial,chardev=gnc0", "-monitor", monitor_str, "-kernel", loader_path, "-initrd", path, "-s", NULL, NULL, NULL, NULL, NULL, NULL, NULL};

str = getenv("HERMIT_CPUS");
if (str)
@@ -32,8 +32,8 @@ default:

demo:
	@echo Build demo applications
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) -C tests depend
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) -C tests
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) GO_FOR_TARGET="$(GO_FOR_TARGET)" GOFLAGS_FOR_TARGET="$(GOFLAGS_FOR_TARGET)" -C tests depend
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) GO_FOR_TARGET="$(GO_FOR_TARGET)" GOFLAGS_FOR_TARGET="$(GOFLAGS_FOR_TARGET)" -C tests
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) -C benchmarks depend
	$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET) $(PROFILING_CFLAGS)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET) $(PROFILING_LDFLAGS)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) -C benchmarks
	#$Q$(MAKE) ELFEDIT_FOR_TARGET=$(ELFEDIT_FOR_TARGET) CC_FOR_TARGET=$(CC_FOR_TARGET) CXX_FOR_TARGET=$(CXX_FOR_TARGET) CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" LDFLAGS_FOR_TARGET="$(LDFLAGS_FOR_TARGET)" OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) -C openmpbench depend
@@ -55,7 +55,7 @@ $(TMP)/bootstrap:
	$Q$(MKDIR) $(TMP)/bootstrap
	$Q$(CD) $(TMP)/bootstrap; $(TOPDIR)/gcc/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) --without-headers --with-isl --with-tune=generic --enable-languages=c --disable-nls --disable-shared --disable-libssp --disable-libgomp --enable-threads=posix --enable-tls && $(MAKE) $(NJOBS) all-gcc $(TP) && $(MAKE) install-gcc $(TP)

toolchain: $(TMP)/newlib pte $(TMP)/gcc libs headers demo
toolchain: $(TMP)/newlib pte headers $(TMP)/gcc libs demo

$(TMP)/newlib:
	@echo Build newlib
@@ -91,7 +91,7 @@ $(TMP)/gcc:
	$Q$(CD) $(TMP)/gcc; $(TOPDIR)/gcc/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) --without-headers --with-newlib --with-isl --with-tune=generic --enable-languages=c,lto --disable-nls --disable-shared --disable-libssp --enable-threads=posix --disable-libgomp --enable-tls --enable-lto --disable-symvers && $(MAKE) $(NJOBS) $(TP) && $(MAKE) install $(TP)
	$Q$(RM) $(TMP)/gcc
	$Q$(MKDIR) $(TMP)/gcc
	$Q$(CD) $(TMP)/gcc; $(TOPDIR)/gcc/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) --without-headers --with-newlib --with-isl --with-tune=generic --enable-languages=c,c++,fortran,lto --disable-nls --disable-shared --disable-libssp --enable-threads=posix --disable-libgomp --enable-tls --enable-lto --disable-symvers && $(MAKE) $(NJOBS) $(TP) && $(MAKE) install $(TP)
	$Q$(CD) $(TMP)/gcc; $(TOPDIR)/gcc/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) --without-headers --with-newlib --with-isl --without-libatomic --with-tune=generic --enable-languages=c,c++,go,fortran,lto --disable-nls --disable-shared --disable-libssp --enable-threads=posix --disable-libgomp --enable-tls --enable-lto --disable-symvers && $(MAKE) $(NJOBS) $(TP) && $(MAKE) install $(TP)

clean:
	@echo Cleaning toolchain
@@ -52,7 +52,7 @@ int sched_yield(void);
#endif

#define N 10000
#define M (1024+1)
#define M (256+1)
#define BUFFSZ (1ULL*1024ULL*1024ULL)

static char* buff[M];
@@ -1 +1 @@
Subproject commit e3ac291d9fa7ae33dca5625970098984af0ec7dc
Subproject commit 07585ecfdbc69ec9cceadf4dcd0fdf1eb855d3e1
@@ -57,3 +57,53 @@ Note that these are not signal handlers registered by newlib
4 | 0x989660 <signal_dispatcher>
5 | 0x989660 <signal_dispatcher>
```

Show the backtrace of a suspended (or any other) task:

```
(gdb) hermit-ps
ID | STATE | CPU | PRIO | STACK | INSTRUCTION POINTER
--------------------------------------------------------------------
0 | IDL | 0 | 0 | 0x88d000 | 0x80a756 <reschedule+41>
1 | IDL | 1 | 0 | 0x88f000 | 0x80a756 <reschedule+41>
2 | IDL | 2 | 0 | 0x891000 | 0x80a756 <reschedule+41>
3 | IDL | 3 | 0 | 0x893000 | 0x80a756 <reschedule+41>
4 | BLK | 0 | 8 | 0xde000 | 0x80a756 <reschedule+41>
5 | BLK | 0 | 16 | 0xf8000 | 0x80a756 <reschedule+41>
6 | RUN | 1 | 8 | 0x115000 | 0x990070 <thread_func+64>
7 | RUN | 2 | 8 | 0x12b000 | 0x990070 <thread_func+64>
8 | RUN | 3 | 8 | 0x141000 | 0x990070 <thread_func+64>
9 | RUN | 0 | 8 | 0x157000 | 0x990070 <thread_func+64>
(gdb) hermit-bt 4
#0 0x000000000081701c in rollback ()
#1 0x000000000080a756 in reschedule () at kernel/tasks.c:909
#2 0x0000000000813f7b in timer_wait (ticks=50) at arch/x86/kernel/timer.c:139
#3 0x000000000080b9f7 in sys_msleep (ms=500) at kernel/syscall.c:357
#4 0x000000000098fd5e in main ()
```

Switch the current context to any task to investigate its call stack. Remember to
restore the context if you want to continue execution.

```
(gdb) info threads
Id Target Id Frame
* 1 Thread 1 (CPU#0 [running]) 0x0000000000990070 in thread_func ()
2 Thread 2 (CPU#1 [running]) 0x0000000000990070 in thread_func ()
3 Thread 3 (CPU#2 [running]) 0x0000000000990070 in thread_func ()
4 Thread 4 (CPU#3 [running]) 0x0000000000990070 in thread_func ()
(gdb) hermit-switch-context 4
(gdb) info threads
Id Target Id Frame
* 1 Thread 1 (CPU#0 [running]) 0x000000000081701c in rollback ()
2 Thread 2 (CPU#1 [running]) 0x0000000000990070 in thread_func ()
3 Thread 3 (CPU#2 [running]) 0x0000000000990070 in thread_func ()
4 Thread 4 (CPU#3 [running]) 0x0000000000990070 in thread_func ()
(gdb) bt
#0 0x000000000081701c in rollback ()
#1 0x000000000080a756 in reschedule () at kernel/tasks.c:909
#2 0x0000000000813f7b in timer_wait (ticks=50) at arch/x86/kernel/timer.c:139
#3 0x000000000080b9f7 in sys_msleep (ms=500) at kernel/syscall.c:357
#4 0x000000000098fd5e in main ()
(gdb) hermit-restore-context
```
@@ -136,9 +136,6 @@ class HermitLsSighandler(gdb.Command):
        gdb.write(header)
        gdb.write((len(header) - 1) * '-' + '\n')

        inferior = gdb.selected_inferior()
        currentInferiorThread = gdb.selected_thread()

        for task in task_lists():

            gdb.write(rowfmt.format(
@@ -147,3 +144,164 @@ class HermitLsSighandler(gdb.Command):
            ))

HermitLsSighandler()


def stripSymbol(value):
    s = "%s" % value
    return s.split(' ')[0]

class HermitTaskState:
    def __init__(self, address = None):
        import re
        self.info_reg_regex = re.compile("(?P<register>[\w]+)\s+(?P<value>0x[0-9a-f]+).*")

        if address:
            self.address = address

            self.registers = {
                'gs': self.address + 0,
                'fs': self.address + 1,
                'r15': self.address + 2,
                'r14': self.address + 3,
                'r13': self.address + 4,
                'r12': self.address + 5,
                'r11': self.address + 6,
                'r10': self.address + 7,
                'r9': self.address + 8,
                'r8': self.address + 9,
                'rdi': self.address + 10,
                'rsi': self.address + 11,
                'rbp': self.address + 12,
                'rsp': self.address + 13,
                'rbx': self.address + 14,
                'rdx': self.address + 15,
                'rcx': self.address + 16,
                'rax': self.address + 17,
                # int_no
                # error
                'rip': self.address + 20,
                'cs': self.address + 21,
                'eflags': self.address + 22,
                # userrsp
                'ss': self.address + 24,
            }

            # make nice strings out of register values
            for register, valptr in self.registers.items():
                self.registers[register] = stripSymbol(valptr.dereference())

        else:
            self.address = False
            self.info_registers = gdb.execute('info registers', to_string=True)
            self.registers = {}
            for line in self.info_registers.split('\n'):
                match = self.info_reg_regex.match(line)
                if match:
                    self.registers[match.group('register')] = match.group('value')

    def switch(self):
        for register, value in self.registers.items():
            try:
                gdb.execute("set $%s = %s" % (register, value))
            except:
                print("Cannot restore %s=%s, skipping ..." % (register, value))


class HermitTaskBacktrace(gdb.Command):
    """Show backtrace for HermitCore task.

    Usage: hermit-bt ID"""

    def __init__(self):
        super(HermitTaskBacktrace, self).__init__("hermit-bt", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if len(argv) != 1:
            raise gdb.GdbError("hermit-bt takes one argument")

        task = get_task_by_pid(int(argv[0]))

        if task['status'] == 2:
            gdb.execute('bt')
            return

        current_state = HermitTaskState()

        task_state = HermitTaskState(task['last_stack_pointer'])

        try:
            task_state.switch()
            gdb.execute('bt')
        finally:
            current_state.switch()

HermitTaskBacktrace()

original_state = {}

def saveCurrentState(state):
    curr_thread = gdb.selected_thread()
    for thread in gdb.selected_inferior().threads():
        if not thread.num in state:
            thread.switch()
            state[thread.num] = HermitTaskState()
    curr_thread.switch()

def restoreCurrentState(state):
    curr_thread = gdb.selected_thread()
    for thread in gdb.selected_inferior().threads():
        if thread.num in state:
            thread.switch()
            state[thread.num].switch()
    curr_thread.switch()
    state = {}

class HermitSwitchContext(gdb.Command):
    """Switch current context to given HermitCore task

    Usage: hermit-switch-context ID"""

    def __init__(self):
        super(HermitSwitchContext, self).__init__("hermit-switch-context", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        global original_state

        argv = gdb.string_to_argv(arg)
        if len(argv) != 1:
            raise gdb.GdbError("hermit-switch-context takes one argument")

        # save original state to go back to it later
        saveCurrentState(original_state)

        task = get_task_by_pid(int(argv[0]))

        # switch current inferior thread to where task has run last
        for thread in gdb.selected_inferior().threads():
            if (thread.num - 1) == task['last_core']:
                thread.switch()
                break

        # apply its state
        task_state = HermitTaskState(task['last_stack_pointer'])
        task_state.switch()

HermitSwitchContext()


class HermitRestoreContext(gdb.Command):
    """Restore context to state before it was switched

    Usage: hermit-restore-context"""

    def __init__(self):
        super(HermitRestoreContext, self).__init__("hermit-restore-context", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        global original_state

        restoreCurrentState(original_state)

HermitRestoreContext()
@@ -1 +1 @@
Subproject commit 5ef13e91f15138ede39e9a8b15c3ef0641108068
Subproject commit 30a3e67a8641c3a6884ad3d18e1fc8752652e89c
@@ -8,6 +8,7 @@ KEEP_DEBUG = --only-keep-debug
CROSSCOMPREFIX = x86_64-hermit-

CC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
GO_FOR_TARGET = $(CROSSCOMPREFIX)gccgo
CXX_FOR_TARGET = $(CROSSCOMPREFIX)g++
GCC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
CPP_FOR_TARGET = $(CROSSCOMPREFIX)cpp
@@ -34,6 +35,10 @@ endif
	@echo [CC] $@
	$Q$(CC_FOR_TARGET) -c $(CFLAGS_FOR_TARGET) -o $@ $<

%.o : %.go
	@echo [GO] $@
	$Q$(GO_FOR_TARGET) -c $(GOFLAGS_FOR_TARGET) -o $@ $<

%.o: %.cpp
	@echo [CXX] $@
	$Q$(CXX_FOR_TARGET) -c $(CXXFLAGS_FOR_TARGET) -o $@ $<
@@ -44,7 +49,7 @@ endif

default: all

all: hello hello++ thr_hello jacobi hellof RCCE_minimum
all: hello hello++ thr_hello jacobi hellof RCCE_minimum signals pi

hello++: hello++.o
	@echo [LD] $@
@@ -67,6 +72,13 @@ hellof: hellof.o
	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $@
	$Qchmod a-x $@.sym

pi: pi.o
	@echo [LD] $@
	$Q$(GO_FOR_TARGET) -pthread -o $@ $< $(LDFLAGS_FOR_TARGET) $(GOFLAGS_FOR_TARGET)
	$Q$(OBJCOPY_FOR_TARGET) $(KEEP_DEBUG) $@ $@.sym
	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $@
	$Qchmod a-x $@.sym

jacobi: jacobi.o
	@echo [LD] $@
	$Q$(CC_FOR_TARGET) -o $@ $< $(LDFLAGS_FOR_TARGET) $(CFLAGS_FOR_TARGET)
@@ -85,6 +97,17 @@ thr_hello: thr_hello.o
	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $@
	$Qchmod a-x $@.sym

signals.o: signals.c
	@echo [CC] $@
	$Q$(CC_FOR_TARGET) -c $(CFLAGS_FOR_TARGET) -pthread -o $@ $<

signals: signals.o
	@echo [LD] $@
	$Q$(CC_FOR_TARGET) -o $@ $< $(LDFLAGS_FOR_TARGET) $(CFLAGS_FOR_TARGET) -pthread
	$Q$(OBJCOPY_FOR_TARGET) $(KEEP_DEBUG) $@ $@.sym
	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $@
	$Qchmod a-x $@.sym

RCCE_minimum: RCCE_minimum.o
	@echo [LD] $@
	$Q$(CC_FOR_TARGET) -o $@ $< $(LDFLAGS_FOR_TARGET) $(CFLAGS_FOR_TARGET) -lircce
@@ -94,11 +117,11 @@ RCCE_minimum: RCCE_minimum.o

clean:
	@echo Cleaning tests
	$Q$(RM) hello hello++ hellof jacobi thr_hello RCCE_minimum *.sym *.o *~
	$Q$(RM) hello hello++ hellof jacobi thr_hello RCCE_minimum signals pi *.sym *.o *~

veryclean:
	@echo Proper cleaning tests
	$Q$(RM) hello hello++ hellof jacobi thr_hello RCCE_minimum *.sym *.o *~
	$Q$(RM) hello hello++ hellof jacobi thr_hello RCCE_minimum pi *.sym *.o *~

depend:
	$Q$(CC_FOR_TARGET) -MM $(CFLAGS_FOR_TARGET) *.c > Makefile.dep
84 hermit/usr/tests/pi.go (new file)
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2016, Stefan Lankes, RWTH Aachen University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University nor the names of its contributors
 *      may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
	"runtime"
)

var step float64

func term(ch chan float64, start, end int) {
	var res float64

	for i := start; i < end; i++ {
		x := (float64(i) + 0.5) * step
		res += 4.0 / (1.0 + x * x)
	}

	ch <- res
}

func main() {
	var num_steps int
	ch := make(chan float64)
	max_coroutines := runtime.NumCPU()

	if len(os.Args) > 1 {
		num_steps, _ = strconv.Atoi(os.Args[1])
	}
	if num_steps < 100 {
		num_steps = 1000000
	}
	fmt.Println("num_steps = ", num_steps)

	sum := float64(0)
	step = 1.0 / float64(num_steps)

	start := time.Now()

	for i := 0; i < max_coroutines; i++ {
		start := (num_steps / max_coroutines) * i
		end := (num_steps / max_coroutines) * (i+1)

		go term(ch, start, end)
	}

	for i := 0; i < max_coroutines; i++ {
		sum += <-ch
	}

	elapsed := time.Since(start)

	fmt.Println("Pi : ", sum*step)
	fmt.Println("Time : ", elapsed)
}
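For context, the new pi.go test approximates π with the midpoint rule: the interval [0, 1] is split into num_steps slices, 4/(1+x²) is evaluated at each slice's midpoint, and the goroutines simply partition the index range before the partial sums are scaled by the step width. The underlying identity (standard calculus, not part of the commit itself) is:

```
\pi = \int_0^1 \frac{4}{1 + x^2}\,dx
    \approx \sum_{i=0}^{N-1} \frac{4}{1 + x_i^2}\,\Delta x,
\qquad x_i = \left(i + \tfrac{1}{2}\right)\Delta x,\quad \Delta x = \frac{1}{N}.
```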
111 hermit/usr/tests/signals.c (new file)
@@ -0,0 +1,111 @@
/*
 * Copyright (c) 2016, Daniel Krebs, RWTH Aachen University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University nor the names of its contributors
 *      may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <syscall.h>

#define THREAD_COUNT_DEFAULT 2

static volatile __thread int alive = 1;
static volatile __thread int thread_id;

pthread_barrier_t barrier;
pthread_barrierattr_t attr;

static void sighandler(int sig)
{
	printf("[%d] Received signal %d\n", thread_id, sig);
	alive = 0;
}

void* thread_func(void* arg)
{
	thread_id = *((int*) arg);

	printf("[%d] Hello (task ID: %d)\n", thread_id, sys_getpid());

	// register signal handler
	signal(16, sighandler);

	// make sure all threads are running before the main thread starts sending
	// signals
	pthread_barrier_wait(&barrier);

	// stay here until signal received
	while(alive);

	printf("[%d] I'm done\n", thread_id);

	return 0;
}

int main(int argc, char** argv)
{
	size_t thread_count = THREAD_COUNT_DEFAULT;
	if(argc == 2) {
		thread_count = strtoul(argv[1], NULL, 10);
	}

	pthread_t threads[thread_count];
	unsigned int i, param[thread_count];
	int ret;

	// if we send the signals too early some threads might not have registered
	// a signal handler yet
	pthread_barrier_init(&barrier, &attr, thread_count + 1);

	for(i = 0; i < thread_count; i++) {
		param[i] = i;
		ret = pthread_create(threads+i, NULL, thread_func, (void*) &param[i]);
		if (ret) {
			printf("Thread creation failed! error = %d\n", ret);
			return ret;
		} else printf("Create thread %d\n", i);
	}

	pthread_barrier_wait(&barrier);

	for(i = 0; i < thread_count; i++) {
		printf("Send signal to thread %d\n", i);
		pthread_kill(threads[i], 16);
	}

	sys_msleep(500);

	printf("Wait for all threads to finish\n");
	for(i = 0; i < thread_count; i++) {
		pthread_join(threads[i], NULL);
		printf("Thread %d is done\n", i);
	}

	printf("All done\n");

	return 0;
}
2 test.sh
@@ -3,7 +3,7 @@
# do not use this script
# it is written only for internal tests via Travis CI

FILES="hermit/usr/tests/hello hermit/usr/tests/hellof hermit/usr/tests/hello++ hermit/usr/tests/thr_hello"
FILES="hermit/usr/tests/hello hermit/usr/tests/hellof hermit/usr/tests/hello++ hermit/usr/tests/thr_hello hermit/usr/tests/pi hermit/usr/benchmarks/stream hermit/usr/benchmarks/basic"
PROXY=hermit/tools/proxy

for f in $FILES; do echo "check $f..."; $PROXY $f || exit 1; done