Merge branch 'master' into readwrite

This commit is contained in:
Stefan Lankes 2012-07-20 09:05:57 +02:00
commit edadf16d7f
782 changed files with 65233 additions and 41208 deletions

1
.gitignore vendored
View file

@@ -12,6 +12,7 @@
tags
Makefile
include/metalsvm/config.h
include/metalsvm/config.inc
tools/make_initrd
newlib/examples/hello
newlib/examples/jacobi

359
Doxyfile
View file

@@ -1,4 +1,4 @@
# Doxyfile 1.7.3
# Doxyfile 1.8.1.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -22,10 +22,11 @@
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
# The PROJECT_NAME tag is a single word (or sequence of words) that should
# identify the project. Note that if you do not use Doxywizard you need
# to put quotes around the project name if it contains spaces.
PROJECT_NAME = MetalSVM
PROJECT_NAME = "MetalSVM"
# The PROJECT_NUMBER tag can be used to enter a project or revision number.
# This could be handy for archiving the generated documentation or
@@ -33,7 +34,9 @@ PROJECT_NAME = MetalSVM
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer
# a quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "A Bare-Metal Hypervisor for Non-Coherent Memory-Coupled Cores"
@@ -42,14 +45,14 @@ PROJECT_BRIEF = "A Bare-Metal Hypervisor for Non-Coherent Memory-Couple
# exceed 55 pixels and the maximum width should not exceed 200 pixels.
# Doxygen will copy the logo to the output directory.
PROJECT_LOGO = documentation/img/lfbs_logo.gif
PROJECT_LOGO = documentation/img/lfbs_logo.gif
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY = documentation
OUTPUT_DIRECTORY = documentation/
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
@@ -181,7 +184,7 @@ SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
TAB_SIZE = 2
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that acts
# as commands in the documentation. An alias has the form "name=value".
@@ -192,6 +195,13 @@ TAB_SIZE = 2
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding
# "class=itcl::class" will allow you to use the command class in the
# itcl::class meaning.
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
@@ -230,6 +240,15 @@ OPTIMIZE_OUTPUT_VHDL = NO
EXTENSION_MAPPING =
# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
# comments according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you
# can mix doxygen, HTML, and XML commands with Markdown formatting.
# Disable only in case of backward compatibilities issues.
MARKDOWN_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match functions declarations and
@@ -257,7 +276,7 @@ SIP_SUPPORT = NO
# setting a simple type. If this is not the case, or you want to show the
# methods anyway, you should set this option to NO.
IDL_PROPERTY_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
@@ -274,6 +293,22 @@ DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
# unions are shown inside the group in which they are included (e.g. using
# @ingroup) instead of on a separate page (for HTML and Man pages) or
# section (for LaTeX and RTF).
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
# unions with only public data fields will be shown inline in the documentation
# of the scope in which they are defined (i.e. file, namespace, or group
# documentation), provided this scope is documented. If set to NO (the default),
# structs, classes, and unions are shown on a separate page (for HTML and Man
# pages) or section (for LaTeX and RTF).
INLINE_SIMPLE_STRUCTS = YES
# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
# is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
@@ -296,10 +331,21 @@ TYPEDEF_HIDES_STRUCT = YES
# a logarithmic scale so increasing the size by one will roughly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
# corresponding to a cache size of 2^16 = 65536 symbols.
SYMBOL_CACHE_SIZE = 0
# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
# their name and scope. Since this can be an expensive process and often the
# same symbol appear multiple times in the code, doxygen keeps a cache of
# pre-resolved symbols. If the cache is too small doxygen will become slower.
# If the cache is too large, memory is wasted. The cache size is given by this
# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
@@ -316,6 +362,10 @@ EXTRACT_ALL = NO
EXTRACT_PRIVATE = NO
# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
@@ -449,8 +499,11 @@ SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
# do proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function even
# if there is only one candidate or it is obvious which candidate to choose
# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# will still accept a match between prototype and implementation in such cases.
STRICT_PROTO_MATCHING = NO
@@ -459,25 +512,25 @@ STRICT_PROTO_MATCHING = NO
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
GENERATE_TODOLIST = NO
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.
GENERATE_TESTLIST = NO
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
GENERATE_BUGLIST = NO
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
GENERATE_DEPRECATEDLIST= NO
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
@@ -500,12 +553,6 @@ MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.
SHOW_DIRECTORIES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@@ -517,7 +564,7 @@ SHOW_FILES = YES
# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = NO
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
@@ -531,13 +578,23 @@ FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. The create the layout file
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option.
# You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.
LAYOUT_FILE = documentation/tmpl/layout.xml
# The CITE_BIB_FILES tag can be used to specify one or more bib files
# containing the references data. This must be a list of .bib files. The
# .bib extension is automatically appended if omitted. Using this command
# requires the bibtex tool to be installed. See also
# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
# feature you need bibtex and perl available in the search path.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
@@ -599,14 +656,14 @@ WARN_LOGFILE =
# with spaces.
INPUT = ./fs \
./include \
./kernel \
./drivers \
./arch \
./libkern \
./mm \
./tools \
./documentation/text
./include \
./kernel \
./drivers \
./arch \
./libkern \
./mm \
./tools \
./documentation/text
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
@@ -624,11 +681,11 @@ INPUT_ENCODING = UTF-8
# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS += *.c
FILE_PATTERNS += *.h
FILE_PATTERNS += *.asm
FILE_PATTERNS += *.S
FILE_PATTERNS += *.dox
FILE_PATTERNS = *.c \
*.h \
*.asm \
*.S \
*.dox
# The RECURSIVE tag can be used to turn specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.
@@ -636,13 +693,15 @@ FILE_PATTERNS += *.dox
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
@@ -744,9 +803,9 @@ INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C and C++ comments will always remain visible.
# fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = YES
STRIP_CODE_COMMENTS = NO
# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
@@ -780,7 +839,7 @@ USE_HTAGS = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = NO
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
@@ -828,27 +887,43 @@ HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
# standard header. Note that when using a custom header you are responsible
# for the proper inclusion of any scripts and style sheets that doxygen
# needs, which is dependent on the configuration options used.
# It is advised to generate a default header using "doxygen -w html
# header.html footer.html stylesheet.css YourConfigFile" and then modify
# that header. Note that the header is subject to change so you typically
# have to redo this when upgrading to a newer version of doxygen or when
# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER = documentation/tmpl/header.html
HTML_HEADER = ./documentation/tmpl/header.html
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER = documentation/tmpl/footer.html
HTML_FOOTER = ./documentation/tmpl/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
# fine-tune the look of the HTML output. If the tag is left blank doxygen
# will generate a default style sheet. Note that doxygen will try to copy
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
# style sheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET = documentation/tmpl/stylesheet.css
HTML_STYLESHEET = ./documentation/tmpl/stylesheet.css
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
# Doxygen will adjust the colors in the style sheet and background images
# according to this color. Hue is specified as an angle on a colorwheel,
# see http://en.wikipedia.org/wiki/Hue for more information.
# For instance the value 0 represents red, 60 is yellow, 120 is green,
@@ -878,20 +953,23 @@ HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = YES
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
HTML_ALIGN_MEMBERS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
# page has loaded.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
# entries shown in the various tree structured indices initially; the user
# can expand and collapse entries dynamically later on. Doxygen will expand
# the tree to such a level that at most the specified number of entries are
# visible (unless a fully collapsed tree already exceeds this amount).
# So setting the number of entries 1 will produce a full collapsed tree by
# default. 0 is a special value representing an infinite number of entries
# and will result in a full expanded tree by default.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
@@ -910,14 +988,14 @@ GENERATE_DOCSET = NO
# documentation sets from a single provider (such as a company or product suite)
# can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_FEEDNAME = "MetalSVM Documentation"
# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
# should uniquely identify the documentation set bundle. This should be a
# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.
DOCSET_BUNDLE_ID = de.rwth-aachen.lfbs
DOCSET_BUNDLE_ID = org.metalsvm
# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
@@ -1043,18 +1121,14 @@ GENERATE_ECLIPSEHELP = NO
ECLIPSE_DOC_ID = org.doxygen.Project
# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
# at top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it. Since the tabs have the same information as the
# navigation tree you can set this option to NO if you already set
# GENERATE_TREEVIEW to YES.
DISABLE_INDEX = NO
# This tag can be used to set the number of enum values (range [0,1..20])
# that doxygen will group on one line in the generated HTML documentation.
# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
ENUM_VALUES_PER_LINE = 4
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
@@ -1062,13 +1136,17 @@ ENUM_VALUES_PER_LINE = 4
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
# Since the tree basically has the same information as the tab index you
# could consider to set DISABLE_INDEX to NO when enabling this option.
GENERATE_TREEVIEW = NO
GENERATE_TREEVIEW = YES
# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# (range [0,1..20]) that doxygen will group on one line in the generated HTML
# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.
USE_INLINE_TREES = NO
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
@@ -1101,7 +1179,7 @@ FORMULA_TRANSPARENT = YES
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want to formulas look prettier in the HTML
# output. When enabled you also need to install MathJax separately and
# output. When enabled you may also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
USE_MATHJAX = NO
@@ -1110,11 +1188,18 @@ USE_MATHJAX = NO
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
# MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment.
# MATHJAX_RELPATH should be ../mathjax. The default value points to
# the MathJax Content Delivery Network so you can quickly see the result without
# installing MathJax.
# However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
MATHJAX_EXTENSIONS =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
@@ -1189,6 +1274,13 @@ EXTRA_PACKAGES =
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
# the generated latex document. The footer should contain everything after
# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!
LATEX_FOOTER =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
@@ -1222,6 +1314,12 @@ LATEX_HIDE_INDICES = NO
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
# http://en.wikipedia.org/wiki/BibTeX for more info.
LATEX_BIB_STYLE = plain
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
@@ -1253,7 +1351,7 @@ COMPACT_RTF = NO
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# Load style sheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
@@ -1395,10 +1493,10 @@ MACRO_EXPANSION = YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = YES
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
# in the INCLUDE_PATH (see below) will be search if a #include is found.
# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES
@@ -1423,31 +1521,14 @@ INCLUDE_FILE_PATTERNS =
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
# Doxygen messes up the attribute lines as c-structure names
PREDEFINED = __attribute__ (x)= \
__attribute__(x)= \
__attribute__ ((x))= \
__attribute__((x))= \
HAVE_ARCH_STRLEN \
HAVE_ARCH_STRNCPY \
HAVE_ARCH_STRCPY \
HAVE_ARCH_MEMCPY \
HAVE_ARCH_MEMSET \
CONFIG_VGA \
CONFIG_PCI \
CONFIG_LWIP \
CONFIG_VGA \
CONFIG_KEYBOARD \
CONFIG_MULTIBOOT \
CONFIG_ROCKCREEK \
SCC \
MS_BAREMETAL \
GORY
PREDEFINED = __attribute__(x)= \
__attribute__ (x)=
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code.
# Use the PREDEFINED tag if you want to use a different macro definition that
# overrules the definition found in the source code.
EXPAND_AS_DEFINED =
@@ -1462,22 +1543,18 @@ SKIP_FUNCTION_MACROS = YES
# Configuration::additions related to external references
#---------------------------------------------------------------------------
# The TAGFILES option can be used to specify one or more tagfiles.
# Optionally an initial location of the external documentation
# can be added for each tagfile. The format of a tag file without
# this location is as follows:
# The TAGFILES option can be used to specify one or more tagfiles. For each
# tag file the location of the external documentation should be added. The
# format of a tag file without this location is as follows:
#
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
#
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where "loc1" and "loc2" can be relative or absolute paths or
# URLs. If a location is present for each tag, the installdox tool
# does not have to be run to correct the links.
# Note that each tag file must have a unique name
# (where the name does NOT include the path)
# If a tag file is not located in the directory in which doxygen
# is run, you must also specify the path to the tagfile here.
# where "loc1" and "loc2" can be relative or absolute paths
# or URLs. Note that each tag file must have a unique name (where the name does
# NOT include the path). If a tag file is not located in the directory in which
# doxygen is run, you must also specify the path to the tagfile here.
TAGFILES =
@@ -1513,7 +1590,7 @@ PERL_PATH = /usr/bin/perl
# this option also works with HAVE_DOT disabled, but it is recommended to
# install and use dot, since it yields more powerful graphs.
CLASS_DIAGRAMS = NO
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see
@@ -1535,7 +1612,7 @@ HIDE_UNDOC_RELATIONS = YES
# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default)
HAVE_DOT = YES
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
# allowed to run in parallel. When set to 0 (the default) doxygen will
@@ -1545,13 +1622,12 @@ HAVE_DOT = YES
DOT_NUM_THREADS = 0
# By default doxygen will write a font called Helvetica to the output
# directory and reference it in all dot files that doxygen generates.
# When you want a differently looking font you can specify the font name
# using DOT_FONTNAME. You need to make sure dot is able to find the font,
# which can be done by putting it in a standard location or by setting the
# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
# containing the font.
# By default doxygen will use the Helvetica font for all dot files that
# doxygen generates. When you want a differently looking font you can specify
# the font name using DOT_FONTNAME. You need to make sure dot is able to find
# the font, which can be done by putting it in a standard location or by setting
# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font.
DOT_FONTNAME = Helvetica
@@ -1560,19 +1636,18 @@ DOT_FONTNAME = Helvetica
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the output directory to look for the
# FreeSans.ttf font (which doxygen will put there itself). If you specify a
# different font using DOT_FONTNAME you can set the path where dot
# can find it using this tag.
# By default doxygen will tell dot to use the Helvetica font.
# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
# set the path where dot can find it.
DOT_FONTPATH =
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# the CLASS_DIAGRAMS tag to NO.
# CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = NO
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
@@ -1592,6 +1667,15 @@ GROUP_GRAPHS = YES
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
# managable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
@@ -1609,7 +1693,7 @@ INCLUDE_GRAPH = YES
# documented header file showing the documented files that directly or
# indirectly include this file.
INCLUDED_BY_GRAPH = NO
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH and HAVE_DOT options are set to YES then
# doxygen will generate a call dependency graph for every global function
@@ -1630,9 +1714,9 @@ CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual one.
GRAPHICAL_HIERARCHY = NO
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.
@@ -1640,11 +1724,22 @@ GRAPHICAL_HIERARCHY = NO
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. Possible values are png, svg, gif or svg.
# If left blank png will be used.
# generated by dot. Possible values are svg, png, jpg, or gif.
# If left blank png will be used. If you choose svg you need to set
# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible in IE 9+ (other browsers do not have this requirement).
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
# Note that this requires a modern browser other than Internet Explorer.
# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible. Older versions of IE do not have SVG support.
INTERACTIVE_SVG = NO
# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.

Makefile
View file

@ -1,48 +1,64 @@
TOPDIR = $(shell pwd)
TOPDIR = $(shell pwd)
ARCH = x86
# For 64bit support, you have define BIT as 64
BIT=32
NAME = metalsvm
LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
DRIVERDIRS = drivers/net drivers/char
KERNDIRS = libkern kernel mm fs apps arch/$(ARCH)/kernel arch/$(ARCH)/mm arch/$(ARCH)/scc $(LWIPDIRS) $(DRIVERDIRS)
SUBDIRS = $(KERNDIRS)
STACKPROT=-fno-stack-protector
STACKPROT = -fno-stack-protector
# Set your own cross compiler tool chain prefix here
CROSSCOMPREFIX=
CROSSCOMPREFIX =
# Uncomment both lines if compiling for the SCC!
#CROSSCOMPREFIX=i386-unknown-linux-gnu-
#STACKPROT=
#CROSSCOMPREFIX = i386-unknown-linux-gnu-
#STACKPROT =
CC_FOR_TARGET=$(CROSSCOMPREFIX)gcc
CXX_FOR_TARGET=$(CROSSCOMPREFIX)g++
GCC_FOR_TARGET=$(CROSSCOMPREFIX)gcc
AR_FOR_TARGET=$(CROSSCOMPREFIX)ar
AS_FOR_TARGET=$(CROSSCOMPREFIX)as
LD_FOR_TARGET=$(CROSSCOMPREFIX)ld
NM_FOR_TARGET=$(CROSSCOMPREFIX)nm
OBJDUMP_FOR_TARGET=$(CROSSCOMPREFIX)objdump
OBJCOPY_FOR_TARGET=$(CROSSCOMPREFIX)objcopy
RANLIB_FOR_TARGET=$(CROSSCOMPREFIX)ranlib
STRIP_FOR_TARGET=$(CROSSCOMPREFIX)strip
READELF_FOR_TARGET=$(CROSSCOMPREFIX)readelf
NASM = nasm
EMU=qemu
GDB=gdb
CC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
CXX_FOR_TARGET = $(CROSSCOMPREFIX)g++
GCC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
CPP_FOR_TARGET = $(CROSSCOMPREFIX)cpp
AR_FOR_TARGET = $(CROSSCOMPREFIX)ar
AS_FOR_TARGET = $(CROSSCOMPREFIX)as
LD_FOR_TARGET = $(CROSSCOMPREFIX)ld
NM_FOR_TARGET = $(CROSSCOMPREFIX)nm
OBJDUMP_FOR_TARGET = $(CROSSCOMPREFIX)objdump
OBJCOPY_FOR_TARGET = $(CROSSCOMPREFIX)objcopy
RANLIB_FOR_TARGET = $(CROSSCOMPREFIX)ranlib
STRIP_FOR_TARGET = $(CROSSCOMPREFIX)strip
READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf
MAKE = make
NASMFLAGS = -felf32 -g
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# Compiler options for final code
CFLAGS = -g -m32 -march=i586 -Wall -O2 -fno-builtin -fstrength-reduce -fomit-frame-pointer -finline-functions -nostdinc $(INCLUDE) $(STACKPROT)
# Compiler options for debuuging
#CFLAGS = -g -O -m32 -march=i586 -Wall -fno-builtin -DWITH_FRAME_POINTER -nostdinc $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
RM = rm -rf
LDFLAGS = -T link.ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
NASM = nasm
# For 64bit code, you have to use qemu-system-x86_64
QEMU = qemu-system-i386
GDB = gdb
# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# For 64bit support, you have to define "-m64 -mno-red-zone" instead of "-m32 -march=i586"
# Compiler options for final code
CFLAGS = -g -m32 -march=i586 -Wall -O2 -fstrength-reduce -fomit-frame-pointer -finline-functions -ffreestanding $(INCLUDE) $(STACKPROT)
# Compiler options for debuging
#CFLAGS = -g -O -m32 -march=i586 -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
LDFLAGS = -T link$(BIT).ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
STRIP_DEBUG = --strip-debug
KEEP_DEBUG = --only-keep-debug
OUTPUT_FORMAT = -O elf32-i386
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
CFLAGS_FOR_NEWLIB = -m32 -march=i586 -O2 $(STACKPROT)
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
LDFLAGS_FOR_NEWLIB = -m32 -march=i586
# For 64bit support, you have to define -m64 instead of "-m32"
CFLAGS_FOR_TOOLS = -m32 -O2 -Wall
LDFLAGS_FOR_TOOLS =
# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS_FOR_NEWLIB = -felf32
# Prettify output
V = 0
@ -56,7 +72,7 @@ default: all
all: newlib tools $(NAME).elf
newlib:
$(MAKE) ARCH=$(ARCH) LDFLAGS="-m32" CFLAGS="-m32 -O2 -march=i586 $(STACKPROT)" NASMFLAGS="$(NASMFLAGS)" CC_FOR_TARGET=$(CC_FOR_TARGET) \
$(MAKE) ARCH=$(ARCH) BIT=$(BIT) LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" CFLAGS="$(CFLAGS_FOR_NEWLIB)" NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" CC_FOR_TARGET=$(CC_FOR_TARGET) \
CXX_FOR_TARGET=$(CXX_FOR_TARGET) \
GCC_FOR_TARGET=$(GCC_FOR_TARGET) \
AR_FOR_TARGET=$(AR_FOR_TARGET) \
@ -68,21 +84,22 @@ newlib:
RANLIB_FOR_TARGET=$(RANLIB_FOR_TARGET) \
STRIP_FOR_TARGET=$(STRIP_FOR_TARGET) \
READELF_FOR_TARGET=$(READELF_FOR_TARGET) -C newlib
tools:
$(MAKE) -C tools
$(MAKE) CFLAGS="$(CFLAGS_FOR_TOOLS)" LDFLAGS="$(LDFLAGS_FOR_TOOLS)" -C tools
$(NAME).elf:
$Q$(LD_FOR_TARGET) $(LDFLAGS) -o $(NAME).elf $^
@echo [OBJCOPY] $(NAME).sym
$Q$(OBJCOPY_FOR_TARGET) $(KEEP_DEBUG) $(NAME).elf $(NAME).sym
@echo [OBJCOPY] $(NAME).elf
$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(NAME).elf
$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(OUTPUT_FORMAT) $(NAME).elf
qemu: newlib tools $(NAME).elf
qemu -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
qemudbg: newlib tools $(NAME).elf
qemu -S -s -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
gdb: $(NAME).elf
make qemudbg > /dev/null &
@ -104,10 +121,15 @@ veryclean: clean
@echo [CC] $@
$Q$(CC_FOR_TARGET) -c -D__KERNEL__ $(CFLAGS) -o $@ $<
@echo [DEP] $*.dep
$Q$(CC_FOR_TARGET) -MF $*.dep -MT $*.o -MM $(CFLAGS) $<
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<
include/metalsvm/config.inc: include/metalsvm/config.h
@echo "; This file is generated automatically from the config.h file." > include/metalsvm/config.inc
@echo "; Before editing this, you should consider editing config.h." >> include/metalsvm/config.inc
@awk '/^#define MAX_CORES/{ print "%define MAX_CORES", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
@awk '/^#define KERNEL_STACK_SIZE/{ print "%define KERNEL_STACK_SIZE", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
%.o : %.asm
%.o : %.asm include/metalsvm/config.inc
@echo [ASM] $@
$Q$(NASM) $(NASMFLAGS) -o $@ $<

138
Makefile.scc Normal file
View file

@ -0,0 +1,138 @@
TOPDIR = $(shell pwd)
ARCH = x86
# For 64bit support, you have define BIT as 64
BIT=32
NAME = metalsvm
LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
DRIVERDIRS = drivers/net drivers/char
KERNDIRS = libkern kernel mm fs apps arch/$(ARCH)/kernel arch/$(ARCH)/mm arch/$(ARCH)/scc $(LWIPDIRS) $(DRIVERDIRS)
SUBDIRS = $(KERNDIRS)
STACKPROT = -fno-stack-protector
# Set your own cross compiler tool chain prefix here
CROSSCOMPREFIX =
# Uncomment both lines if compiling for the SCC!
CROSSCOMPREFIX = i386-unknown-linux-gnu-
STACKPROT =
CC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
CXX_FOR_TARGET = $(CROSSCOMPREFIX)g++
GCC_FOR_TARGET = $(CROSSCOMPREFIX)gcc
CPP_FOR_TARGET = $(CROSSCOMPREFIX)cpp
AR_FOR_TARGET = $(CROSSCOMPREFIX)ar
AS_FOR_TARGET = $(CROSSCOMPREFIX)as
LD_FOR_TARGET = $(CROSSCOMPREFIX)ld
NM_FOR_TARGET = $(CROSSCOMPREFIX)nm
OBJDUMP_FOR_TARGET = $(CROSSCOMPREFIX)objdump
OBJCOPY_FOR_TARGET = $(CROSSCOMPREFIX)objcopy
RANLIB_FOR_TARGET = $(CROSSCOMPREFIX)ranlib
STRIP_FOR_TARGET = $(CROSSCOMPREFIX)strip
READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf
MAKE = make
RM = rm -rf
NASM = nasm
# For 64bit code, you have to use qemu-system-x86_64
QEMU = qemu-system-i386
GDB = gdb
# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# For 64bit support, you have to define "-m64 -mno-red-zone" instead of "-m32 -march=i586"
# Compiler options for final code
CFLAGS = -g -m32 -march=i586 -Wall -O2 -fstrength-reduce -fomit-frame-pointer -finline-functions -ffreestanding $(INCLUDE) $(STACKPROT)
# Compiler options for debuging
#CFLAGS = -g -O -m32 -march=i586 -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
LDFLAGS = -T link$(BIT).ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
STRIP_DEBUG = --strip-debug
KEEP_DEBUG = --only-keep-debug
OUTPUT_FORMAT = -O elf32-i386
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
CFLAGS_FOR_NEWLIB = -m32 -march=i586 -O2 $(STACKPROT)
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
LDFLAGS_FOR_NEWLIB = -m32 -march=i586
# For 64bit support, you have to define -m64 instead of "-m32"
CFLAGS_FOR_TOOLS = -m32 -O2 -Wall
LDFLAGS_FOR_TOOLS =
# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS_FOR_NEWLIB = -felf32
# Prettify output
V = 0
ifeq ($V,0)
Q = @
P = > /dev/null
endif
default: all
all: newlib tools $(NAME).elf
newlib:
$(MAKE) ARCH=$(ARCH) BIT=$(BIT) LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" CFLAGS="$(CFLAGS_FOR_NEWLIB)" NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" CC_FOR_TARGET=$(CC_FOR_TARGET) \
CXX_FOR_TARGET=$(CXX_FOR_TARGET) \
GCC_FOR_TARGET=$(GCC_FOR_TARGET) \
AR_FOR_TARGET=$(AR_FOR_TARGET) \
AS_FOR_TARGET=$(AS_FOR_TARGET) \
LD_FOR_TARGET=$(LD_FOR_TARGET) \
NM_FOR_TARGET=$(NM_FOR_TARGET) \
OBJDUMP_FOR_TARGET=$(OBJDUMP_FOR_TARGET) \
OBJCOPY_FOR_TARGET=$(OBJCOPY_FOR_TARGET) \
RANLIB_FOR_TARGET=$(RANLIB_FOR_TARGET) \
STRIP_FOR_TARGET=$(STRIP_FOR_TARGET) \
READELF_FOR_TARGET=$(READELF_FOR_TARGET) -C newlib
tools:
$(MAKE) CFLAGS="$(CFLAGS_FOR_TOOLS)" LDFLAGS="$(LDFLAGS_FOR_TOOLS)" -C tools
$(NAME).elf:
$Q$(LD_FOR_TARGET) $(LDFLAGS) -o $(NAME).elf $^
@echo [OBJCOPY] $(NAME).sym
$Q$(OBJCOPY_FOR_TARGET) $(KEEP_DEBUG) $(NAME).elf $(NAME).sym
@echo [OBJCOPY] $(NAME).elf
$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(OUTPUT_FORMAT) $(NAME).elf
qemu: newlib tools $(NAME).elf
$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
qemudbg: newlib tools $(NAME).elf
$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
gdb: $(NAME).elf
make qemudbg > /dev/null &
$(GDB) -x script.gdb
clean:
$Q$(RM) $(NAME).elf $(NAME).sym *~
$Q$(MAKE) -C tools clean
@echo Cleaned.
veryclean: clean
$Q$(MAKE) -C newlib veryclean
@echo Very cleaned
#depend:
# for i in $(SUBDIRS); do $(MAKE) -k -C $$i depend; done
%.o : %.c
@echo [CC] $@
$Q$(CC_FOR_TARGET) -c -D__KERNEL__ $(CFLAGS) -o $@ $<
@echo [DEP] $*.dep
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<
include/metalsvm/config.inc: include/metalsvm/config.h
@echo "; This file is generated automatically from the config.h file." > include/metalsvm/config.inc
@echo "; Before editing this, you should consider editing config.h." >> include/metalsvm/config.inc
@awk '/^#define MAX_CORES/{ print "%define MAX_CORES", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
@awk '/^#define KERNEL_STACK_SIZE/{ print "%define KERNEL_STACK_SIZE", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
%.o : %.asm include/metalsvm/config.inc
	@echo [ASM] $@
	$Q$(NASM) $(NASMFLAGS) -o $@ $<

# BUGFIX: "emu" was listed but no such target exists; the real targets
# qemu, qemudbg and veryclean were missing from .PHONY, so same-named
# files in the tree would silently shadow them.
.PHONY: default all clean veryclean qemu qemudbg gdb newlib tools

include $(addsuffix /Makefile,$(SUBDIRS))

View file

@ -1,4 +1,4 @@
C_source := tests.c echo.c netio.c laplace.c gfx_client.c gfx_generic.c
C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c
MODULE := apps
include $(TOPDIR)/Makefile.inc

View file

@ -32,7 +32,9 @@
#include <metalsvm/stdio.h>
#ifdef CONFIG_LWIP
#include "tests.h"
#if defined(CONFIG_LWIP) && defined(START_ECHO)
#include <lwip/opt.h>
@ -101,6 +103,299 @@ echo_init(void)
}
/*-----------------------------------------------------------------------------------*/
#else
#include <lwip/debug.h>
#include <lwip/stats.h>
#include <lwip/tcp.h>
/*
 * TCP echo server example using raw API.
 *
 * Echos all bytes sent by connecting client,
 * and passively closes when client is done.
 *
 */

/* listening pcb created once by echo_init() */
static struct tcp_pcb *echo_pcb;

/* lifecycle of one accepted connection */
enum echo_states
{
  ES_NONE = 0,   /* not yet accepted */
  ES_ACCEPTED,   /* connection accepted, no data received yet */
  ES_RECEIVED,   /* at least one data chunk received, echoing */
  ES_CLOSING     /* remote side closed; drain remaining data, then close */
};

/* per-connection state, allocated in echo_accept(), freed in
 * echo_close()/echo_error(); passed to every callback via tcp_arg() */
struct echo_state
{
  u8_t state;          /* one of enum echo_states */
  u8_t retries;        /* reset on every ACK in echo_sent() */
  struct tcp_pcb *pcb; /* owning connection pcb */
  /* pbuf (chain) to recycle */
  struct pbuf *p;      /* received data not yet echoed back */
};

static err_t echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err);
static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err);
static void echo_error(void *arg, err_t err);
static err_t echo_poll(void *arg, struct tcp_pcb *tpcb);
static err_t echo_sent(void *arg, struct tcp_pcb *tpcb, u16_t len);
static void echo_send(struct tcp_pcb *tpcb, struct echo_state *es);
static void echo_close(struct tcp_pcb *tpcb, struct echo_state *es);
/* Create the listening pcb for the echo service on TCP port 7.
 * Allocation or bind failures leave the service unavailable; no
 * diagnostic is emitted (matches the original lwIP example). */
void
echo_init(void)
{
  echo_pcb = tcp_new();
  if (echo_pcb == NULL) {
    /* out of pcbs: no listener is created */
    return;
  }

  if (tcp_bind(echo_pcb, IP_ADDR_ANY, 7) != ERR_OK) {
    /* bind failed (port in use?): leave the pcb unused */
    return;
  }

  /* enter LISTEN state and install the accept callback */
  echo_pcb = tcp_listen(echo_pcb);
  tcp_accept(echo_pcb, echo_accept);
}
/* Accept callback: allocate per-connection state and wire up all
 * raw-API callbacks for the new pcb. Returns ERR_MEM when the state
 * struct cannot be allocated (lwIP then refuses the connection). */
static err_t
echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  struct echo_state *es;

  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);

  /* commonly observed practice to call tcp_setprio(): a listening
     server lowers the priority of its accepted pcbs */
  tcp_setprio(newpcb, TCP_PRIO_MIN);

  es = (struct echo_state *)mem_malloc(sizeof(struct echo_state));
  if (es == NULL)
    return ERR_MEM;

  es->state = ES_ACCEPTED;
  es->pcb = newpcb;
  es->retries = 0;
  es->p = NULL;

  /* pass newly allocated es to our callbacks */
  tcp_arg(newpcb, es);
  tcp_recv(newpcb, echo_recv);
  tcp_err(newpcb, echo_error);
  tcp_poll(newpcb, echo_poll, 0);

  return ERR_OK;
}
/* Receive callback: state machine driving the echo.
 * p == NULL signals remote close; otherwise the pbuf (chain) is either
 * echoed immediately (ES_ACCEPTED / ES_RECEIVED with empty queue),
 * chained onto the pending queue, or trashed (ES_CLOSING / unknown).
 * Ownership: on success this function takes over p (stored in es->p or
 * freed); on err != ERR_OK it frees p itself. */
static err_t
echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  struct echo_state *es;
  err_t ret_err;

  LWIP_ASSERT("arg != NULL",arg != NULL);
  es = (struct echo_state *)arg;
  if (p == NULL)
  {
    /* remote host closed connection */
    es->state = ES_CLOSING;
    if(es->p == NULL)
    {
      /* we're done sending, close it */
      echo_close(tpcb, es);
    } else {
      /* we're not done yet: keep echoing the queued data first */
      tcp_sent(tpcb, echo_sent);
      echo_send(tpcb, es);
    }
    ret_err = ERR_OK;
  } else if(err != ERR_OK) {
    /* cleanup, for unknown reason */
    if (p != NULL)
    {
      es->p = NULL;
      pbuf_free(p);
    }
    ret_err = err;
  } else if(es->state == ES_ACCEPTED) {
    /* first data chunk in p->payload */
    es->state = ES_RECEIVED;

    /* store reference to incoming pbuf (chain) */
    es->p = p;

    /* install send completion notifier */
    tcp_sent(tpcb, echo_sent);
    echo_send(tpcb, es);
    ret_err = ERR_OK;
  } else if (es->state == ES_RECEIVED) {
    /* read some more data */
    if(es->p == NULL)
    {
      /* queue drained meanwhile: echo the new chunk right away */
      es->p = p;
      tcp_sent(tpcb, echo_sent);
      echo_send(tpcb, es);
    } else {
      struct pbuf *ptr;

      /* chain pbufs to the end of what we recv'ed previously */
      ptr = es->p;
      pbuf_chain(ptr,p);
    }
    ret_err = ERR_OK;
  } else if(es->state == ES_CLOSING) {
    /* odd case, remote side closing twice, trash data */
    tcp_recved(tpcb, p->tot_len);
    es->p = NULL;
    pbuf_free(p);
    ret_err = ERR_OK;
  } else {
    /* unknown es->state, trash data */
    tcp_recved(tpcb, p->tot_len);
    es->p = NULL;
    pbuf_free(p);
    ret_err = ERR_OK;
  }
  return ret_err;
}
/* Error callback: lwIP has already freed the pcb at this point, so the
 * only cleanup left is releasing our per-connection state (if any). */
static void
echo_error(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(err);

  if (arg != NULL)
    mem_free((struct echo_state *)arg);
}
/* Periodic poll callback: retries deferred sends (e.g. after an earlier
 * ERR_MEM from tcp_write) and finishes a pending close once the queue
 * has drained. A pcb without state is aborted. */
static err_t
echo_poll(void *arg, struct tcp_pcb *tpcb)
{
  struct echo_state *es = (struct echo_state *)arg;

  if (es == NULL) {
    /* nothing to be done: no state attached, drop the connection */
    tcp_abort(tpcb);
    return ERR_ABRT;
  }

  if (es->p != NULL) {
    /* there is a remaining pbuf (chain): retry the echo */
    tcp_sent(tpcb, echo_sent);
    echo_send(tpcb, es);
  } else if (es->state == ES_CLOSING) {
    /* queue drained and remote already closed -> finish closing */
    echo_close(tpcb, es);
  }

  return ERR_OK;
}
/* Sent (ACK) callback: remote acknowledged len bytes. Reset the retry
 * counter, push more queued data if any, or complete a pending close. */
static err_t
echo_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
  struct echo_state *es = (struct echo_state *)arg;

  LWIP_UNUSED_ARG(len);

  es->retries = 0;

  if (es->p != NULL) {
    /* still got pbufs to send */
    tcp_sent(tpcb, echo_sent);
    echo_send(tpcb, es);
  } else if (es->state == ES_CLOSING) {
    /* no more pbufs to send and remote closed -> close our side */
    echo_close(tpcb, es);
  }

  return ERR_OK;
}
/* Write as much of the queued pbuf chain as fits into the send buffer.
 * For each pbuf written: advance es->p to the next link, take an extra
 * reference on it (tcp_write keeps a reference to the payload we just
 * handed over), free the written head, and open the receive window by
 * that many bytes via tcp_recved(). On ERR_MEM the head is put back and
 * echo_poll() will retry later. */
static void
echo_send(struct tcp_pcb *tpcb, struct echo_state *es)
{
  struct pbuf *ptr;
  err_t wr_err = ERR_OK;

  while ((wr_err == ERR_OK) &&
         (es->p != NULL) &&
         (es->p->len <= tcp_sndbuf(tpcb)))
  {
    ptr = es->p;

    /* enqueue data for transmission (flag 1 = copy the payload) */
    wr_err = tcp_write(tpcb, ptr->payload, ptr->len, 1);
    if (wr_err == ERR_OK)
    {
      u16_t plen;
      u8_t freed;

      plen = ptr->len;
      /* continue with next pbuf in chain (if any) */
      es->p = ptr->next;
      if(es->p != NULL)
      {
        /* new reference! keeps the tail alive after freeing the head */
        pbuf_ref(es->p);
      }
      /* chop first pbuf from chain */
      do
      {
        /* try hard to free pbuf: pbuf_free() returns the number of
           pbufs deallocated, 0 while other references still exist */
        freed = pbuf_free(ptr);
      } while(freed == 0);
      /* we can read more data now */
      tcp_recved(tpcb, plen);
    } else if(wr_err == ERR_MEM) {
      /* we are low on memory, try later / harder, defer to poll */
      es->p = ptr;
    } else {
      /* other problem ?? */
    }
  }
}
/* Tear down one connection: detach every callback from the pcb, release
 * the per-connection state, then close the pcb. */
static void
echo_close(struct tcp_pcb *tpcb, struct echo_state *es)
{
  /* detach all callbacks so lwIP never calls back into freed state */
  tcp_arg(tpcb, NULL);
  tcp_sent(tpcb, NULL);
  tcp_recv(tpcb, NULL);
  tcp_err(tpcb, NULL);
  tcp_poll(tpcb, NULL, 0);

  if (es != NULL)
    mem_free(es);

  tcp_close(tpcb);
}
#endif /* LWIP_NETCONN */
#endif
#endif /* CONFIG_LWIP */

View file

@ -19,11 +19,13 @@
#include <metalsvm/stdlib.h>
#ifdef CONFIG_LWIP
#if defined(CONFIG_LWIP) && LWIP_SOCKET
#include "gfx_client.h"
#include <lwip/sockets.h>
#ifdef CONFIG_GFX
static int myrank;
static int sockfd;
@ -38,7 +40,9 @@ int gfx_init(char* ip_str, char* port_str, int rank) {
char* hostname;
int port;
struct sockaddr_in serveraddr;
#if USE_GETHOSTBYNAME
struct hostent *server;
#endif
//*pargc -=2;
myrank = rank;
@ -144,3 +148,5 @@ int gfx_finalize(){
}
#endif
#endif

View file

@ -23,8 +23,10 @@
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include "tests.h"
#ifdef CONFIG_LWIP
#if defined(CONFIG_GFX) && defined(CONFIG_LWIP) && LWIP_SOCKET
#define BUFSIZE 1024

View file

@ -20,22 +20,7 @@
#include "gfx_client.h"
#include "gfx_generic.h"
#ifdef CONFIG_LWIP
int GFX_init(int* pargc, char*** pargv, int rank)
{
return gfx_init(pargc, pargv, rank);
}
int GFX_send(char* buf, int size, int tag)
{
return gfx_send(buf, size, tag);
}
int GFX_finalize()
{
return gfx_finalize();
}
#if defined(CONFIG_GFX) && defined(CONFIG_LWIP) && LWIP_SOCKET
int GFX_update()
{

View file

@ -22,11 +22,11 @@
#include "gfx_client.h"
#ifdef CONFIG_LWIP
#if defined(CONFIG_LWIP) && defined(CONFIG_GFX) && LWIP_SOCKET
int GFX_init(int* pargc, char*** pargv, int rank);
int GFX_send(char* buf, int size, int tag);
int GFX_finalize();
#define GFX_init(ip_str, port_str, rank) gfx_init(ip_str, port_str, rank)
#define GFX_send(buf, size, tag) gfx_send(buf, size, tag)
#define GFX_finalize() gfx_finalize()
int GFX_update();

374
apps/jacobi.c Normal file
View file

@ -0,0 +1,374 @@
/*
* Copyright 2011 Stefan Lankes, Alexander Pilz, Maximilian Marx, Michael Ober,
* Chair for Operating Systems, RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <asm/svm.h>
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/irqflags.h>
#include "tests.h"
#ifdef START_KERNEL_JACOBI
#define MATRIX_SIZE 256
#define MAXVALUE 1337
#define PAGE_SIZE 4096
#define CACHE_SIZE (256*1024)
/* shared-memory budget: N x (N+1) matrix + two N vectors + page padding + cache slack */
#define SIZE ((MATRIX_SIZE+1)*MATRIX_SIZE*sizeof(double)+2*MATRIX_SIZE*sizeof(double)+10*PAGE_SIZE+CACHE_SIZE)
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
#define RAND_MAX 32767

//#define SVM_TYPE SVM_STRONG
#define SVM_TYPE SVM_LAZYRELEASE

/* BUGFIX: fully parenthesize the expansion. The previous form
 *   (x) >= 0 ? (x) : -1.0*(x)
 * mis-parses when composed in a larger expression, e.g.
 * fabs(a) + fabs(b) groups as fabs(a ... (b) ? ... : ...). */
#define fabs(x) ((x) >= 0 ? (x) : -1.0*(x))

/* state of the local PRNG (freestanding kernel: no libc rand/srand) */
static unsigned int seed = 0;

/* Seed the local PRNG. Always returns 0. */
static int srand(unsigned int s)
{
	seed = s;
	return 0;
}

/* Pseudo-random generator based on Minimal Standard by
   Lewis, Goodman, and Miller in 1969.

   I[j+1] = a*I[j] (mod m)

   where a = 16807
         m = 2147483647

   Using Schrage's algorithm, a*I[j] (mod m) can be rewritten as:

     a*(I[j] mod q) - r*{I[j]/q}      if >= 0
     a*(I[j] mod q) - r*{I[j]/q} + m  otherwise

   where: {} denotes integer division
          q = {m/a} = 127773
          r = m (mod a) = 2836

   note that the seed value of 0 cannot be used in the calculation as
   it results in 0 itself
*/
static int rand(void)
{
	long k;
	long s = (long)(seed);

	if (s == 0)
		s = 0x12345987;

	k = s / 127773;
	s = 16807 * (s - k * 127773) - 2836 * k;
	if (s < 0)
		s += 2147483647;

	seed = (unsigned int)s;
	return (int)(s & RAND_MAX);
}
/* Flush the SCC core's L1 via the CL1FLUSHMB instruction, emitted as
 * raw bytes (0x0f 0x0a) because the assembler does not know this
 * SCC-specific opcode.
 * NOTE(review): the exact flush-vs-invalidate semantics are defined by
 * the SCC core — confirm against the SCC programmer's guide. */
static inline void cache_invalidate(void)
{
	asm volatile ( ".byte 0x0f; .byte 0x0a;\n" ); // CL1FLUSHMB
}
/* Allocate the N x (N+1) augmented system A|b in SVM shared memory and
 * let rank 0 fill it with a strictly diagonally dominant random system
 * whose solution vector is all ones.
 * Each rank zeroes only its own row range (same partitioning as in
 * jacobi(): row chunks rounded down to multiples of 4, the remainder
 * spread 4 rows at a time over the first ranks — presumably to keep
 * chunks cache-line aligned; TODO confirm).
 * Returns 0 on success, -2 when an allocation fails.
 * NOTE(review): the row-pointer array has N+1 slots but only indices
 * 0..N-1 are initialized; slot N appears unused. */
static int generate_empty_matrix(double*** A , unsigned int N, int rankID) {
	unsigned int iCnt;
	int i,j;
	unsigned int iter_start, iter_end, pad;
	int num = RCCE_NP;

	/* determine this rank's [iter_start, iter_end) row range */
	pad = N/num;
	if (pad % 4) {
		pad -= pad % 4;
		unsigned int p = (N - num * pad) / 4;
		if (rankID < p) {
			iter_start = rankID*(pad+4);
			iter_end = (rankID+1)*(pad+4);
		} else {
			iter_start = p*(pad+4)+(rankID-p)*pad;
			iter_end = p*(pad+4)+(rankID+1-p)*pad;
		}
	} else {
		iter_start = rankID*pad;
		iter_end = (rankID+1)*pad;
	}
	kprintf("iter_start %d, iter_end %d\n", iter_start, iter_end);

	/* row-pointer array is rank-local; the payload lives in SVM memory */
	*A = (double**) kmalloc((N+1)*sizeof(double*));
	if (*A == NULL)
		return -2; /* Error */

	svm_barrier(SVM_TYPE);
	**A = (double*) svm_malloc((N+1)*N*sizeof(double), SVM_TYPE);
	if (**A == NULL)
		return -2; /* Error */
	svm_barrier(SVM_TYPE);

	for(iCnt=1; iCnt<N; iCnt++) { /* Assign pointers in the first "real index"; Value from 1 to N (0 yet set, value N means N+1) */
		(*A)[iCnt] = &((*A)[0][iCnt*(N+1)]);
	}

	/* every rank zeroes its own rows in parallel */
	for(i=iter_start; i<iter_end; i++)
	{
		for(j=0; j<=N; j++)
			(*A)[i][j] = 0;
	}

	svm_flush(0);
	svm_invalidate();
	svm_barrier(SVM_TYPE);

	if(rankID == 0)
	{
		srand( 42 ) ; /* init random number generator */

		/*
		 * initialize the system of linear equations
		 * the result vector is one
		 */
		for (i = 0; i < N; i++)
		{
			double sum = 0.0;

			for (j = 0; j < N; j++)
			{
				if (i != j)
				{
					double c = ((double)rand()) / ((double)RAND_MAX) * MAXVALUE;

					sum += fabs(c);
					(*A)[i][j] = c;
					(*A)[i][N] += c;
				}
			}

			/*
			 * The Jacobi method will always converge if the matrix A is strictly or irreducibly diagonally dominant.
			 * Strict row diagonal dominance means that for each row, the absolute value of the diagonal term is
			 * greater than the sum of absolute values of other terms: |A[i][i]| > Sum |A[i][j]| with (i != j)
			 */
			(*A)[i][i] = sum + 2.0;
			(*A)[i][N] += sum + 2.0;
		}

		/* publish rank 0's initialization to the other ranks */
		svm_flush(0);
		svm_invalidate();
	}

	svm_barrier(SVM_TYPE);

	return 0;
}
/* Distributed Jacobi solver over SVM shared memory (SCC/RCCE).
 * Each rank iterates its own row range of the N x N system created by
 * generate_empty_matrix(); convergence is checked every 5000 iterations
 * but the break is deliberately disabled so runs take a fixed number of
 * iterations (benchmark mode). Returns 0 on success, -1 on allocation
 * failure. The argv parameter is unused. */
int jacobi(void* argv)
{
	volatile double* temp = NULL;   /* scratch page for the (disabled) distributed norm */
	volatile double* swap;
	unsigned int i, j, k, iter_start, iter_end, pad;
	unsigned int iterations = 0;
	double error, norm, max = 0.0;
	double** A=0;
	volatile double* X;             /* current iterate */
	volatile double* X_old;         /* previous iterate */
	double xi;
	uint64_t start, stop;
	int rankID, num;

	rankID = RCCE_IAM;
	num = RCCE_NP;

	if (generate_empty_matrix(&A,MATRIX_SIZE,rankID) < 0)
	{
		kprintf("generate_empty_matrix() failed...\n");
		return -1;
	}

	if (rankID == 0)
		kprintf("generate_empty_matrix() done...\n");

	svm_barrier(SVM_TYPE);

	/* solution vectors live in SVM memory, shared by all ranks */
	X = (double*) svm_malloc(MATRIX_SIZE*sizeof(double), SVM_TYPE);
	X_old = (double*) svm_malloc(MATRIX_SIZE*sizeof(double), SVM_TYPE);
	if (X == NULL || X_old == NULL)
	{
		kprintf("X or X_old is NULL...\n");
		return -1;
	}

	temp = (double*) svm_malloc(PAGE_SIZE, SVM_LAZYRELEASE);
	if (temp == NULL)
	{
		kprintf("temp is NULL...\n");
		return -1;
	}

	if (rankID == 0) {
		/* rank 0 seeds the start vector with random values */
		memset((void*)temp, 0x00, PAGE_SIZE);

		for(i=0; i<MATRIX_SIZE; i++)
		{
			X[i] = ((double)rand()) / ((double)RAND_MAX) * 10.0;
			X_old[i] = 0.0;
		}
		svm_flush(0);
		svm_invalidate();
	}

	svm_barrier(SVM_TYPE);
	if (rankID == 0)
		kprintf("start calculation...\n");
	svm_barrier(SVM_TYPE);

	/* row partitioning — must match generate_empty_matrix() exactly */
	pad = MATRIX_SIZE/num;
	if (pad % 4) {
		pad -= pad % 4;
		unsigned int p = (MATRIX_SIZE - num * pad) / 4;
		if (rankID < p) {
			iter_start = rankID*(pad+4);
			iter_end = (rankID+1)*(pad+4);
		} else {
			iter_start = p*(pad+4)+(rankID-p)*pad;
			iter_end = p*(pad+4)+(rankID+1-p)*pad;
		}
	} else {
		iter_start = rankID*pad;
		iter_end = (rankID+1)*pad;
	}

	start = rdtsc();

	//while(1)
	for(k=0; k<865000; k++)
	{
		iterations++;

		/* ping-pong the vectors instead of copying */
		swap = X_old;
		X_old = X;
		X = swap;

		for(i=iter_start; i<iter_end; i++)
		{
			/* xi = sum over j != i of A[i][j] * X_old[j] */
			for(j=0, xi=0.0; j<i; j++)
				xi += A[i][j]* X_old[j];

			for(j=i+1; j<MATRIX_SIZE; j++)
				xi += A[i][j] * X_old[j];

			/* column MATRIX_SIZE holds the right-hand side b */
			X[i] = (A[i][MATRIX_SIZE] - xi) / A[i][i];
		}

		if (iterations % 5000 == 0 ) {/* calculate the Euclidean norm between X_old and X*/
			norm = 0.0;
#if 1
			/* simple variant: every rank computes the full norm redundantly */
			svm_barrier(SVM_TYPE);
			for(i=0; i<MATRIX_SIZE; i++)
				norm += (X_old[i] - X[i]) * (X_old[i] - X[i]);
			svm_barrier(SVM_TYPE);
#else
			/* distributed variant: partial norms accumulated in temp[0]
			 * under a global RCCE lock */
			if ((num > 1) && (rankID == 0)) { /* write always a complete cache line */
				memset((void*)temp, 0, CACHE_LINE);
				svm_flush(0);
			}
			svm_barrier(SVM_TYPE);

			for(i=iter_start; i<iter_end; i++)
				norm += (X_old[i] - X[i]) * (X_old[i] - X[i]);

			if (num > 1) {
				RCCE_acquire_lock(0);
				svm_invalidate();
				norm += temp[0];
				temp[0] = norm;
				svm_flush(0);
				RCCE_release_lock(0);
				svm_barrier(SVM_LAZYRELEASE);
				norm = temp[0];
			}
#endif
			/* check the break condition (break intentionally disabled:
			 * fixed iteration count for benchmarking) */
			norm /= (double) MATRIX_SIZE;
			if (norm < 0.0000001)
				; //break;
		} else {
			svm_barrier(SVM_TYPE);
		}

		//if (k % 100 == 0)
		//	kprintf("k = %d\n", k);
	}

	stop = rdtsc();

	if (MATRIX_SIZE < 16) {
		kprintf("Print the solution...\n");
		/* print solution (scaled integers: kprintf has no float support) */
		for(i=0; i<MATRIX_SIZE; i++) {
			for(j=0; j<MATRIX_SIZE; j++)
				kprintf("%u/100\t", (uint32_t) (100*A[i][j]));
			kprintf("*\t%u/100\t=\t%u/100\n", (uint32_t) (100*X[i]), (uint32_t) (100*A[i][MATRIX_SIZE]));
		}
	}

	kprintf("Check the result...\n");
	/*
	 * check the result
	 * X[i] have to be 1
	 */
	for(i=0; i<MATRIX_SIZE; i++) {
		double diff = X[i] - 1.0;

		error = fabs(diff);
		if (max < error)
			max = error;
		if (error > 0.01)
			kprintf("Result is on position %d wrong (%d/10000 != 1.0, error %d/10000)\n", i, (int) (10000.0*X[i]), (int) (10000.0*error));
	}
	kprintf("maximal error is %d/10000\n", (int) (10000.0*max));

	kprintf("\nmatrix size: %d x %d\n", MATRIX_SIZE, MATRIX_SIZE);
	kprintf("number of iterations: %d\n", iterations);
	kprintf("Calculation time: %llu ms (%llu ticks)\n", (stop-start)/(1000ULL*get_cpu_frequency()), stop-start);

	return 0;
}
#endif

View file

@ -22,7 +22,9 @@
#include <asm/irqflags.h>
#include <asm/svm.h>
#ifdef CONFIG_ROCKCREEK
#include "tests.h"
#ifdef START_KERNEL_LAPLACE
#include <asm/RCCE.h>
#include <asm/iRCCE.h>
@ -49,17 +51,14 @@
#define TMAX (100*50)
//#define DATA unsigned int
#define DATA double
//#define DATA volatile unsigned int
#define DATA volatile double
//#define FIX 1024
#define FIX 1
#define USE_STRONG 1
#define USE_LAZYRELEASE 0
#if USE_STRONG && USE_LAZYRELEASE
#error Please, use only one memory model
#endif
#define SVM_TYPE SVM_STRONG
//#define SVM_TYPE SVM_LAZYRELEASE
static inline double pow(double a, int b)
{
@ -76,7 +75,6 @@ int laplace(void *arg)
{
//char* argv[] = {"/bin/laplace", "192.168.4.254", "12301", NULL};
//int argc = 3;
uint32_t flags;
#ifdef _USE_GFX
uint32_t ret;
#endif
@ -89,23 +87,21 @@ int laplace(void *arg)
int n;
int m;
volatile DATA **NewValues;
volatile DATA **OldValues;
DATA **NewValues;
DATA **OldValues;
volatile DATA **tmp;
DATA **tmp;
volatile char **BufValues;
char **BufValues;
uint64_t start, end;
flags = irq_nested_disable();
my_rank = RCCE_ue();
num_ranks = RCCE_num_ues();
irq_nested_enable(flags);
my_rank = RCCE_IAM;
num_ranks = RCCE_NP;
#ifdef _USE_GFX
kprintf("Laplace calls gfx_init\n");
ret = gfx_init("192.168.4.254" /*&argc */ , "5000" /*&argv */ , my_rank);
ret = GFX_init("192.168.4.254" /*&argc */ , "5000" /*&argv */ , my_rank);
kprintf("gfx_init: %d\n", ret);
#endif
@ -118,7 +114,7 @@ int laplace(void *arg)
if (my_rank == num_ranks - 1)
n += N % num_ranks;
kprintf("(%d) %d x %d / offsets: %d, %d / (%d x %d)\n", my_rank, N, M, I, J, n, m);
kprintf("(%d of %d) %d x %d / offsets: %d, %d / (%d x %d)\n", my_rank, num_ranks, N, M, I, J, n, m);
#ifdef _USE_GFX
if (my_rank == 0) {
@ -140,19 +136,15 @@ int laplace(void *arg)
#endif
NewValues = (volatile DATA **)kmalloc((N + 2) * sizeof(DATA *));
#if USE_STRONG
NewValues[0] = (DATA *) svmmalloc((N + 2) * (M + 2) * sizeof(DATA), SVM_STRONG);
#elif USE_LAZYRELEASE
NewValues[0] = (DATA *) svmmalloc((N + 2) * (M + 2) * sizeof(DATA), SVM_LAZYRELEASE);
#ifdef SVM_TYPE
NewValues[0] = (DATA *) svm_malloc((N + 2) * (M + 2) * sizeof(DATA), SVM_TYPE);
#else
NewValues[0] = (DATA *) kmalloc((N + 2) * (M + 2) * sizeof(DATA));
#endif
OldValues = (volatile DATA **)kmalloc((N + 2) * sizeof(DATA *));
#if USE_STRONG
OldValues[0] = (DATA *) svmmalloc((N + 2) * (M + 2) * sizeof(DATA), SVM_STRONG);
#elif USE_LAZYRELEASE
OldValues[0] = (DATA *) svmmalloc((N + 2) * (M + 2) * sizeof(DATA), SVM_LAZYRELEASE);
#ifdef SVM_TYPE
OldValues[0] = (DATA *) svm_malloc((N + 2) * (M + 2) * sizeof(DATA), SVM_TYPE);
#else
OldValues[0] = (DATA *) kmalloc((N + 2) * (M + 2) * sizeof(DATA));
#endif
@ -162,14 +154,16 @@ int laplace(void *arg)
OldValues[i] = OldValues[i - 1] + (M + 2);
}
BufValues = (volatile char **)kmalloc((N) * sizeof(char *));
BufValues = (char **)kmalloc((N) * sizeof(char *));
BufValues[0] = (char *)kmalloc((N) * (M) * sizeof(char));
for (i = 1; i < N; i++) {
BufValues[i] = BufValues[i - 1] + (M);
}
RCCE_barrier(&RCCE_COMM_WORLD);
#ifdef SVM_TYPE
svm_barrier(SVM_TYPE);
#endif
kprintf("(%d) Memory allocated!\n", my_rank);
@ -178,46 +172,67 @@ int laplace(void *arg)
int height = N + 2;
int width = M + 2;
/*if (my_rank == 0) {
for (i = 0; i < N + 2; i++) {
for (j = 0; j < M + 2; j++) {*/
{
for (i = I; i < I + n + 2; i++) {
for (j = 0; j < M + 2; j++) {
if (my_rank == 0) {
for (j = 0; j < m + 2; j++) {
double X = (((double)(J+j) / (double)width) * 5.0) - 2.5;
double Y = 0.0;
double Z = 0.0;
double X = (((double)j / (double)width) * 5.0) - 2.5;
double Y = (((double)i / (double)height) * 5.0) - 2.5;
double Z = 0.0;
Z = pow((4 - (X + 1) * (X + 1) - 4 * Y * Y), 2) + pow(1.2 * (1 - X), 3) - 10;
if (Z < 0.0)
Z = 1.0;
else if (Z > 0.0)
Z = 0.0;
Z = pow((4 - (X + 1) * (X + 1) - 4 * Y * Y), 2) + pow(1.2 * (1 - X), 3) - 10;
if (Z < 0.0)
Z = 1.0;
else if (Z > 0.0)
Z = 0.0;
OldValues[i][j] = NewValues[i][j] = (DATA) ((Z) * 255.0) * FIX;
//if(NewValues[i][j] < 0) NewValues[i][j] = 0;
}
OldValues[0][J+j] = NewValues[0][J+j] = (DATA) ((Z) * 255.0) * FIX;
}
}
#if USE_LAZYRELEASE
svm_flush();
svm_invalidate();
for (i = 1; i < n+1; i++) {
for (j = 0; j < m + 2; j++) {
double X = (((double)(J+j) / (double)width) * 5.0) - 2.5;
double Y = (((double)(I+i) / (double)height) * 5.0) - 2.5;
double Z = 0.0;
Z = pow((4 - (X + 1) * (X + 1) - 4 * Y * Y), 2) + pow(1.2 * (1 - X), 3) - 10;
if (Z < 0.0)
Z = 1.0;
else if (Z > 0.0)
Z = 0.0;
OldValues[I+i][J+j] = NewValues[I+i][J+j] = (DATA) ((Z) * 255.0) * FIX;
}
}
if (my_rank == num_ranks - 1) {
for (j = 0; j < m + 2; j++) {
double X = (((double)(J+j) / (double)width) * 5.0) - 2.5;
double Y = (((double)(I+n+1) / (double)height) * 5.0) - 2.5;
double Z = 0.0;
Z = pow((4 - (X + 1) * (X + 1) - 4 * Y * Y), 2) + pow(1.2 * (1 - X), 3) - 10;
if (Z < 0.0)
Z = 1.0;
else if (Z > 0.0)
Z = 0.0;
OldValues[I+n+1][J+j] = NewValues[I+n+1][J+j] = (DATA) ((Z) * 255.0) * FIX;
}
}
#ifdef SVM_TYPE
svm_barrier(SVM_TYPE);
#endif
RCCE_barrier(&RCCE_COMM_WORLD);
kprintf("(%d) Arrays initialized!\n", my_rank);
start = rdtsc();
start = rdtsc();
// START ITERATIONS LOOP
for (t = 0; t < TMAX; t++) {
//kprintf("(%d): o:%u n:%u \n",my_rank,(unsigned int)(OldValues[I+1][J+1]), (unsigned int)(NewValues[I+1][J+1]) );
//kprintf("(%d): t: %u\n", my_rank, t);
// over all collumns
for (i = 1; i < n + 1; i++) {
@ -230,16 +245,14 @@ int laplace(void *arg)
OldValues[I + i][J + j + 1]) / 4;
}
}
#if USE_LAZYRELEASE
svm_flush();
svm_invalidate();
#endif
tmp = NewValues;
NewValues = OldValues;
OldValues = tmp;
RCCE_barrier(&RCCE_COMM_WORLD);
#ifdef SVM_TYPE
svm_barrier(SVM_TYPE);
#endif
#ifdef _USE_GFX
if ((my_rank == 0) && (t % 50 == 0)) {
@ -263,18 +276,22 @@ int laplace(void *arg)
GFX_update();
}
RCCE_barrier(&RCCE_COMM_WORLD);
#ifdef SVM_TYPE
svm_barrier(SVM_TYPE);
#endif
#endif
// END ITERATIONS LOOP
}
RCCE_barrier(&RCCE_COMM_WORLD);
#ifdef SVM_TYPE
svm_barrier(SVM_TYPE);
#endif
end = rdtsc();
kprintf("Calculation time: %llu ms (%llu ticks)\n", (end-start)/(1000ULL*get_cpu_frequency()), end-start);
#if USE_STRONG || USE_LAZYRELEASE
#ifdef SVM_TYPE
svm_statistics();
#endif
}

View file

@ -24,6 +24,8 @@
#include <metalsvm/syscall.h>
#include <metalsvm/errno.h>
#include "tests.h"
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
@ -46,7 +48,7 @@
/* See http://www.nwlab.net/art/netio/netio.html to get the netio tool */
#ifdef CONFIG_LWIP
#if defined(START_NETIO) && defined(CONFIG_LWIP)
#ifdef CONFIG_ROCKCREEK
#if USE_SOCKET_BYPASSING // for socket bypassing
#include <lwip/opt.h>
@ -90,7 +92,7 @@ static struct in_addr addr_server;
static int send_data(int socket, void *buffer, size_t size, int flags)
{
int rc = send(socket, buffer, size, flags);
ssize_t rc = send(socket, buffer, size, flags);
if (rc < 0)
{
@ -106,7 +108,7 @@ static int send_data(int socket, void *buffer, size_t size, int flags)
static int recv_data(int socket, void *buffer, size_t size, int flags)
{
size_t rc = recv(socket, buffer, size, flags);
ssize_t rc = recv(socket, buffer, size, flags);
if (rc < 0) {
kprintf("recv failed: %d\n", errno);

View file

@ -26,6 +26,9 @@
#include <metalsvm/syscall.h>
#include <metalsvm/vma.h>
#include <metalsvm/page.h>
#ifdef CONFIG_LWIP
#include <lwip/opt.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/icc.h>
#include <asm/RCCE.h>
@ -36,22 +39,28 @@
#include <asm/svm.h>
#endif
#include "tests.h"
int laplace(void* arg);
int jacobi(void* arg);
void echo_init(void);
void netio_init(void);
#ifdef START_CONSUMER_PRODUCER
static sem_t consuming, producing;
static mailbox_int32_t mbox;
static int val = 0;
int laplace(void* arg);
static int consumer(void* arg)
{
int i, m = 0;
for(i=0; i<5; i++) {
sem_wait(&consuming, 0);
kprintf("Consumer got %d\n", val);
val = 0;
sem_post(&producing);
}
kprintf("Consumer got %d\n", val);
val = 0;
sem_post(&producing);
}
for(i=0; i<5; i++) {
mailbox_int32_fetch(&mbox, &m, 0);
@ -80,7 +89,9 @@ static int producer(void* arg)
return 0;
}
#endif
#if defined(START_FOO) || defined(START_JOIN_TEST)
static int foo(void* arg)
{
int i;
@ -89,32 +100,48 @@ static int foo(void* arg)
return 0;
for(i=0; i<5; i++) {
kprintf("%s\n", (char*) arg);
kprintf("Message from core %u: %s\n", CORE_ID, (char*) arg);
sleep(1);
}
return 42;
}
#endif
#ifdef CONFIG_ROCKCREEK
#ifdef START_MAIL_PING
static int mail_ping(void* arg) {
//icc_mail_ping();
icc_mail_ping_irq();
int i;
//for(i=0; i<5; ++i)
// icc_mail_ping();
for(i=0; i<5; ++i)
icc_mail_ping_irq();
//icc_mail_ping_jitter();
//icc_irq_ping();
//icc_mail_datarates();
//icc_halt();
return 0;
}
#endif
#ifdef START_MAIL_NOISE
static int mail_noise(void*arg) {
icc_mail_noise(); // generate noise in the mesh
return 0;
}
#endif
#ifdef START_SVM_TEST
/* N has to be multiple of UEs */
#define N 1024
//#define N 514
#define LAZY
//#define N 512
//#define N 128
//#define SVM_TYPE SVM_STRONG
#define SVM_TYPE SVM_LAZYRELEASE
volatile static int* A[N];
volatile static int* B[N];
@ -130,14 +157,19 @@ static int svm_test(void *arg)
{
uint64_t start, end;
uint32_t i, j, k;
uint32_t svm_flags;
int my_ue, num_ues;
register int tmp;
kputs("Start SVM test...\n");
RCCE_barrier(&RCCE_COMM_WORLD);
my_ue = RCCE_ue();
num_ues = RCCE_num_ues();
#if 0
#if 1
if (!my_ue) {
// allocate and initialize SVM region
A[0] = (int*) kmalloc(3*N*N*sizeof(int));
@ -182,13 +214,19 @@ static int svm_test(void *arg)
#endif
// allocate and initialize SVM region
#ifndef LAZY
A[0] = (int*) svmmalloc(3*N*N*sizeof(int), SVM_STRONG);
#else
A[0] = (int*) svmmalloc(3*N*N*sizeof(int), SVM_LAZYRELEASE);
#endif
if (!my_ue)
svm_flags = SVM_TYPE;
if (svm_flags & SVM_LAZYRELEASE)
kputs("Use Lazy Release consistency!\n");
else
kputs("Use Strong Release consistency!\n");
A[0] = (int*) svm_malloc(3*N*N*sizeof(int), svm_flags);
#if 1
if (!my_ue)
memset((void*) A[0], 0x00, 3*N*N*sizeof(int));
#endif
// initialize matrices
for(i=0; i<N; i++) {
@ -196,6 +234,18 @@ static int svm_test(void *arg)
B[i] = A[0] + (i*N + N*N);
C[i] = A[0] + (i*N + 2*N*N);
}
#if 1
// distriubute page frames over all MC via affinity on first touch
for(i=my_ue*(N/num_ues); i<(my_ue+1)*(N/num_ues); i++) {
memset(A[i], 0x00, N*sizeof(int));
memset(B[i], 0x00, N*sizeof(int));
memset(C[i], 0x00, N*sizeof(int));
}
#endif
svm_barrier(svm_flags);
if (!my_ue) {
for(i=0; i<N; i++) {
A[i][i] = 1;
@ -204,17 +254,16 @@ static int svm_test(void *arg)
}
}
svm_flush();
svm_barrier(svm_flags);
RCCE_barrier(&RCCE_COMM_WORLD);
kputs("Start parallel calculation...\n");
start = rdtsc();
start = rdtsc();
// Now, we need only read access on A and B
change_page_permissions((size_t) A[0], (size_t) (A[0]+2*N*N), VMA_CACHEABLE|VMA_READ);
RCCE_barrier(&RCCE_COMM_WORLD);
//change_page_permissions((size_t) A[0], (size_t) (A[0]+2*N*N), VMA_CACHEABLE|VMA_READ);
//svm_barrier(SVM_TYPE);
// start calculation
for(i=my_ue*(N/num_ues); i<(my_ue+1)*(N/num_ues); i++) {
@ -226,9 +275,8 @@ static int svm_test(void *arg)
}
}
svm_flush();
RCCE_barrier(&RCCE_COMM_WORLD);
svm_barrier(SVM_TYPE);
end = rdtsc();
kputs("Check results...\n");
@ -241,17 +289,17 @@ static int svm_test(void *arg)
for(j=0; (j<N) && (err < 32); j++) {
if (C[i][j] != i+j) {
err++;
kprintf("Wrong value at C[%u][%u] = %u, B[%u][%u] = %u\n", i, j, C[i][j], i, j, GET_B(i,j));
kprintf("Wrong value at C[%u][%u] = %u, B[%u][%u] = %u = %u\n", i, j, C[i][j], i, j, GET_B(i,j), B[i][j]);
}
}
}
}
RCCE_barrier(&RCCE_COMM_WORLD);
svm_barrier(SVM_TYPE);
kprintf("Calculation time (par): %llu ms (%llu ticks)\n", (end-start)/(1000ULL*get_cpu_frequency()), end-start);
svmfree((void*) A[0], 3*N*sizeof(int));
svm_free((void*) A[0], 3*N*sizeof(int));
svm_statistics();
@ -259,6 +307,82 @@ static int svm_test(void *arg)
}
#endif
#ifdef START_SVM_BENCH
static int svm_bench(void *arg)
{
volatile uint32_t* array = NULL;
uint64_t start, end;
uint32_t i;
const uint32_t size = N*N*sizeof(uint32_t);
const uint32_t svm_flags = SVM_TYPE;
if (RCCE_IAM > 1)
return -1;
if (svm_flags & SVM_LAZYRELEASE)
kputs("Use Lazy Release consistency!\n");
else
kputs("Use Strong Release consistency!\n");
svm_barrier(svm_flags);
start = rdtsc();
start = rdtsc();
array = (volatile uint32_t*) svm_malloc(size, svm_flags);
end = rdtsc();
if (BUILTIN_EXPECT(!array, 0)) {
kprintf("Out of memory\n");
return -1;
}
kprintf("Time to allocate %u Bytes: %llu usec (%llu ticks)\n", size, (end-start)/get_cpu_frequency(), end-start);
svm_barrier(svm_flags);
if (!RCCE_IAM) {
start = rdtsc();
for(i=0; i<size/sizeof(uint32_t); i+=PAGE_SIZE/sizeof(uint32_t))
array[i] = 0;
end = rdtsc();
kprintf("Time to create %u page frames: %llu usec (%llu ticks)\n", size >> PAGE_SHIFT, (end-start)/get_cpu_frequency(), end-start);
}
svm_barrier(svm_flags);
if (RCCE_IAM) {
start = rdtsc();
for(i=0; i<size/sizeof(uint32_t); i+=PAGE_SIZE/sizeof(uint32_t))
array[i] = 1;
end = rdtsc();
kprintf("Time to map %u page frames: %llu usec (%llu ticks)\n", size >> PAGE_SHIFT, (end-start)/get_cpu_frequency(), end-start);
}
svm_barrier(svm_flags);
if (!RCCE_IAM) {
start = rdtsc();
for(i=0; i<size/sizeof(uint32_t); i+=PAGE_SIZE/sizeof(uint32_t))
array[i] = 0;
end = rdtsc();
kprintf("Time to get access permissions of %u page frames: %llu usec (%llu ticks)\n", size >> PAGE_SHIFT, (end-start)/get_cpu_frequency(), end-start);
}
svm_barrier(svm_flags);
start = rdtsc();
change_page_permissions((size_t) array, size, VMA_CACHEABLE|VMA_READ);
end = rdtsc();
kprintf("Time to change access permissions of %u page frames: %llu usec (%llu ticks)\n", size >> PAGE_SHIFT, (end-start)/get_cpu_frequency(), end-start);
svm_barrier(svm_flags);
svm_free((void*) array, N*N*sizeof(uint32_t));
svm_statistics();
return 0;
}
#endif
#ifdef START_JOIN_TEST
static int join_test(void* arg)
{
tid_t id, ret;
@ -275,7 +399,9 @@ static int join_test(void* arg)
return 0;
}
#endif
#ifdef START_PI
#ifndef M_PI
#define M_PI 3.14159265358979323846264338327950288 /* pi */
#endif
@ -300,37 +426,153 @@ static int pi(void* arg)
return 0;
}
#endif
#ifdef START_MEASURE_CTX_SWITCH
#define REPS 10000
volatile uint64_t t1, t2;
volatile int stop = !!0;
volatile int sid = 0;
static int measure_ctx_switch(void* arg)
{
int id = !!(int)arg;
int oid = !id;
uint64_t freq = get_cpu_frequency() *1000 *1000;
uint64_t diff, min = (uint64_t)-1, max = 0, avg = 0;
int i;
uint32_t a=0,b,c,d;
// Size of a timeslice in ticks
uint64_t timeslice = freq / TIMER_FREQ;
kprintf("ID: %d, ", id);
kprintf("Measuring SW task switching.\n");
for (i=0; i < REPS && stop == 0; i++) {
while(id == sid && stop == 0) {
t2 = rdtsc();
cpuid(0,&a,&b,&c,&d);
}
cpuid(0,&a,&b,&c,&d);
diff = rdtsc() -t2;
// The last measurement is garbage
if (stop) break;
// The first ones are garbage, too
if (i < 5) goto next_try;
if (diff >= timeslice) {
i--;
goto next_try;
}
kprintf("%i: diff= %llu, i= %i\n", id, diff, i);
if (diff > max) max = diff;
if (diff < min) min = diff;
avg += diff;
next_try:
sid = id;
}
avg /= i-5;
stop = 1;
kprintf("maximum gap: %llu ticks\n", max);
kprintf("minimum gap: %llu ticks\n", min);
kprintf("average gap: %llu ticks\n", avg);
kprintf("Timeslice size: %llu ticks\n", timeslice);
return 0;
}
#endif
int test_init(void)
{
// char* argv[] = {"/bin/mshell", NULL};
char* argv[] = {"/bin/tests", NULL};
#ifdef START_HELLO
char* hello_argv[] = {"/bin/hello", NULL};
#endif
#ifdef START_TESTS
char* tests_argv[] = {"/bin/tests", NULL};
#endif
#ifdef START_JACOBI
char* jacobi_argv[] = {"/bin/jacobi", NULL};
#endif
#ifdef START_MMNIF_TEST
char* server_argv[] = {"/bin/server", "6789", NULL};
char* client_argv[] = {"/bin/client", "192.168.0.1", "6789", NULL};
#endif
#ifdef START_ECHO
echo_init();
#endif
#ifdef START_NETIO
netio_init();
#endif
#ifdef START_CONSUMER_PRODUCER
sem_init(&producing, 1);
sem_init(&consuming, 0);
mailbox_int32_init(&mbox);
create_kernel_task(NULL, producer, NULL, NORMAL_PRIO);
create_kernel_task(NULL, consumer, NULL, NORMAL_PRIO);
#endif
#ifdef START_MEASURE_CTX_SWITCH
create_kernel_task(NULL, measure_ctx_switch, (int)0, NORMAL_PRIO);
create_kernel_task(NULL, measure_ctx_switch, (int)1, NORMAL_PRIO);
#endif
#ifdef START_FOO
create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
//create_kernel_task_on_core(NULL, foo, "Hello from foo2", NORMAL_PRIO, 1);
#endif
#ifdef START_JOIN_TEST
create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, producer, , NORMAL_PRIO);
//create_kernel_task(NULL, consumer, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, mail_ping, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, mail_noise, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, svm_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, pi, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, laplace, NULL, NORMAL_PRIO);
//create_user_task(NULL, "/bin/hello", argv);
create_user_task(NULL, "/bin/tests", argv);
//create_user_task(NULL, "/bin/jacobi", argv);
//create_user_task(NULL, "/bin/mshell", argv);
//create_user_task(NULL, "/bin/jacobi", argv);
/*create_user_task(NULL, "/bin/server", server_argv);
if (RCCE_ue() != 0) {
#endif
#ifdef START_MAIL_PING
create_kernel_task(NULL, mail_ping, NULL, NORMAL_PRIO);
#endif
#ifdef START_MAIL_NOISE
create_kernel_task(NULL, mail_noise, NULL, NORMAL_PRIO);
#endif
#ifdef START_SVM_TEST
create_kernel_task(NULL, svm_test, NULL, NORMAL_PRIO);
#endif
#ifdef START_SVM_BENCH
create_kernel_task(NULL, svm_bench, NULL, NORMAL_PRIO);
#endif
#ifdef START_PI
create_kernel_task(NULL, pi, NULL, NORMAL_PRIO);
#endif
#ifdef START_KERNEL_LAPLACE
create_kernel_task(NULL, laplace, NULL, NORMAL_PRIO);
#endif
#ifdef START_KERNEL_JACOBI
create_kernel_task(NULL, jacobi, NULL, NORMAL_PRIO);
#endif
#ifdef START_HELLO
create_user_task(NULL, "/bin/hello", hello_argv);
#endif
#ifdef START_TESTS
create_user_task(NULL, "/bin/tests", tests_argv);
#endif
#ifdef START_JACOBI
create_user_task(NULL, "/bin/jacobi", jacobi_argv);
//create_user_task_on_core(NULL, "/bin/jacobi", jacobi_argv, 1);
#endif
#ifdef START_MMNIF_TEST
#if defined(CONFIG_LWIP) && LWIP_SOCKET
if (RCCE_IAM == 0) {
kprintf("Start /bin/server...\n");
create_user_task(NULL, "/bin/server", server_argv);
} else {
sleep(5);
kprintf("Start /bin/client...\n");
create_user_task(NULL, "/bin/client", client_argv);
}*/
}
#endif
#endif
return 0;
}

53
apps/tests.h Normal file
View file

@ -0,0 +1,53 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef CONFIG_TEST_H
#define CONFIG_TEST_H
#include <metalsvm/stddef.h>
// define test applications, which will be started
#ifdef CONFIG_ROCKCREEK
//#define START_SVM_TEST
//#define START_SVM_BENCH
//#define START_MAIL_PING
//#define START_MAIL_NOISE
//#define START_KERNEL_LAPLACE
//#define START_KERNEL_JACOBI
#define START_MMNIF_TEST
#endif
#ifdef CONFIG_LWIP
#define START_ECHO
#ifndef CONFIG_TICKLESS
//#define START_NETIO
#endif
#endif
//#define START_CONSUMER_PRODUCER
#define START_FOO
//#define START_JOIN_TEST
//#define START_PI
//#define START_MEASURE_CTX_SWITCH
//#define START_HELLO
#define START_TESTS
//#define START_JACOBI
// does our demos require GFX support?
//#define CONFIG_GFX
#endif

View file

@ -193,6 +193,8 @@ uint32_t apic_cpu_id(void);
int apic_calibration(void);
int has_apic(void);
int apic_is_enabled(void);
int apic_enable_timer(void);
int apic_disable_timer(void);
int ioapic_inton(uint8_t irq, uint8_t apicid);
int ioapic_intoff(uint8_t irq, uint8_t apicid);
int map_apic(void);

View file

@ -36,35 +36,37 @@ extern "C" {
#endif
/// This segment is a data segment
#define GDT_FLAG_DATASEG 0x02
#define GDT_FLAG_DATASEG 0x02
/// This segment is a code segment
#define GDT_FLAG_CODESEG 0x0a
#define GDT_FLAG_TSS 0x09
#define GDT_FLAG_CODESEG 0x0a
#define GDT_FLAG_TSS 0x09
#define GDT_FLAG_TSS_BUSY 0x02
#define GDT_FLAG_SEGMENT 0x10
#define GDT_FLAG_SEGMENT 0x10
/// Privilege level: Ring 0
#define GDT_FLAG_RING0 0x00
#define GDT_FLAG_RING0 0x00
/// Privilege level: Ring 1
#define GDT_FLAG_RING1 0x20
#define GDT_FLAG_RING1 0x20
/// Privilege level: Ring 2
#define GDT_FLAG_RING2 0x40
#define GDT_FLAG_RING2 0x40
/// Privilege level: Ring 3
#define GDT_FLAG_RING3 0x60
#define GDT_FLAG_RING3 0x60
/// Segment is present
#define GDT_FLAG_PRESENT 0x80
#define GDT_FLAG_PRESENT 0x80
/**
* @brief Granularity of segment limit
* - set: segment limit unit is 4 KB (page size)
* - not set: unit is bytes
*/
#define GDT_FLAG_4K_GRAN 0x80
#define GDT_FLAG_4K_GRAN 0x80
/**
* @brief Default operand size
* - set: 32 bit
* - not set: 16 bit
*/
#define GDT_FLAG_32_BIT 0x40
#define GDT_FLAG_16_BIT 0x00
#define GDT_FLAG_32_BIT 0x40
#define GDT_FLAG_64_BIT 0x20
/** @brief Defines a GDT entry
*
@ -74,17 +76,17 @@ extern "C" {
*/
typedef struct {
/// Lower 16 bits of limit range
unsigned short limit_low;
uint16_t limit_low;
/// Lower 16 bits of base address
unsigned short base_low;
uint16_t base_low;
/// middle 8 bits of base address
unsigned char base_middle;
uint8_t base_middle;
/// Access bits
unsigned char access;
uint8_t access;
/// Granularity bits
unsigned char granularity;
uint8_t granularity;
/// Higher 8 bits of base address
unsigned char base_high;
uint8_t base_high;
} __attribute__ ((packed)) gdt_entry_t;
/** @brief defines the GDT pointer structure
@ -93,13 +95,18 @@ typedef struct {
*/
typedef struct {
/// Size of the table in bytes (not the number of entries!)
unsigned short limit;
uint16_t limit;
/// Address of the table
unsigned int base;
size_t base;
} __attribute__ ((packed)) gdt_ptr_t;
/// Defines the maximum number of GDT entries
#define GDT_ENTRIES (5+MAX_TASKS)
#ifdef CONFIG_X86_32
#define GDT_ENTRIES (5+MAX_TASKS)
#else
// a TSS descriptor is twice larger than a code/data descriptor
#define GDT_ENTRIES (5+MAX_TASKS*2)
#endif
#if GDT_ENTRIES > 8192
#error Too many GDT entries!
#endif
@ -117,12 +124,12 @@ void gdt_install(void);
/** @brief Configures and returns a GDT descriptor with chosen attributes
*
* Just feed this function with address, limit and the flags
* Just feed this function with address, limit and the flags
* you have seen in idt.h
*
* @return a preconfigured gdt descriptor
*/
gdt_entry_t configure_gdt_entry(unsigned long base, unsigned long limit,
void configure_gdt_entry(gdt_entry_t *dest_entry, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran);
#ifdef __cplusplus

View file

@ -42,7 +42,8 @@ extern bootinfo_t* bootinfo;
enum icc_mail_requests {
PING_REQ=1,
PING_RESP,
SVM_REQUEST,
SVM_REQ,
SVM_RESP,
NOISE,
};
@ -52,7 +53,9 @@ void icc_mail_check(void);
int icc_mail_ping(void);
int icc_send_gic_irq(int core_num);
int icc_mail_ping_irq(void);
int icc_mail_ping_jitter(void);
int icc_mail_noise(void);
void icc_wait(int tag);
#endif

View file

@ -71,15 +71,21 @@ extern "C" {
*/
typedef struct {
/// Handler function's lower 16 address bits
unsigned short base_lo;
uint16_t base_lo;
/// Handler function's segment selector.
unsigned short sel;
uint16_t sel;
/// These bits are reserved by Intel
unsigned char always0;
uint8_t always0;
/// These 8 bits contain flags. Exact use depends on the type of interrupt gate.
unsigned char flags;
uint8_t flags;
/// Higher 16 bits of handler function's base address
unsigned short base_hi;
uint16_t base_hi;
#ifdef CONFIG_X86_64
/// In 64 bit mode, the "highest" 32 bits of the handler function's base address
uint32_t base_hi64;
/// resvered entries
uint32_t reserved;
#endif
} __attribute__ ((packed)) idt_entry_t;
/** @brief Defines the idt pointer structure.
@ -89,9 +95,9 @@ typedef struct {
*/
typedef struct {
/// Size of the IDT in bytes (not the number of entries!)
unsigned short limit;
uint16_t limit;
/// Base address of the IDT
unsigned int base;
size_t base;
} __attribute__ ((packed)) idt_ptr_t;
/** @brief Installs IDT
@ -120,8 +126,8 @@ void idt_set_gate(unsigned char num, size_t base, unsigned short sel,
*
* @return a preconfigured idt descriptor
*/
idt_entry_t configure_idt_entry(size_t base, unsigned short sel,
unsigned char flags);
void configure_idt_entry(idt_entry_t *dest_entry, size_t base,
unsigned short sel, unsigned char flags);
#ifdef __cplusplus
}

View file

@ -65,6 +65,18 @@ int irq_uninstall_handler(unsigned int irq);
*/
int irq_init(void);
/** @brief Disable the timer interrupt
*
* @return 0 on success
*/
int disable_timer_irq(void);
/** @brief Enable the timer interrupt
*
* @return 0 on success
*/
int enable_timer_irq(void);
#ifdef __cplusplus
}
#endif

View file

@ -51,8 +51,8 @@ inline static void irq_disable(void) {
* @return The set of flags which have been set until now
*/
inline static uint32_t irq_nested_disable(void) {
uint32_t flags;
asm volatile("pushf; cli; popl %0": "=r"(flags) : : "memory");
size_t flags;
asm volatile("pushf; cli; pop %0": "=r"(flags) : : "memory");
if (flags & (1 << 9))
return 1;
return 0;
@ -83,8 +83,8 @@ inline static void irq_nested_enable(uint32_t flags) {
*/
inline static uint32_t is_irq_enabled(void)
{
uint32_t flags;
asm volatile("pushf; popl %0": "=r"(flags) : : "memory");
size_t flags;
asm volatile("pushf; pop %0": "=r"(flags) : : "memory");
if (flags & (1 << 9))
return 1;
return 0;

View file

@ -43,6 +43,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_SVM_STRONG 9 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_LAZYRELEASE 10 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_INIT 11 /* mark if the MBP proxy is used */
/// Page is present
#define PG_PRESENT (1 << _PAGE_BIT_PRESENT)
@ -67,9 +68,12 @@
/// Pattern flag
#define PG_PAT (1 << _PAGE_BIT_PAT)
/// This virtual address range is used by SVM system as marked
#define PG_SVM PG_SVM_STRONG
#define PG_SVM_STRONG (1 << _PAGE_BIT_SVM_STRONG)
/// This virtual address range is used by SVM system as marked
#define PG_SVM_LAZYRELEASE (1 << _PAGE_BIT_SVM_LAZYRELEASE)
/// Currently, no page frame is behind this page (only the MBP proxy)
#define PG_SVM_INIT (1 << _PAGE_BIT_SVM_INIT)
/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
@ -79,27 +83,33 @@
#define KERN_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL)
/// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
#define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
#if __SIZEOF_POINTER__ == 4
#define PGT_ENTRIES 1024
#elif __SIZEOF_POINTER__ == 8
#define PGT_ENTRIES 512
#endif
/** @brief Page table structure
*
* This structure keeps page table entries.\n
* A page table consists of 1024 entries.
* On a 32bit system, a page table consists normally of 1024 entries.
*/
typedef struct page_table
{
/// Page table entries are unsigned 32bit integers.
uint32_t entries[1024];
size_t entries[PGT_ENTRIES];
} page_table_t __attribute__ ((aligned (4096)));
/** @brief Page directory structure
*
* This structure keeps page directory entries.\
* A page directory consists of 1024 entries.
* On a 32bit system, a page directory consists normally of 1024 entries.
*/
typedef struct page_dir
{
/// Page dir entries are unsigned 32bit integers.
uint32_t entries[1024];
size_t entries[PGT_ENTRIES];
} page_dir_t __attribute__ ((aligned (4096)));
/** @brief Converts a virtual address to a physical

View file

@ -68,11 +68,16 @@ inline static uint32_t has_fxsr(void)
return (cpu_info.feature1 & CPU_FEATURE_FXSR);
}
inline static uint32_t has_xmm(void)
inline static uint32_t has_sse(void)
{
return (cpu_info.feature1 & CPU_FEATURE_SSE);
}
inline static uint32_t has_sse2(void)
{
return (cpu_info.feature1 & CPU_FEATURE_SSE2);
}
inline static uint32_t has_avx(void)
{
return (cpu_info.feature2 & CPU_FEATURE_AVX);
@ -124,15 +129,11 @@ inline static int get_return_value(void) {
}
/* Force strict CPU ordering */
#ifdef CONFIG_ROCKCREEK
inline static void mb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
inline static void rmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
inline static void wmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
#else
inline static void mb(void) { asm volatile("mfence" ::: "memory"); }
inline static void rmb(void) { asm volatile("lfence" ::: "memory"); }
inline static void wmb(void) { asm volatile("sfence" ::: "memory"); }
#endif
typedef void (*func_memory_barrier)(void);
extern func_memory_barrier mb;
extern func_memory_barrier rmb;
extern func_memory_barrier wmb;
/** @brief Read out CPU ID
*
@ -151,7 +152,7 @@ inline static void wmb(void) { asm volatile("sfence" ::: "memory"); }
* @param d EDX value will be stores here
*/
inline static void cpuid(uint32_t code, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
asm volatile ("cpuid" : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d) : "0"(code));
asm volatile ("cpuid" : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d) : "0"(code), "2"(*c));
}
/** @brief Read MSR
@ -173,8 +174,8 @@ inline static uint64_t rdmsr(uint32_t msr) {
/** @brief Read cr0 register
* @return cr0's value
*/
static inline uint32_t read_cr0(void) {
uint32_t val;
static inline size_t read_cr0(void) {
size_t val;
asm volatile("mov %%cr0, %0" : "=r"(val));
return val;
}
@ -182,15 +183,15 @@ static inline uint32_t read_cr0(void) {
/** @brief Write a value into cr0 register
* @param val The value you want to write into cr0
*/
static inline void write_cr0(uint32_t val) {
static inline void write_cr0(size_t val) {
asm volatile("mov %0, %%cr0" : : "r"(val));
}
/** @brief Read cr2 register
* @return cr2's value
*/
static inline uint32_t read_cr2(void) {
uint32_t val;
static inline size_t read_cr2(void) {
size_t val;
asm volatile("mov %%cr2, %0" : "=r"(val));
return val;
}
@ -198,8 +199,8 @@ static inline uint32_t read_cr2(void) {
/** @brief Read cr3 register
* @return cr3's value
*/
static inline uint32_t read_cr3(void) {
uint32_t val;
static inline size_t read_cr3(void) {
size_t val;
asm volatile("mov %%cr3, %0" : "=r"(val));
return val;
}
@ -207,15 +208,15 @@ static inline uint32_t read_cr3(void) {
/** @brief Write a value into cr3 register
* @param val The value you want to write into cr3
*/
static inline void write_cr3(uint32_t val) {
static inline void write_cr3(size_t val) {
asm volatile("mov %0, %%cr3" : : "r"(val));
}
/** @brief Read cr4 register
* @return cr4's value
*/
static inline uint32_t read_cr4(void) {
uint32_t val;
static inline size_t read_cr4(void) {
size_t val;
asm volatile("mov %%cr4, %0" : "=r"(val));
return val;
}
@ -223,7 +224,7 @@ static inline uint32_t read_cr4(void) {
/** @brief Write a value into cr4 register
* @param val The value you want to write into cr4
*/
static inline void write_cr4(uint32_t val) {
static inline void write_cr4(size_t val) {
asm volatile("mov %0, %%cr4" : : "r"(val));
}
@ -308,11 +309,6 @@ static inline size_t lsb(size_t i)
return ret;
}
/** @brief Read extended instruction pointer
* @return The EIP's value
*/
uint32_t read_eip(void);
/// A one-instruction-do-nothing
#define NOP1 asm volatile ("nop")
/// Do nothing for 2 instructions
@ -321,7 +317,11 @@ uint32_t read_eip(void);
#define NOP4 asm volatile ("nop;nop;nop;nop")
/// Do nothing for 8 instructions
#define NOP8 asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop")
#define HALT asm volatile ("hlt");
#ifndef CONFIG_TICKLESS
#define HALT asm volatile ("hlt")
#else
#define HALT asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif
/** @brief Init several subsystems
*

View file

@ -32,6 +32,8 @@
extern "C" {
#endif
#if __SIZEOF_POINTER__ == 4
#define CONFIG_X86_32
/// A popular type for addresses
typedef unsigned long size_t;
/// Pointer differences
@ -40,6 +42,19 @@ typedef long ptrdiff_t;
typedef long ssize_t;
typedef long off_t;
#endif
#elif __SIZEOF_POINTER__ == 8
#define CONFIG_X86_64
// A popular type for addresses
typedef unsigned long long size_t;
/// Pointer differences
typedef long long ptrdiff_t;
#ifdef __KERNEL__
typedef long long ssize_t;
typedef long long off_t;
#endif
#else
#error unsupported architecture
#endif
/// Unsigned 64 bit integer
typedef unsigned long long uint64_t;
@ -70,33 +85,79 @@ typedef unsigned int wint_t;
* All the interrupt handler routines use this type for their only parameter.
*/
struct state {
#ifdef CONFIG_X86_32
/// EDI register
unsigned int edi;
uint32_t edi;
/// ESI register
unsigned int esi;
uint32_t esi;
/// EBP register
unsigned int ebp;
uint32_t ebp;
/// ESP register
unsigned int esp;
uint32_t esp;
/// EBX register
unsigned int ebx;
uint32_t ebx;
/// EDX register
unsigned int edx;
uint32_t edx;
/// ECX register
unsigned int ecx;
uint32_t ecx;
/// EAX register
unsigned int eax; /* pushed by 'pusha' */
uint32_t eax; /* pushed by 'pusha' */
/// Interrupt number
unsigned int int_no;
uint32_t int_no;
// pushed by the processor automatically
unsigned int error;
unsigned int eip;
unsigned int cs;
unsigned int eflags;
unsigned int useresp;
unsigned int ss;
uint32_t error;
uint32_t eip;
uint32_t cs;
uint32_t eflags;
uint32_t useresp;
uint32_t ss;
#elif defined(CONFIG_X86_64)
/// R15 register
uint64_t r15;
/// R14 register
uint64_t r14;
/// R13 register
uint64_t r13;
/// R12 register
uint64_t r12;
/// R11 register
uint64_t r11;
/// R10 register
uint64_t r10;
/// R9 register
uint64_t r9;
/// R8 register
uint64_t r8;
/// RDI register
uint64_t rdi;
/// RSI register
uint64_t rsi;
/// RBP register
uint64_t rbp;
/// (pseudo) RSP register
uint64_t rsp;
/// RBX register
uint64_t rbx;
/// RDX register
uint64_t rdx;
/// RCX register
uint64_t rcx;
/// RAX register
uint64_t rax;
/// Interrupt number
uint64_t int_no;
// pushed by the processor automatically
uint64_t error;
uint64_t rip;
uint64_t cs;
uint64_t rflags;
uint64_t userrsp;
uint64_t ss;
#endif
};
uint32_t apic_cpu_id(void);

View file

@ -96,11 +96,12 @@ inline static void *memcpy(void *dest, const void *src, size_t count)
*/
inline static void *memcpy(void* dest, const void *src, size_t count)
{
int32_t i, j, k;
size_t i, j, k;
if (BUILTIN_EXPECT(!dest || !src, 0))
return dest;
#ifdef CONFIG_X86_32
asm volatile (
"cld; rep movsl\n\t"
"movl %4, %%ecx\n\t"
@ -108,6 +109,15 @@ inline static void *memcpy(void* dest, const void *src, size_t count)
"rep movsb\n\t"
: "=&c"(i), "=&D"(j), "=&S"(k)
: "0"(count/4), "g"(count), "1"(dest), "2"(src) : "memory","cc");
#elif defined(CONFIG_X86_64)
asm volatile (
"cld; rep movsq\n\t"
"movq %4, %%rcx\n\t"
"andq $7, %%rcx\n\t"
"rep movsb\n\t"
: "=&c"(i), "=&D"(j), "=&S"(k)
: "0"(count/8), "g"(count), "1"(dest), "2"(src) : "memory","cc");
#endif
return dest;
}
@ -128,7 +138,7 @@ inline static void *memcpy(void* dest, const void *src, size_t count)
*/
inline static void *memset(void* dest, int val, size_t count)
{
int32_t i, j;
size_t i, j;
if (BUILTIN_EXPECT(!dest, 0))
return dest;
@ -155,7 +165,7 @@ inline static void *memset(void* dest, int val, size_t count)
*/
inline static void *memset(void* dest, int val, size_t count)
{
int32_t i, j;
size_t i, j;
if (BUILTIN_EXPECT(!dest, 0))
return dest;
@ -184,15 +194,22 @@ inline static void *memset(void* dest, int val, size_t count)
inline static size_t strlen(const char* str)
{
size_t len = 0;
uint32_t i, j;
size_t i, j;
if (BUILTIN_EXPECT(!str, 0))
return len;
#ifdef CONFIG_X86_32
asm volatile("not %%ecx; cld; repne scasb; not %%ecx; dec %%ecx"
: "=&c"(len), "=&D"(i), "=&a"(j)
: "2"(0), "1"(str), "0"(len)
: "memory","cc");
#elif defined(CONFIG_X86_64)
asm volatile("not %%rcx; cld; repne scasb; not %%rcx; dec %%rcx"
: "=&c"(len), "=&D"(i), "=&a"(j)
: "2"(0), "1"(str), "0"(len)
: "memory","cc");
#endif
return len;
}

View file

@ -21,6 +21,7 @@
#define __ARCH_SVM_H__
#include <metalsvm/stddef.h>
#include <asm/page.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#endif
@ -31,8 +32,13 @@ extern "C" {
#ifdef CONFIG_ROCKCREEK
//#define SVM_WB
#define SVM_STRONG (1 << 0)
#define SVM_LAZYRELEASE (1 << 1)
#define SVM_L2 (1 << 4)
#define SVM_STRONG_L2 SVM_STRONG|SVM_L2
#define SVM_LAZYRELEASE_L2 SVM_LAZYRELEASE|SVM_L2
/** @brief Init routine of the SVM subsystem
*
@ -49,13 +55,15 @@ int svm_init(void);
*
* @return Pointer to the new memory range
*/
void* svmmalloc(size_t sizei, uint32_t flags);
void* svm_malloc(size_t size, uint32_t flags);
/** @brief Frees memory, which is managed by the SVM subsystem
*
* Like RCCE function, belongs svmfree to the synchronous function.
*/
void svmfree(void* addr, size_t size);
void svm_free(void* addr, size_t size);
int svm_barrier(uint32_t flags);
/** @brief Request for exlusive access
*
@ -64,6 +72,15 @@ void svmfree(void* addr, size_t size);
*/
int svm_access_request(size_t addr);
/** @brief Allocate n shared pages
*
* @param n number of requested pages
* @return physical address of the shared pages
*/
size_t shmalloc(uint32_t n);
int svm_alloc_page(size_t addr, page_table_t* pgt);
/** @brief emit page to core ue
*
* @return
@ -71,24 +88,28 @@ int svm_access_request(size_t addr);
*/
int svm_emit_page(size_t addr, int ue);
#ifdef CONFIG_ROCKCREEK
/* @brief invalidate the cache entries for all SVM regions
*/
#ifndef SVM_WB
static inline void svm_invalidate(void)
{
asm volatile ( ".byte 0x0f; .byte 0x0a;\n" ); // CL1FLUSHMB
}
#else
void svm_invalidate(void);
#endif
/* *brief flushs the cache for all SVM regions
/* @brief flushs the cache for all SVM regions
*/
#ifdef CONFIG_ROCKCREEK
#ifndef SVM_WB
static inline void svm_flush(void)
static inline void svm_flush(size_t unused)
{
// need to write to another line to make sure the write combine buffer gets flushed
*(int *)RCCE_fool_write_combine_buffer = 1;
*(volatile int *)RCCE_fool_write_combine_buffer = 1;
}
#else
void svm_flush(void);
void svm_flush(size_t addr);
#endif
#endif

View file

@ -55,6 +55,13 @@ int arch_fork(task_t* task);
*/
void switch_task(uint32_t id);
/**
* @brief Switch to current task
*
* @param stack Pointer to the old stack pointer
*/
void switch_context(size_t** stack);
/** @brief Setup a default frame for a new task
*
* @param task Pointer to the task structure
@ -64,16 +71,24 @@ void switch_task(uint32_t id);
* - 0 on success
* - -EINVAL (-22) on failure
*/
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg);
int create_default_frame(task_t* task, entry_point_t ep, void* arg);
/** @brief Register a task's TSS at GDT
*
* @param task Pointer to task structure
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
int register_task(task_t* task);
static inline int register_task(void)
{
#ifdef CONFIG_X86_32
uint16_t sel = (CORE_ID+5) << 3;
#else
uint16_t sel = (CORE_ID*2+5) << 3;
#endif
asm volatile ("ltr %%ax" : : "a"(sel));
return 0;
}
/** @brief Jump back to user code
*
@ -84,19 +99,17 @@ int register_task(task_t* task);
*/
static inline int jump_to_user_code(uint32_t ep, uint32_t stack)
{
#ifdef CONFIG_X86_32
asm volatile ("mov %0, %%ds; mov %0, %%fs; mov %0, %%gs; mov %0, %%es" :: "r"(0x23));
asm volatile ("push $0x23; push %0; push $0x1B; push %1" :: "r"(stack), "r"(ep));
asm volatile ("lret" ::: "cc");
return 0;
#else
return -22;
#endif
}
/** @brief determines the stack of a specific task
*
* @return start address of a specific task
*/
size_t get_stack(uint32_t id);
#ifdef __cplusplus
}
#endif

View file

@ -65,19 +65,11 @@ union fpu_state {
i387_fxsave_t fxsave;
};
static inline void save_fpu_state(union fpu_state* state) {
if (has_fxsr())
asm volatile ("fxsave %0; fnclex" : "=m"((*state).fxsave) :: "memory");
else
asm volatile ("fnsave %0; fwait" : "=m"((*state).fsave) :: "memory");
}
typedef void (*handle_fpu_state)(union fpu_state* state);
static inline void restore_fpu_state(union fpu_state* state) {
if (has_fxsr())
asm volatile ("fxrstor %0" :: "m"(state->fxsave));
else
asm volatile ("frstor %0" :: "m"(state->fsave));
}
extern handle_fpu_state save_fpu_state;
extern handle_fpu_state restore_fpu_state;
extern handle_fpu_state fpu_init;
#ifdef __cplusplus
}

View file

@ -35,6 +35,7 @@ extern "C" {
/** @brief The tast state segment structure
*/
typedef struct {
#ifdef CONFIG_X86_32
uint16_t backlink, __blh;
uint32_t esp0;
uint16_t ss0, __ss0h;
@ -55,6 +56,23 @@ typedef struct {
uint16_t gs, __gsh;
uint16_t ldt, __ldth;
uint16_t trace, bitmap;
#endif
#ifdef CONFIG_X86_64
uint16_t res0, res1; // reserved entries
uint64_t rsp0;
uint64_t rsp1;
uint64_t rsp2;
uint32_t res2, res3; // reserved entries
uint64_t ist_rsp1;
uint64_t ist_rsp2;
uint64_t ist_rsp3;
uint64_t ist_rsp4;
uint64_t ist_rsp5;
uint64_t ist_rsp6;
uint64_t ist_rsp7;
uint32_t res4, res5; // reserved entries
uint16_t res6, bitmap;
#endif
} __attribute__ ((packed)) tss_t;
#ifdef __cplusplus

View file

@ -1,5 +1,5 @@
C_source := gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c apic.c pci.c processor.c
ASM_source := entry.asm string.asm
ASM_source := entry$(BIT).asm string$(BIT).asm
MODULE := arch_x86_kernel
include $(TOPDIR)/Makefile.inc

View file

@ -38,10 +38,20 @@
#include <asm/RCCE_lib.h>
#endif
void start_tickless(void);
void end_tickless(void);
#if defined(CONFIG_ROCKCREEK) && (MAX_CORES > 1)
#error RockCreek is not a SMP system
#endif
/*
* Note that linker symbols are not variables, they have no memory allocated for
* maintaining a value, rather their address is their value.
*/
extern const void kernel_start;
extern const void kernel_end;
// IO APIC MMIO structure: write reg, then read or write data.
typedef struct {
uint32_t reg;
@ -51,9 +61,9 @@ typedef struct {
static const apic_processor_entry_t* apic_processors[MAX_CORES] = {[0 ... MAX_CORES-1] = NULL};
static uint32_t boot_processor = MAX_CORES;
static apic_mp_t* apic_mp = NULL;
apic_mp_t* apic_mp __attribute__ ((section (".data"))) = NULL;
static apic_config_table_t* apic_config = NULL;
static uint32_t lapic = 0;
static size_t lapic = 0;
static volatile ioapic_t* ioapic = NULL;
static uint32_t icr = 0;
static uint32_t ncores = 1;
@ -75,12 +85,15 @@ static inline uint32_t lapic_read(uint32_t addr)
static inline void lapic_write(uint32_t addr, uint32_t value)
{
#ifdef CONFIG_X86_32
/*
* to avoid a pentium bug, we have to read a apic register
* before we write a value to this register
*/
asm volatile ("movl (%%eax), %%edx; movl %%ebx, (%%eax)" :: "a"(lapic+addr), "b"(value) : "%edx");
//*((volatile uint32_t*) (lapic+addr)) = value;
#else
*((volatile uint32_t*) (lapic+addr)) = value;
#endif
}
static inline uint32_t ioapic_read(uint32_t reg)
@ -144,6 +157,31 @@ int apic_is_enabled(void)
return (lapic && initialized);
}
int apic_disable_timer(void)
{
if (BUILTIN_EXPECT(!apic_is_enabled(), 0))
return -EINVAL;
lapic_write(APIC_LVT_T, 0x10000); // disable timer interrupt
start_tickless();
return 0;
}
int apic_enable_timer(void)
{
if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
lapic_write(APIC_DCR, 0xB); // set it to 1 clock increments
lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
lapic_write(APIC_ICR, icr);
end_tickless();
return 0;
}
return -EINVAL;
}
#if MAX_CORES > 1
static inline void set_ipi_dest(uint32_t cpu_id) {
uint32_t tmp;
@ -280,9 +318,18 @@ extern void cpu_init(void);
*/
extern int smp_main(void);
#ifdef CONFIG_X86_64
/*
* 32bit entry point, which jumps to the 64bit code smp_start
*/
extern void smp_entry(void);
#endif
void smp_start(uint32_t id)
{
uint32_t i;
#ifdef CONFIG_X86_32
size_t i;
#endif
atomic_int32_inc(&cpu_online);
@ -301,8 +348,10 @@ void smp_start(uint32_t id)
// install IDT
idt_install();
// On 64bit system, paging is already enabled
#ifdef CONFIG_X86_32
/* enable paging */
write_cr3((uint32_t)get_boot_pgd());
write_cr3((size_t)get_boot_pgd());
i = read_cr0();
i = i | (1 << 31);
write_cr0(i);
@ -310,12 +359,13 @@ void smp_start(uint32_t id)
// reset APIC and set id
lapic_reset(); // sets also the timer interrupt
apic_set_cpu_id(id);
#endif
/*
* we turned on paging
* => now, we are able to register our task for Task State Switching
* => now, we are able to register our task
*/
register_task(per_core(current_task));
register_task();
// enable additional cpu features
cpu_detection();
@ -329,7 +379,7 @@ void smp_start(uint32_t id)
}
#endif
#if 1
#ifdef CONFIG_X86_32
static apic_mp_t* search_apic(size_t base, size_t limit) {
size_t ptr;
apic_mp_t* tmp;
@ -375,8 +425,13 @@ int smp_init(void)
{
// replace 0xDEADC0DE with the address of the smp entry code
if (*((uint32_t*) (bootaddr+j)) == 0xDEADC0DE) {
*((uint32_t*) (bootaddr+j)) = (size_t) smp_start;
kprintf("Set entry point of the application processors at 0x%x\n", (size_t) smp_start);
#ifdef CONFIG_X86_32
*((uint32_t*) (bootaddr+j)) = (uint32_t) smp_start;
kprintf("Set entry point of the application processors at 0x%x\n", (uint32_t) smp_start);
#else
*((uint32_t*) (bootaddr+j)) = (uint32_t) smp_entry;
kprintf("Set entry point of the application processors at 0x%lx\n", (size_t) smp_entry);
#endif
}
// replace APIC ID 0xDEADDEAD
@ -446,9 +501,17 @@ int map_apic(void)
if (!has_apic())
return -ENXIO;
#ifdef CONFIG_X86_32
lapic = map_region(0 /*lapic*/, lapic, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!lapic, 0))
return -ENXIO;
#else
if (lapic != (size_t)&kernel_start - 0x1000) {
lapic = map_region(0 /*lapic*/, lapic, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!lapic, 0))
return -ENXIO;
}
#endif
kprintf("Mapped LAPIC at 0x%x\n", lapic);
if (ioapic) {
@ -460,7 +523,7 @@ int map_apic(void)
// map all processor entries
for(i=0; i<MAX_CORES; i++) {
if (apic_processors[i] && (old != (((size_t)apic_processors[i]) & 0xFFFFF000)))
old = map_region(((size_t) apic_processors[i]) & 0xFFFFF000, ((size_t) apic_processors[i]) & 0xFFFFF000, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
old = map_region(((size_t) apic_processors[i]) & 0xFFFFF000, ((size_t) apic_processors[i]) & 0xFFFFF000, 1, MAP_REMAP|MAP_KERNEL_SPACE|MAP_NO_CACHE);
}
}
@ -526,10 +589,10 @@ int apic_calibration(void)
lapic_write(APIC_ICR, 0xFFFFFFFFUL);
/* wait 3 time slices to determine a ICR */
mb();
rmb();
start = rdtsc();
do {
mb();
rmb();
end = rdtsc();
ticks = end > start ? end - start : start - end;
} while(ticks*TIMER_FREQ < 3*RC_REFCLOCKMHZ*1000000UL);
@ -568,6 +631,7 @@ static int apic_probe(void)
uint32_t i, count;
int isa_bus = -1;
#ifdef CONFIG_X86_32
#if 1
apic_mp = search_apic(0xF0000, 0x100000);
if (apic_mp)
@ -607,6 +671,7 @@ static int apic_probe(void)
}
}
#endif
#endif
found_mp:
if (!apic_mp)
goto no_mp;
@ -620,7 +685,7 @@ found_mp:
goto no_mp;
}
apic_config = (apic_config_table_t*) apic_mp->mp_config;
apic_config = (apic_config_table_t*) ((size_t) apic_mp->mp_config);
if (!apic_config || strncmp((void*) &apic_config->signature, "PCMP", 4) !=0) {
kputs("Invalid MP config table\n");
goto no_mp;
@ -668,10 +733,10 @@ found_mp:
addr += 20;
} else if (*((uint8_t*) addr) == 2) { // IO_APIC
apic_io_entry_t* io_entry = (apic_io_entry_t*) addr;
ioapic = (ioapic_t*) io_entry->addr;
ioapic = (ioapic_t*) ((size_t) io_entry->addr);
addr += 8;
kprintf("Found IOAPIC at 0x%x (ver. 0x%x)\n", ioapic,
ioapic_read(IOAPIC_REG_VER));
//kprintf("Found IOAPIC at 0x%x (ver. 0x%x)\n", ioapic, ioapic_read(IOAPIC_REG_VER));
kprintf("Found IOAPIC at 0x%x\n", ioapic);
} else if (*((uint8_t*) addr) == 3) { // IO_INT
apic_ioirq_entry_t* extint = (apic_ioirq_entry_t*) addr;
if (extint->src_bus == isa_bus) {
@ -690,15 +755,34 @@ found_mp:
ncores = count;
check_lapic:
#ifdef CONFIG_X86_32
if (apic_config) {
lapic = apic_config->lapic;
} else {
uint32_t edx, dummy;
uint32_t edx, dummy=0;
cpuid(0x1, &dummy, &dummy, &dummy, &edx);
if (edx & (1 << 9))
lapic = 0xFEE00000;
}
#else
if (apic_config) {
if (apic_config->lapic == 0xFEE00000) {
// On a x64 system, we already map the lapic below the kernel
lapic = (size_t)&kernel_start - 0x1000;
} else {
lapic = apic_config->lapic;
}
} else {
uint32_t edx, dummy=0;
cpuid(0x1, &dummy, &dummy, &dummy, &edx);
if (edx & (1 << 9)) {
// On a x64 system, we already map the lapic below the kernel
lapic = (size_t)&kernel_start - 0x1000;
}
}
#endif
if (!lapic)
goto out;

View file

@ -21,6 +21,8 @@
; perhaps setting up the GDT and segments. Please note that interrupts
; are disabled at this point: More on interrupts later!
%include "config.inc"
[BITS 32]
; We use a special name to map this section at the begin of our kernel
; => Multiboot needs its magic number at the begin of the kernel
@ -46,29 +48,22 @@ mboot:
dd MULTIBOOT_HEADER_MAGIC
dd MULTIBOOT_HEADER_FLAGS
dd MULTIBOOT_CHECKSUM
; AOUT kludge - must be physical addresses. Make a note of these:
; The linker script fills in the data for these ones!
; dd mboot
; dd code
; dd bss
; dd end
; dd start
msg db "?ello from MetalSVM kernel!!", 0
extern default_stack_pointer
SECTION .text
ALIGN 4
stublet:
; initialize stack pointer.
mov esp, [default_stack_pointer]
mov esp, boot_stack
add esp, KERNEL_STACK_SIZE-16
; save pointer to the multiboot structure
push ebx
; initialize cpu features
call cpu_init
; interpret multiboot information
extern multiboot_init
push ebx
; pointer to the multiboot structure is already pushed
call multiboot_init
add esp, 4
@ -113,10 +108,12 @@ flush2:
ret
; determines the current instruction pointer (after the jmp)
global read_eip
read_eip:
pop eax ; Get the return address
jmp eax ; Return. Can't use RET because return
global read_ip
read_ip:
mov eax, [esp+4]
pop DWORD [eax] ; Get the return address
add esp, 4 ; Dirty Hack! read_ip cleanup the stacl
jmp [eax] ; Return. Can't use RET because return
; address popped off the stack.
; In just a few pages in this tutorial, we will add our Interrupt
@ -496,17 +493,6 @@ global apic_lint1
global apic_error
global apic_svr
global switch_task
switch_task:
mov eax, [esp+4]
add ax, WORD 5
mov bx, WORD 8
mul bx
mov [hack+5], ax
hack:
jmp 0x00 : 0xDEADBEAF
ret
; 32: IRQ0
irq0:
; irq0 - irq15 are registered as "Interrupt Gate"
@ -764,7 +750,29 @@ apic_svr:
jmp common_stub
extern irq_handler
extern get_current_stack
extern finish_task_switch
global switch_context
ALIGN 4
switch_context:
; create on the stack a pseudo interrupt
; afterwards, we switch to the task with iret
mov eax, [esp+4] ; on the stack is already the address to store the old esp
pushf ; EFLAGS
push DWORD 0x8 ; CS
push DWORD rollback ; EIP
push DWORD 0x0 ; Interrupt number
push DWORD 0x00edbabe ; Error code
pusha ; Registers...
jmp common_switch
ALIGN 4
rollback:
ret
ALIGN 4
common_stub:
pusha
@ -773,8 +781,31 @@ common_stub:
call irq_handler
add esp, 4
cmp eax, 0
je no_context_switch
common_switch:
mov [eax], esp ; store old esp
call get_current_stack ; get new esp
xchg eax, esp
; set task switched flag
mov eax, cr0
or eax, 8
mov cr0, eax
; call cleanup code
call finish_task_switch
no_context_switch:
popa
add esp, 8
iret
SECTION .data
global boot_stack
ALIGN 4096
boot_stack:
TIMES (MAX_CORES*KERNEL_STACK_SIZE) DB 0xcd
SECTION .note.GNU-stack noalloc noexec nowrite progbits

1173
arch/x86/kernel/entry64.asm Normal file

File diff suppressed because it is too large Load diff

View file

@ -17,6 +17,7 @@
* This file is part of MetalSVM.
*/
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/tasks.h>
@ -27,9 +28,7 @@
#include <asm/page.h>
gdt_ptr_t gp;
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (PAGE_SIZE)));
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
static tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
// currently, our kernel has full access to the ioports
static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
@ -39,128 +38,156 @@ static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0,
*/
extern void gdt_flush(void);
/*
* This is defined in entry.asm. We use this for a
* hardware-based task switch.
*/
extern void tss_switch(uint32_t id);
size_t get_stack(uint32_t id)
size_t* get_current_stack(void)
{
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
return -EINVAL;
return (size_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
}
task_t* curr_task = per_core(current_task);
int register_task(task_t* task) {
uint16_t sel;
uint32_t id = task->id;
// determine and set esp0
#ifdef CONFIG_X86_32
task_state_segments[CORE_ID].esp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
#else
task_state_segments[CORE_ID].rsp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
#endif
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
// use new page table
write_cr3(virt_to_phys((size_t)curr_task->pgd));
sel = (task->id+5) << 3;
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel) : "%eax");
// initialize the static elements of a TSS
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
task_state_segments[id].ss0 = 0x10;
return 0;
return curr_task->last_stack_pointer;
}
int arch_fork(task_t* task)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
struct state* state;
task_t* curr_task = per_core(current_task);
size_t esp, state_size;
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
id = task->id;
if (BUILTIN_EXPECT(!task->stack, 0))
return -EINVAL;
#ifdef CONFIG_X86_32
state_size = sizeof(struct state) - 2*sizeof(size_t);
#else
state_size = sizeof(struct state);
#endif
// copy kernel stack of the current task
memcpy(kstacks[id], kstacks[curr_task->id], KERNEL_STACK_SIZE);
mb();
memcpy(task->stack, curr_task->stack, KERNEL_STACK_SIZE);
// reset TSS
memset(task_state_segments+id, 0x00, sizeof(tss_t));
#ifdef CONFIG_X86_32
asm volatile ("mov %%esp, %0" : "=m"(esp));
esp -= (size_t) curr_task->stack;
esp += (size_t) task->stack;
// set default values of all registers
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].ss0 = ds;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
state = (struct state*) (esp - state_size);
//memset(state, 0x00, state_size);
// save curret task context
asm volatile("mov %%esp, %0" : "=r"(task_state_segments[id].esp));
task_state_segments[id].esp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].esp += (uint32_t) kstacks[id];
asm volatile ("pusha");
asm volatile ("pop %0" : "=r"(task_state_segments[id].edi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].esi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebp));
#ifdef WITH_FRAME_POINTER
task_state_segments[id].ebp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].ebp += (uint32_t) kstacks[id];
#endif
asm volatile ("pusha; pop %0" : "=m"(state->edi));
asm volatile ("pop %0" : "=m"(state->esi));
asm volatile ("pop %0" : "=m"(state->ebp));
asm volatile ("add $4, %%esp" ::: "%esp");
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].edx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ecx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].eax));
asm volatile ("pop %0" : "=m"(state->ebx));
asm volatile ("pop %0" : "=m"(state->edx));
asm volatile ("pop %0" : "=m"(state->ecx));
asm volatile ("pop %0" : "=m"(state->eax));
state->esp = esp;
task->last_stack_pointer = (size_t*) state;
state->int_no = 0xB16B00B5;
state->error = 0xC03DB4B3;
state->cs = 0x08;
// store the current EFLAGS
asm volatile ("pushf; pop %%eax" : "=a"(task_state_segments[id].eflags));
// This will be the entry point for the new task.
asm volatile ("call read_eip" : "=a"(task_state_segments[id].eip));
asm volatile ("pushf; pop %0" : "=m"(state->eflags));
// enable interrupts
state->eflags |= (1 << 9);
// This will be the entry point for the new task. read_ip cleanups the stack
asm volatile ("push %0; call read_ip" :: "r"(&state->eip) : "%eax");
#else
#warning Currently, not supported!
return -1;
#endif
return 0;
}
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
int create_default_frame(task_t* task, entry_point_t ep, void* arg)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
size_t *stack;
struct state *stptr;
size_t state_size;
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
id = task->id;
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
if (BUILTIN_EXPECT(!task->stack, 0))
return -EINVAL;
/* set default values of all registers */
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1002; // 0x1202;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].eip = (uint32_t) ep;
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
task_state_segments[id].ebp = task_state_segments[id].esp;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
memset(task->stack, 0xCD, KERNEL_STACK_SIZE);
/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* The difference between setting up a task for SW-task-switching
* and not for HW-task-switching is setting up a stack and not a TSS.
* This is the stack which will be activated and popped off for iret later.
*/
stack = (size_t*) (task->stack + KERNEL_STACK_SIZE - 16); // => stack is 16byte aligned
/* The next three things on the stack are a marker for debugging purposes, ... */
*stack-- = 0xDEADBEEF;
#ifdef CONFIG_X86_32
/* the first-function-to-be-called's arguments, ... */
*stack-- = (size_t) arg;
#endif
/* and the "caller" we shall return to.
* This procedure cleans the task after exit. */
*stack = (size_t) leave_kernel_task;
/* Next bunch on the stack is the initial register state.
* The stack must look like the stack of a task which was
* scheduled away previously. */
/* In 64bit mode, he stack pointer (SS:RSP) is pushed unconditionally on interrupts.
* In legacy modes, this push is conditional and based on a change in current privilege level (CPL).*/
#ifdef CONFIG_X86_32
state_size = sizeof(struct state) - 2*sizeof(size_t);
#else
state_size = sizeof(struct state);
#endif
stack = (size_t*) ((size_t) stack - state_size);
stptr = (struct state *) stack;
memset(stptr, 0x00, state_size);
#ifdef CONFIG_X86_32
stptr->esp = (size_t)stack + state_size;
#else
stptr->rsp = (size_t)stack + state_size;
/* the first-function-to-be-called's arguments, ... */
stptr->rdi = (size_t) arg;
#endif
stptr->int_no = 0xB16B00B5;
stptr->error = 0xC03DB4B3;
/* The instruction pointer shall be set on the first function to be called
* after IRETing */
#ifdef CONFIG_X86_32
stptr->eip = (size_t)ep;
#else
stptr->rip = (size_t)ep;
#endif
stptr->cs = 0x08;
#ifdef CONFIG_X86_32
stptr->eflags = 0x1202;
// the creation of a kernel tasks didn't change the IOPL level
// => useresp & ss is not required
#else
stptr->rflags = 0x1202;
stptr->ss = 0x10;
stptr->userrsp = stptr->rsp;
#endif
/* Set the task's stack pointer entry to the stack we have crafted right now. */
task->last_stack_pointer = (size_t*)stack;
return 0;
}
@ -169,27 +196,24 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
static void gdt_set_gate(int num, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran)
{
gdt[num] = configure_gdt_entry(base, limit, access, gran);
configure_gdt_entry(&gdt[num], base, limit, access, gran);
}
gdt_entry_t configure_gdt_entry(unsigned long base, unsigned long limit,
void configure_gdt_entry(gdt_entry_t *dest_entry, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran)
{
gdt_entry_t desc;
/* Setup the descriptor base address */
desc.base_low = (base & 0xFFFF);
desc.base_middle = (base >> 16) & 0xFF;
desc.base_high = (base >> 24) & 0xFF;
dest_entry->base_low = (base & 0xFFFF);
dest_entry->base_middle = (base >> 16) & 0xFF;
dest_entry->base_high = (base >> 24) & 0xFF;
/* Setup the descriptor limits */
desc.limit_low = (limit & 0xFFFF);
desc.granularity = ((limit >> 16) & 0x0F);
dest_entry->limit_low = (limit & 0xFFFF);
dest_entry->granularity = ((limit >> 16) & 0x0F);
/* Finally, set up the granularity and access flags */
desc.granularity |= (gran & 0xF0);
desc.access = access;
return desc;
dest_entry->granularity |= (gran & 0xF0);
dest_entry->access = access;
}
/*
@ -202,12 +226,23 @@ gdt_entry_t configure_gdt_entry(unsigned long base, unsigned long limit,
void gdt_install(void)
{
unsigned int i;
unsigned long mode, limit;
memset(task_state_segments, 0x00, MAX_TASKS*sizeof(tss_t));
memset(task_state_segments, 0x00, MAX_CORES*sizeof(tss_t));
#ifdef CONFIG_X86_32
mode = GDT_FLAG_32_BIT;
limit = 0xFFFFFFFF;
#elif defined(CONFIG_X86_64)
mode = GDT_FLAG_64_BIT;
limit = 0;
#else
#error invalid mode
#endif
/* Setup the GDT pointer and limit */
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
gp.base = (unsigned int) &gdt;
gp.base = (size_t) &gdt;
/* Our NULL descriptor */
gdt_set_gate(0, 0, 0, 0, 0);
@ -217,40 +252,49 @@ void gdt_install(void)
* is 0, the limit is 4 GByte, it uses 4KByte granularity,
* uses 32-bit opcodes, and is a Code Segment descriptor.
*/
gdt_set_gate(1, 0, 0xFFFFFFFF,
gdt_set_gate(1, 0, limit,
GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
GDT_FLAG_4K_GRAN | GDT_FLAG_32_BIT);
GDT_FLAG_4K_GRAN | mode);
/*
* The third entry is our Data Segment. It's EXACTLY the
* same as our code segment, but the descriptor type in
* this entry's access byte says it's a Data Segment
*/
gdt_set_gate(2, 0, 0xFFFFFFFF,
gdt_set_gate(2, 0, limit,
GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT,
GDT_FLAG_4K_GRAN | GDT_FLAG_32_BIT);
GDT_FLAG_4K_GRAN | mode);
/*
* Create code segement for userspace applications (ring 3)
*/
gdt_set_gate(3, 0, 0xFFFFFFFF,
gdt_set_gate(3, 0, limit,
GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
GDT_FLAG_4K_GRAN | GDT_FLAG_32_BIT);
GDT_FLAG_4K_GRAN | mode);
/*
* Create data segement for userspace applications (ring 3)
*/
gdt_set_gate(4, 0, 0xFFFFFFFF,
gdt_set_gate(4, 0, limit,
GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT,
GDT_FLAG_4K_GRAN | GDT_FLAG_32_BIT);
GDT_FLAG_4K_GRAN | mode);
/*
* Create TSS for each task at ring0 (we use these segments for task switching)
*/
for(i=0; i<MAX_TASKS; i++) {
for(i=0; i<MAX_CORES; i++) {
#ifdef CONFIG_X86_32
/* set default values */
task_state_segments[i].eflags = 0x1202;
task_state_segments[i].ss0 = 0x10; // data segment
task_state_segments[i].esp0 = 0xDEADBEEF; // invalid pseudo address
gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0,
GDT_FLAG_32_BIT);
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, mode);
#elif defined(CONFIG_X86_64)
task_state_segments[i].rsp0 = 0xDEADBEEF; // invalid pseudo address
gdt_set_gate(5+i*2, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, mode);
#endif
}
/* Flush out the old GDT and install the new changes! */

View file

@ -39,25 +39,25 @@
* for which the 'presence' bit is cleared (0) will generate an
* "Unhandled Interrupt" exception
*/
#ifdef CONFIG_X86_64
static idt_entry_t idt[256] = {[0 ... 255] = {0, 0, 0, 0, 0, 0, 0}};
#else
static idt_entry_t idt[256] = {[0 ... 255] = {0, 0, 0, 0, 0}};
#endif
static idt_ptr_t idtp;
idt_entry_t configure_idt_entry(size_t base, unsigned short sel,
unsigned char flags)
void configure_idt_entry(idt_entry_t *dest_entry, size_t base,
unsigned short sel, unsigned char flags)
{
idt_entry_t desc;
/* The interrupt routine's base address */
desc.base_lo = (base & 0xFFFF);
desc.base_hi = (base >> 16) & 0xFFFF;
dest_entry->base_lo = (base & 0xFFFF);
dest_entry->base_hi = (base >> 16) & 0xFFFF;
/* The segment or 'selector' that this IDT entry will use
* is set here, along with any access flags */
desc.sel = sel;
desc.always0 = 0;
desc.flags = flags;
return desc;
dest_entry->sel = sel;
dest_entry->always0 = 0;
dest_entry->flags = flags;
}
/*
@ -67,7 +67,7 @@ idt_entry_t configure_idt_entry(size_t base, unsigned short sel,
void idt_set_gate(unsigned char num, size_t base, unsigned short sel,
unsigned char flags)
{
idt[num] = configure_idt_entry(base, sel, flags);
configure_idt_entry(&idt[num], base, sel, flags);
}
extern void isrsyscall(void);
@ -82,7 +82,7 @@ void idt_install(void)
/* Sets the special IDT pointer up, just like in 'gdt.c' */
idtp.limit = (sizeof(idt_entry_t) * 256) - 1;
idtp.base = (unsigned int)&idt;
idtp.base = (size_t)&idt;
/* Add any new ISRs to the IDT here using idt_set_gate */
idt_set_gate(INT_SYSCALL, (size_t)isrsyscall, KERNEL_CODE_SELECTOR,

View file

@ -29,6 +29,7 @@
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/page.h>
#include <asm/irq.h>
#include <asm/idt.h>
#include <asm/isrs.h>
@ -128,6 +129,22 @@ static int irq_remap(void)
return 0;
}
int disable_timer_irq(void)
{
if (BUILTIN_EXPECT(apic_is_enabled(), 1))
return apic_disable_timer();
return -EINVAL;
}
int enable_timer_irq(void)
{
if (BUILTIN_EXPECT(apic_is_enabled(), 1))
return apic_enable_timer();
return -EINVAL;
}
/** @brief Remap IRQs and install ISRs in IDT
*
* We first remap the interrupt controllers, and then we install
@ -224,12 +241,12 @@ int irq_init(void)
* controller (an IRQ from 8 to 15) gets an interrupt, you need to
* acknowledge the interrupt at BOTH controllers, otherwise, you
* only send an EOI command to the first controller. If you don't send
* an EOI, it won't raise any more IRQs.\n
* \n
* an EOI, it won't raise any more IRQs.
*
* Note: If we enabled the APIC, we also disabled the PIC. Afterwards,
* we get no interrupts between 0 and 15.
*/
void irq_handler(struct state *s)
size_t** irq_handler(struct state *s)
{
/* This is a blank function pointer */
void (*handler) (struct state * s);
@ -276,7 +293,9 @@ void irq_handler(struct state *s)
leave_handler:
// timer interrupt?
if ((s->int_no == 32) || (s->int_no == 123))
scheduler(); // switch to a new task
return scheduler(); // switch to a new task
else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
scheduler();
return scheduler();
return NULL;
}

View file

@ -27,13 +27,13 @@
*/
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <asm/irqflags.h>
#include <asm/isrs.h>
#include <asm/irq.h>
#include <asm/apic.h>
#include <asm/idt.h>
#include <asm/io.h>
/*
* These are function prototypes for all of the exception
@ -165,27 +165,6 @@ void isrs_install(void)
irq_install_handler(7, fpu_handler);
}
static void fpu_init(union fpu_state* fpu)
{
if (has_fxsr()) {
i387_fxsave_t* fx = &fpu->fxsave;
memset(fx, 0x00, sizeof(i387_fxsave_t));
fx->cwd = 0x37f;
if (has_xmm())
fx->mxcsr = 0x1f80;
} else {
i387_fsave_t *fp = &fpu->fsave;
memset(fp, 0x00, sizeof(i387_fsave_t));
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
fp->fos = 0xffff0000u;
}
}
static void fpu_handler(struct state *s)
{
task_t* task = per_core(current_task);
@ -230,9 +209,14 @@ static void fault_handler(struct state *s)
{
if (s->int_no < 32) {
kputs(exception_messages[s->int_no]);
#ifdef CONFIG_X86_32
kprintf(" Exception (%d) at 0x%x:0x%x on core %u, error code 0x%x, eflags 0x%x\n",
s->int_no, s->cs, s->eip, CORE_ID, s->error, s->eflags);
#elif defined(CONFIG_X86_64)
kprintf(" Exception (%d) at 0x%llx:0x%llx on core %u, error code 0x%llx, rflags 0x%llx\n",
s->int_no, s->cs, s->rip, CORE_ID, s->error, s->rflags);
#endif
/* Now, we signalize that we have handled the interrupt */
if (apic_is_enabled())
apic_eoi();

View file

@ -19,6 +19,7 @@
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/time.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
@ -26,30 +27,103 @@
#include <asm/RCCE_lib.h>
#endif
static void default_mb(void)
{
asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc");
}
static void default_save_fpu_state(union fpu_state* state)
{
asm volatile ("fnsave %0; fwait" : "=m"((*state).fsave) :: "memory");
}
static void default_restore_fpu_state(union fpu_state* state)
{
asm volatile ("frstor %0" :: "m"(state->fsave));
}
static void default_fpu_init(union fpu_state* fpu)
{
i387_fsave_t *fp = &fpu->fsave;
memset(fp, 0x00, sizeof(i387_fsave_t));
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
fp->fos = 0xffff0000u;
}
func_memory_barrier mb = default_mb;
func_memory_barrier rmb = default_mb;
func_memory_barrier wmb = default_mb;
handle_fpu_state save_fpu_state = default_save_fpu_state;
handle_fpu_state restore_fpu_state = default_restore_fpu_state;
handle_fpu_state fpu_init = default_fpu_init;
// SSE/SSE2 barrier instructions; installed into mb/rmb/wmb by
// cpu_detection() when the corresponding feature bits are present.
static void mfence(void) { asm volatile("mfence" ::: "memory"); }
static void lfence(void) { asm volatile("lfence" ::: "memory"); }
static void sfence(void) { asm volatile("sfence" ::: "memory"); }
/* Save the FPU/SSE context via fxsave (FXSR-capable CPUs); fnclex clears
 * pending x87 exceptions afterwards, since fxsave does not reset the FPU. */
static void save_fpu_state_fxsr(union fpu_state* state)
{
	asm volatile ("fxsave %0; fnclex" : "=m"((*state).fxsave) :: "memory");
}
/* Restore an FPU/SSE context previously written by save_fpu_state_fxsr(). */
static void restore_fpu_state_fxsr(union fpu_state* state)
{
	asm volatile ("fxrstor %0" :: "m"(state->fxsave));
}
/* Initialize a task's fxsave-format save area: x87 control word 0x037F
 * (all exceptions masked) and, if SSE is available, the MXCSR reset value
 * 0x1F80 (all SIMD exceptions masked). */
static void fpu_init_fxsr(union fpu_state* fpu)
{
	i387_fxsave_t* fx = &fpu->fxsave;

	memset(fx, 0x00, sizeof(i387_fxsave_t));
	fx->cwd = 0x37f;
	if (BUILTIN_EXPECT(has_sse(), 1))
		fx->mxcsr = 0x1f80;
}
// CPUID feature words, filled in by cpu_detection()
cpu_info_t cpu_info = { 0, 0 };
// cached CPU frequency (0 = not yet determined)
static uint32_t cpu_freq = 0;
/* Detect CPU features via CPUID, enable FXSR/SSE exception support in CR4
 * and rebind the memory-barrier and FPU save/restore hooks to the best
 * implementation the CPU offers.
 *
 * FIX: the merged source contained leftover pre-merge lines (a duplicate
 * declaration of a/b/cr4, a stacked "if (has_xmm())"/"if (has_sse())"
 * condition pair and two variants of the FPU kputs message); only the
 * post-merge lines are kept so the function compiles again.
 *
 * Returns 0 (always succeeds). */
int cpu_detection(void)
{
	uint32_t a, b;
	size_t cr4;

	cpuid(1, &a, &b, &cpu_info.feature2, &cpu_info.feature1);

	cr4 = read_cr4();
	if (has_fxsr())
		cr4 |= 0x200;	// set the OSFXSR bit
	if (has_sse())
		cr4 |= 0x400;	// set the OSXMMEXCPT bit
	write_cr4(cr4);

	// upgrade the barrier hooks to the fence instructions where available
	if (has_sse())
		wmb = sfence;

	if (has_sse2()) {
		rmb = lfence;
		mb = mfence;
	}

	if (has_avx())
		kprintf("The CPU owns the Advanced Vector Extensions (AVX). However, MetalSVM doesn't support AVX!\n");

	if (has_fpu()) {
		kputs("Found and initialized FPU!\n");
		asm volatile ("fninit");
	}

	// prefer the fxsave/fxrstor context-switch path on FXSR-capable CPUs
	if (has_fxsr()) {
		save_fpu_state = save_fpu_state_fxsr;
		restore_fpu_state = restore_fpu_state_fxsr;
		fpu_init = fpu_init_fxsr;
	}

	return 0;
}
@ -75,12 +149,12 @@ uint32_t detect_cpu_frequency(void)
while((ticks = get_clock_tick()) - old == 0)
HALT;
mb();
rmb();
start = rdtsc();
/* wait a second to determine the frequency */
while(get_clock_tick() - ticks < TIMER_FREQ)
HALT;
mb();
rmb();
end = rdtsc();
diff = end > start ? end - start : start - end;

View file

@ -0,0 +1,79 @@
;
; Written by the Chair for Operating Systems, RWTH Aachen University
;
; NO Copyright (C) 2010-2012, Stefan Lankes
; consider these trivial functions to be public domain.
;
; These functions are distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;
[BITS 64]
SECTION .text
; char *strcpy(char *dst /*rdi*/, const char *src /*rsi*/)
; Copies the NUL-terminated string from [rsi] to [rdi] (matches the
; System V AMD64 argument registers used by lodsb/stosb) and returns
; the original destination pointer in rax.
global strcpy
strcpy:
	push rdi		; remember destination for the return value
L1:
	lodsb			; al = *src++
	stosb			; *dst++ = al
	test al, al		; stop once the terminating NUL has been copied
	jne L1
	pop rax			; return original destination
	ret
; char *strncpy(char *dst /*rdi*/, const char *src /*rsi*/, size_t n /*rdx*/)
; Copies at most n bytes from src to dst; if src is shorter than n, the
; remainder of dst is padded with NUL bytes (standard strncpy semantics).
; Returns the original destination pointer in rax.
global strncpy
strncpy:
	push rdi		; remember destination for the return value
	mov rcx, rdx		; rcx = remaining byte budget
L2:
	dec rcx
	js L3			; budget exhausted => done (no NUL appended)
	lodsb			; al = *src++
	stosb			; *dst++ = al
	test al, al
	jne L2			; FIX: was "jne L1", which jumped into strcpy's
				; unbounded loop, ignoring the length limit and
				; skipping the zero padding below
	rep			; al is 0 here: pad the remaining rcx bytes
	stosb
L3:
	pop rax			; return original destination
	ret
; NOTE(review): this block is compiled out (%if 0) and still uses 32-bit
; registers (esi/edi/ecx) and the CR0 paging toggle from the 32-bit port;
; it cannot work as-is in this [BITS 64] file and is kept for reference only.
%if 0
; The following function is derived from JamesM's kernel development tutorials
; (http://www.jamesmolloy.co.uk/tutorial_html/)
global copy_page_physical
copy_page_physical:
	push esi		; According to __cdecl, we must preserve the contents of ESI
	push edi		; and EDI.
	pushf			; push EFLAGS, so we can pop it and reenable interrupts
				; later, if they were enabled anyway.
	cli			; Disable interrupts, so we aren't interrupted.
				; Load these in BEFORE we disable paging!
	mov edi, [esp+12+4]	; Destination address
	mov esi, [esp+12+8]	; Source address
	mov edx, cr0		; Get the control register...
	and edx, 0x7fffffff	; and...
	mov cr0, edx		; Disable paging.
	cld
	mov ecx, 0x400		; 1024*4bytes = 4096 bytes = page size
	rep movsd		; copy page
	mov edx, cr0		; Get the control register again
	or edx, 0x80000000	; and...
	mov cr0, edx		; Enable paging.
	popf			; Pop EFLAGS back.
	pop edi			; Get the original value of EDI
	pop esi			; and ESI back.
	ret
%endif
SECTION .note.GNU-stack noalloc noexec nowrite progbits

View file

@ -39,12 +39,47 @@ static volatile uint64_t timer_ticks = 0;
#if MAX_CORES > 1
extern atomic_int32_t cpu_online;
#endif
// nonzero while the kernel runs in tickless mode (ticks are then derived
// from the TSC in check_ticks() instead of the timer interrupt)
static int8_t use_tickless = 0;
// TSC value at which the last tick was accounted in tickless mode
static uint64_t last_rdtsc = 0;

/* Return the number of timer ticks since boot. */
uint64_t get_clock_tick(void)
{
	return timer_ticks;
}
/* Enter tickless mode: record the current TSC as the reference point from
 * which check_ticks() will derive elapsed ticks. The rmb() orders the flag
 * update against the rdtsc read. */
void start_tickless(void)
{
	use_tickless = 1;
	rmb();
	last_rdtsc = rdtsc();
}
/* Leave tickless mode and reset the TSC reference point. */
void end_tickless(void)
{
	use_tickless = 0;
	last_rdtsc = 0;
}
/* In tickless mode the timer interrupt no longer advances timer_ticks;
 * this function emulates the tick counter from the TSC instead. Only the
 * boot core (smp_id() == 0) accounts ticks on SMP systems.
 * NOTE(review): only a single tick is added per call even if several tick
 * periods have elapsed — assumes check_ticks() is invoked at least once per
 * tick period; confirm with the callers. */
void check_ticks(void)
{
	if (!use_tickless)
		return;

#if MAX_CORES > 1
	if (smp_id() == 0)
#endif
	{
		uint64_t curr_rdtsc = rdtsc();

		rmb();
		// presumably get_cpu_frequency() is in MHz, making the right-hand
		// side the number of TSC cycles per timer tick — TODO confirm
		if (curr_rdtsc - last_rdtsc > 1000000ULL*(uint64_t)get_cpu_frequency() / (uint64_t)TIMER_FREQ) {
			timer_ticks++;
			last_rdtsc = curr_rdtsc;
			rmb();
		}
	}
}
int sys_times(struct tms* buffer, clock_t* clock)
{
if (BUILTIN_EXPECT(!buffer, 0))
@ -85,12 +120,14 @@ static void timer_handler(struct state *s)
// dump_load();
}
#ifndef CONFIG_TICKLESS
update_load();
#if MAX_CORES > 1
if (atomic_int32_read(&cpu_online) > 1)
load_balancing();
#endif
#endif
}
int timer_wait(unsigned int ticks)

View file

@ -1,4 +1,4 @@
C_source := page.c svm.c
C_source := page$(BIT).c svm.c
MODULE := arch_x86_mm
include $(TOPDIR)/Makefile.inc

View file

@ -57,7 +57,9 @@ extern const void kernel_start;
extern const void kernel_end;
// boot task's page directory and page directory lock
static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
static page_table_t pgt_container = {{[0 ... PGT_ENTRIES-1] = 0}};
static page_table_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;
@ -88,8 +90,8 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
if (counter)
(*counter)++;
for(i=0; i<1024; i++) {
if (pgt->entries[i] & 0xFFFFF000) {
for(i=0; i<PGT_ENTRIES; i++) {
if (pgt->entries[i] & PAGE_MASK) {
if (!(pgt->entries[i] & PG_USER)) {
// Kernel page => copy only page entries
new_pgt->entries[i] = pgt->entries[i];
@ -102,7 +104,7 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
if (counter)
(*counter)++;
copy_page_physical((void*)phyaddr, (void*) (pgt->entries[i] & 0xFFFFF000));
copy_page_physical((void*)phyaddr, (void*) (pgt->entries[i] & PAGE_MASK));
new_pgt->entries[i] = phyaddr | (pgt->entries[i] & 0xFFF);
@ -131,7 +133,7 @@ int create_pgd(task_t* task, int copy)
// we already know the virtual address of the "page table container"
// (see file header)
pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
// create new page directory for the new task
pgd = kmalloc(sizeof(page_dir_t));
@ -149,7 +151,7 @@ int create_pgd(task_t* task, int copy)
spinlock_lock(&kslock);
for(i=0; i<1024; i++) {
for(i=0; i<PGT_ENTRIES; i++) {
pgd->entries[i] = boot_pgd.entries[i];
// only kernel entries will be copied
if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
@ -159,13 +161,13 @@ int create_pgd(task_t* task, int copy)
spinlock_unlock(&kslock);
// map page table container at the end of the kernel space
viraddr = (KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000;
viraddr = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
// now, we create a self reference
pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & 0xFFFFF000)|KERN_TABLE;
pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & 0xFFFFF000)|KERN_PAGE;
pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;
task->pgd = pgd;
@ -178,10 +180,10 @@ int create_pgd(task_t* task, int copy)
if (!(curr_task->pgd->entries[i] & PG_USER))
continue;
phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & 0xFFFFF000), &counter);
phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
if (phyaddr) {
pgd->entries[i] = (phyaddr & 0xFFFFF000) | (curr_task->pgd->entries[i] & 0xFFF);
pgt->entries[i] = (phyaddr & 0xFFFFF000) | KERN_PAGE;
pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
}
}
@ -206,9 +208,9 @@ int drop_pgd(void)
spinlock_lock(&task->pgd_lock);
for(i=0; i<1024; i++) {
for(i=0; i<PGT_ENTRIES; i++) {
if (pgd->entries[i] & PG_USER) {
put_page(pgd->entries[i] & 0xFFFFF000);
put_page(pgd->entries[i] & PAGE_MASK);
pgd->entries[i] = 0;
}
}
@ -241,14 +243,14 @@ size_t virt_to_phys(size_t viraddr)
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
if (!(task->pgd->entries[index1] & 0xFFFFF000))
if (!(task->pgd->entries[index1] & PAGE_MASK))
goto out;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (!pgt || !(pgt->entries[index2]))
goto out;
ret = pgt->entries[index2] & 0xFFFFF000; // determine page frame
ret = pgt->entries[index2] & PAGE_MASK; // determine page frame
ret = ret | (viraddr & 0xFFF); // add page offset
out:
//kprintf("vir %p to phy %p\n", viraddr, ret);
@ -313,9 +315,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (paging_enabled)
// we already know the virtual address of the "page table container"
// (see file header)
pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
else
pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);
pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
if (BUILTIN_EXPECT(!pgt_container, 0)) {
spinlock_unlock(pgd_lock);
@ -328,26 +330,26 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
// clear the page table
if (paging_enabled)
memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000), 0x00, PAGE_SIZE);
memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
else
memset(pgt, 0x00, PAGE_SIZE);
} else pgt = (page_table_t*) (task->pgd->entries[index] & 0xFFFFF000);
} else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);
/* convert physical address to virtual */
if (paging_enabled)
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
index = (viraddr >> 12) & 0x3FF;
if (pgt->entries[index] && !(flags & MAP_REMAP)) {
spinlock_unlock(pgd_lock);
kprintf("0x%x is already maped\n", viraddr);
kprintf("0x%x is already mapped\n", viraddr);
return 0;
}
if (flags & MAP_USER_SPACE)
pgt->entries[index] = USER_PAGE|(phyaddr & 0xFFFFF000);
pgt->entries[index] = USER_PAGE|(phyaddr & PAGE_MASK);
else
pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
pgt->entries[index] = KERN_PAGE|(phyaddr & PAGE_MASK);
if (flags & MAP_NO_CACHE)
pgt->entries[index] |= PG_PCD;
@ -364,6 +366,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (flags & MAP_SVM_LAZYRELEASE)
pgt->entries[index] |= PG_SVM_LAZYRELEASE|PG_PWT;
if (flags & MAP_SVM_INIT)
pgt->entries[index] |= PG_SVM_INIT;
if (flags & MAP_NO_ACCESS)
pgt->entries[index] &= ~PG_PRESENT;
@ -405,15 +410,17 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
index2 = (viraddr >> 12) & 0x3FF;
while ((viraddr < end) && (index2 < 1024)) {
pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (pgt && pgt->entries[index2]) {
phyaddr = pgt->entries[index2] & 0xFFFFF000;
phyaddr = pgt->entries[index2] & PAGE_MASK;
newflags = pgt->entries[index2] & 0xFFF; // get old flags
if ((newflags & PG_SVM_STRONG) && !(newflags & PG_PRESENT) && (flags & (VMA_READ|VMA_WRITE) && !(flags & VMA_NOACCESS)))
newflags |= PG_PRESENT;
else if ((newflags & PG_SVM_STRONG) && (newflags & PG_PRESENT) && (flags & VMA_NOACCESS))
newflags &= ~PG_PRESENT;
if (!(newflags & PG_SVM_INIT)) {
if ((newflags & PG_SVM_STRONG) && !(newflags & PG_PRESENT) && (flags & (VMA_READ|VMA_WRITE) && !(flags & VMA_NOACCESS)))
newflags |= PG_PRESENT;
else if ((newflags & PG_SVM_STRONG) && (newflags & PG_PRESENT) && (flags & VMA_NOACCESS))
newflags &= ~PG_PRESENT;
}
// update flags
if (!(flags & VMA_WRITE)) {
@ -430,7 +437,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
#endif
}
pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & 0xFFFFF000);
pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);
tlb_flush_one_page(viraddr);
}
@ -464,12 +471,12 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
if (flags & MAP_KERNEL_SPACE) {
pgd_lock = &kslock;
start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000; // we need 1 PAGE for our PGTs
start = (((size_t) &kernel_end) + PAGE_SIZE) & PAGE_MASK;
end = (KERNEL_SPACE - 2*PAGE_SIZE) & PAGE_MASK; // we need 1 PAGE for our PGTs
} else {
pgd_lock = &task->pgd_lock;
start = KERNEL_SPACE & 0xFFFFF000;
end = 0xFFFFF000;
start = KERNEL_SPACE & PAGE_MASK;
end = PAGE_MASK;
}
if (BUILTIN_EXPECT(!npages, 0))
@ -483,7 +490,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
index1 = i >> 22;
index2 = (i >> 12) & 0x3FF;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (!pgt || !(pgt->entries[index2])) {
i+=PAGE_SIZE;
j++;
@ -526,7 +533,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (!pgt)
continue;
pgt->entries[index2] &= ~PG_PRESENT;
@ -565,7 +572,7 @@ int vm_free(size_t viraddr, uint32_t npages)
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (!pgt)
continue;
pgt->entries[index2] = 0;
@ -598,7 +605,7 @@ int print_paging_tree(size_t viraddr)
kprintf("\tPage directory entry %u: ", index1);
if (pgd) {
kprintf("0x%0x\n", pgd->entries[index1]);
pgt = (page_table_t*) (pgd->entries[index1] & 0xFFFFF000);
pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
} else
kputs("invalid page directory\n");
@ -629,7 +636,7 @@ static void pagefault_handler(struct state *s)
#endif
if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
viraddr = viraddr & 0xFFFFF000;
viraddr = viraddr & PAGE_MASK;
phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0))
@ -648,13 +655,19 @@ static void pagefault_handler(struct state *s)
// does our SVM system need to handle this page fault?
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
if (!pgd || !(pgd->entries[index1] & 0xFFFFF000))
if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
goto default_handler;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
if (!pgt || !(pgt->entries[index2]))
goto default_handler;
if (pgt->entries[index2] & PG_SVM_INIT) {
if (BUILTIN_EXPECT(!svm_alloc_page(viraddr, pgt), 1))
return;
else
goto default_handler;
}
if (pgt->entries[index2] & PG_SVM_STRONG)
if (!svm_access_request(viraddr))
if (BUILTIN_EXPECT(!svm_access_request(viraddr), 1))
return;
#endif
@ -678,12 +691,7 @@ int arch_paging_init(void)
irq_install_handler(14, pagefault_handler);
// Create a page table to reference to the other page tables
pgt = (page_table_t*) get_page();
if (!pgt) {
kputs("arch_paging_init: Not enough memory!\n");
return -ENOMEM;
}
memset(pgt, 0, PAGE_SIZE);
pgt = &pgt_container;
// map this table at the end of the kernel space
viraddr = KERNEL_SPACE - PAGE_SIZE;
@ -691,21 +699,16 @@ int arch_paging_init(void)
index2 = (viraddr >> 12) & 0x3FF;
// now, we create a self reference
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|KERN_TABLE;
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
// create the other PGTs for the kernel space
for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
size_t phyaddr = get_page();
size_t phyaddr = boot_pgt+i;
if (!phyaddr) {
kputs("arch_paging_init: Not enough memory!\n");
return -ENOMEM;
}
memset((void*) phyaddr, 0, PAGE_SIZE);
per_core(current_task)->pgd->entries[i] = (phyaddr & 0xFFFFF000)|KERN_TABLE;
pgt->entries[i] = (phyaddr & 0xFFFFF000)|KERN_PAGE;
memset((void*) phyaddr, 0x00, sizeof(page_table_t));
per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
}
/*
@ -735,7 +738,7 @@ int arch_paging_init(void)
* of course, mb_info has to map into the kernel space
*/
if (mb_info)
map_region((size_t) mb_info & 0xFFFFF000, (size_t) mb_info & 0xFFFFF000, 1, MAP_KERNEL_SPACE);
map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
#if 0
/*
@ -762,19 +765,20 @@ int arch_paging_init(void)
* Therefore, we map these moduels into the kernel space.
*/
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
npages++;
map_region((size_t) (mb_info->mods_addr), (size_t) (mb_info->mods_addr), npages, MAP_KERNEL_SPACE);
map_region((size_t) mb_info->mods_addr, (size_t) mb_info->mods_addr, npages, MAP_KERNEL_SPACE);
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
// map physical address to the same virtual address
npages = (mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
if (mmodule->mod_end & (PAGE_SIZE-1))
npages++;
map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_KERNEL_SPACE);
kprintf("Map module %s at 0x%x (%u pages)\n", (char*) mmodule->cmdline, mmodule->mod_start, npages);
map_region((size_t) mmodule->mod_start, (size_t) mmodule->mod_start, npages, MAP_KERNEL_SPACE);
}
}
#endif
@ -816,9 +820,9 @@ int arch_paging_init(void)
/*
* we turned on paging
* => now, we are able to register our task for Task State Switching
* => now, we are able to register our task
*/
register_task(per_core(current_task));
register_task();
// APIC registers into the kernel address space
map_apic();

646
arch/x86/mm/page64.c Normal file
View file

@ -0,0 +1,646 @@
/*
* Copyright 2012 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/vma.h>
#include <metalsvm/string.h>
#include <metalsvm/page.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <asm/irq.h>
#include <asm/multiboot.h>
#include <asm/apic.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/svm.h>
#include <asm/icc.h>
#endif
/*
* Virtual Memory Layout of the standard configuration
* (1 GB kernel space)
*
* 0x00000000 - 0x000FFFFF: reserved for IO devices (16MB)
* 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
* 0x0DEAE000 - 0x3FFFFFFF: Kernel heap
*
*/
/*
* Note that linker symbols are not variables, they have no memory allocated for
* maintaining a value, rather their address is their value.
*/
extern const void kernel_start;
extern const void kernel_end;
// boot task's page directory and page directory lock
extern page_dir_t boot_pgd;
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;
/* Return the boot task's page directory (defined in the boot code,
 * see the extern declaration above). */
page_dir_t* get_boot_pgd(void)
{
	return &boot_pgd;
}
/* Set up the page directory of a new task.
 * The 64-bit port currently supports kernel tasks only, so every task
 * shares the boot page directory; the 'copy' argument is ignored here
 * (kept for interface compatibility with the 32-bit implementation).
 * Returns 0 on success, -EINVAL if paging is not yet enabled. */
int create_pgd(task_t* task, int copy)
{
	// Currently, we support only kernel tasks
	// => all tasks are able to use the same pgd

	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

	task->pgd = get_boot_pgd();

	return 0;
}
/*
 * drops all page frames and the PGD of a user task
 *
 * NOTE(review): the body is compiled out (#if 0) because the 64-bit port
 * has no user tasks yet (see create_pgd); the function is a no-op that
 * always returns 0. The disabled code still uses the 32-bit 1024-entry
 * layout and would need porting before being enabled.
 */
int drop_pgd(void)
{
#if 0
	page_dir_t* pgd = per_core(current_task)->pgd;
	size_t phy_pgd = virt_to_phys((size_t) pgd);
	task_t* task = per_core(current_task);
	uint32_t i;

	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
		return -EINVAL;

	spinlock_lock(&task->pgd_lock);

	for(i=0; i<1024; i++) {
		if (pgd->entries[i] & PG_USER) {
			put_page(pgd->entries[i] & PAGE_MASK);
			pgd->entries[i] = 0;
		}
	}

	// freeing the page directory
	put_page(phy_pgd);

	task->pgd = NULL;

	spinlock_unlock(&task->pgd_lock);
#endif

	return 0;
}
/* Translate a virtual address into the corresponding physical address by
 * walking the four-level page tables (PML4 -> PDPT -> PD -> PT).
 * Returns the physical address, 0 if the address is not mapped, or the
 * input itself while paging is still disabled (identity mapping).
 * Note: tables are only allocated in kernel space, so a table's physical
 * address equals its virtual address and can be dereferenced directly. */
size_t virt_to_phys(size_t viraddr)
{
	task_t* task = per_core(current_task);
	page_table_t* tab;
	size_t phyaddr = 0;

	if (!paging_enabled)
		return viraddr;

	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
		return 0;

	spinlock_lock(&task->pgd_lock);

	// walk down the hierarchy; a zero entry at any level means "unmapped"
	tab = (page_table_t*) (task->pgd->entries[(viraddr >> 39) & 0x1FF] & PAGE_MASK);
	if (tab)
		tab = (page_table_t*) (tab->entries[(viraddr >> 30) & 0x1FF] & PAGE_MASK);
	if (tab)
		tab = (page_table_t*) (tab->entries[(viraddr >> 21) & 0x1FF] & PAGE_MASK);
	if (tab) {
		phyaddr = (size_t) (tab->entries[(viraddr >> 12) & 0x1FF] & PAGE_MASK);
		if (phyaddr)
			phyaddr |= viraddr & 0xFFF;	// add page offset
	}

	spinlock_unlock(&task->pgd_lock);

	//kprintf("vir %p to phy %p\n", viraddr, phyaddr);

	return phyaddr;
}
/* Map npages physical pages starting at phyaddr to virtual addresses
 * starting at viraddr; if viraddr is 0, a free virtual range is picked
 * via vm_alloc(). 'flags' selects kernel vs. user space and page
 * attributes (cacheability, write-through, presence).
 * Returns the first mapped virtual address, or 0 on failure.
 * NOTE(review): vm_alloc() acquires the same pgd lock that is already
 * held here — assumes the spinlock implementation is recursive; confirm. */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	spinlock_t* pgd_lock;
	page_table_t* pgt;
	size_t i, ret;

	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
		return 0;

	// before paging is up only identity mappings are possible
	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE)
		pgd_lock = &kslock;
	else
		pgd_lock = &task->pgd_lock;

	spinlock_lock(pgd_lock);

	if (!viraddr) {
		viraddr = vm_alloc(npages, flags);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			spinlock_unlock(pgd_lock);
			kputs("map_region: found no valid virtual address\n");
			return 0;
		}
	}

	ret = viraddr;
	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
		// indices into the four paging levels for this virtual address
		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
		uint16_t idx_table = (viraddr >> 12) & 0x1FF;

		// walk PML4 -> PDPT -> PD; missing intermediate tables are treated
		// as an error (no on-demand table allocation in the 64-bit port yet)
		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
		if (!pgt) {
			spinlock_unlock(pgd_lock);
			kputs("map_region: out of memory\n");
			return 0;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
		if (!pgt) {
			spinlock_unlock(pgd_lock);
			kputs("map_region: out of memory\n");
			return 0;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
		if (!pgt) {
			spinlock_unlock(pgd_lock);
			kputs("map_region: out of memory\n");
			return 0;
		}

		/* convert physical address to virtual */
		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		//if (paging_enabled)
		//	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);

		if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
			spinlock_unlock(pgd_lock);
			kprintf("0x%x is already mapped\n", viraddr);
			return 0;
		}

		if (flags & MAP_USER_SPACE)
			pgt->entries[idx_table] = USER_PAGE|(phyaddr & PAGE_MASK);
		else
			pgt->entries[idx_table] = KERN_PAGE|(phyaddr & PAGE_MASK);

		if (flags & MAP_NO_CACHE)
			pgt->entries[idx_table] |= PG_PCD;

		// MAP_NO_ACCESS clears the present bit again so any access faults
		if (flags & MAP_NO_ACCESS)
			pgt->entries[idx_table] &= ~PG_PRESENT;

		if (flags & MAP_WT)
			pgt->entries[idx_table] |= PG_PWT;

		if (flags & MAP_USER_SPACE)
			atomic_int32_inc(&task->user_usage);

		tlb_flush_one_page(viraddr);
	}

	spinlock_unlock(pgd_lock);

	return ret;
}
/* Change the access permissions of the pages in [start, end).
 * NOTE(review): the body is compiled out (#if 0) — the function is a stub
 * that always returns -EINVAL in the 64-bit port. The disabled code still
 * uses the 32-bit two-level table walk and would need porting first. */
int change_page_permissions(size_t start, size_t end, uint32_t flags)
{
#if 0
	uint32_t index1, index2, newflags;
	size_t viraddr = start & PAGE_MASK;
	size_t phyaddr;
	page_table_t* pgt;
	page_dir_t* pgd;
	task_t* task = per_core(current_task);

	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

	pgd = per_core(current_task)->pgd;
	if (BUILTIN_EXPECT(!pgd, 0))
		return -EINVAL;

	spinlock_lock(&task->pgd_lock);

	while (viraddr < end)
	{
		index1 = viraddr >> 22;
		index2 = (viraddr >> 12) & 0x3FF;

		while ((viraddr < end) && (index2 < 1024)) {
			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
			if (pgt && pgt->entries[index2]) {
				phyaddr = pgt->entries[index2] & PAGE_MASK;
				newflags = pgt->entries[index2] & 0xFFF; // get old flags

				// pages still in SVM initialization keep their present bit
				if (!(newflags & PG_SVM_INIT)) {
					if ((newflags & PG_SVM_STRONG) && !(newflags & PG_PRESENT) && (flags & (VMA_READ|VMA_WRITE) && !(flags & VMA_NOACCESS)))
						newflags |= PG_PRESENT;
					else if ((newflags & PG_SVM_STRONG) && (newflags & PG_PRESENT) && (flags & VMA_NOACCESS))
						newflags &= ~PG_PRESENT;
				}

				// update flags
				if (!(flags & VMA_WRITE)) {
					newflags &= ~PG_RW;
#ifdef CONFIG_ROCKCREEK
					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
						newflags &= ~PG_MPE;
#endif
				} else {
					newflags |= PG_RW;
#ifdef CONFIG_ROCKCREEK
					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
						newflags |= PG_MPE;
#endif
				}

				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);

				tlb_flush_one_page(viraddr);
			}

			index2++;
			viraddr += PAGE_SIZE;
		}
	}

	spinlock_unlock(&task->pgd_lock);
#endif

	return -EINVAL;
}
/*
 * Use the first fit algorithm to find a valid address range
 *
 * TODO: O(n) => bad performance, we need a better approach
 *
 * Returns the start of a free virtual range of npages pages, or 0 if none
 * was found. Kernel-space requests search between kernel_end and
 * KERNEL_SPACE, user-space requests above KERNEL_SPACE.
 *
 * FIX: the paging-level indices were computed from 'viraddr' (the start of
 * the current candidate run) instead of the probe address 'i', so the scan
 * re-tested the same page table entry on every iteration; the 32-bit
 * implementation correctly derives the indices from 'i'.
 */
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	spinlock_t* pgd_lock;
	size_t viraddr, i, j, ret = 0;
	size_t start, end;
	page_table_t* pgt;

	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE) {
		pgd_lock = &kslock;
		start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
		end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
	} else {
		pgd_lock = &task->pgd_lock;
		start = KERNEL_SPACE & PAGE_MASK;
		end = PAGE_MASK;
	}

	if (BUILTIN_EXPECT(!npages, 0))
		return 0;

	spinlock_lock(pgd_lock);

	viraddr = i = start;	// viraddr = start of the candidate run, i = probe address
	j = 0;			// consecutive free pages found so far
	do {
		uint16_t idx_pd4 = (i >> 39) & 0x1FF;
		uint16_t idx_dirp = (i >> 30) & 0x1FF;
		uint16_t idx_dir = (i >> 21) & 0x1FF;
		uint16_t idx_table = (i >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
		if (!pgt) {
			// whole PML4 slot unmapped => 512^3 pages are free
			i += (size_t)PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			j += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
		if (!pgt) {
			// whole PDPT slot unmapped => 512^2 pages are free
			i += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			j += PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
		if (!pgt) {
			// whole PD slot unmapped => 512 pages are free
			i += PGT_ENTRIES*PAGE_SIZE;
			j += PGT_ENTRIES;
			continue;
		}

		if (!(pgt->entries[idx_table])) {
			i += PAGE_SIZE;
			j++;
		} else {
			// page in use => restart the search behind it
			j = 0;
			viraddr = i + PAGE_SIZE;
			i = i + PAGE_SIZE;
		}
	} while((j < npages) && (i<=end));

	if ((j >= npages) && (viraddr < end))
		ret = viraddr;

	spinlock_unlock(pgd_lock);

	return ret;
}
/* Clear the present bit of npages pages starting at viraddr, flushing the
 * corresponding TLB entries. Intermediate tables that are missing are
 * skipped in bulk. Returns 0 on success, -EINVAL on invalid state.
 *
 * FIX: the original advanced viraddr BEFORE calling tlb_flush_one_page()
 * and before the KERNEL_SPACE comparison, so it flushed the TLB entry of
 * the *following* page and tested the wrong address for the user-usage
 * accounting; both now operate on the page that was actually unmapped. */
int unmap_region(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	spinlock_t* pgd_lock;
	page_table_t* pgt;
	size_t i;
	uint16_t idx_pd4, idx_dirp;
	uint16_t idx_dir, idx_table;

	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		pgd_lock = &kslock;
	else
		pgd_lock = &task->pgd_lock;

	spinlock_lock(pgd_lock);

	i = 0;
	while(i<npages)
	{
		idx_pd4 = (viraddr >> 39) & 0x1FF;
		idx_dirp = (viraddr >> 30) & 0x1FF;
		idx_dir = (viraddr >> 21) & 0x1FF;
		idx_table = (viraddr >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
		if (!pgt) {
			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
		if (!pgt) {
			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
		if (!pgt) {
			viraddr += PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES;
			continue;
		}

		if (pgt->entries[idx_table])
			pgt->entries[idx_table] &= ~PG_PRESENT;

		// account and flush the page we just unmapped, then advance
		if (viraddr > KERNEL_SPACE)
			atomic_int32_dec(&task->user_usage);
		tlb_flush_one_page(viraddr);

		viraddr += PAGE_SIZE;
		i++;
	}

	spinlock_unlock(pgd_lock);

	return 0;
}
/* Release npages virtual pages starting at viraddr by clearing their page
 * table entries and flushing the TLB. Missing intermediate tables are
 * skipped in bulk. Returns 0 on success, -EINVAL on invalid state.
 *
 * FIX: the original advanced viraddr BEFORE calling tlb_flush_one_page(),
 * flushing the TLB entry of the *following* page instead of the one whose
 * entry was just cleared; the flush now happens before the increment. */
int vm_free(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	spinlock_t* pgd_lock;
	page_table_t* pgt;
	size_t i;
	uint16_t idx_pd4, idx_dirp;
	uint16_t idx_dir, idx_table;

	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		pgd_lock = &kslock;
	else
		pgd_lock = &task->pgd_lock;

	spinlock_lock(pgd_lock);

	i = 0;
	while(i<npages)
	{
		idx_pd4 = (viraddr >> 39) & 0x1FF;
		idx_dirp = (viraddr >> 30) & 0x1FF;
		idx_dir = (viraddr >> 21) & 0x1FF;
		idx_table = (viraddr >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
		if (!pgt) {
			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
		if (!pgt) {
			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES*PGT_ENTRIES;
			continue;
		}

		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
		if (!pgt) {
			viraddr += PGT_ENTRIES*PAGE_SIZE;
			i += PGT_ENTRIES;
			continue;
		}

		if (pgt->entries[idx_table])
			pgt->entries[idx_table] = 0;

		// flush the page we just released, then advance
		tlb_flush_one_page(viraddr);

		viraddr += PAGE_SIZE;
		i++;
	}

	spinlock_unlock(pgd_lock);

	return 0;
}
/* Page fault (interrupt 14) handler for the 64-bit port.
 * On-demand heap allocation is compiled out (#if 0), so every fault is
 * currently fatal: the fault address (CR2) and register state are dumped
 * and the core halts.
 * NOTE(review): the statements after "while(1);" are unreachable —
 * presumably the endless loop is a deliberate debug halt; confirm whether
 * irq_enable()/abort() should run instead. */
static void pagefault_handler(struct state *s)
{
	task_t* task = per_core(current_task);
	//page_dir_t* pgd = task->pgd;
	//page_table_t* pgt = NULL;
	size_t viraddr = read_cr2();	// CR2 holds the faulting address
	//size_t phyaddr;

#if 0
	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
		viraddr = viraddr & PAGE_MASK;

		phyaddr = get_page();
		if (BUILTIN_EXPECT(!phyaddr, 0))
			goto default_handler;

		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
			memset((void*) viraddr, 0x00, PAGE_SIZE);
			return;
		}

		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
		put_page(phyaddr);
	}
#endif

default_handler:
	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
	kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n",
		s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);

	while(1);
	irq_enable();
	abort();
}
/* Architecture-specific paging setup for the 64-bit port.
 * The kernel itself is already mapped by the boot code (entry64.asm); this
 * function installs the page fault handler, maps the SMP trampoline page
 * and the multiboot modules, enables the paging_enabled flag and registers
 * the boot task. Returns 0 on success, -ENOMEM on mapping failure. */
int arch_paging_init(void)
{
	uint32_t i, npages;

	// uninstall default handler and install our own
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

	// kernel is already mapped into the kernel space (see entry64.asm)
	// this includes .data, .bss, .text, video memory and the multiboot structure

#if MAX_CORES > 1
	// Reserve page for smp boot code
	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
		kputs("could not reserve page for smp boot code\n");
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_MULTIBOOT
#if 0
	/*
	 * Map reserved memory regions into the kernel space
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
				npages = mmap->len / PAGE_SIZE;
				if ((mmap->addr+mmap->len) % PAGE_SIZE)
					npages++;
				map_region(mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
			}
			mmap++;
		}
	}
#endif

	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we map these modules into the kernel space.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

		// first map the module descriptor array itself
		npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
		if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
			npages++;
		map_region((size_t) (mb_info->mods_addr), (size_t) (mb_info->mods_addr), npages, MAP_REMAP|MAP_KERNEL_SPACE);

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			// map physical address to the same virtual address
			npages = (mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
			if (mmodule->mod_end & (PAGE_SIZE-1))
				npages++;
			kprintf("Map module %s at 0x%x (%u pages)\n", (char*) mmodule->cmdline, mmodule->mod_start, npages);
			map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_REMAP|MAP_KERNEL_SPACE);
		}
	}
#endif

	/* signalize that we are able to use paging */
	paging_enabled = 1;

	/*
	 * we turned on paging
	 * => now, we are able to register our task
	 */
	register_task();

	// APIC registers into the kernel address space
	map_apic();

	return 0;
}

View file

@ -26,6 +26,7 @@
#include <metalsvm/errno.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <asm/page.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
@ -34,11 +35,60 @@
#include <asm/icc.h>
#include <asm/svm.h>
#define SHARED_PAGES (RCCE_SHM_SIZE_MAX >> PAGE_SHIFT)
#define USE_PERFCOUNTERS 1
#define USE_RESP_MAIL 1
#define SHARED_PAGES (4*(RCCE_SHM_SIZE_MAX >> PAGE_SHIFT))
#define OWNER_SIZE ((SHARED_PAGES * sizeof(uint8_t) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define AIREG1 0
#define AIREG2 (AIREG1 + 1)
#define LOCK_ID 0
#define ABS(a) (((a) < 0) ? -(a) : (a))
t_vcharp RC_SHM_BUFFER_START();
typedef struct {
int counter;
int initializer;
} atomic_increg_t;
static volatile atomic_increg_t *incregs = NULL;
static RCCE_FLAG release;
/*
 * Details on L2 cache (needed for flushing)
*/
#define OWN_MPB 0xd8000000
#define L2_LINESIZE 32UL
#define L2_WAYS 4UL
#define L2_CAPACITY (256*1024UL)
#define L2_WBSTRIDE (L2_CAPACITY/L2_WAYS)
#ifdef SVM_WB
/*
 * Pull fresh data into all 4 ways of one L2 cache set, thereby evicting
 * (and, for dirty lines, writing back) whatever the set held before.
 *
 * @param set kernel-space virtual address that selects the target set
 *            inside the flush dummy area
 */
__attribute__((always_inline)) static inline void svm_purge_set(const size_t set)
{
	register char tmp;

	/* Translate the set to a kernel space virtual address */
	const volatile char* dummyData = (volatile char*)set;

	/* One read per way (stride L2_WBSTRIDE) hits the same set in each of
	 * the four ways; the volatile accesses keep the compiler from
	 * removing the otherwise-unused loads. */
	tmp = *dummyData;
	tmp = *(dummyData + L2_WBSTRIDE);
	tmp = *(dummyData + L2_WBSTRIDE * 2);
	tmp = *(dummyData + L2_WBSTRIDE * 3);
}
static size_t dummy_base = OWN_MPB + L2_CAPACITY;
static size_t dummy_offset = 0;
#endif
/*
* This array describes the owner of a specific page.
* Only the owner of a page is able to change the possession.
@ -48,61 +98,198 @@ static volatile uint8_t* page_owner = NULL;
// helper array to convert a physical to a virtual address
static size_t phys2virt[SHARED_PAGES] = {[0 ... SHARED_PAGES-1] = 0};
static size_t shmbegin = 0;
static const size_t shmbegin = SHM_ADDR;
static uint32_t emit[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t request[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t forward[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t alloc_page = 0;
static uint32_t map_page = 0;
#if USE_PERFCOUNTERS
static uint64_t alloc_ticks = 0;
static uint64_t request_ticks = 0;
static uint64_t emit_ticks = 0;
static uint64_t wait_ticks = 0;
static uint64_t max_wait = 0;
static uint64_t min_wait = (uint64_t) -1;
#endif
int svm_init(void)
{
size_t phyaddr;
uint32_t flags;
// iRCCE is not thread save => disable interrupts
flags = irq_nested_disable();
shmbegin = (size_t)RC_SHM_BUFFER_START();
phyaddr = (size_t) RCCE_shmalloc(OWNER_SIZE);
irq_nested_enable(flags);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;
if (BUILTIN_EXPECT(phyaddr & 0xFFF, 0)) {
kprintf("RCCE_shmalloc returns not a page aligned physiacl address: 0x%x\n", phyaddr);
return -ENOMEM;
}
uint32_t i, flags;
kprintf("Shared memory starts at the physical address 0x%x\n", shmbegin);
page_owner = (uint8_t*) map_region(0, phyaddr, OWNER_SIZE >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!page_owner, 0)) {
flags = irq_nested_disable();
RCCE_shfree((t_vcharp) phyaddr);
irq_nested_enable(flags);
page_owner = (uint8_t*) map_region(0, shmbegin, OWNER_SIZE >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!page_owner, 0))
return -ENOMEM;
if (!RCCE_IAM) {
memset((void*)page_owner, 0xFF, OWNER_SIZE);
// owner vector is owned by core 0
for(i=0; i<(OWNER_SIZE >> PAGE_SHIFT); i++)
page_owner[i] = 0;
}
// per default is core 0 owner
if (!RCCE_IAM)
memset((void*)page_owner, 0x00, OWNER_SIZE);
// initialize svm barrier
incregs = (volatile atomic_increg_t*) map_region(0, 0xF900E000, 2, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!incregs, 0))
return -ENOMEM;
kprintf("Map atomic counters at 0x%x\n", incregs);
if (!RCCE_IAM) {
incregs[AIREG1].initializer = 0;
incregs[AIREG2].initializer = 0;
}
// iRCCE is not thread save => disable interrupts
flags = irq_nested_disable();
RCCE_barrier(&RCCE_COMM_WORLD);
RCCE_flag_alloc(&release);
irq_nested_enable(flags);
RCCE_barrier(&RCCE_COMM_WORLD);
return 0;
}
/*
 * Allocate n contiguous pages from the SCC shared-memory region.
 *
 * The search starts at the shared-memory controller with the smallest
 * Manhattan distance to this core's tile and scans the owner vector for
 * a run of n free entries (an entry is free iff its owner id is
 * >= RCCE_MAXNP). On success the run is claimed for this core
 * (owner = RCCE_IAM) and its physical base address is returned;
 * 0 is returned if no sufficiently large run exists or n is 0.
 *
 * Fixes over the previous version: the run scan no longer reads
 * page_owner[] past SHARED_PAGES (runs may not wrap around the end of
 * the owner vector), success is detected by run length instead of the
 * outer loop counter, and no candidate start index is skipped after a
 * partial run.
 *
 * Caller must hold the global SVM lock (see shmalloc()).
 *
 * @param n number of contiguous pages requested
 * @return physical base address of the claimed run, or 0 on failure
 */
static size_t get_shpages(uint32_t n)
{
	int x = X_PID(RC_MY_COREID);
	int y = Y_PID(RC_MY_COREID);
	size_t i, j, k, start = SHM_X0_Y0;
	int diff, min = x + y;

	/* pick the memory controller closest to this tile */
	diff = ABS(5 - x) + ABS(0 - y);
	if (diff < min) {
		min = diff;
		start = SHM_X5_Y0;
	}

	diff = ABS(0 - x) + ABS(2 - y);
	if (diff < min) {
		min = diff;
		start = SHM_X0_Y2;
	}

	diff = ABS(5 - x) + ABS(2 - y);
	if (diff < min) {
		min = diff;
		start = SHM_X5_Y2;
	}

	if (BUILTIN_EXPECT(!n || (n > SHARED_PAGES), 0))
		return 0;

	for(i=0; i<SHARED_PAGES; i++) {
		/* candidate start index, rotated so the preferred controller
		 * is tried first */
		j = (((start - shmbegin) >> PAGE_SHIFT) + i) % SHARED_PAGES;

		/* a run must not wrap around the end of the owner vector */
		if (j + n > SHARED_PAGES)
			continue;

		for(k=0; k<n; k++) {
			if (page_owner[j+k] < RCCE_MAXNP)
				break;	// entry already owned => run too short
		}

		if (k == n) {
			/* claim the whole run for this core */
			memset((void*) (page_owner+j), RCCE_IAM, sizeof(uint8_t)*n);
			return shmbegin + (j << PAGE_SHIFT);
		}
	}

	return 0;	// no free run of n pages found
}
/*
 * Lock-protected front end of get_shpages(): allocates n contiguous
 * pages of SCC shared memory and returns their physical base address,
 * or 0 on failure.
 */
size_t shmalloc(uint32_t n)
{
	size_t base_addr;

	/* all owner-vector manipulations are serialized via the global SVM lock */
	RCCE_acquire_lock(RC_COREID[LOCK_ID]);
	base_addr = get_shpages(n);
	RCCE_release_lock(RC_COREID[LOCK_ID]);

	return base_addr;
}
/*
 * Allocate (or map an already allocated) shared page frame for the SVM
 * page containing addr.
 *
 * Called from the page-fault handler => interrupts are already disabled.
 *
 * Each SVM page has a 16-bit slot in the MPB (replicated to every core's
 * buffer) holding the page-frame offset within the shared-memory region;
 * a slot value of 0 means "no frame allocated yet".
 *
 * @param addr faulting virtual address
 * @param pgt  page table covering addr
 * @return 0 on success, or the result of svm_access_request() when write
 *         access must first be negotiated with the current page owner
 */
int svm_alloc_page(size_t addr, page_table_t* pgt)
{
#if USE_PERFCOUNTERS
	uint64_t start = rdtsc();
#endif
	uint32_t index2 = (addr >> 12) & 0x3FF;	// page-table index of addr
	size_t phyaddr;
	// MPB slot that stores the frame offset for this page
	t_vcharp mpb = (t_vcharp) ((size_t)(virt_to_phys(addr) >> PAGE_SHIFT) | ((size_t) RCCE_comm_buffer[RCCE_IAM] - RCCE_LINE_SIZE));
	uint16_t offset = 0xFFFF;

	addr &= PAGE_MASK; // align address to the page boundary

	RCCE_acquire_lock(RC_COREID[LOCK_ID]);
	iRCCE_get((t_vcharp) &offset, mpb, sizeof(uint16_t), RCCE_IAM);
	if (!offset) {
		int i;

		/* first touch: allocate a new frame and publish its offset to
		 * the MPB of every core */
		phyaddr = get_shpages(1);
		offset = (uint16_t) ((phyaddr - shmbegin) >> PAGE_SHIFT);
		for(i=0; i<RCCE_NP; i++)
			iRCCE_put(mpb, (t_vcharp) &offset, sizeof(uint16_t), i);
		RCCE_release_lock(RC_COREID[LOCK_ID]);

		/* keep only the flag bits, clear the init marker, map the frame */
		pgt->entries[index2] &= 0xFFF;
		pgt->entries[index2] &= ~PG_SVM_INIT;
		pgt->entries[index2] |= phyaddr|PG_PRESENT;
		phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT] = addr;
		tlb_flush_one_page(addr);
		alloc_page++;

		//kprintf("map new page frame 0x%x at 0x%x, flags0x%x, offset 0x%x, mpb 0x%x\n", phyaddr, addr, pgt->entries[index2] & 0xFFF, (int) offset, mpb);

#if USE_PERFCOUNTERS
		alloc_ticks += rdtsc() - start;
#endif
		return 0;
	} else {
		RCCE_release_lock(RC_COREID[LOCK_ID]);

		/* frame already exists: map it; only lazy-release pages are
		 * mapped present right away */
		phyaddr = shmbegin + ((size_t)offset << PAGE_SHIFT);
		pgt->entries[index2] &= 0xFFF;
		pgt->entries[index2] &= ~PG_SVM_INIT;
		if (pgt->entries[index2] & PG_SVM_LAZYRELEASE)
			pgt->entries[index2] |= phyaddr|PG_PRESENT;
		else
			pgt->entries[index2] |= phyaddr;
		phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT] = addr;
		tlb_flush_one_page(addr);
		map_page++;

		//kprintf("map existing page frame 0x%x at 0x%x, offset 0x%x, mpb 0x%x\n", phyaddr, addr, offset, mpb);

#if USE_PERFCOUNTERS
		alloc_ticks += rdtsc() - start;
#endif
		if (pgt->entries[index2] & PG_SVM_LAZYRELEASE)
			return 0;

		/* for a writable non-lazy page, ownership must be requested
		 * from the current owner */
		if (pgt->entries[index2] & PG_RW)
			return svm_access_request(addr);

		return 0;
	}
}
/*
* This function is called by the pagefault handler
 * => the interrupt flag is already cleared
*/
int svm_access_request(size_t addr)
{
#if USE_PERFCOUNTERS
uint64_t start = rdtsc();
#endif
size_t phyaddr = virt_to_phys(addr);
uint32_t pageid;
int remote_rank;
uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];
int ret;
if (phyaddr < shmbegin)
return -EINVAL;
@ -110,40 +297,74 @@ int svm_access_request(size_t addr)
return -EINVAL;
pageid = (phyaddr-shmbegin) >> PAGE_SHIFT;
if (page_owner[pageid] == RCCE_IAM)
remote_rank = page_owner[pageid];
if (remote_rank == RCCE_IAM)
return 0;
remote_rank = page_owner[pageid];
((size_t*) payload)[0] = RCCE_IAM;
((size_t*) payload)[1] = phyaddr;
//kprintf("send request (0x%x) to %d\n", addr, remote_rank);
/* send ping request */
iRCCE_mail_send(2*sizeof(size_t), SVM_REQUEST, 0, payload, remote_rank);
iRCCE_mail_send(2*sizeof(size_t), SVM_REQ, 0, (char*) payload, remote_rank);
NOP4;
icc_send_gic_irq(remote_rank);
request[remote_rank]++;
icc_send_gic_irq(remote_rank);
#if USE_RESP_MAIL
#if USE_PERFCOUNTERS
uint64_t wait_start = rdtsc();
#endif
// wait for response
icc_wait(SVM_RESP);
#if USE_PERFCOUNTERS
uint64_t res = rdtsc() - wait_start;
wait_ticks += res;
if (min_wait > res)
min_wait = res;
if (max_wait < res)
max_wait = res;
#endif
#else
NOP8;
while (page_owner[pageid] != RCCE_IAM) {
icc_mail_check();
NOP8;
}
#endif
return change_page_permissions(addr, addr+PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
addr &= PAGE_MASK; // align address to page boundary
ret = change_page_permissions(addr, addr + PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
#if USE_PERFCOUNTERS
request_ticks += rdtsc() - start;
#endif
return ret;
}
static atomic_int32_t size_counter = ATOMIC_INIT(0);
//static atomic_int32_t size_counter = ATOMIC_INIT(0);
void* svmmalloc(size_t size, uint32_t consistency)
void* svm_malloc(size_t size, uint32_t consistency)
{
size_t phyaddr, viraddr, i;
size_t viraddr, phyaddr, i, j;
t_vcharp mpb_addr;
uint32_t flags;
uint32_t map_flags = MAP_KERNEL_SPACE|MAP_MPE;
task_t* task = per_core(current_task);
uint32_t map_flags = MAP_KERNEL_SPACE|MAP_SVM_INIT;
uint8_t buffer[RCCE_LINE_SIZE]= {[0 ... RCCE_LINE_SIZE-1] = 0};
if(!(consistency & SVM_L2))
map_flags |= MAP_MPE;
else
task->flags |= TASK_L2;
if (consistency & SVM_STRONG)
map_flags |= MAP_SVM_STRONG;
else if (consistency & SVM_LAZYRELEASE)
map_flags |= MAP_SVM_LAZYRELEASE;
else return 0;
else return NULL;
// currently, we allocate memory in page size granulation
size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
@ -197,7 +418,8 @@ void* svmmalloc(size_t size, uint32_t consistency)
kprintf("shmmalloc returns 0x%x\n", viraddr);
return (void*) viraddr;
#else
#endif
#if 0
// iRCCE is not thread save => disable interrupts
flags = irq_nested_disable();
phyaddr = (size_t) RCCE_shmalloc(size);
@ -209,7 +431,7 @@ void* svmmalloc(size_t size, uint32_t consistency)
if (BUILTIN_EXPECT(!phyaddr, 0))
return NULL;
if (BUILTIN_EXPECT(phyaddr & 0xFFF, 0)) {
kprintf("RCCE_shmalloc returns not a page aligned physiacl address: 0x%x\n", phyaddr);
kprintf("RCCE_shmalloc returns not a page aligned physical address: 0x%x\n", phyaddr);
return NULL;
}
@ -221,16 +443,58 @@ void* svmmalloc(size_t size, uint32_t consistency)
return (void*) viraddr;
#endif
map_flags |= MAP_NO_ACCESS;
#ifndef SVM_WB
map_flags |= MAP_MPE;
#endif
viraddr = map_region(0, 0, size >> PAGE_SHIFT, map_flags);
kprintf("svmmalloc: viraddr 0x%x, size 0x%x, flags 0x%x\n", viraddr, size, map_flags);
map_flags |= MAP_REMAP;
for(i=0, j=0, mpb_addr=0; i<size; i+=PAGE_SIZE, j++) {
if (j % (RCCE_LINE_SIZE/sizeof(uint16_t)) == 0) {
flags = irq_nested_disable();
mpb_addr = RCCE_malloc(RCCE_LINE_SIZE);
if (BUILTIN_EXPECT(!mpb_addr, 0)) {
irq_nested_enable(flags);
kputs("RCCE_malloc failed\n");
goto out;
}
//kprintf("mpb_addr 0x%x\n", mpb_addr);
iRCCE_put(mpb_addr, buffer, RCCE_LINE_SIZE, RCCE_IAM);
irq_nested_enable(flags);
}
phyaddr = (size_t)mpb_addr + (j % (RCCE_LINE_SIZE/sizeof(uint16_t))) * sizeof(uint16_t);
phyaddr <<= PAGE_SHIFT;
//kprintf("viraddr 0x%x, phyaddr 0x%x, flags 0x%x\n", viraddr+i, phyaddr, map_flags);
map_region(viraddr+i, phyaddr, 1, map_flags);
}
return (void*) viraddr;
out:
// TODO: error handling
return NULL;
}
void svmfree(void* addr, size_t size)
void svm_free(void* addr, size_t size)
{
size_t phyaddr, i;
uint32_t flags;
//size_t phyaddr, i;
//uint32_t flags;
if (BUILTIN_EXPECT(!addr || !size, 0))
return;
// TODO: free is currently not implemented
return;
#if 0
phyaddr = virt_to_phys((size_t) addr);
// currently, we allocate memory in page size granulation
@ -246,6 +510,7 @@ void svmfree(void* addr, size_t size)
flags = irq_nested_disable();
RCCE_shfree((t_vcharp) phyaddr);
irq_nested_enable(flags);
#endif
}
/*
@ -253,8 +518,12 @@ void svmfree(void* addr, size_t size)
 * => Interrupt flag is already cleared.
*/
int svm_emit_page(size_t phyaddr, int ue)
{
{
#if USE_PERFCOUNTERS
uint64_t start = rdtsc();
#endif
uint32_t pageid;
int remote_rank;
//kprintf("Try to emit page 0x%x to %d\n", phyaddr, ue);
@ -264,19 +533,18 @@ int svm_emit_page(size_t phyaddr, int ue)
return -EINVAL;
pageid = (phyaddr-shmbegin) >> PAGE_SHIFT;
if (page_owner[pageid] != RCCE_IAM) {
remote_rank = page_owner[pageid];
if (remote_rank != RCCE_IAM) {
// Core is not the owner => forward request to new owner
int remote_rank;
uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];
kprintf("Ups, core %d is not owner of page 0x%x\n", RCCE_IAM, phyaddr);
remote_rank = page_owner[pageid];
((size_t*) payload)[0] = ue;
((size_t*) payload)[1] = phyaddr;
/* send ping request */
iRCCE_mail_send(2*sizeof(size_t), SVM_REQUEST, 0, payload, remote_rank);
iRCCE_mail_send(2*sizeof(size_t), SVM_REQ, 0, (char*)payload, remote_rank);
/* send interrupt */
icc_send_gic_irq(remote_rank);
@ -284,18 +552,28 @@ int svm_emit_page(size_t phyaddr, int ue)
} else {
size_t viraddr;
svm_flush();
page_owner[pageid] = ue;
svm_flush(phyaddr);
#if USE_RESP_MAIL
// send response back to ue
// ue is polling for the response => no irq is needed
iRCCE_mail_send(0, SVM_RESP, 0, NULL, ue);
#endif
emit[ue]++;
viraddr = phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT];
page_owner[pageid] = ue;
viraddr = phys2virt[pageid];
change_page_permissions(viraddr, viraddr+PAGE_SIZE, VMA_NOACCESS|VMA_READ|VMA_CACHEABLE);
}
#if USE_PERFCOUNTERS
emit_ticks += rdtsc() - start;
#endif
return 0;
}
#ifdef SVM_WB
#if 0
void svm_flush(void)
{
int z, tmp;
@ -319,6 +597,141 @@ void svm_flush(void)
}
#endif
/*
* Function to flush one page or entire cache.
*/
#ifdef SVM_WB
/*
 * Drop (invalidate) cached SVM data on this core.
 *
 * Tasks that use L2-cached SVM regions (TASK_L2) must execute the SCC's
 * CL1FLUSHMB instruction; for all other tasks nothing has to be done,
 * since svm_flush already invalidates the cache.
 */
void svm_invalidate(void)
{
	task_t* task = per_core(current_task);

	/* no action needed unless this task runs with L2-cached SVM */
	if (!(task->flags & TASK_L2))
		return;

	asm volatile ( ".byte 0x0f; .byte 0x0a;\n" ); // CL1FLUSHMB
}
/*
 * Write back SVM data from the cache hierarchy.
 *
 * @param phyaddr physical address of a single shared page to flush, or 0
 *                to flush the whole cache of the current task
 *
 * For MPB-typed pages (PG_MPE set in the page table) only an L1 flush is
 * needed; otherwise the relevant L2 sets are purged by reading dummy
 * data through svm_purge_set (the SCC's L2 cannot be flushed directly).
 */
void svm_flush(size_t phyaddr)
{
	task_t* task = per_core(current_task);
	page_dir_t* pgd = task->pgd;
	page_table_t* pgt = NULL;
	size_t step = 0;
	size_t stride = L2_LINESIZE;	// advance one cache line per purge
	size_t range = L2_WBSTRIDE;	// default: purge one full way's worth of sets
	size_t viraddr;
	uint32_t index1, index2;
	uint32_t flags;

	/* flush entire Cache if phyaddr == 0 */
	if(!phyaddr) {
		if( task->flags & TASK_L2 ){
			goto flush_l2;
		} else {
			goto flush_l1;
		}
	/* flush one page */
	} else {
		/* align the address to page boundaries */
		phyaddr &= ~(PAGE_SIZE-1);

		/* lookup pgt to check if L2 is enabled */
		viraddr = phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT];
		index1 = viraddr >> 22;			// page-directory index
		index2 = (viraddr >> 12) & 0x3FF;	// page-table index

		/* check if pgt is present */
		if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
			goto wrong_addr;

		/* page tables live in the 4 MiB window below KERNEL_SPACE */
		pgt = (page_table_t*)((KERNEL_SPACE - 1024 * PAGE_SIZE + index1 * PAGE_SIZE) & PAGE_MASK);

		if( pgt->entries[index2] & PG_MPE ) {
			goto flush_l1;
		} else {
			/* reduce the address to its set offset within one L2 way
			 * and purge only this page's sets */
			phyaddr = phyaddr % L2_WBSTRIDE;
			range = PAGE_SIZE;
			goto flush_l2;
		}
	}

	/*
	 * FLUSH L1 CACHE:
	 */
flush_l1:
	kputs("flush L1\n");
	*(int *)RCCE_fool_write_combine_buffer = 1;
	//__asm__ volatile ( "wbinvd;\n\t" );
	flush_cache();
	return;

flush_l2:
	/*
	 * FLUSH L2 CACHE:
	 * disable interrupts due to pseudo LRU behavior of L2 cache
	 */
	flags = irq_nested_disable();

	/* toggle between dummy areas so consecutive flushes never reread
	 * still-cached dummy lines */
	phyaddr += dummy_base + dummy_offset;
	kprintf("flush-l2: phyaddr 0x%x\n", phyaddr);
	if(dummy_offset)
		dummy_offset = 0;
	else
		dummy_offset = L2_CAPACITY;

	/* flush L1 first (presumably so dirty L1 lines reach L2 before the
	 * purge -- confirm against SCC cache documentation) */
	flush_cache();
	for( step = 0; step < range; step += stride )
		svm_purge_set( phyaddr + step );

	irq_nested_enable(flags);
	return;

wrong_addr:
	kputs("svm flush error: address not valid!\n");
	return;
}
#endif
/*
 * SVM barrier across all cores of RCCE_COMM_WORLD.
 *
 * For lazy-release consistency, local modifications are written back
 * (svm_flush) and stale cached copies dropped (svm_invalidate) before
 * the cores synchronize.
 *
 * The synchronization is a sense-reversing (Lubachevsky) barrier built
 * on the FPGA atomic counter AIREG1: the last arriving core resets the
 * counter and releases every member via RCCE flag writes; all others
 * wait on the release flag.
 * NOTE(review): this relies on reading incregs[AIREG1].counter having an
 * increment side effect in hardware -- confirm against the SCC FPGA
 * register specification.
 *
 * @param flags consistency model bits (SVM_LAZYRELEASE triggers flush)
 * @return always 0
 */
int svm_barrier(uint32_t flags)
{
	int i;
	RCCE_COMM *comm = &RCCE_COMM_WORLD;
	static int index = 0;	// barrier sense, toggled on every call

	if (flags & SVM_LAZYRELEASE) {
		svm_flush(0);
		svm_invalidate();
	}

#if 1
	// Lubachevsky barrier with flags
	index = !index;
	if (incregs[AIREG1].counter > (comm->size - 2)) {
		/* last core to arrive: reset counter, then wake all members */
		incregs[AIREG1].initializer = 0;
		while(incregs[AIREG1].initializer);
		for (i = 0; i < comm->size; i++)
			RCCE_flag_write(&release, index, comm->member[i]);
	} else RCCE_wait_until(release, index);
#else
	RCCE_barrier(&RCCE_COMM_WORLD);
#endif

	return 0;
}
//extern uint64_t check_ticks;
//extern uint64_t recv_ticks;
int svm_statistics(void)
{
uint32_t i;
@ -333,6 +746,18 @@ int svm_statistics(void)
for(i=0; i<RCCE_MAXNP; i++)
kprintf("\t%u", forward[i]);
kputs("\n");
kprintf("allocate page frame: %u\n", alloc_page);
kprintf("map page frame: %d\n", map_page);
#if USE_PERFCOUNTERS
kprintf("alloc ticks: %llu\n", alloc_ticks);
kprintf("request ticks: %llu\n", request_ticks);
kprintf("wait ticks: %llu\n", wait_ticks);
kprintf("emit ticks: %llu\n", emit_ticks);
kprintf("max wait: %llu\n", max_wait);
kprintf("min wait: %llu\n", min_wait);
//kprintf("check_ticks: %llu\n", check_ticks);
//kprintf("recv_tick: %llu\n", recv_ticks);
#endif
return 0;
}

View file

@ -1,4 +1,4 @@
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c iRCCE_mailbox.c RCCE_malloc.c RCCE_shmalloc.c RCCE_debug.c RCCE_qsort.c RCCE_DCMflush.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_admin.c # RCCE_power_management.c
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c iRCCE_mailbox.c RCCE_malloc.c RCCE_debug.c RCCE_qsort.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_reduce.c RCCE_admin.c # RCCE_shmalloc.c RCCE_DCMflush.c RCCE_power_management.c
ASM_source :=
MODULE := arch_x86_scc

View file

@ -370,6 +370,9 @@ int RCCE_init(
// initialize RCCE_malloc
RCCE_malloc_init(RCCE_comm_buffer[RCCE_IAM],RCCE_BUFF_SIZE);
// MetalSVM has its own system to manage the shared regions
#if 0
#ifdef SHMADD
RCCE_shmalloc_init(RC_SHM_BUFFER_START()+RCCE_SHM_BUFFER_offset, RCCE_SHM_SIZE_MAX);
@ -379,6 +382,7 @@ int RCCE_init(
#endif
#else
RCCE_shmalloc_init(RC_SHM_BUFFER_START(), RCCE_SHM_SIZE_MAX);
#endif
#endif
// initialize the (global) flag bookkeeping data structure

View file

@ -27,12 +27,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "RCCE_lib.h"
#include <asm/RCCE_lib.h>
#define MIN(x,y) ( (x) < (y) ? (x) : (y) )
#define MAX(x,y) ( (x) > (y) ? (x) : (y) )
#include <stdlib.h>
#include <string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#ifdef CONFIG_ROCKCREEK
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_reduce_general
@ -185,3 +187,4 @@ int RCCE_reduce(
RCCE_reduce_general(inbuf, outbuf, num, type, op, root, all, comm)));
}
#endif

View file

@ -104,8 +104,7 @@ static int iRCCE_mail_fetch(
if( iRCCE_mail_garbage.first == NULL ) {
iRCCE_mail_garbage.last = NULL;
}
}
else {
} else {
header = (iRCCE_MAIL_HEADER*)kmalloc(sizeof(iRCCE_MAIL_HEADER));
}
@ -120,8 +119,7 @@ static int iRCCE_mail_fetch(
iRCCE_last_mail[rank] = 1;
iRCCE_mailbox_close_one( rank, 0 ); // we can close respective mailbox
iRCCE_mail_release( &header );
}
else {
} else {
// check mail priority
int prio = header->prio;
@ -144,77 +142,63 @@ static int iRCCE_mail_fetch(
}
//------------------------------------------------------------------------------
// FUNCTION: iRCCE_mailbox_check
//------------------------------------------------------------------------------
/**
* @brief routine to check for new mail in mailboxes
*
* This function has to be called from time to time. It empties all mailboxes of
* the participating cores if the corresponding sent-flag is set and the mailbox
* is not closed. After calling iRCCE_mail_fetch the sent-flag has to be reset.
* Here we have to use a little trick because we can only write to the MPB in
* cacheline granularity. We set the appropriate flag to zero and afterwords
* touch the MPB on another cacheline. That causes the write combine buffer to
* write out the data.
*/
//------------------------------------------------------------------------------
static iRCCE_MAIL_HEADER dummy_header = {0, 0, 0, NULL, 0, 0, 0, {[0 ... iRCCE_MAIL_HEADER_PAYLOAD-1] = 0} };
static int iRCCE_mailbox_check(void) {
int i,j;
uint32_t flags;
/* disable interrupts */
flags = irq_nested_disable();
for( j=1; j<RCCE_NP; ++j ) {
i = (j+RCCE_IAM)%RCCE_NP;
// only check open mailboxes
if( iRCCE_mailbox_status[i] == iRCCE_MAILBOX_OPEN ) {
RC_cache_invalidate();
if( iRCCE_mailbox_recv[i]->sent ) {
iRCCE_mail_fetch(i);
// reset senders flag
RC_cache_invalidate();
*(iRCCE_mailbox_recv[i]) = dummy_header;
}
}
}
/* enable interrupts */
irq_nested_enable(flags);
return iRCCE_SUCCESS;
}
//------------------------------------------------------------------------------
// FUNCTION: iRCCE_mail_check
//------------------------------------------------------------------------------
/**
* @brief routine to check one specific mailbox
* @param sender is the core ID from which the mailbox is checked
* @brief routine to check one specific mailbox or all
* @param sender is the core ID from which the mailbox is checked use
* iRCCE_MAILBOX_ALL as wildcard to check all mailboxes
*
* This function may be called by the user application to check one specific
* mailbox. It is recommended to use it in combination with an inter core
* interrupt.
* mailbox. It is recommended to use it in combination with an inter-core
* interrupt. It empties one or all mailboxes of the participating cores if the
* corresponding sent-flag is set and the mailbox is not closed. After calling
* iRCCE_mail_fetch the sent-flag has to be reset. Here we have to use a little
* trick because we can only write to the MPB in cacheline granularity. We set
* the appropriate flag to zero and afterwords touch the MPB on another
* cacheline. That causes the write combine buffer to write out the data.
*
*/
//------------------------------------------------------------------------------
const static iRCCE_MAIL_HEADER dummy_header =
{0, 0, 0, NULL, 0, 0, 0, {[0 ... iRCCE_MAIL_HEADER_PAYLOAD-1] = 0} };
int iRCCE_mail_check(int sender) {
uint32_t flags;
int j, i;
int found = 0;
// check all mailboxes in case of wildcard
if( sender == iRCCE_MAILBOX_ALL ) {
iRCCE_mailbox_check();
/* disable interrupts */
flags = irq_nested_disable();
return iRCCE_SUCCESS;
for( j=1; j<RCCE_NP; ++j ) {
i = (j+RCCE_IAM)%RCCE_NP;
// only check open mailboxes
if( iRCCE_mailbox_status[i] == iRCCE_MAILBOX_OPEN ) {
RC_cache_invalidate();
if( iRCCE_mailbox_recv[i]->sent ) {
if( !found ) found = 1;
iRCCE_mail_fetch(i);
// reset senders flag
RC_cache_invalidate();
*(iRCCE_mailbox_recv[i]) = dummy_header;
}
}
}
/* enable interrupts */
irq_nested_enable(flags);
return (found == 1)? iRCCE_SUCCESS : iRCCE_MAILBOX_EMPTY;
}
// verify sender's ID
if( (sender < 0) || (sender > RCCE_NP) || (sender == RCCE_IAM) ) {
if(BUILTIN_EXPECT((sender < 0) || (sender > RCCE_NP) || (sender == RCCE_IAM), 0)) {
return iRCCE_ERROR_SOURCE;
}
@ -223,26 +207,28 @@ int iRCCE_mail_check(int sender) {
return iRCCE_MAILBOX_CLOSED;
}
RC_cache_invalidate();
if( iRCCE_mailbox_recv[sender]->sent ) {
/* disable interrupts */
flags = irq_nested_disable();
iRCCE_mail_fetch(sender);
// reset senders flag
for(i=0; i<5; i++) {
RC_cache_invalidate();
*(iRCCE_mailbox_recv[sender]) = dummy_header;
if( iRCCE_mailbox_recv[sender]->sent ) {
/* disable interrupts */
flags = irq_nested_disable();
iRCCE_mail_fetch(sender);
// reset senders flag
RC_cache_invalidate();
*(iRCCE_mailbox_recv[sender]) = dummy_header;
/* enable interrupts */
irq_nested_enable(flags);
/* enable interrupts */
irq_nested_enable(flags);
return iRCCE_SUCCESS;
}
else {
return iRCCE_MAILBOX_EMPTY;
return iRCCE_SUCCESS;
}
NOP8;
}
return iRCCE_MAILBOX_EMPTY;
}
//------------------------------------------------------------------------------
@ -256,25 +242,28 @@ int iRCCE_mail_check(int sender) {
*
* The function checks if the receive queue with highest priority (priority 0)
* contains any mail headers. In this case we pop the first element of that list
* in a FIFO maner. Otherwise iRCCE_mailbox_check() has to be called. Afterwards
* the first element of a non-empty receive queue with highest priority is
* returned.
* in a FIFO maner. Afterwards the first element of a non-empty receive queue
* with highest priority is returned.
*/
//------------------------------------------------------------------------------
int iRCCE_mail_recv(
iRCCE_MAIL_HEADER** header // pointer to incoming header
) { // (memory allocated by iRCCE)
int i;
int i, found = 0;
uint32_t flags;
iRCCE_MAIL_HEADER* help_header;
// if no mail queued pointer must be ZERO
*header = NULL;
/* disable interrupts */
flags = irq_nested_disable();
// check priority queues
for( i=0; i<iRCCE_PRIOS; ++i ) {
if ( iRCCE_mailbox_recv_queue[i].first ) {
/* disable interrupts */
flags = irq_nested_disable();
if ( iRCCE_mailbox_recv_queue[i].first ) {
help_header = iRCCE_mailbox_recv_queue[i].first;
iRCCE_mailbox_recv_queue[i].first =
@ -283,23 +272,21 @@ int iRCCE_mail_recv(
if( iRCCE_mailbox_recv_queue[i].first == NULL ) {
iRCCE_mailbox_recv_queue[i].last = NULL;
}
/* prepare return value */
help_header->next = NULL;
*header = help_header;
/* enable interrupts */
irq_nested_enable(flags);
return iRCCE_SUCCESS;
found = 1;
break;
}
}
// no mail queued
*header = NULL;
return iRCCE_MAILBOX_EMPTY;
/* enable interrupts */
irq_nested_enable(flags);
return (found == 1)? iRCCE_SUCCESS : iRCCE_MAILBOX_EMPTY;
}
@ -385,27 +372,35 @@ int iRCCE_mail_send(
uint32_t flags;
// verify sender's ID
if( (dest < 0) || (dest > RCCE_NP) || (dest == RCCE_IAM) ) {
if(BUILTIN_EXPECT((dest < 0) || (dest > RCCE_NP) || (dest == RCCE_IAM),0)) {
return iRCCE_ERROR_TARGET;
}
// if dest mailbox is full, check for incoming mail
RC_cache_invalidate();
while( iRCCE_mailbox_send[dest]->sent ) {
iRCCE_mailbox_check();
// iRCCE_mail_check(iRCCE_MAILBOX_ALL);
RC_cache_invalidate();
NOP8;
NOP8;
NOP8;
}
/* disable interrupts */
flags = irq_nested_disable();
// check if mailbox is closed
RCCE_acquire_lock( dest );
RC_cache_invalidate();
if( iRCCE_mailbox_send[dest]->closed ) {
RCCE_release_lock( dest );
/* enable interrupts */
irq_nested_enable(flags);
return iRCCE_MAILBOX_CLOSED;
}
/* disable interrupts */
// flags = irq_nested_disable();
// prepare header
iRCCE_MAIL_HEADER header = { RCCE_IAM, size, tag, NULL, prio,
RCCE_FLAG_UNSET, RCCE_FLAG_UNSET,
@ -427,11 +422,11 @@ int iRCCE_mail_send(
*(int *)RCCE_fool_write_combine_buffer = 1;
RC_cache_invalidate();
/* enable interrupts */
// irq_nested_enable(flags);
RCCE_release_lock( dest );
/* enable interrupts */
irq_nested_enable(flags);
return iRCCE_SUCCESS;
}
@ -476,7 +471,7 @@ int iRCCE_last_mail_recv(void) {
//------------------------------------------------------------------------------
int iRCCE_mailbox_wait(void) {
while( iRCCE_last_mail_recv() == iRCCE_LAST_MAILS_NOT_RECV ) {
iRCCE_mailbox_check();
iRCCE_mail_check(iRCCE_MAILBOX_ALL);
}
return iRCCE_SUCCESS;
@ -533,7 +528,7 @@ int iRCCE_mailbox_flush(void) {
* last-mail.
*
* This function closes a mailbox of the given rank. If the check flag is set
* an iRCCE_mailbox_check()-call is performed. The close procedure has to be
* an iRCCE_mail_check()-call is performed. The close procedure has to be
* locked to be sure that no UE sends any mail while closing the mailbox.
*/
//------------------------------------------------------------------------------

View file

@ -26,6 +26,7 @@
#include <asm/SCC_API.h>
#include <asm/icc.h>
#include <asm/svm.h>
#include <asm/limits.h>
#define IRQ_STATUS 0xD000
#define IRQ_MASK 0xD200
@ -35,7 +36,6 @@
#include <net/rckemac.h>
bootinfo_t* bootinfo = (bootinfo_t*) SCC_BOOTINFO;
/* PSE bit for Pentium+ equals MPE (message buffer enable) flag in RCK! So, use it to create _PAGE_MPB symbol... */
@ -97,9 +97,11 @@ static inline void icc_mail_check_tag(iRCCE_MAIL_HEADER* mail) {
case PING_REQ:
iRCCE_mail_send(0, PING_RESP, 0, NULL, mail->source);
break;
case SVM_REQUEST:
case SVM_REQ:
svm_emit_page(((size_t*) mail->payload)[1], ((size_t*) mail->payload)[0]);
break;
case SVM_RESP:
break;
case NOISE:
// kprintf( "XXX " );
default:
@ -162,9 +164,6 @@ static void icc_handler(struct state *s)
while( iRCCE_mail_recv(&header) == iRCCE_SUCCESS ) {
icc_mail_check_tag(header);
iRCCE_mail_release(&header);
NOP8;
NOP8;
NOP8;
}
}
@ -297,9 +296,9 @@ int icc_halt(void)
return 0;
}
#define ROUNDS 1000
#define CORE_A 0 // sender
#define CORE_B 1 // receiver
#define ROUNDS 20000
#define CORE_A RC_RCCEID[0] // sender
#define CORE_B RC_RCCEID[30] // receiver
int icc_send_gic_irq(int core_num) {
volatile uint32_t* irq_request = (volatile uint32_t*)(FPGA_BASE+IRQ_REQUEST+RC_MY_COREID*8);
@ -347,7 +346,7 @@ int icc_mail_ping(void)
/* wait for response */
do {
res = iRCCE_mail_check(CORE_B);
res = iRCCE_mail_check(iRCCE_MAILBOX_ALL); //CORE_B);
} while( res != iRCCE_SUCCESS );
/* release mail */
@ -358,7 +357,7 @@ int icc_mail_ping(void)
else {
/* wait for request */
do {
res = iRCCE_mail_check(CORE_A);
res = iRCCE_mail_check(iRCCE_MAILBOX_ALL); //CORE_A);
} while( res != iRCCE_SUCCESS );
/* check mail */
@ -434,21 +433,92 @@ int icc_mail_ping_irq(void)
kprintf( "timer = %d\n", timer );
kprintf( "mail_pingpong needs in average %d nsec (%d ticks)!\n",
timer*1000/(2*ROUNDS*get_cpu_frequency()), timer/(2*ROUNDS) );
irq_nested_enable(flags);
return 0;
}
#define _iRQ_NOISE_ 0
/*
 * Ping-pong latency/jitter benchmark over the iRCCE mailbox system.
 *
 * Only CORE_A runs the measurement: it sends ROUNDS+1 ping requests to
 * CORE_B (raising a GIC interrupt for each), busy-waits for the
 * response, and times each round trip with rdtsc(). The first round is
 * treated as warm-up and excluded from min/max/sum. Results are printed
 * in nanoseconds.
 *
 * @return always 0
 */
int icc_mail_ping_jitter(void)
{
	kprintf( "Hello from jitter_test ... \n" );

	/* return if not core A */
	if( RCCE_IAM != CORE_A ) return 0;

	uint32_t flags;
	uint64_t timer = 0;
	uint64_t max = 0;
	/* NOTE(review): ULONG_MAX is only 32-bit on ILP32 targets although
	 * min is uint64_t -- confirm this cannot truncate the initial min */
	uint64_t min = ULONG_MAX;
	uint64_t sum = 0;
	int i;
	int res;
	iRCCE_MAIL_HEADER* recv_header = NULL;

	kprintf( "my_rank = %d\n", RCCE_IAM );
	kprintf( "rem_rank = %d\n", CORE_B );
	kprintf( "rounds = %d\n", ROUNDS );

	// disable interrupts so the measurement is not disturbed
	flags = irq_nested_disable();

	for( i=0; i<ROUNDS+1; ++i ) {
		/* start timer */
		timer = rdtsc();

		/* send ping request */
		iRCCE_mail_send(0, PING_REQ, 0, NULL, CORE_B);

		/* send interrupt */
		icc_send_gic_irq(CORE_B);

		/* wait for response */
		do {
			res = iRCCE_mail_check(CORE_B);
		} while( res != iRCCE_SUCCESS );
		iRCCE_mail_recv(&recv_header);
		iRCCE_mail_release(&recv_header);

		/* stop timer and update eval values */
		timer = rdtsc() - timer;
		if( i > 0 ) {	// skip warm-up round
			max = ( max < timer )? timer : max;
			min = ( min > timer )? timer : min;
			sum += timer;
		}
	}

	/* 533 is presumably the core clock in MHz and /2 converts the round
	 * trip to a one-way latency -- TODO derive from get_cpu_frequency()
	 * as icc_mail_ping_irq does */
	kprintf( "Average was: %d nsec\n", sum*1000/(2*ROUNDS*533) );
	kprintf( "Maximum was: %d nsec\n", max*1000/(2*533) );
	kprintf( "Minimum was: %d nsec\n", min*1000/(2*533) );
	kprintf( "Jitter was: %d nsec\n", (max-min)*1000/(2*533) );

	irq_nested_enable(flags);

	return 0;
}
#undef _IRQ_NOISE_
#define NOISE_PRIO 1
int icc_mail_noise(void) {
int i, j, res;
int num_ranks = RCCE_num_ues();
int count = 0;
iRCCE_MAIL_HEADER* recv_mail = NULL;
/* timer vars */
uint64_t timer;
uint64_t tmr;
uint64_t tmr_send = 0;
uint64_t tmr_recv = 0;
uint64_t tmr_release = 0;
uint64_t tmr_chck = 0;
kprintf( "my_ue = %d\n", RCCE_IAM );
// leave function if not participating
if( !((RCCE_IAM == 4) || (RCCE_IAM == 2) || (RCCE_IAM == CORE_B)) ) {
if( (RCCE_IAM == CORE_A) || (RCCE_IAM == CORE_B) ) {
kprintf( "mail_noise: leaving" );
return -1;
}
@ -456,46 +526,84 @@ int icc_mail_noise(void) {
kprintf( "Hello from icc_mail_noise: my_ue = %d\n", RCCE_IAM );
kprintf( "num_ues = %d\n", num_ranks );
for( i=0; i<10000; ++i ) {
timer = rdtsc();
for( i=0; i<40000; ++i ) {
if( !(i%1000) ) kprintf( "%d ", i );
tmr = rdtsc();
iRCCE_mail_check(iRCCE_MAILBOX_ALL);
tmr = rdtsc() - tmr;
tmr_chck += tmr;
/* send a mail to each UE */
for( j=0; j<num_ranks; ++j ) {
if( !((j == 4) || (j == 2)/* || (j == CORE_B) */) )
continue;
if( (j == CORE_A) || (j == CORE_B) ) continue;
/* send noise mail */
iRCCE_mail_send(0, NOISE, 1, NULL, j);
#ifdef _IRQ_NOISE_
kprintf( "sending irq ... " );
icc_send_gic_irq(j);
#endif
iRCCE_mail_recv(&recv_mail);
icc_mail_check_tag(recv_mail);
if( recv_mail ) iRCCE_mail_release(&recv_mail);
tmr = rdtsc();
iRCCE_mail_send(0, NOISE, NOISE_PRIO, NULL, j);
tmr = rdtsc() - tmr;
tmr_send += tmr;
tmr = rdtsc();
res = iRCCE_mail_recv(&recv_mail);
tmr = rdtsc() - tmr;
tmr_recv += tmr;
if( res == iRCCE_SUCCESS ) {
icc_mail_check_tag(recv_mail);
tmr = rdtsc();
iRCCE_mail_release(&recv_mail);
tmr = rdtsc() - tmr;
tmr_release += tmr;
count++;
}
}
}
kprintf( "XXX XXX XXX" );
do {
tmr = rdtsc();
iRCCE_mail_check(iRCCE_MAILBOX_ALL);
tmr = rdtsc() - tmr;
tmr_chck += tmr;
tmr = rdtsc();
res = iRCCE_mail_recv(&recv_mail);
icc_mail_check_tag(recv_mail);
if( recv_mail ) iRCCE_mail_release(&recv_mail);
tmr = rdtsc() - tmr;
tmr_recv += tmr;
if( res == iRCCE_SUCCESS ) {
icc_mail_check_tag(recv_mail);
tmr = rdtsc();
iRCCE_mail_release(&recv_mail);
tmr = rdtsc() - tmr;
tmr_release += tmr;
count++;
}
} while( res == iRCCE_SUCCESS );
timer = rdtsc() - timer;
kprintf( "Count = %d\n", count );
kprintf( "Time: %d ms\n", timer/(1000*get_cpu_frequency()) );
kprintf( "Time in send: %d ms\n", tmr_send/(1000*get_cpu_frequency()) );
kprintf( "Time in recv: %d ms\n", tmr_recv/(1000*get_cpu_frequency()) );
kprintf( "Time in chck: %d ms\n", tmr_chck/(1000*get_cpu_frequency()) );
kprintf( "Time in release: %d ms\n", tmr_release/(1000*get_cpu_frequency()) );
kprintf( "XXX XXX XXX" );
return 0;
}
/*
* Routine to check mailboxes. If irq = 1 is passed only those boxes are checked that
* refere to the cores with set bit in status register.
*
* Routine to check mailboxes.
*/
void icc_mail_check(void)
{
iRCCE_MAIL_HEADER* header = NULL;
iRCCE_MAIL_HEADER* header = NULL;
uint32_t flags;
/* disable interrupts */
@ -507,13 +615,45 @@ void icc_mail_check(void)
while( iRCCE_mail_recv(&header) == iRCCE_SUCCESS ) {
icc_mail_check_tag(header);
iRCCE_mail_release( &header );
NOP8;
NOP8;
NOP8;
}
/* enable interrupts */
irq_nested_enable(flags);
}
//uint64_t check_ticks = 0;
//uint64_t recv_ticks = 0;
void icc_wait(int tag)
{
iRCCE_MAIL_HEADER* header = NULL;
uint32_t flags;
//uint64_t start;
/* disable interrupts */
flags = irq_nested_disable();
retry:
//start = rdtsc();
iRCCE_mail_check(iRCCE_MAILBOX_ALL);
//check_ticks += rdtsc() - start;
//start = rdtsc();
/* empty mail queue */
while(iRCCE_mail_recv(&header) == iRCCE_SUCCESS ) {
icc_mail_check_tag(header);
if (header->tag == tag) {
iRCCE_mail_release( &header );
goto out;
} else iRCCE_mail_release( &header );
}
//recv_ticks += rdtsc() - start;
goto retry;
out:
//recv_ticks += rdtsc() - start;
/* enable interrupts */
irq_nested_enable(flags);
}
#endif

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.6 KiB

View file

@ -14,7 +14,7 @@
*
* The MetalSVM project is hosted in a Git repository. To check it out, just type:
*
* \verbatim$ git clone gitosis@git.lfbs.rwth-aachen.de:metalsvm.git \endverbatim
* \verbatim$ git clone git://git.lfbs.rwth-aachen.de:metalsvm.git \endverbatim
*
* If you are asked for a password you are not authorized to clone the repository. In this case you will need to get your public SSH key authorized.
*
@ -25,7 +25,7 @@
* \verbatim
$ cd MetalSVM
$ cp Makefile.example Makefile
$ cp include/metalsvm/config.h.example include/metalsvm/config.h \endverbatim
$ (cd include/metalsvm; cp config.h.example config.h) \endverbatim
*
* The standard configuration works on usual PC hardware configurations as well as in emulators.
*
@ -90,20 +90,8 @@ $ make SCC \endverbatim
* @section runsccmc Running MetalSVM on multiple SCC cores
*
* -# Build the kernel like described above (items 1-7) and change to the \c tools directory.
* -# The \c scc_bootinfo.asm file contains boot-information relevant to the SCC-cores.
* It is generated automatically by the \c bootinfo.sh script.\n
* \n
* The following example generates the \c scc_bootinfo.asm file needed for use of the cores 0 and 1:
* \verbatim$ ./bootinfo.sh 0x01000000 initrd.img 2 533 0 1 > scc_bootinfo.asm \endverbatim
* Parameters describe the following:
* -# First parameter describes the address at which the initrd shall be located at later (You will not need to change this: 0x00100000)
* -# Second is path to the initrd image file
* -# The other parameters are analogous to RCCE-App-parameters. This example starts MetalSVM on cores 0 and 1, clocked with 533MHz.
* -# Now the file \c metalsvm.mt has to be edited. It defines the layout of the memory image (Where the kernels will be located in the memory later). For the example from above it looks like the following:
* \verbatim# pid mch-route mch-dest-id mch-offset-base testcase
0x00 0x00 6 0x00 metalsvm.obj
0x01 0x00 6 0x01 metalsvm.obj \endverbatim
* This locates two instances of MetalSVM on core 0 and 1, supplied with memory from memory controller 0. See \c sccMerge \c -h for more information.
* -# Now the file \c metalsvm.mt can be edited, depending on how many cores you want MetalSVM running.
* Just remove the cores which shall be unaffected. Having a slim \c metalsvm.tm accelerates the build procedure.
* -# The final image must be generated then with \code$ make SCC\endcode
* -# A directory \c obj was created, containing the final MetalSVM Image. This image can now be loaded with the following command: \code$ sccBoot -g obj\endcode
* -# Everything has been placed in the cores' memory. To release the reset pins of the corresponding cores, type \code$ sccReset -r 0x00 0x01\endcode

View file

@ -19,7 +19,7 @@ create_kernel_task (&id, initd, NULL, NORMAL_PRIO);
...\endcode
*
* \c Initd starts all the other processes and then exits.
* The list of processes to start is defined in \c kernel/tests.c
* The list of processes to start is defined in \c apps/tests.c
* within \c test_init():
*
* \code

View file

@ -1,9 +1,13 @@
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>

View file

@ -2,16 +2,19 @@
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$tabs.css" rel="stylesheet" type="text/css"/>
<link href="stylesheet.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="$relpath$jquery.js"></script>
<script type="text/javascript" src="$relpath$dynsections.js"></script>
$treeview
$search
$mathjax
<link href="stylesheet.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div! -->
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
@ -20,15 +23,14 @@ $mathjax
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo">
<div id="lfbslogo"><img src="../img/lfbs_logo.gif" alt="Lehrstuhl f&uuml;r Betriebssysteme" /></div>
<div id="ostrichlogo"><img src="../img/mike_ostrich.jpg" alt="Mike Ostrich, MetalSVM's official mascot" /></div>
<div id="lfbslogo"><img src="../img/lfbs_logo.gif" alt="Chair for Operating Systems" /></div>
<div id="rwthlogo"><img src="../img/rwth_logo.gif" alt="RWTH Aachen University" /></div>
</td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<div id="projectname">$projectname</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
@ -49,3 +51,4 @@ $mathjax
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->

View file

@ -2,24 +2,22 @@
<!-- Navigation index tabs for HTML output -->
<navindex>
<tab type="mainpage" visible="yes" title=""/>
<tab type="pages" visible="yes" title="Manuals"
intro="This page contains manual articles to help using MetalSVM:"/>
<tab type="pages" visible="yes" title="" intro=""/>
<tab type="modules" visible="yes" title="" intro=""/>
<tab type="namespaces" visible="yes" title="">
<tab type="namespaces" visible="yes" title="" intro=""/>
<tab type="namespacelist" visible="yes" title="" intro=""/>
<tab type="namespacemembers" visible="yes" title="" intro=""/>
</tab>
<tab type="classes" visible="yes" title="">
<tab type="classes" visible="yes" title="" intro=""/>
<tab type="classlist" visible="yes" title="" intro=""/>
<tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
<tab type="hierarchy" visible="yes" title="" intro=""/>
<tab type="classmembers" visible="yes" title="" intro=""/>
</tab>
<tab type="files" visible="yes" title="">
<tab type="files" visible="yes" title="" intro=""/>
<tab type="filelist" visible="yes" title="" intro=""/>
<tab type="globals" visible="yes" title="" intro=""/>
</tab>
<tab type="dirs" visible="yes" title="" intro=""/>
<tab type="examples" visible="yes" title="" intro=""/>
</navindex>
@ -135,11 +133,11 @@
<briefdescription visible="yes"/>
<groupgraph visible="$GROUP_GRAPHS"/>
<memberdecl>
<classes visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<dirs visible="yes" title=""/>
<nestedgroups visible="yes" title=""/>
<dirs visible="yes" title=""/>
<files visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<classes visible="yes" title=""/>
<defines title=""/>
<typedefs title=""/>
<enums title=""/>

View file

@ -2,7 +2,8 @@
body, table, div, p, dl {
font-family: Lucida Grande, Verdana, Geneva, Arial, sans-serif;
font-size: 12px;
font-size: 13px;
line-height: 1.3;
}
/* @group Heading Levels */
@ -25,6 +26,19 @@ h3 {
font-size: 100%;
}
h1, h2, h3, h4, h5, h6 {
-webkit-transition: text-shadow 0.5s linear;
-moz-transition: text-shadow 0.5s linear;
-ms-transition: text-shadow 0.5s linear;
-o-transition: text-shadow 0.5s linear;
transition: text-shadow 0.5s linear;
margin-right: 15px;
}
h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
text-shadow: 0 0 15px cyan;
}
dt {
font-weight: bold;
}
@ -121,12 +135,12 @@ a.el {
a.elRef {
}
a.code {
color: #4665A2;
a.code, a.code:visited {
color: #4665A2;
}
a.codeRef {
color: #4665A2;
a.codeRef, a.codeRef:visited {
color: #4665A2;
}
/* @end */
@ -135,20 +149,72 @@ dl.el {
margin-left: -1cm;
}
.fragment {
font-family: monospace, fixed;
font-size: 105%;
pre.fragment {
border: 1px solid #C4CFE5;
background-color: #FBFCFD;
padding: 4px 6px;
margin: 4px 8px 4px 2px;
overflow: auto;
word-wrap: break-word;
font-size: 9pt;
line-height: 125%;
font-family: monospace, fixed;
font-size: 105%;
}
pre.fragment {
border: 1px solid #C4CFE5;
div.fragment {
padding: 4px;
margin: 4px;
background-color: #FBFCFD;
padding: 4px 6px;
margin: 4px 8px 4px 2px;
overflow: auto;
word-wrap: break-word;
font-size: 9pt;
line-height: 125%;
border: 1px solid #C4CFE5;
}
div.line {
font-family: monospace, fixed;
font-size: 13px;
min-height: 13px;
line-height: 1.0;
text-wrap: unrestricted;
white-space: -moz-pre-wrap; /* Moz */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
white-space: pre-wrap; /* CSS3 */
word-wrap: break-word; /* IE 5.5+ */
text-indent: -53px;
padding-left: 53px;
padding-bottom: 0px;
margin: 0px;
-webkit-transition-property: background-color, box-shadow;
-webkit-transition-duration: 0.5s;
-moz-transition-property: background-color, box-shadow;
-moz-transition-duration: 0.5s;
-ms-transition-property: background-color, box-shadow;
-ms-transition-duration: 0.5s;
-o-transition-property: background-color, box-shadow;
-o-transition-duration: 0.5s;
transition-property: background-color, box-shadow;
transition-duration: 0.5s;
}
div.line.glow {
background-color: cyan;
box-shadow: 0 0 10px cyan;
}
span.lineno {
padding-right: 4px;
text-align: right;
border-right: 2px solid #0F0;
background-color: #E8E8E8;
white-space: pre;
}
span.lineno a {
background-color: #D8D8D8;
}
span.lineno a:hover {
background-color: #C8C8C8;
}
div.ah {
@ -188,7 +254,7 @@ body {
div.contents {
margin-top: 10px;
margin-left: 8px;
margin-left: 12px;
margin-right: 8px;
}
@ -198,6 +264,8 @@ td.indexkey {
border: 1px solid #C4CFE5;
margin: 2px 0px 2px 0;
padding: 2px 10px;
white-space: nowrap;
vertical-align: top;
}
td.indexvalue {
@ -290,6 +358,13 @@ span.vhdllogic {
color: #ff0000
}
blockquote {
background-color: #F7F8FB;
border-left: 2px solid #9CAFD4;
margin: 0 24px 0 4px;
padding: 0 12px 0 16px;
}
/* @end */
/*
@ -343,6 +418,24 @@ table.memberdecls {
padding: 0px;
}
.memberdecls td {
-webkit-transition-property: background-color, box-shadow;
-webkit-transition-duration: 0.5s;
-moz-transition-property: background-color, box-shadow;
-moz-transition-duration: 0.5s;
-ms-transition-property: background-color, box-shadow;
-ms-transition-duration: 0.5s;
-o-transition-property: background-color, box-shadow;
-o-transition-duration: 0.5s;
transition-property: background-color, box-shadow;
transition-duration: 0.5s;
}
.memberdecls td.glow {
background-color: cyan;
box-shadow: 0 0 15px cyan;
}
.mdescLeft, .mdescRight,
.memItemLeft, .memItemRight,
.memTemplItemLeft, .memTemplItemRight, .memTemplParams {
@ -404,14 +497,28 @@ table.memberdecls {
padding: 0;
margin-bottom: 10px;
margin-right: 5px;
-webkit-transition: box-shadow 0.5s linear;
-moz-transition: box-shadow 0.5s linear;
-ms-transition: box-shadow 0.5s linear;
-o-transition: box-shadow 0.5s linear;
transition: box-shadow 0.5s linear;
display: table !important;
width: 100%;
}
.memitem.glow {
box-shadow: 0 0 15px cyan;
}
.memname {
white-space: nowrap;
font-weight: bold;
margin-left: 6px;
}
.memname td {
vertical-align: bottom;
}
.memproto, dl.reflist dt {
border-top: 1px solid #A8B8D9;
border-left: 1px solid #A8B8D9;
@ -420,21 +527,21 @@ table.memberdecls {
color: #253555;
font-weight: bold;
text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
/* opera specific markup */
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
border-top-right-radius: 8px;
border-top-left-radius: 8px;
/* firefox specific markup */
-moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
-moz-border-radius-topright: 8px;
-moz-border-radius-topleft: 8px;
/* webkit specific markup */
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
-webkit-border-top-right-radius: 8px;
-webkit-border-top-left-radius: 8px;
background-image:url('nav_f.png');
background-repeat:repeat-x;
background-color: #E2E8F2;
/* opera specific markup */
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
border-top-right-radius: 4px;
border-top-left-radius: 4px;
/* firefox specific markup */
-moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
-moz-border-radius-topright: 4px;
-moz-border-radius-topleft: 4px;
/* webkit specific markup */
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
-webkit-border-top-right-radius: 4px;
-webkit-border-top-left-radius: 4px;
}
@ -442,23 +549,24 @@ table.memberdecls {
border-bottom: 1px solid #A8B8D9;
border-left: 1px solid #A8B8D9;
border-right: 1px solid #A8B8D9;
padding: 2px 5px;
padding: 6px 10px 2px 10px;
background-color: #FBFCFD;
border-top-width: 0;
background-image:url('nav_g.png');
background-repeat:repeat-x;
background-color: #FFFFFF;
/* opera specific markup */
border-bottom-left-radius: 8px;
border-bottom-right-radius: 8px;
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
/* firefox specific markup */
-moz-border-radius-bottomleft: 8px;
-moz-border-radius-bottomright: 8px;
-moz-border-radius-bottomleft: 4px;
-moz-border-radius-bottomright: 4px;
-moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 60%, #F7F8FB 95%, #EEF1F7);
/* webkit specific markup */
-webkit-border-bottom-left-radius: 8px;
-webkit-border-bottom-right-radius: 8px;
-webkit-border-bottom-left-radius: 4px;
-webkit-border-bottom-right-radius: 4px;
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.6,#FFFFFF), color-stop(0.60,#FFFFFF), color-stop(0.95,#F7F8FB), to(#EEF1F7));
}
dl.reflist dt {
@ -485,9 +593,13 @@ dl.reflist dd {
.paramname em {
font-style: normal;
}
.paramname code {
line-height: 14px;
}
.params, .retval, .exception, .tparams {
border-spacing: 6px 2px;
margin-left: 0px;
padding-left: 0px;
}
.params .paramname, .retval .paramname {
@ -505,105 +617,114 @@ dl.reflist dd {
vertical-align: top;
}
table.mlabels {
border-spacing: 0px;
}
td.mlabels-left {
width: 100%;
padding: 0px;
}
td.mlabels-right {
vertical-align: bottom;
padding: 0px;
white-space: nowrap;
}
span.mlabels {
margin-left: 8px;
}
span.mlabel {
background-color: #728DC1;
border-top:1px solid #5373B4;
border-left:1px solid #5373B4;
border-right:1px solid #C4CFE5;
border-bottom:1px solid #C4CFE5;
text-shadow: none;
color: white;
margin-right: 4px;
padding: 2px 3px;
border-radius: 3px;
font-size: 7pt;
white-space: nowrap;
}
/* @end */
/* @group Directory (tree) */
/* these are for tree view when not used as main index */
/* for the tree view */
.ftvtree {
font-family: sans-serif;
margin: 0px;
div.directory {
margin: 10px 0px;
border-top: 1px solid #A8B8D9;
border-bottom: 1px solid #A8B8D9;
width: 100%;
}
/* these are for tree view when used as main index */
.directory {
font-size: 9pt;
font-weight: bold;
margin: 5px;
.directory table {
border-collapse:collapse;
}
.directory h3 {
margin: 0px;
margin-top: 1em;
font-size: 11pt;
.directory td {
margin: 0px;
padding: 0px;
vertical-align: top;
}
/*
The following two styles can be used to replace the root node title
with an image of your choice. Simply uncomment the next two styles,
specify the name of your image and be sure to set 'height' to the
proper pixel height of your image.
*/
/*
.directory h3.swap {
height: 61px;
background-repeat: no-repeat;
background-image: url("yourimage.gif");
}
.directory h3.swap span {
display: none;
}
*/
.directory > h3 {
margin-top: 0;
.directory td.entry {
white-space: nowrap;
padding-right: 6px;
}
.directory p {
margin: 0px;
white-space: nowrap;
.directory td.entry a {
outline:none;
}
.directory div {
display: none;
margin: 0px;
.directory td.entry a img {
border: none;
}
.directory td.desc {
width: 100%;
padding-left: 6px;
padding-right: 6px;
border-left: 1px solid rgba(0,0,0,0.05);
}
.directory tr.even {
padding-left: 6px;
background-color: #F7F8FB;
}
.directory img {
vertical-align: -30%;
}
/* these are for tree view when not used as main index */
.directory-alt {
font-size: 100%;
font-weight: bold;
.directory .levels {
white-space: nowrap;
width: 100%;
text-align: right;
font-size: 9pt;
}
.directory-alt h3 {
margin: 0px;
margin-top: 1em;
font-size: 11pt;
.directory .levels span {
cursor: pointer;
padding-left: 2px;
padding-right: 2px;
color: #3D578C;
}
.directory-alt > h3 {
margin-top: 0;
}
.directory-alt p {
margin: 0px;
white-space: nowrap;
}
.directory-alt div {
display: none;
margin: 0px;
}
.directory-alt img {
vertical-align: -30%;
}
/* @end */
div.dynheader {
margin-top: 8px;
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
address {
@ -613,6 +734,8 @@ address {
table.doxtable {
border-collapse:collapse;
margin-top: 4px;
margin-bottom: 4px;
}
table.doxtable td, table.doxtable th {
@ -626,7 +749,6 @@ table.doxtable th {
font-size: 110%;
padding-bottom: 4px;
padding-top: 5px;
text-align:left;
}
table.fieldtable {
@ -760,9 +882,7 @@ div.summary a
div.ingroups
{
margin-left: 5px;
font-size: 8pt;
padding-left: 5px;
width: 50%;
text-align: left;
}
@ -791,47 +911,73 @@ dl
padding: 0 0 0 10px;
}
dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug
/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */
dl.section
{
border-left:4px solid;
padding: 0 0 0 6px;
margin-left: 0px;
padding-left: 0px;
}
dl.note
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #D0C000;
}
dl.warning, dl.attention
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #FF0000;
}
dl.pre, dl.post, dl.invariant
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #00D000;
}
dl.deprecated
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #505050;
}
dl.todo
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #00C0E0;
}
dl.test
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #3030E0;
}
dl.bug
{
margin-left:-7px;
padding-left: 3px;
border-left:4px solid;
border-color: #C08050;
}
dl.section dd {
margin-bottom: 6px;
}
#projectlogo
{
text-align: center;
@ -857,9 +1003,19 @@ dl.bug
top: 5px;
right: 5px;
}
#ostrichlogo
{
position: absolute;
top: 5px;
left: 5px;
}
#projectname
{
position: relative;
left: 50px;
font: 300% Tahoma, Arial,sans-serif;
margin: 0px;
padding: 2px 0px;
@ -867,6 +1023,8 @@ dl.bug
#projectbrief
{
position: relative;
left: 50px;
font: 120% Tahoma, Arial,sans-serif;
margin: 0px;
padding: 0px;
@ -929,6 +1087,79 @@ dl.citelist dd {
padding:5px 0;
}
div.toc {
padding: 14px 25px;
background-color: #F4F6FA;
border: 1px solid #D8DFEE;
border-radius: 7px 7px 7px 7px;
float: right;
height: auto;
margin: 0 20px 10px 10px;
width: 200px;
}
div.toc li {
background: url("bdwn.png") no-repeat scroll 0 5px transparent;
font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif;
margin-top: 5px;
padding-left: 10px;
padding-top: 2px;
}
div.toc h3 {
font: bold 12px/1.2 Arial,FreeSans,sans-serif;
color: #4665A2;
border-bottom: 0 none;
margin: 0;
}
div.toc ul {
list-style: none outside none;
border: medium none;
padding: 0px;
}
div.toc li.level1 {
margin-left: 0px;
}
div.toc li.level2 {
margin-left: 15px;
}
div.toc li.level3 {
margin-left: 30px;
}
div.toc li.level4 {
margin-left: 45px;
}
.inherit_header {
font-weight: bold;
color: gray;
cursor: pointer;
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
.inherit_header td {
padding: 6px 0px 2px 5px;
}
.inherit {
display: none;
}
tr.heading h2 {
margin-top: 12px;
margin-bottom: 4px;
}
@media print
{
#top { display: none; }
@ -946,20 +1177,5 @@ dl.citelist dd {
overflow:inherit;
display:inline;
}
pre.fragment
{
overflow: visible;
text-wrap: unrestricted;
white-space: -moz-pre-wrap; /* Moz */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
white-space: pre-wrap; /* CSS3 */
word-wrap: break-word; /* IE 5.5+ */
}
}
.textblock
{
width: 800px;
textalign: justify;
}

View file

@ -55,6 +55,7 @@
#include <asm/SCC_API.h>
#include <asm/scc_memcpy.h>
#include <asm/svm.h>
#include <net/mmnif.h>
@ -120,7 +121,9 @@
#define MMNIF_PSEUDO_SOCKET_START 0x31337
#if LWIP_SOCKET
static int npseudosocket = MMNIF_PSEUDO_SOCKET_START;
#endif
static spinlock_t pseudolock;
/* "message passing buffer" specific constants:
@ -527,6 +530,7 @@ static int mmnif_commit_packet(uint8_t dest, uint32_t addr)
return -1;
}
#if LWIP_SOCKET
/* mmnif_commit_packet: this function set the state of the (in advance)
* allocated packet to RDY so the recieve queue knows that it can be
* processed further
@ -549,6 +553,7 @@ static int mmnif_commit_packet_bypass(uint8_t dest, uint32_t addr, int dest_sock
return -1;
}
#endif
/* mmnif_rxbuff_free() : the opposite to mmnif_rxbuff_alloc() a from the receiver
* already processed chunk of memory is freed so that it can be allocated again
@ -691,6 +696,7 @@ static bypass_rxdesc_t *mmnif_hashlookup(int s)
return 0;
}
#if LWIP_SOCKET
/* mmnif_hashadd(): adds a entry to the hashtable
* by the socket
*/
@ -840,6 +846,7 @@ int mmnif_send(int s, void *data, size_t size, int flags)
return lwip_send(s, data, size, flags);
}
#endif
/* mmnif_link_layer(): wrapper function called by ip_output()
* adding all needed headers for the link layer
@ -883,8 +890,15 @@ err_t mmnif_init(struct netif *netif)
// align mpb size to the granularity of a page size
header_size = (header_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (RCCE_IAM == 0)
header_start_address = (void*) shmalloc((MMNIF_CORES * header_size) >> PAGE_SHIFT);
RCCE_bcast((char*) &header_start_address, sizeof(header_start_address), 0, RCCE_COMM_WORLD);
DEBUGPRINTF("shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#else
header_start_address = (void*) RCCE_shmalloc(header_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
header_start_address = (void*) map_region(0, (size_t) header_start_address, (MMNIF_CORES * header_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_WT | MAP_NO_CACHE);
@ -909,8 +923,15 @@ err_t mmnif_init(struct netif *netif)
#else
// align size to the granularity of a page size
heap_size = (heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (RCCE_IAM == 0)
heap_start_address = (void*) shmalloc((heap_size * MMNIF_CORES) >> PAGE_SHIFT);
RCCE_bcast((char*) &heap_start_address, sizeof(heap_start_address), 0, RCCE_COMM_WORLD);
DEBUGPRINTF("shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#else
heap_start_address = (void*) RCCE_shmalloc(heap_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
#if USE_CACHE
@ -1179,6 +1200,7 @@ out:
return;
}
#if LWIP_SOCKET
/* mmnif_rx_bypass(): recieve packets
* with insane speed ;)
*/
@ -1539,6 +1561,7 @@ int mmnif_closesocket(int s)
return 0;
}
#endif
/* mmnif_irqhandler():
* handles the incomint interrupts
@ -1561,11 +1584,15 @@ static void mmnif_irqhandler(struct state* s)
mmnif = (mmnif_t *) mmnif_dev->state;
if (!mmnif->check_in_progress) {
#if NO_SYS
mmnif_rx((void*) mmnif_dev);
#else
if (tcpip_callback_with_block(mmnif_rx, (void*) mmnif_dev, 0) == ERR_OK) {
mmnif->check_in_progress = 1;
} else {
DEBUGPRINTF("rckemacif_handler: unable to send a poll request to the tcpip thread\n");
}
#endif
}
tmp = ReadConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1));
@ -1586,7 +1613,12 @@ err_t mmnif_shutdown(void)
return ERR_MEM;
}
#if NO_SYS
netif_set_down(mmnif_dev);
err = ERR_OK;
#else
err = netifapi_netif_set_down(mmnif_dev);
#endif
//RCCE_shfree(mpb_start_address);
mmnif_dev = NULL;

View file

@ -28,7 +28,7 @@
#define AF_MMNIF_NET 0x42
#define MMNIF_AUTOACTIVATE_FAST_SOCKETS 1
#define MMNIF_AUTOACTIVATE_FAST_SOCKETS LWIP_SOCKET
#if MMNIF_AUTOACTIVATE_FAST_SOCKETS

View file

@ -536,6 +536,9 @@ void rckemacif_handler(struct state* s, uint32_t status)
write_offset = *((volatile unsigned int*) (rckemacif->rx_buffer)) & 0xFFFF;
//write_offset = read_emac(rckemacif->num_emac, EMAC_RX_CONTROL + EMAC_RX_BUFFER_WRITE_OFFSET, rckemacif->core);
if ((write_offset != 0) && (rckemacif->rx_read_offset != write_offset) && !rckemacif->polling) {
#if NO_SYS
rckemacif_poll((void*) write_offset);
#else
if (tcpip_callback_with_block(rckemacif_poll, (void*) write_offset, 0) == ERR_OK) {
/* Mask eMAC interrupt */
unsigned int tmp = *((volatile unsigned int*) (FPGA_BASE + IRQ_MASK + rckemacif->core * 2 * 4));
@ -546,6 +549,7 @@ void rckemacif_handler(struct state* s, uint32_t status)
} else {
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_handler: unable to send a poll request to the tcpip thread\n"));
}
#endif
}
}

View file

@ -256,7 +256,7 @@ static void rtl_tx_inthandler(struct netif* netif)
}
}
/* this function is called in the context of the tcpip thread */
/* this function is called in the context of the tcpip thread or the irq handler (by using NO_SYS) */
static void rtl8139if_poll(void* ctx)
{
rtl_rx_inthandler(mynetif);
@ -276,11 +276,15 @@ static void rtl8139if_handler(struct state* s)
break;
if ((isr_contents & ISR_ROK) && !rtl8139if->polling) {
#if NO_SYS
rtl8139if_poll(NULL);
#else
if (tcpip_callback_with_block(rtl8139if_poll, NULL, 0) == ERR_OK) {
rtl8139if->polling = 1;
} else {
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_handler: unable to send a poll request to the tcpip thread\n"));
}
#endif
}
if (isr_contents & ISR_TOK)

View file

@ -444,7 +444,7 @@ int initrd_init(void)
multiboot_module_t* mmodule = NULL;
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
mmodule = (multiboot_module_t*) mb_info->mods_addr;
mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
mods_count = mb_info->mods_count;
}
#endif
@ -493,7 +493,7 @@ int initrd_init(void)
/* For every module.. */
#ifdef CONFIG_MULTIBOOT
for(i=0; i<mods_count; i++) {
initrd_header_t* header = (initrd_header_t*) mmodule[i].mod_start;
initrd_header_t* header = (initrd_header_t*) ((size_t) mmodule[i].mod_start);
#elif defined(CONFIG_ROCKCREEK)
for(i=0; i<1; i++) {
initrd_header_t* header = (initrd_header_t*) bootinfo->addr;
@ -501,8 +501,8 @@ int initrd_init(void)
initrd_file_desc_t* file_desc;
vfs_node_t* new_node;
if (header->magic != INITRD_MAGIC_NUMBER) {
kprintf("Invalid magic number for a init ram disk\n");
if (BUILTIN_EXPECT(header->magic != INITRD_MAGIC_NUMBER, 0)) {
kprintf("Invalid magic number for a init ram disk: 0x%x\n", header->magic);
continue;
}

View file

@ -24,14 +24,13 @@
extern "C" {
#endif
#define METALSVM_VERSION "0.1"
#define METALSVM_VERSION "0.9"
#define MAX_TASKS 16
#define MAX_CORES 1
#define MAX_FNAME 128
#define DEFAULT_STACK_SIZE (32*1024)
#define KERNEL_STACK_SIZE 8192
#define KMSG_SIZE (128*1024)
#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define CACHE_LINE 64
#define MAILBOX_SIZE 8
@ -57,13 +56,20 @@ extern "C" {
#define CONFIG_KEYBOARD
#define CONFIG_MULTIBOOT
//#define CONFIG_ROCKCREEK
//#define CONFIG_TICKLESS
#ifdef CONFIG_ROCKCREEK
#ifndef __SIZEOF_POINTER__
#define __SIZEOF_POINTER__ 4
#endif
#endif
// RCCE specific flags
#define SCC
#define COPPERRIDGE
#define MS_BAREMETAL
//#define GORY
#define SHMADD
//#define SHMADD
#define SHMDBG
//#define SHMADD_CACHEABLE
#define SCC_BOOTINFO 0x80000

View file

@ -0,0 +1,92 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef __CONFIG_H__
#define __CONFIG_H__
#ifdef __cplusplus
extern "C" {
#endif
#define METALSVM_VERSION "0.9"
#define MAX_TASKS 16
#define MAX_CORES 1
#define MAX_FNAME 128
#define DEFAULT_STACK_SIZE (32*1024)
#define KERNEL_STACK_SIZE 8192
#define KMSG_SIZE (128*1024)
#define PAGE_SHIFT 12
#define CACHE_LINE 32
#define MAILBOX_SIZE 8
#define TIMER_FREQ 100 /* in HZ */
#define CLOCK_TICK_RATE 1193182 /* 8254 chip's internal oscillator frequency */
#define INT_SYSCALL 0x80
#define KERNEL_SPACE (1*1024*1024*1024)
#define VIDEO_MEM_ADDR 0xB8000 // the video memora address
#define SMP_SETUP_ADDR 0x07000
#define BYTE_ORDER LITTLE_ENDIAN
/*
* address space / (page_size * sizeof(uint8_t))
* x86_32 => 4 GB / (4096 * 8)
*/
#define BITMAP_SIZE 1048576
//#define CONFIG_PCI
#define CONFIG_LWIP
//#define CONFIG_VGA
//#define CONFIG_UART
//#define CONFIG_KEYBOARD
//#define CONFIG_MULTIBOOT
#define CONFIG_ROCKCREEK
//#define CONFIG_TICKLESS
#ifdef CONFIG_ROCKCREEK
#ifndef __SIZEOF_POINTER__
#define __SIZEOF_POINTER__ 4
#endif
#endif
// RCCE specific flags
#define SCC
#define COPPERRIDGE
#define MS_BAREMETAL
//#define GORY
//#define SHMADD
#define SHMDBG
//#define SHMADD_CACHEABLE
#define SCC_BOOTINFO 0x80000
#define BUILTIN_EXPECT(exp, b) __builtin_expect((exp), (b))
//#define BUILTIN_EXPECT(exp, b) (exp)
#define NORETURN __attribute__((noreturn))
#define STDCALL __attribute__((stdcall))
#define HAVE_ARCH_MEMSET
#define HAVE_ARCH_MEMCPY
#define HAVE_ARCH_STRLEN
#define HAVE_ARCH_STRCPY
#define HAVE_ARCH_STRNCPY
#ifdef __cplusplus
}
#endif
#endif

View file

@ -35,7 +35,7 @@ static inline int isascii(int c)
/** Applies an and-operation to
* push the value of 'c' into the ASCII-range */
static inline int toascii(c)
static inline int toascii(int c)
{
return (((unsigned char)(c))&0x7f);
}

View file

@ -42,20 +42,35 @@
/*file descriptor init*/
#define NR_OPEN 100
#define _FOPEN (-1) /* from sys/file.h, kernel use only */
#define _FREAD 0x0001 /* read enabled */
#define _FWRITE 0x0002 /* write enabled */
#define _FAPPEND 0x0008 /* append (writes guaranteed at the end) */
#define _FMARK 0x0010 /* internal; mark during gc() */
#define _FDEFER 0x0020 /* internal; defer for next gc pass */
#define _FASYNC 0x0040 /* signal pgrp when data ready */
#define _FSHLOCK 0x0080 /* BSD flock() shared lock present */
#define _FEXLOCK 0x0100 /* BSD flock() exclusive lock present */
#define _FCREAT 0x0200 /* open with file create */
#define _FTRUNC 0x0400 /* open with truncation */
#define _FEXCL 0x0800 /* error on open if file exists */
#define _FNBIO 0x1000 /* non blocking I/O (sys5 style) */
#define _FSYNC 0x2000 /* do all writes synchronously */
#define _FNONBLOCK 0x4000 /* non blocking I/O (POSIX style) */
#define _FNDELAY _FNONBLOCK /* non blocking I/O (4.2 style) */
#define _FNOCTTY 0x8000 /* don't assign a ctty on this open */
/*open flags*/
#define O_RDONLY 0
#define O_WRONLY 1
#define O_RDONLY 0
#define O_WRONLY 1
#define O_RDWR 2
#define O_CREAT 64
#define O_EXCL 128
//#define O_NOCTTY 256
#define O_TRUNC 512
#define O_APPEND 1024
//#define O_NDELAY 2048
//#define O_SYNC 4096
//#define O_ASYNC 8192
#define O_APPEND _FAPPEND
#define O_CREAT _FCREAT
#define O_TRUNC _FTRUNC
#define O_EXCL _FEXCL
#define O_SYNC _FSYNC
#define O_NONBLOCK _FNONBLOCK
#define O_NOCTTY _FNOCTTY
/*lseek defines*/
#ifndef SEEK_SET
@ -115,7 +130,7 @@ typedef struct vfs_node {
uint32_t uid;
/// The owning group.
uint32_t gid;
/// Includes the node type. See #defines above.
/// Includes the node type. See the defines above.
uint32_t type;
/// Open handler function pointer
open_type_t open;

View file

@ -32,6 +32,10 @@ extern "C" {
typedef unsigned int tid_t;
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#if MAX_CORES == 1
#define per_core(name) name
#define DECLARE_PER_CORE(type, name) extern type name;

View file

@ -49,8 +49,9 @@ extern "C" {
#endif
#define MAP_SVM_STRONG (1 << 9)
#define MAP_SVM_LAZYRELEASE (1 << 10)
#define MAP_NO_ACCESS (1 << 11)
#define MAP_REMAP (1 << 12)
#define MAP_SVM_INIT (1 << 11)
#define MAP_NO_ACCESS (1 << 12)
#define MAP_REMAP (1 << 13)
void NORETURN abort(void);
@ -82,6 +83,19 @@ void* mem_allocation(size_t sz, uint32_t flags);
*/
void kfree(void*, size_t);
/** @brief Create a new stack for a new task
*
* @return start address of the new stack
*/
void* create_stack(void);
/** @brief Delete stack of a finished task
*
* @param Pointer to
* @return 0 on success
*/
int destroy_stack(task_t*);
/** @brief String to long
*
* This one is documented in newlib library.

View file

@ -61,12 +61,52 @@ int multitasking_init(void);
* @param ep Pointer to the entry function for the new task
* @param arg Arguments the task shall start with
* @param prio Desired priority of the new kernel task
* @param core_id Start the new task on the core with this id
*
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id);
/** @brief create a kernel task.
*
* @param id The value behind this pointer will be set to the new task's id
* @param ep Pointer to the entry function for the new task
* @param arg Arguments the task shall start with
* @param prio Desired priority of the new kernel task
*
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
static inline int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
uint32_t core_id;
#if MAX_CORES > 1
uint32_t flags = irq_nested_disable();
core_id = CORE_ID;
irq_nested_enable(flags);
#else
core_id = 0;
#endif
return create_kernel_task_on_core(id, ep, arg, prio, core_id);
}
/** @brief Create a user level task.
*
* @param id The value behind this pointer will be set to the new task's id
* @param fname Filename of the executable to start the task with
* @param argv Pointer to arguments array
* @param core_id Start the new task on the core with this id
*
* @return
* - 0 on success
* - -EINVAL (-22) or -ENOMEM (-12)on failure
*/
int create_user_task_on_core(tid_t* id, const char* fame, char** argv, uint32_t core_id);
/** @brief Create a user level task.
*
@ -78,7 +118,20 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
* - 0 on success
* - -EINVAL (-22) or -ENOMEM (-12)on failure
*/
int create_user_task(tid_t* id, const char* fame, char** argv);
static inline int create_user_task(tid_t* id, const char* fame, char** argv)
{
uint32_t core_id;
#if MAX_CORES > 1
uint32_t flags = irq_nested_disable();
core_id = CORE_ID;
irq_nested_enable(flags);
#else
core_id = 0;
#endif
return create_user_task_on_core(id, fame, argv, core_id);
}
/** @brief Block current task until the child task is terminated
* @param result The terminated child's return value
@ -86,6 +139,7 @@ int create_user_task(tid_t* id, const char* fame, char** argv);
*/
tid_t wait(int32_t* result);
#ifndef CONFIG_TICKLESS
/** @brief Update the load of the current core
*
* This function is called from the timer interrupt
@ -106,12 +160,17 @@ void dump_load(void);
*/
void load_balancing(void);
#endif
#endif
/** @brief Task switcher
*
* Timer-interrupted use of this function for task switching
*
* @return
* - 0 no context switch
* - !0 address of the old stack pointer
*/
void scheduler(void);
size_t** scheduler(void);
/** @brief Wake up a blocked task
*
@ -142,6 +201,11 @@ int block_current_task(void);
*/
int set_timer(uint64_t deadline);
/** @biref check is a timer is expired
*
*/
void check_timers(void);
/** @brief Abort current task */
void NORETURN abort(void);
@ -201,12 +265,19 @@ uint32_t get_highest_priority(void);
*/
void reschedule(void);
/** @brief check, if the tick counter has to be updated
*/
void check_ticks(void);
static inline void check_workqueues_in_irqhandler(int irq)
{
#ifdef CONFIG_ROCKCREEK
if (irq != 124)
icc_mail_check();
#endif
check_ticks();
check_timers();
if (irq < 0)
check_scheduling();
}

View file

@ -59,9 +59,9 @@ extern "C" {
#define TASK_FPU_INIT (1 << 0)
#define TASK_FPU_USED (1 << 1)
#define TASK_TIMER (1 << 2)
#define TASK_L2 (1 << 3)
typedef int (*entry_point_t)(void*);
typedef int (STDCALL *internal_entry_point_t)(void*);
struct page_dir;
/** @brief The task_t structure */
@ -70,6 +70,10 @@ typedef struct task {
tid_t id;
/// Task status (INVALID, READY, RUNNING, ...)
uint32_t status;
/// copy of the stack pointer before a context switch
size_t* last_stack_pointer;
/// start address of the stack
void* stack;
/// Additional status flags. For instance, to signalize the using of the FPU
uint8_t flags;
/// Task priority
@ -82,29 +86,29 @@ typedef struct task {
struct task* prev;
/// last core id on which the task was running
uint32_t last_core;
/// Usage in number of pages
/// usage in number of pages
atomic_int32_t user_usage;
/// Avoids concurrent access to the page directory
/// avoids concurrent access to the page directory
spinlock_t pgd_lock;
/// pointer to the page directory
struct page_dir* pgd;
/// Lock for the VMA_list
/// lock for the VMA_list
spinlock_t vma_lock;
/// List of VMAs
/// list of VMAs
vma_t* vma_list;
/// Filedescriptor table
/// filedescriptor table
filp_t* fildes_table;
/// starting time/tick of the task
uint64_t start_tick;
/// Start address of the heap
/// start address of the heap
size_t start_heap;
/// End address of the heap
/// end address of the heap
size_t end_heap;
/// LwIP error code
int lwip_err;
/// Mail inbox
/// mail inbox
mailbox_wait_msg_t inbox;
/// Mail outbox array
/// mail outbox array
mailbox_wait_msg_t* outbox[MAX_TASKS];
/// FPU state
union fpu_state fpu;
@ -122,12 +126,14 @@ typedef struct {
task_t* old_task;
/// total number of tasks in the queue
uint32_t nr_tasks;
#ifndef CONFIG_TICKLESS
// current load = average number of tasks in the queue (1-minute average)
uint32_t load[3];
// help counter to determine the the cpu load
int32_t load_counter;
// help counter to avoid "over balancing"
int32_t balance_counter;
#endif
/// indicates the used priority queues
uint32_t prio_bitmap;
/// a queue for each priority

View file

@ -19,6 +19,7 @@
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <metalsvm/processor.h>
#include <metalsvm/time.h>
@ -26,6 +27,9 @@
#include <metalsvm/errno.h>
#include <metalsvm/init.h>
#include <metalsvm/fs.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
#endif
#ifdef CONFIG_LWIP
#include <lwip/init.h>
#include <lwip/sys.h>
@ -46,8 +50,6 @@
#include <asm/RCCE_lib.h>
#endif
void echo_init(void);
void netio_init(void);
int test_init(void);
/*
@ -65,6 +67,9 @@ int lowlevel_init(void)
koutput_init();
//kprintf("Now, the BSS section (0x%x - 0x%x) is initialized.\n", (size_t) &bss_start, (size_t) &bss_end);
#ifdef CONFIG_MULTIBOOT
//kprintf("Start kernel with command line \"%s\"\n", mb_info->cmdline);
#endif
return 0;
}
@ -80,7 +85,9 @@ static int init_netifs(void)
struct ip_addr ipaddr;
struct ip_addr netmask;
struct ip_addr gw;
#if !NO_SYS
err_t err;
#endif
kputs("Initialize NICs...\n");
@ -95,6 +102,12 @@ static int init_netifs(void)
IP4_ADDR(&netmask, 255,255,255,0);
/* Bring up the network interface */
#if NO_SYS
netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, ethernet_input);
netif_set_default(&default_netif);
netif_set_up(&default_netif);
#else
if ((err = netifapi_netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, tcpip_input)) != ERR_OK) {
kprintf("Unable to add the network interface: err = %d\n", err);
return -ENODEV;
@ -102,6 +115,7 @@ static int init_netifs(void)
netifapi_netif_set_default(&default_netif);
netifapi_netif_set_up(&default_netif);
#endif
/* Bring up the intra network interface */
struct ip_addr intra_ipaddr;
@ -120,6 +134,12 @@ static int init_netifs(void)
* - mmnif_init : the initialization which has to be done in order to use our interface
* - ethernet_input : tells him that he should get ethernet input (inclusice ARP)
*/
#if NO_SYS
netif_add(&mmnif_netif, &intra_ipaddr, &intra_netmask, &intra_gw, NULL, mmnif_init, ethernet_input);
/* tell lwip all initialization is done and we want to set it ab */
netif_set_up(&mmnif_netif);
#else
if ((err = netifapi_netif_add(&mmnif_netif, &intra_ipaddr, &intra_netmask, &intra_gw, NULL, mmnif_init, tcpip_input)) != ERR_OK)
{
kprintf("Unable to add the intra network interface: err = %d\n", err);
@ -128,6 +148,7 @@ static int init_netifs(void)
/* tell lwip all initialization is done and we want to set it ab */
netifapi_netif_set_up(&mmnif_netif);
#endif
#else
/* Clear network address because we use DHCP to get an ip address */
IP4_ADDR(&gw, 0,0,0,0);
@ -135,15 +156,25 @@ static int init_netifs(void)
IP4_ADDR(&netmask, 0,0,0,0);
/* Bring up the network interface */
#if NO_SYS
uint32_t flags = irq_nested_disable();
netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, ethernet_input);
netif_set_default(&default_netif);
#else
if ((err = netifapi_netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, tcpip_input)) != ERR_OK) {
kprintf("Unable to add the network interface: err = %d\n", err);
return -ENODEV;
}
netifapi_netif_set_default(&default_netif);
#endif
kprintf("Starting DHCPCD...\n");
#if NO_SYS
dhcp_start(&default_netif);
irq_nested_enable(flags);
#else
netifapi_dhcp_start(&default_netif);
#endif
int mscnt = 0;
/* wait for ip address */
@ -163,21 +194,33 @@ static int init_netifs(void)
#endif
#ifdef CONFIG_LWIP
#if !NO_SYS
static void tcpip_init_done(void* arg)
{
sys_sem_t* sem = (sys_sem_t*)arg;
kprintf("LwIP's tcpip thread has task id %d\n", per_core(current_task)->id);
sys_sem_signal(sem);
}
#endif
#endif
int network_shutdown(void)
{
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
mmnif_shutdown();
#if NO_SYS
netif_set_down(&default_netif);
#else
netifapi_netif_set_down(&default_netif);
#endif
#elif defined(CONFIG_LWIP) && defined(CONFIG_PCI)
#if NO_SYS
dhcp_stop(&default_netif);
#else
netifapi_dhcp_stop(&default_netif);
#endif
#endif
return 0;
@ -228,36 +271,40 @@ static void list_root(void) {
int initd(void* arg)
{
#ifdef CONFIG_LWIP
#if !NO_SYS
sys_sem_t sem;
#endif
tid_t id;
char* argv[] = {"/bin/rlogind ", NULL};
// Initialize lwIP modules
#if NO_SYS
lwip_init();
#else
if(sys_sem_new(&sem, 0) != ERR_OK)
LWIP_ASSERT("Failed to create semaphore", 0);
tcpip_init(tcpip_init_done, &sem);
sys_sem_wait(&sem);
kprintf("TCP/IP initialized.\n");
sys_sem_free(&sem);
#endif
#if defined(CONFIG_LWIP) && (defined(CONFIG_PCI) || defined(CONFIG_ROCKCREEK))
init_netifs();
/* test if interface is really up */
if (!netif_is_up(&default_netif)) {
if (!netif_is_up(&default_netif))
kputs("network interface is not up\n");
return -ENODEV;
}
#endif
// start echo, netio and rlogind
//echo_init();
#if !NO_SYS
create_user_task(&id, "/bin/rlogind", argv);
kprintf("Create rlogind with id %u\n", id);
//netio_init();
#endif
#endif
list_root();
// list_root();
test_init();
return 0;

View file

@ -50,7 +50,12 @@ int smp_main(void)
{
irq_enable();
#ifdef CONFIG_TICKLESS
disable_timer_irq();
#endif
while(1) {
check_workqueues();
HALT;
}
@ -92,6 +97,10 @@ int main(void)
kprintf("Current allocated memory: %u KBytes\n", atomic_int32_read(&total_allocated_pages)*(PAGE_SIZE/1024));
kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));
#ifdef CONFIG_TICKLESS
disable_timer_irq();
#endif
sleep(5);
create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
kprintf("Create initd with id %u\n", id);

View file

@ -47,10 +47,10 @@
* A task's id will be its position in this array.
*/
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
[0] = {0, TASK_IDLE, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#ifndef CONFIG_TICKLESS
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
[0] = {task_table+0, NULL, 0, {[0 ... 2] = 0}, TIMER_FREQ/5, TIMER_FREQ/2, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}, \
@ -59,8 +59,23 @@ static runqueue_t runqueues[MAX_CORES] = { \
static runqueue_t runqueues[1] = { \
[0] = {task_table+0, NULL, 0, {[0 ... 2] = 0}, TIMER_FREQ/5, TIMER_FREQ/2, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
#else
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
[0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}, \
[1 ... MAX_CORES-1] = {NULL, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#else
static runqueue_t runqueues[1] = { \
[0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
#endif
#if MAX_CORES > 1
extern atomic_int32_t cpu_online;
#endif
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
extern const void boot_stack;
/** @brief helper function for the assembly code to determine the current task
* @return Pointer to the task_t structure of current task
@ -92,6 +107,7 @@ int multitasking_init(void) {
task_table[0].pgd = get_boot_pgd();
task_table[0].flags = TASK_DEFAULT_FLAGS;
task_table[0].prio = IDLE_PRIO;
task_table[0].stack = (void*) &boot_stack;
return 0;
}
@ -103,6 +119,8 @@ size_t get_idle_task(uint32_t id)
return -EINVAL;
task_table[id].id = id;
task_table[id].last_stack_pointer = NULL;
task_table[id].stack = (void*) ((size_t)&boot_stack + id * KERNEL_STACK_SIZE);
task_table[id].status = TASK_IDLE;
task_table[id].prio = IDLE_PRIO;
task_table[id].flags = TASK_DEFAULT_FLAGS;
@ -114,13 +132,13 @@ size_t get_idle_task(uint32_t id)
current_task[id].var = task_table+id;
runqueues[id].idle = task_table+id;
return get_stack(id);
return (size_t) task_table[id].stack + KERNEL_STACK_SIZE - 16;
#else
return -EINVAL;
#endif
}
static void finish_task_switch(uint32_t irq)
void finish_task_switch(void)
{
uint8_t prio;
uint32_t core_id = CORE_ID;
@ -128,23 +146,27 @@ static void finish_task_switch(uint32_t irq)
spinlock_irqsave_lock(&runqueues[core_id].lock);
if ((old = runqueues[core_id].old_task) != NULL) {
prio = old->prio;
if (!runqueues[core_id].queue[prio-1].first) {
old->next = old->prev = NULL;
runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
if (old->status == TASK_INVALID) {
destroy_stack(old);
old->stack = NULL;
old->last_stack_pointer = NULL;
runqueues[core_id].old_task = NULL;
} else {
old->next = NULL;
old->prev = runqueues[core_id].queue[prio-1].last;
runqueues[core_id].queue[prio-1].last->next = old;
runqueues[core_id].queue[prio-1].last = old;
prio = old->prio;
if (!runqueues[core_id].queue[prio-1].first) {
old->next = old->prev = NULL;
runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
} else {
old->next = NULL;
old->prev = runqueues[core_id].queue[prio-1].last;
runqueues[core_id].queue[prio-1].last->next = old;
runqueues[core_id].queue[prio-1].last = old;
}
runqueues[core_id].old_task = NULL;
runqueues[core_id].prio_bitmap |= (1 << prio);
}
runqueues[core_id].old_task = NULL;
runqueues[core_id].prio_bitmap |= (1 << prio);
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);
if (irq)
irq_enable();
}
/** @brief Wakeup tasks which are waiting for a message from the current one
@ -194,15 +216,17 @@ static void NORETURN do_exit(int arg) {
//remove fildes_table
if(!curr_task->fildes_table)
kfree(curr_task->fildes_table, sizeof(fildes_t)*NR_OPEN);
kfree(curr_task->fildes_table, sizeof(filp_t)*NR_OPEN);
spinlock_unlock(&curr_task->vma_lock);
drop_pgd(); // delete page directory and its page tables
#if 0
if (atomic_int32_read(&curr_task->user_usage))
kprintf("Memory leak! Task %d did not release %d pages\n",
curr_task->id, atomic_int32_read(&curr_task->user_usage));
#endif
curr_task->status = TASK_FINISHED;
// decrease the number of active tasks
@ -214,7 +238,7 @@ static void NORETURN do_exit(int arg) {
irq_nested_enable(flags);
reschedule();
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
while(1) {
HALT;
@ -245,15 +269,17 @@ void NORETURN abort(void) {
* @param ep Pointer to the function the task shall start with
* @param arg Arguments list
* @param prio Desired priority of the new task
* @param core_id Start the new task on the core with this id
*
* @return
* - 0 on success
* - -ENOMEM (-12) or -EINVAL (-22) on failure
*/
static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t prio)
static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
{
task_t* curr_task;
int ret = -ENOMEM;
unsigned int i, core_id;
uint32_t i;
if (BUILTIN_EXPECT(!ep, 0))
return -EINVAL;
@ -264,7 +290,15 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
spinlock_irqsave_lock(&table_lock);
core_id = CORE_ID;
#if MAX_CORES > 1
if (core_id >= atomic_int32_read(&cpu_online))
#else
if (core_id > 0)
#endif
{
core_id = CORE_ID;
kprintf("Inavlid core id! Set id to %u!\n", core_id);
}
curr_task = per_core(current_task);
for(i=0; i<MAX_TASKS; i++) {
@ -279,6 +313,8 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
task_table[i].id = i;
task_table[i].status = TASK_READY;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = create_stack();
task_table[i].flags = TASK_DEFAULT_FLAGS;
task_table[i].prio = prio;
task_table[i].last_core = 0;
@ -349,6 +385,9 @@ int sys_fork(void)
}
task_table[i].id = i;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = create_stack();
spinlock_init(&task_table[i].vma_lock);
// copy VMA list
@ -415,7 +454,6 @@ int sys_fork(void)
// Leave the function without releasing the locks
// because the locks are already released
// by the parent task!
finish_task_switch(1);
return 0;
}
@ -445,13 +483,11 @@ typedef struct {
/** @brief This call is used to adapt create_task calls
* which want to have a start function and argument list */
static int STDCALL kernel_entry(void* args)
static int kernel_entry(void* args)
{
int ret;
kernel_args_t* kernel_args = (kernel_args_t*) args;
finish_task_switch(1);
if (BUILTIN_EXPECT(!kernel_args, 0))
return -EINVAL;
@ -462,7 +498,7 @@ static int STDCALL kernel_entry(void* args)
return ret;
}
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id)
{
kernel_args_t* kernel_args;
@ -476,7 +512,7 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
if (prio > MAX_PRIO)
prio = NORMAL_PRIO;
return create_task(id, kernel_entry, kernel_args, prio);
return create_task(id, kernel_entry, kernel_args, prio, core_id);
}
#define MAX_ARGS (PAGE_SIZE - 2*sizeof(int) - sizeof(vfs_node_t*))
@ -501,6 +537,7 @@ typedef struct {
*/
static int load_task(load_args_t* largs)
{
#ifdef CONFIG_X86_32
uint32_t i, offset, idx, fd_i;
uint32_t addr, npages, flags, stack = 0;
elf_header_t header;
@ -724,16 +761,17 @@ invalid:
kprintf("program entry point 0x%x\n", (size_t) header.entry);
return -EINVAL;
#else
return -EINVAL;
#endif
}
/** @brief This call is used to adapt create_task calls
* which want to have a start function and argument list */
static int STDCALL user_entry(void* arg)
static int user_entry(void* arg)
{
int ret;
finish_task_switch(1);
if (BUILTIN_EXPECT(!arg, 0))
return -EINVAL;
@ -749,11 +787,13 @@ static int STDCALL user_entry(void* arg)
* @param id Pointer to the tid_t structure which shall be filles
* @param fname Executable's path and filename
* @param argv Arguments list
* @param core_id Start the new task on the core with this id
*
* @return
* - 0 on success
* - -ENOMEM (-12) or -EINVAL (-22) on failure
*/
int create_user_task(tid_t* id, const char* fname, char** argv)
int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t core_id)
{
vfs_node_t* node;
int argc = 0;
@ -790,9 +830,8 @@ int create_user_task(tid_t* id, const char* fname, char** argv)
while ((*dest++ = *src++) != 0);
}
/* create new task */
return create_task(id, user_entry, load_args, NORMAL_PRIO);
return create_task(id, user_entry, load_args, NORMAL_PRIO, core_id);
}
/** @brief Used by the execve-Systemcall */
@ -1089,6 +1128,8 @@ int set_timer(uint64_t deadline)
return ret;
}
#ifndef CONFIG_TICKLESS
/* determining the load as fix-point */
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
@ -1122,10 +1163,6 @@ void update_load(void)
}
}
#if MAX_CORES > 1
extern atomic_int32_t cpu_online;
#endif
void dump_load(void)
{
uint32_t i;
@ -1155,17 +1192,16 @@ void load_balancing(void)
uint32_t prio;
task_t* task;
spinlock_irqsave_lock(&runqueues[core_id].lock);
for(i=0; (i<atomic_int32_read(&cpu_online)) && (runqueues[core_id].balance_counter <= 0); i++)
{
if (i == core_id)
continue;
spinlock_irqsave_lock(&runqueues[i].lock);
if ((runqueues[i].load[0] >> (FSHIFT-1)) > (runqueues[core_id].load[0] >> (FSHIFT-1))) {
//kprintf("Try to steal a task from core %u (load %u) to %u (load %u)\n", i, runqueues[i].load[0], core_id, runqueues[core_id].load[0]);
//kprintf("Task on core %u: %u, core %u, %u\n", i, runqueues[i].nr_tasks, core_id, runqueues[i].nr_tasks);
spinlock_irqsave_lock(&runqueues[i].lock);
prio = lsb(runqueues[i].prio_bitmap);
if (prio < sizeof(size_t)*8) {
// steal a ready task
@ -1180,7 +1216,12 @@ void load_balancing(void)
runqueues[i].prio_bitmap &= ~(1 << prio);
} else runqueues[i].queue[prio-1].last = task->prev;
// update task counters
runqueues[i].nr_tasks--;
spinlock_irqsave_unlock(&runqueues[i].lock);
// add task at the end of queue core_id
spinlock_irqsave_lock(&runqueues[core_id].lock);
if (!runqueues[core_id].queue[prio-1].last) {
runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = task;
task->next = task->prev = NULL;
@ -1194,15 +1235,20 @@ void load_balancing(void)
// update task counters
runqueues[core_id].nr_tasks++;
runqueues[i].nr_tasks--;
runqueues[core_id].balance_counter = TIMER_FREQ/2;
} /*else {
spinlock_irqsave_unlock(&runqueues[core_id].lock);
} else {
#if 1
spinlock_irqsave_unlock(&runqueues[i].lock);
#else
task_t* tmp;
// steal a blocked task
task = runqueues[i].timers.first;
if (!task) // Ups, found no valid task to steal
if (!task) { // Ups, found no valid task to steal
spinlock_irqsave_unlock(&runqueues[i].lock);
goto no_task_found;
}
kprintf("Core %u steals the blocked task %d from %u with prio %u\n", core_id, task->id, i, task->prio);
@ -1212,6 +1258,10 @@ void load_balancing(void)
else
runqueues[i].timers.first = runqueues[i].timers.first->next;
spinlock_irqsave_unlock(&runqueues[i].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
// add timer to queue core_id
tmp = runqueues[core_id].timers.first;
while(tmp && (task->timeout >= tmp->timeout))
@ -1240,35 +1290,28 @@ void load_balancing(void)
// update task counters
runqueues[core_id].balance_counter = TIMER_FREQ/2;
}*/
spinlock_irqsave_lock(&runqueues[core_id].lock);
#endif
}
}
//no_task_found:
spinlock_irqsave_unlock(&runqueues[i].lock);
}
if (runqueues[core_id].balance_counter <= 0)
runqueues[core_id].balance_counter = TIMER_FREQ/2;
spinlock_irqsave_unlock(&runqueues[core_id].lock);
#endif
}
#endif
void scheduler(void)
#endif // CONFIG_TICKLESS
void check_timers(void)
{
task_t* orig_task;
task_t* curr_task;
uint32_t core_id = CORE_ID;
uint32_t prio;
uint64_t current_tick;
orig_task = curr_task = per_core(current_task);
curr_task->last_core = core_id;
/* signalizes that this task could be reused */
if (curr_task->status == TASK_FINISHED)
curr_task->status = TASK_INVALID;
spinlock_irqsave_lock(&runqueues[core_id].lock);
// check timers
@ -1307,8 +1350,29 @@ void scheduler(void)
}
}
runqueues[core_id].old_task = NULL; // reset old task
spinlock_irqsave_unlock(&runqueues[core_id].lock);
}
size_t** scheduler(void)
{
task_t* orig_task;
task_t* curr_task;
uint32_t core_id = CORE_ID;
uint32_t prio;
orig_task = curr_task = per_core(current_task);
curr_task->last_core = core_id;
spinlock_irqsave_lock(&runqueues[core_id].lock);
/* signalizes that this task could be reused */
if (curr_task->status == TASK_FINISHED) {
curr_task->status = TASK_INVALID;
runqueues[core_id].old_task = curr_task;
} else runqueues[core_id].old_task = NULL; // reset old task
prio = msb(runqueues[core_id].prio_bitmap); // determines highest priority
#ifndef CONFIG_TICKLESS
#if MAX_CORES > 1
if (prio >= sizeof(size_t)*8) {
// push load balancing
@ -1316,6 +1380,7 @@ void scheduler(void)
load_balancing();
prio = msb(runqueues[core_id].prio_bitmap); // retry...
}
#endif
#endif
if (prio >= sizeof(size_t)*8) {
@ -1359,16 +1424,19 @@ get_task_out:
orig_task->flags &= ~TASK_FPU_USED;
}
//kprintf("schedule from %u to %u with prio %u on core %u\n",
// orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
switch_task(curr_task->id);
finish_task_switch(0);
//kprintf("schedule from %u to %u with prio %u on core %u\n", orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
return (size_t**) &(orig_task->last_stack_pointer);
}
return NULL;
}
void reschedule(void)
{
size_t** stack;
uint32_t flags = irq_nested_disable();
scheduler();
if ((stack = scheduler()))
switch_context(stack);
irq_nested_enable(flags);
}

View file

@ -31,12 +31,16 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
/*
* The code has been taken from FreeBSD (sys/libkern/divdi3.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
* have been added in quad.h.
*/
#if __SIZEOF_POINTER__ == 4
#include "quad.h"
/*
@ -61,3 +65,5 @@ __divdi3(a, b)
uq = __qdivrem(ua, ub, (u_quad_t *)0);
return (neg ? -uq : uq);
}
#endif

View file

@ -31,6 +31,10 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* The code has been taken from FreeBSD (sys/libkern/lshrdi3.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
@ -61,3 +65,5 @@ __lshrdi3(a, shift)
}
return (aa.q);
}
#endif

View file

@ -31,6 +31,10 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* The code has been taken from FreeBSD (sys/libkern/moddi3.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
@ -63,3 +67,5 @@ __moddi3(a, b)
(void)__qdivrem(ua, ub, &ur);
return (neg ? -ur : ur);
}
#endif

View file

@ -37,6 +37,10 @@
* have been added in quad.h.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* Multiprecision divide. This algorithm is from Knuth vol. 2 (2nd ed),
* section 4.3.1, pp. 257--259.
@ -289,3 +293,5 @@ u_quad_t uq, vq, *arq;
tmp.ul[L] = COMBINE(qspace[3], qspace[4]);
return (tmp.q);
}
#endif

View file

@ -31,6 +31,10 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* The code has been taken from FreeBSD (sys/libkern/ucmpdi2.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
@ -54,3 +58,5 @@ __ucmpdi2(a, b)
return (aa.ul[H] < bb.ul[H] ? 0 : aa.ul[H] > bb.ul[H] ? 2 :
aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
}
#endif

View file

@ -31,6 +31,10 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* The code has been taken from FreeBSD (sys/libkern/udivdi3.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
@ -48,3 +52,5 @@ u_quad_t a, b;
return (__qdivrem(a, b, (u_quad_t *) 0));
}
#endif

View file

@ -31,6 +31,10 @@
* SUCH DAMAGE.
*/
#include <metalsvm/config.h>
#if __SIZEOF_POINTER__ == 4
/*
* The code has been taken from FreeBSD (sys/libkern/umoddi3.c) and is therefore
* BSD-licensed. Unnecessary functions have been removed and all typedefs required
@ -50,3 +54,5 @@ u_quad_t a, b;
(void)__qdivrem(a, b, &r);
return (r);
}
#endif

View file

@ -20,8 +20,8 @@ SECTIONS
.data ALIGN(4096) : AT(ADDR(.data)) {
*(.data)
}
bss_start = .;
.bss ALIGN(4096) : AT(ADDR(.bss)) {
bss_start = .;
*(.bss)
}
bss_end = .;

29
link64.ld Normal file
View file

@ -0,0 +1,29 @@
OUTPUT_FORMAT("elf64-x86-64")
OUTPUT_ARCH("i386:x86-64")
ENTRY(start)
phys = 0x000000100000;
SECTIONS
{
kernel_start = phys;
.mboot phys : AT(ADDR(.mboot)) {
*(.mboot)
*(.kmsg)
}
.text ALIGN(4096) : AT(ADDR(.text)) {
*(.text)
}
.rodata ALIGN(4096) : AT(ADDR(.rodata)) {
*(.rodata)
*(.rodata.*)
}
.data ALIGN(4096) : AT(ADDR(.data)) {
*(.data)
}
.bss ALIGN(4096) : AT(ADDR(.bss)) {
bss_start = .;
*(.bss)
}
bss_end = .;
kernel_end = .;
}

View file

@ -32,7 +32,7 @@
#define FALSE 0
#endif
#if SYS_LIGHTWEIGHT_PROT
#if SYS_LIGHTWEIGHT_PROT && !NO_SYS
#if MAX_CORES > 1
static spinlock_irqsave_t lwprot_lock;
#endif

View file

@ -90,4 +90,18 @@ typedef size_t mem_ptr_t;
#define LWIP_PLATFORM_ASSERT(x) do {kprintf("Assertion \"%s\" failed at line %d in %s\n", \
x, __LINE__, __FILE__); abort();} while(0)
#if NO_SYS
typedef uint32_t sys_prot_t;
static inline sys_prot_t sys_arch_protect(void)
{
return irq_nested_disable();
}
static inline void sys_arch_unprotect(sys_prot_t pval)
{
irq_nested_enable(pval);
}
#endif
#endif /* __ARCH_CC_H__ */

View file

@ -24,7 +24,7 @@ typedef struct
typedef tid_t sys_thread_t;
#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
#if (MAX_CORES > 1) && !defined(CONFIG_TICKLESS)
typedef uint32_t sys_prot_t;
sys_prot_t sys_arch_protect(void);
void sys_arch_unprotect(sys_prot_t pval);

View file

@ -16,7 +16,11 @@
* NO_SYS==1: Provides VERY minimal functionality. Otherwise,
* use lwIP facilities.
*/
#ifdef CONFIG_TICKLESS
#define NO_SYS 1
#else
#define NO_SYS 0
#endif
/**
* LWIP_RAW==1: Enable application layer to hook into the IP layer itself.
@ -27,17 +31,17 @@
/**
* LWIP_SOCKET==1: Enable Socket API (require to use sockets.c)
*/
#define LWIP_SOCKET 1
#define LWIP_SOCKET !NO_SYS
/**
* LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c)
*/
#define LWIP_NETCONN 1
#define LWIP_NETCONN !NO_SYS
/**
* LWIP_NETIF_API==1: Support netif api (in netifapi.c)
*/
#define LWIP_NETIF_API 1
#define LWIP_NETIF_API !NO_SYS
/**
* LWIP_DHCP==1: Enable DHCP module.
@ -100,7 +104,7 @@
/**
* TCP_SND_BUF: TCP sender buffer space (bytes).
*/
#define TCP_SND_BUF (16*TCP_WND)
#define TCP_SND_BUF (16 * TCP_WND)
/**
* LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only)

View file

@ -96,7 +96,7 @@ int mmu_init(void)
size_t kernel_size;
unsigned int i;
size_t addr;
int ret;
int ret = 0;
// at first, set default value of the bitmap
memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);
@ -104,7 +104,7 @@ int mmu_init(void)
#ifdef CONFIG_MULTIBOOT
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
size_t end_addr;
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
while (mmap < mmap_end) {
@ -128,35 +128,6 @@ int mmu_init(void)
HALT;
}
}
/*
* Modules like the init ram disk are already loaded.
* Therefore, we set these pages as used.
*/
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;
/*
* Mark the mb_info as used.
*/
page_set_mark((size_t)mb_info >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
}
}
#elif defined(CONFIG_ROCKCREEK)
/* of course, the first slots belong to the private memory */
for(addr=0x00; addr<1*0x1000000; addr+=PAGE_SIZE) {
@ -183,15 +154,6 @@ int mmu_init(void)
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
/*
* The init ram disk are already loaded.
* Therefore, we set these pages as used.
*/
for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
#else
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
#endif
@ -219,8 +181,41 @@ int mmu_init(void)
atomic_int32_sub(&total_available_pages, 1);
#endif
ret = paging_init();
if (ret) {
kprintf("Failed to initialize paging: %d\n", ret);
return ret;
}
#ifdef CONFIG_ROCKCREEK
#ifdef CONFIG_MULTIBOOT
/*
* Modules like the init ram disk are already loaded.
* Therefore, we set these pages as used.
*/
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
/*
* Mark the mb_info as used.
*/
page_set_mark((size_t)mb_info >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
}
}
#elif defined(CONFIG_ROCKCREEK)
/*
* Now, we are able to read the FPGA registers and to
* determine the number of slots for private memory.
@ -239,6 +234,17 @@ int mmu_init(void)
atomic_int32_inc(&total_pages);
atomic_int32_inc(&total_available_pages);
}
/*
* The init ram disk are already loaded.
* Therefore, we set these pages as used.
*/
for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
// This area is already mapped, so we need to virt_to_phys() these addresses.
page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
#endif
return ret;
@ -374,3 +380,18 @@ void kfree(void* addr, size_t sz)
atomic_int32_sub(&total_allocated_pages, npages);
atomic_int32_add(&total_available_pages, npages);
}
void* create_stack(void)
{
return kmalloc(KERNEL_STACK_SIZE);
}
int destroy_stack(task_t* task)
{
if (BUILTIN_EXPECT(!task || !task->stack, 0))
return -EINVAL;
kfree(task->stack, KERNEL_STACK_SIZE);
return 0;
}

View file

@ -1,37 +1,42 @@
TOPDIR = $(shell pwd)
ARCH = x86
BIT=32
ifeq ($(ARCH),x86)
ifeq ($(BIT),32)
TARGET=i586-metalsvm-elf32
endif
ifeq ($(ARCH),x86_64)
ifeq ($(BIT),64)
TARGET=x86_64-metalsvm-elf64
endif
endif
NEWLIB = $(TOPDIR)/$(ARCH)/$(TARGET)
RM = rm -rf
CD = cd
MKDIR = mkdir
TMP = $(TOPDIR)/tmp
OPT = --disable-shared --disable-multilib --enable-newlib-hw-fp
OPT = --disable-shared --disable-multilib --enable-newlib-hw-fp --disable-newlib-multithread --disable-newlib-reent-small
default: $(ARCH)
$(MAKE) CFLAGS+="-nostdinc -Wall -fno-builtin -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C net depend
$(MAKE) CFLAGS+="-nostdinc -Wall -fno-builtin -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C net
$(MAKE) CFLAGS+="-nostdinc -Wall -fno-builtin -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C examples depend
$(MAKE) CFLAGS+="-nostdinc -Wall -fno-builtin -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C examples
$(MAKE) ARCH=$(ARCH) TARGET=$(TARGET) CFLAGS+="-ffreestanding -Wall -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C net depend
$(MAKE) ARCH=$(ARCH) TARGET=$(TARGET) CFLAGS+="-ffreestanding -Wall -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C net
$(MAKE) ARCH=$(ARCH) TARGET=$(TARGET) CFLAGS+="-ffreestanding -Wall -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C examples depend
$(MAKE) ARCH=$(ARCH) TARGET=$(TARGET) CFLAGS+="-ffreestanding -Wall -I$(NEWLIB)/include -I../../include -I../../arch/$(ARCH)/include" LDFLAGS+="-nostdlib -L$(NEWLIB)/lib" -C examples
$(ARCH):
$(RM) $(TMP)
$(MKDIR) $(TMP)
$(CD) $(TMP); $(TOPDIR)/src/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) $(OPT) && make && make install
$(CD) $(TMP); $(TOPDIR)/src/configure --target=$(TARGET) --prefix=$(TOPDIR)/$(ARCH) $(OPT) && $(MAKE) && $(MAKE) install
$(MKDIR) $(NEWLIB)/include/netinet
$(MKDIR) $(NEWLIB)/include/arpa
clean:
$(MAKE) -C examples clean
$(MAKE) -C net clean
$(RM) $(TMP)
veryclean: clean
$(MAKE) -C examples clean
$(MAKE) -C net clean
$(RM) $(TOPDIR)/$(ARCH)

View file

@ -9,8 +9,6 @@
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#undef errno
extern int errno;
#define QUIT 0
#define S2C 1

View file

@ -21,28 +21,14 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#undef errno
extern int errno;
/*file descriptor init*/
#define NR_OPEN 10
#define FS_INIT { [0 ... NR_OPEN-1] = {NULL, 0, 0} }
/*open flags*/
//#define O_RDONLY 0
//#define O_WRONLY 1
#define O_RDWR 2
#define O_CREAT 64
#define O_EXCL 128
//#define O_NOCTTY 256
#define O_TRUNC 512
#define O_APPEND 1024
int main(int argc, char** argv)
{
int i, testfile;

View file

@ -23,8 +23,6 @@
#include <time.h>
#include <unistd.h>
#include <errno.h>
#undef errno
extern int errno;
#define MATRIX_SIZE 128
#define MAXVALUE 1337

View file

@ -26,9 +26,6 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#undef errno
extern int errno;
void showlogo() {
printf("\n\n");

View file

@ -27,8 +27,6 @@
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#undef errno
extern int errno;
#define PORT 4711

View file

@ -9,8 +9,6 @@
#include <string.h>
#include <unistd.h>
#include <errno.h>
#undef errno
extern int errno;
static char msg[] =" Hello from server!\n";

View file

@ -24,13 +24,11 @@
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#undef errno
extern int errno;
int main(int argc, char** argv)
{
int status = 0;
pid_t pid;
pid_t pid, mypid = getpid();
printf("Create child process...\n");
@ -39,12 +37,14 @@ int main(int argc, char** argv)
char* newargs[] = {"/bin/hello", "one", "two", "three", NULL};
char* newenv[] = {"USER=root", "PATH=/bin:/sbin:/usr/bin", "PWD=/", "TEMP=/tmp", NULL};
printf("Hello from child process!\n");
mypid = getpid();
printf("Hello from child process! mypid = %u\n", mypid);
execve("/bin/hello", newargs, newenv);
return errno;
} else {
printf("Hello from parent process! pid = %u\n", pid);
mypid = getpid();
printf("Hello from parent process! pid = %u, mypid = %u\n", pid, mypid);
wait(&status);
printf("Child terminated with status %d\n", status);
}

View file

@ -1,5 +1,5 @@
ARCH = x86
NEWLIB = ../x86/i586-metalsvm-elf32
NEWLIB = ../$(ARCH)/$(TARGET)
MAKE = make
STRIP_DEBUG = --strip-debug
KEEP_DEBUG = --only-keep-debug

View file

@ -35,8 +35,6 @@
#include <_ansi.h>
#include <_syslist.h>
#include <errno.h>
#undef errno
extern int errno;
#include "warning.h"
#include "syscall.h"

View file

@ -35,8 +35,6 @@
#include <_ansi.h>
#include <_syslist.h>
#include <errno.h>
#undef errno
extern int errno;
#include "warning.h"
#include "syscall.h"

View file

@ -35,8 +35,6 @@
#include <_ansi.h>
#include <_syslist.h>
#include <errno.h>
#undef errno
extern int errno;
#include "warning.h"
#include "syscall.h"

View file

@ -35,8 +35,6 @@
#include <_ansi.h>
#include <_syslist.h>
#include <errno.h>
#undef errno
extern int errno;
#include "warning.h"
#include "syscall.h"

Some files were not shown because too many files have changed in this diff Show more