embeddedsw/lib/sw_apps/openamp_matrix_multiply/src/matrix_multiply.c
Kinjal Pravinbhai Patel 078a7131d6 sw_apps: openamp: modified matrix_multiply application
This patch modifies the OpenAMP matrix_multiply application to
remove the hardcoded shared memory region and to support
configuring the memory region in the MPU region settings as
required by the code.

Signed-off-by: Kinjal Pravinbhai Patel <patelki@xilinx.com>
Acked-by: Anirudha Sarangi   <anirudh@xilinx.com>
2015-07-31 16:56:04 +05:30


/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* Copyright (C) 2015 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of Mentor Graphics Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************************
* This is a sample demonstration application that showcases usage of remoteproc
* and rpmsg APIs on the remote core. This application is meant to run on the remote CPU
* running bare-metal code. It receives two matrices from the master,
* multiplies them and returns the result to the master core.
*
* The init_system function is called from main. It defines a shared memory region in
* the MPU settings for communication between the master and the remote using the
* zynqMP_r5_map_mem_region API. It also initializes the interrupt controller
* (GIC) and registers the interrupt service routine for the IPI using the
* zynqMP_r5_gic_initialize API.
*
* The remoteproc_resource_init API is called to create the virtio/RPMsg devices
* required for IPC with the master context. Invocation of this API causes remoteproc on
* the bare-metal to use the rpmsg name service announcement feature to advertise the
* rpmsg channels served by the application.
*
* The master receives the advertisement messages and performs the following tasks:
* 1. Invokes the channel created callback registered by the master application
* 2. Responds to remote context with a name service acknowledgement message
* After the acknowledgement is received from the master, remoteproc on the bare-metal
* invokes the RPMsg channel-created callback registered by the remote application.
* The RPMsg channel is established at this point. All RPMsg APIs can be used subsequently
* on both sides for run time communications between the master and remote software contexts.
*
* When the master application runs, it generates the matrices and sends them to the
* remote (bare-metal) side, notifying the bare-metal application with an IPI. The
* remote computes the resulting matrix and sends the data back to the master.
* Once the application has run and the bare-metal application's task is done,
* the master needs to properly shut down the remote processor.
* To shut down the remote processor, the following steps are performed:
* 1. The master application sends an application-specific shutdown message
* to the remote context
* 2. This bare-metal application cleans up application resources,
* sends a shutdown acknowledgement to the master, and invokes the remoteproc_resource_deinit
* API to de-initialize remoteproc on the bare-metal side.
* 3. On receiving the shutdown acknowledgement message, the master application invokes
* the remoteproc_shutdown API to shut down the remote processor and de-initialize
* remoteproc using remoteproc_deinit on its side.
*
**************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "open_amp.h"
#include "rsc_table.h"
#include "baremetal.h"
#include "xil_cache.h"
#include "xil_mmu.h"
#include "xreg_cortexr5.h"
#define MAX_SIZE 6
#define NUM_MATRIX 2
#define SHUTDOWN_MSG 0xEF56A55A
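
/* Matrix exchanged with the master: a size field plus up to MAX_SIZE x MAX_SIZE elements. */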
typedef struct _matrix {
	unsigned int size;
	unsigned int elements[MAX_SIZE][MAX_SIZE];
} matrix;
/* Internal functions */
static void rpmsg_channel_created(struct rpmsg_channel *rp_chnl);
static void rpmsg_channel_deleted(struct rpmsg_channel *rp_chnl);
static void rpmsg_read_cb(struct rpmsg_channel *, void *, int, void *, unsigned long);
static void Matrix_Multiply(const matrix *m, const matrix *n, matrix *r);
static void init_system();
/* Globals */
static struct rpmsg_channel *app_rp_chnl;
void* mat_mul_lock;
int need_to_cal = 0;
static struct rpmsg_endpoint *rp_ept;
static matrix matrix_array[NUM_MATRIX];
static matrix matrix_result;
static struct remote_proc *proc = NULL;
static struct rsc_table_info rsc_info;
extern const struct remote_resource_table resources;
/* Application entry point */
int main() {
	int status = 0;

	/* Initialize HW system components */
	init_system();

	rsc_info.rsc_tab = (struct resource_table *)&resources;
	rsc_info.size = sizeof(resources);

	/* Initialize RPMSG framework */
	status = remoteproc_resource_init(&rsc_info, rpmsg_channel_created,
					  rpmsg_channel_deleted, rpmsg_read_cb,
					  &proc);
	if (status < 0) {
		return -1;
	}

	while (1) {
		__asm__ ( "wfi\n\t" );
	}

	return 0;
}
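
/*
 * Channel-created callback: invoked by the RPMsg framework once the channel with
 * the master is established. Stores the channel handle and creates the endpoint
 * used to receive data from the master.
 */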
static void rpmsg_channel_created(struct rpmsg_channel *rp_chnl) {
	app_rp_chnl = rp_chnl;
	rp_ept = rpmsg_create_ept(rp_chnl, rpmsg_read_cb, RPMSG_NULL,
				  RPMSG_ADDR_ANY);
}
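
/* Channel-deleted callback: releases the endpoint created in rpmsg_channel_created(). */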
static void rpmsg_channel_deleted(struct rpmsg_channel *rp_chnl) {
	rpmsg_destroy_ept(rp_ept);
}
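
/*
 * Receive callback: a SHUTDOWN_MSG from the master triggers remoteproc
 * de-initialization; any other message carries the two input matrices, which
 * are multiplied and the result sent back over the channel.
 */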
static void rpmsg_read_cb(struct rpmsg_channel *rp_chnl, void *data, int len,
			  void *priv, unsigned long src) {
	if ((*(int *) data) == SHUTDOWN_MSG) {
		remoteproc_resource_deinit(proc);
	} else {
		env_memcpy(matrix_array, data, len);
		/* Process the received data and multiply the matrices. */
		Matrix_Multiply(&matrix_array[0], &matrix_array[1], &matrix_result);
		/* Send the result of the matrix multiplication back to the master. */
		rpmsg_send(app_rp_chnl, &matrix_result, sizeof(matrix));
	}
}
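
/* Multiply matrices m and n and store the result in r. */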
static void Matrix_Multiply(const matrix *m, const matrix *n, matrix *r) {
	int i, j, k;

	env_memset(r, 0x0, sizeof(matrix));
	r->size = m->size;

	for (i = 0; i < m->size; ++i) {
		for (j = 0; j < n->size; ++j) {
			for (k = 0; k < r->size; ++k) {
				r->elements[i][j] += m->elements[i][k] * n->elements[k][j];
			}
		}
	}
}
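
/* Initialize the interrupt controller (GIC) used for IPI notifications from the master. */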
static void init_system() {
	/* Initialize the GIC */
	zynqMP_r5_gic_initialize();
}