msm: gud: Remove gud driver

Complete removal of the gud MobiCore driver. The driver author now
delivers an updated version of the driver directly to interested
parties, rendering this version obsolete.

Bug: 33842910
CRs-Fixed: 1116560
Change-Id: I40498d3203b1d6ca04f2b5a2e65461851d84d2d4
Acked-by: Tony Hamilton <tonyh@qti.qualcomm.com>
Signed-off-by: Trudy Shearer <tshearer@codeaurora.org>
Signed-off-by: Dennis Cagle <d-cagle@codeaurora.org>
Signed-off-by: Maggie White <maggiewhite@google.com>
CVE-2017-9691
Signed-off-by: Kevin F. Haggerty <haggertk@lineageos.org>
Maggie White 2017-06-02 18:10:06 -07:00 committed by Francescodario Cuzzocrea
parent f6509830af
commit 16ffe7a254
45 changed files with 0 additions and 8213 deletions


@@ -148,8 +148,6 @@ source "drivers/virt/Kconfig"
source "drivers/devfreq/Kconfig"
source "drivers/gud/Kconfig"
source "drivers/coresight/Kconfig"
source "drivers/bif/Kconfig"


@@ -139,9 +139,6 @@ obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
#MobiCore
obj-$(CONFIG_MOBICORE_SUPPORT) += gud/
obj-$(CONFIG_CORESIGHT) += coresight/
obj-$(CONFIG_BIF) += bif/


@@ -1,30 +0,0 @@
#
# MobiCore configuration
#
config MOBICORE_SUPPORT
tristate "Linux MobiCore Support"
---help---
Enable Linux Kernel MobiCore Support
config MOBICORE_DEBUG
bool "MobiCore Module debug mode"
depends on MOBICORE_SUPPORT
---help---
Enable Debug mode in the MobiCore Driver.
It enables printing information about MobiCore operations
config MOBICORE_VERBOSE
bool "MobiCore Module verbose debug mode"
depends on MOBICORE_DEBUG
---help---
Enable Verbose Debug mode in the MobiCore Driver.
It enables printing extra information about MobiCore operations
Beware: this is only useful for debugging deep in the driver because
it prints a lot of logs
config MOBICORE_API
tristate "Linux MobiCore API"
depends on MOBICORE_SUPPORT
---help---
Enable Linux Kernel MobiCore API


@@ -1,40 +0,0 @@
#
# Makefile for the kernel mobicore drivers
#
GUD_ROOT_FOLDER := drivers/gud
# add our modules to kernel.
obj-$(CONFIG_MOBICORE_API) += mcKernelApi.o
obj-$(CONFIG_MOBICORE_SUPPORT) += mcDrvModule.o
mcDrvModule-objs := MobiCoreDriver/logging.o \
MobiCoreDriver/ops.o \
MobiCoreDriver/mem.o \
MobiCoreDriver/api.o \
MobiCoreDriver/pm.o \
MobiCoreDriver/main.o
mcKernelApi-objs := MobiCoreKernelApi/main.o \
MobiCoreKernelApi/clientlib.o \
MobiCoreKernelApi/device.o \
MobiCoreKernelApi/session.o \
MobiCoreKernelApi/connection.o
# Release mode by default
ccflags-y := -DNDEBUG -I$(GUD_ROOT_FOLDER)
ccflags-y += -Wno-declaration-after-statement
#Netlink changed arguments number
#ccflags-y += -DMC_NETLINK_COMPAT_V37
ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
ccflags-$(CONFIG_MOBICORE_VERBOSE) += -DDEBUG_VERBOSE
# Choose one platform from the folder
MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms | tail -1) )
# Use the available platform folder
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
# MobiCore Driver includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
# MobiCore KernelApi required includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/include
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/public


@@ -1,31 +0,0 @@
#
# this makefile is called from the kernel make system. Thus we basically
# add things to "obj-m" here.
ifeq ($(MODE),release)
ccflags-y = -O2 -DNDEBUG
else
ccflags-y = -DDEBUG
endif # DEBUG/RELEASE
# CFLAG for testable mode
ifeq ($(IS_TESTABLE),yes)
ccflags-y = -DTEST
endif
# CFLAGS from the build script
ifdef MOBICORE_CFLAGS
ccflags-y += $(MOBICORE_CFLAGS)
endif
#EXTRA_CFLAGS+=-DDEBUG_VERBOSE
ccflags-y += -Wall -D__$(PLATFORM)__
# add our module to kernel.
obj-m += mcDrvModule.o
mcDrvModule-objs :=logging.o ops.o mem.o api.o pm.o main.o
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
Module.markers Module.symvers modules.order


@@ -1,118 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include "main.h"
#include "mem.h"
#include "debug.h"
int mobicore_map_vmem(struct mc_instance *instance, void *addr,
uint32_t len, uint32_t *handle)
{
phys_addr_t phys;
return mc_register_wsm_mmu(instance, addr, len,
handle, &phys);
}
EXPORT_SYMBOL(mobicore_map_vmem);
/*
* Unmap a virtual memory buffer from mobicore
* @param instance
* @param handle
*
* @return 0 if no error
*
*/
int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle)
{
return mc_unregister_wsm_mmu(instance, handle);
}
EXPORT_SYMBOL(mobicore_unmap_vmem);
/*
* Free a WSM buffer allocated with mobicore_allocate_wsm
* @param instance
* @param handle handle of the buffer
*
* @return 0 if no error
*
*/
int mobicore_free_wsm(struct mc_instance *instance, uint32_t handle)
{
return mc_free_buffer(instance, handle);
}
EXPORT_SYMBOL(mobicore_free_wsm);
/*
* Allocate WSM for given instance
*
* @param instance instance
* @param requested_size size of the WSM
* @param handle pointer where the handle will be saved
* @param virt_kernel_addr pointer for the kernel virtual address
*
* @return error code or 0 for success
*/
int mobicore_allocate_wsm(struct mc_instance *instance,
unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr)
{
struct mc_buffer *buffer = NULL;
/* Setup the WSM buffer structure! */
if (mc_get_buffer(instance, &buffer, requested_size))
return -EFAULT;
*handle = buffer->handle;
*virt_kernel_addr = buffer->addr;
return 0;
}
EXPORT_SYMBOL(mobicore_allocate_wsm);
/*
* Initialize a new mobicore API instance object
*
* @return Instance or NULL if no allocation was possible.
*/
struct mc_instance *mobicore_open(void)
{
struct mc_instance *instance = mc_alloc_instance();
if (instance)
instance->admin = true;
return instance;
}
EXPORT_SYMBOL(mobicore_open);
/*
* Release a mobicore instance object and all objects related to it
* @param instance instance
* @return 0 if Ok or -E ERROR
*/
int mobicore_release(struct mc_instance *instance)
{
return mc_release_instance(instance);
}
EXPORT_SYMBOL(mobicore_release);
/*
* Test if mobicore can sleep
*
* @return true if mobicore can sleep, false if it can't sleep
*/
bool mobicore_sleep_ready(void)
{
return mc_sleep_ready();
}
EXPORT_SYMBOL(mobicore_sleep_ready);
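The exported functions above form the in-kernel client API of the driver. As a minimal usage sketch (the function name example_wsm_roundtrip and the one-page size are illustrative, not taken from the original code), a kernel-side client might have done:
/* Illustrative sketch only; assumes the driver's public kernel API header. */
static int example_wsm_roundtrip(void)
{
	struct mc_instance *instance;
	uint32_t handle = 0;
	void *kaddr = NULL;
	int ret;
	/* create a new API instance (admin rights are set by mobicore_open) */
	instance = mobicore_open();
	if (!instance)
		return -ENOMEM;
	/* allocate one page of world shared memory */
	ret = mobicore_allocate_wsm(instance, PAGE_SIZE, &handle, &kaddr);
	if (ret)
		goto out;
	/* ... exchange data with the secure world through kaddr ... */
	ret = mobicore_free_wsm(instance, handle);
out:
	mobicore_release(instance);
	return ret;
}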


@@ -1,87 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_ARM_H_
#define _MC_ARM_H_
#include "debug.h"
#ifdef CONFIG_ARM64
inline bool has_security_extensions(void)
{
return true;
}
inline bool is_secure_mode(void)
{
return false;
}
#else
/*
* ARM Trustzone specific masks and modes
* Vanilla Linux is unaware of TrustZone extension.
* I.e. arch/arm/include/asm/ptrace.h does not define monitor mode.
* Also TZ bits in cpuid are not defined, ARM port uses magic numbers,
* see arch/arm/kernel/setup.c
*/
#define ARM_MONITOR_MODE (0x16) /*(0b10110)*/
#define ARM_SECURITY_EXTENSION_MASK (0x30)
/* check if CPU supports the ARM TrustZone Security Extensions */
inline bool has_security_extensions(void)
{
u32 fea = 0;
asm volatile(
"mrc p15, 0, %[fea], cr0, cr1, 0" :
[fea]"=r" (fea));
MCDRV_DBG_VERBOSE(mcd, "CPU Features: 0x%X", fea);
/*
* If the CPU features ID has 0 for security features then the CPU
* doesn't support TrustZone at all!
*/
if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
return false;
return true;
}
/* check if running in secure mode */
inline bool is_secure_mode(void)
{
u32 cpsr = 0;
u32 nsacr = 0;
asm volatile(
"mrc p15, 0, %[nsacr], cr1, cr1, 2\n"
"mrs %[cpsr], cpsr\n" :
[nsacr]"=r" (nsacr),
[cpsr]"=r"(cpsr));
MCDRV_DBG_VERBOSE(mcd, "CPSR.M = set to 0x%X\n", cpsr & MODE_MASK);
MCDRV_DBG_VERBOSE(mcd, "SCR.NS = set to 0x%X\n", nsacr);
/*
* If the NSACR contains the reset value(=0) then most likely we are
* running in Secure MODE.
* If the cpsr mode is set to monitor mode then we cannot load!
*/
if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
return true;
return false;
}
#endif
#endif /* _MC_ARM_H_ */
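Callers would typically use the two helpers above to bail out early when the environment cannot host the driver; a hedged sketch (the function name and return codes are illustrative, not from the original driver):
/* Illustrative sketch only: gate initialization on the checks above. */
static inline int example_check_tz_environment(void)
{
	/* no TrustZone security extensions: a secure world cannot exist */
	if (!has_security_extensions())
		return -ENODEV;
	/* already in secure/monitor mode: the normal-world driver must not load */
	if (is_secure_mode())
		return -ENODEV;
	return 0;
}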


@@ -1,28 +0,0 @@
#!/bin/bash
# source the setup script
if [ -z $COMP_PATH_ROOT ]; then
echo "The build environment is not set!"
echo "Trying to source setupDrivers.sh automatically!"
source ../setupDrivers.sh || exit 1
fi
ROOT_PATH=$(dirname $(readlink -f $0))
# These folders need to be relative to the kernel dir or absolute!
PLATFORM=EXYNOS_5410_STD
CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
PLATFORM_INCLUDE="$CODE_INCLUDE/platforms/$PLATFORM"
MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I${PLATFORM_INCLUDE}"
# Clean first
make -C $CODE_INCLUDE clean
make -C $LINUX_PATH \
MODE=$MODE \
ARCH=arm \
CROSS_COMPILE=$CROSS_COMPILE \
M=$CODE_INCLUDE \
"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
modules


@@ -1,15 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
"t-base-QC-MSM8974-Android-301C-V001-76_76"


@@ -1,64 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_DEBUG_H_
#define _MC_DEBUG_H_
/* Found in main.c */
extern struct device *mcd;
#define MCDRV_DBG_ERROR(dev, txt, ...) \
dev_err(dev, "MobiCore %s() ### ERROR: " txt "\n", \
__func__, \
##__VA_ARGS__)
/* dummy function helper macro. */
#define DUMMY_FUNCTION() do {} while (0)
#if defined(DEBUG)
/* #define DEBUG_VERBOSE */
#if defined(DEBUG_VERBOSE)
#define MCDRV_DBG_VERBOSE MCDRV_DBG
#else
#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
#endif
#define MCDRV_DBG(dev, txt, ...) \
dev_info(dev, "MobiCore %s(): " txt "\n", \
__func__, \
##__VA_ARGS__)
#define MCDRV_DBG_WARN(dev, txt, ...) \
dev_warn(dev, "MobiCore %s() WARNING: " txt "\n", \
__func__, \
##__VA_ARGS__)
#define MCDRV_ASSERT(cond) \
do { \
if (unlikely(!(cond))) { \
panic("Assertion failed: %s:%d\n", \
__FILE__, __LINE__); \
} \
} while (0)
#else
#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
#define MCDRV_DBG(...) DUMMY_FUNCTION()
#define MCDRV_DBG_WARN(...) DUMMY_FUNCTION()
#define MCDRV_ASSERT(...) DUMMY_FUNCTION()
#endif /* [not] defined(DEBUG) */
#endif /* _MC_DEBUG_H_ */
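All of the macros above take the device pointer (normally the global mcd) followed by a printf-style format string; a brief, purely illustrative usage sketch (the handle value and function name are made up):
/* Illustrative usage of the macros above. */
static inline void example_debug_usage(uint32_t handle)
{
	MCDRV_ASSERT(handle != 0);
	MCDRV_DBG_VERBOSE(mcd, "looking up handle %u", handle);
	if (handle > 1000)
		MCDRV_DBG_WARN(mcd, "suspiciously large handle %u", handle);
	if (handle == 0xffffffff)
		MCDRV_DBG_ERROR(mcd, "invalid handle %u", handle);
}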


@@ -1,255 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_FASTCALL_H_
#define _MC_FASTCALL_H_
#include "debug.h"
#include "platform.h"
/* Use the arch_extension sec pseudo op before switching to secure world */
#if defined(__GNUC__) && \
defined(__GNUC_MINOR__) && \
defined(__GNUC_PATCHLEVEL__) && \
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
>= 40502
#ifndef CONFIG_ARM64
#define MC_ARCH_EXTENSION_SEC
#endif
#endif
/*
* MobiCore SMCs
*/
#define MC_SMC_N_YIELD 0x3 /* Yield to switch from NWd to SWd. */
#define MC_SMC_N_SIQ 0x4 /* SIQ to switch from NWd to SWd. */
/*
* MobiCore fast calls. See MCI documentation
*/
#ifdef MC_AARCH32_FC
#define MC_FC_STD64_BASE ((uint32_t)0xFF000000)
/**< Initializing FastCall. */
#define MC_FC_INIT (MC_FC_STD64_BASE+1)
/**< Info FastCall. */
#define MC_FC_INFO (MC_FC_STD64_BASE+2)
/**< Enable SWd tracing via memory */
#define MC_FC_NWD_TRACE (MC_FC_STD64_BASE+10)
#ifdef TBASE_CORE_SWITCHER
/**< Core switching fastcall */
#define MC_FC_SWITCH_CORE (MC_FC_STD64_BASE+54)
#endif
#else
#define MC_FC_INIT -1
#define MC_FC_INFO -2
#define MC_FC_NWD_TRACE -31
#ifdef TBASE_CORE_SWITCHER
#define MC_FC_SWITCH_CORE 0x84000005
#endif
#endif
/*
* return code for fast calls
*/
#define MC_FC_RET_OK 0
#define MC_FC_RET_ERR_INVALID 1
#define MC_FC_RET_ERR_ALREADY_INITIALIZED 5
/* structure wrappers for specific fastcalls */
/* generic fast call parameters */
union fc_generic {
struct {
uint32_t cmd;
uint32_t param[3];
} as_in;
struct {
uint32_t resp;
uint32_t ret;
uint32_t param[2];
} as_out;
};
/* fast call init */
union mc_fc_init {
union fc_generic as_generic;
struct {
uint32_t cmd;
uint32_t base;
uint32_t nq_info;
uint32_t mcp_info;
} as_in;
struct {
uint32_t resp;
uint32_t ret;
uint32_t rfu[2];
} as_out;
};
/* fast call info parameters */
union mc_fc_info {
union fc_generic as_generic;
struct {
uint32_t cmd;
uint32_t ext_info_id;
uint32_t rfu[2];
} as_in;
struct {
uint32_t resp;
uint32_t ret;
uint32_t state;
uint32_t ext_info;
} as_out;
};
#ifdef TBASE_CORE_SWITCHER
/* fast call switch Core parameters */
union mc_fc_swich_core {
union fc_generic as_generic;
struct {
uint32_t cmd;
uint32_t core_id;
uint32_t rfu[2];
} as_in;
struct {
uint32_t resp;
uint32_t ret;
uint32_t state;
uint32_t ext_info;
} as_out;
};
#endif
/*
* _smc() - fast call to MobiCore
*
* @data: pointer to fast call data
*/
#ifdef CONFIG_ARM64
static inline long _smc(void *data)
{
int ret = 0;
if (data == NULL)
return -EPERM;
#ifdef MC_SMC_FASTCALL
{
ret = smc_fastcall(data, sizeof(union fc_generic));
}
#else
{
union fc_generic *fc_generic = data;
/* SMC expect values in x0-x3 */
register u64 reg0 __asm__("x0") = fc_generic->as_in.cmd;
register u64 reg1 __asm__("x1") = fc_generic->as_in.param[0];
register u64 reg2 __asm__("x2") = fc_generic->as_in.param[1];
register u64 reg3 __asm__("x3") = fc_generic->as_in.param[2];
__asm__ volatile (
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
);
/* set response */
fc_generic->as_out.resp = reg0;
fc_generic->as_out.ret = reg1;
fc_generic->as_out.param[0] = reg2;
fc_generic->as_out.param[1] = reg3;
}
#endif
return ret;
}
#else
static inline long _smc(void *data)
{
int ret = 0;
if (data == NULL)
return -EPERM;
#ifdef MC_SMC_FASTCALL
{
ret = smc_fastcall(data, sizeof(union fc_generic));
}
#else
{
union fc_generic *fc_generic = data;
/* SMC expect values in r0-r3 */
register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
__asm__ volatile (
#ifdef MC_ARCH_EXTENSION_SEC
/* This pseudo op is supported and required from
* binutils 2.21 on */
".arch_extension sec\n"
#endif
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
);
#ifdef __ARM_VE_A9X4_QEMU__
/* Qemu does not return to the address following the SMC
* instruction so we have to insert several nop instructions to
* workaround this Qemu bug. */
__asm__ volatile (
"nop\n"
"nop\n"
"nop\n"
"nop"
);
#endif
/* set response */
fc_generic->as_out.resp = reg0;
fc_generic->as_out.ret = reg1;
fc_generic->as_out.param[0] = reg2;
fc_generic->as_out.param[1] = reg3;
}
#endif
return ret;
}
#endif
/*
* convert fast call return code to linux driver module error code
*/
static inline int convert_fc_ret(uint32_t sret)
{
int ret = -EFAULT;
switch (sret) {
case MC_FC_RET_OK:
ret = 0;
break;
case MC_FC_RET_ERR_INVALID:
ret = -EINVAL;
break;
case MC_FC_RET_ERR_ALREADY_INITIALIZED:
ret = -EBUSY;
break;
default:
break;
}
return ret;
}
#endif /* _MC_FASTCALL_H_ */
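Taken together, a fast call is issued by filling the as_in view of one of the unions above, handing it to _smc() (in the driver proper this goes through the mc_fastcall() worker defined elsewhere), and mapping the secure-world return code with convert_fc_ret(). A minimal sketch, modelled on the mc_info() fastcall in the driver (the wrapper name is illustrative):
/* Illustrative sketch: issue an MC_FC_INFO fast call and convert the result. */
static inline int example_fc_info(uint32_t ext_info_id, uint32_t *state)
{
	union mc_fc_info fc_info;
	memset(&fc_info, 0, sizeof(fc_info));
	fc_info.as_in.cmd = MC_FC_INFO;
	fc_info.as_in.ext_info_id = ext_info_id;
	/* trap into the secure world */
	_smc(&fc_info.as_generic);
	*state = fc_info.as_out.state;
	return convert_fc_ret(fc_info.as_out.ret);
}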


@@ -1,375 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore Driver Logging Subsystem.
*
* The logging subsystem provides the interface between the Mobicore trace
* buffer and the Linux log
*/
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/device.h>
#include "main.h"
#include "debug.h"
#include "ops.h"
#include "logging.h"
/* Default length of the log ring buffer 256KB*/
#define LOG_BUF_SIZE (64 * PAGE_SIZE)
/* Max Len of a log line for printing */
#define LOG_LINE_SIZE 256
static uint32_t log_size = LOG_BUF_SIZE;
module_param(log_size, uint, 0);
MODULE_PARM_DESC(log_size, "Size of the MobiCore log ringbuffer(256KB def)");
/* Definitions for log version 2 */
#define LOG_TYPE_MASK (0x0007)
#define LOG_TYPE_CHAR 0
#define LOG_TYPE_INTEGER 1
/* Field length */
#define LOG_LENGTH_MASK (0x00F8)
#define LOG_LENGTH_SHIFT 3
/* Extra attributes */
#define LOG_EOL (0x0100)
#define LOG_INTEGER_DECIMAL (0x0200)
#define LOG_INTEGER_SIGNED (0x0400)
struct logmsg_struct {
uint16_t ctrl; /* Type and format of data */
uint16_t source; /* Unique value for each event source */
uint32_t log_data; /* Value, if any */
};
static uint16_t prev_source; /* Previous Log source */
static uint32_t log_pos; /* MobiCore log previous position */
static struct mc_trace_buf *log_buf; /* MobiCore log buffer structure */
struct task_struct *log_thread; /* Log Thread task structure */
static char *log_line; /* Log Line buffer */
static uint32_t log_line_len; /* Log Line buffer current length */
static int thread_err;
static void log_eol(uint16_t source)
{
if (!strnlen(log_line, LOG_LINE_SIZE)) {
/* In case a TA tries to print a 0x0 */
log_line_len = 0;
return;
}
/* MobiCore Userspace */
if (prev_source)
dev_info(mcd, "%03x|%s\n", prev_source, log_line);
/* MobiCore kernel */
else
dev_info(mcd, "%s\n", log_line);
log_line_len = 0;
log_line[0] = 0;
}
/*
* Collect chars in log_line buffer and output the buffer when it is full.
* No locking needed because only "mobicore_log" thread updates this buffer.
*/
static void log_char(char ch, uint16_t source)
{
if (ch == '\n' || ch == '\r') {
log_eol(source);
return;
}
if (log_line_len >= LOG_LINE_SIZE - 1 || source != prev_source)
log_eol(source);
log_line[log_line_len] = ch;
log_line[log_line_len + 1] = 0;
log_line_len++;
prev_source = source;
}
static const uint8_t HEX2ASCII[16] = {
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
static void dbg_raw_nro(uint32_t format, uint32_t value, uint16_t source)
{
int digits = 1;
uint32_t base = (format & LOG_INTEGER_DECIMAL) ? 10 : 16;
int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
int negative = 0;
uint32_t digit_base = 1;
if ((format & LOG_INTEGER_SIGNED) != 0 && ((signed int)value) < 0) {
negative = 1;
value = (uint32_t)(-(signed int)value);
width--;
}
/* Find length and divider to get largest digit */
while (value / digit_base >= base) {
digit_base *= base;
digits++;
}
if (width > digits) {
char ch = (base == 10) ? ' ' : '0';
while (width > digits) {
log_char(ch, source);
width--;
}
}
if (negative)
log_char('-', source);
while (digits-- > 0) {
uint32_t d = value / digit_base;
log_char(HEX2ASCII[d], source);
value = value - d * digit_base;
digit_base /= base;
}
}
static void log_msg(struct logmsg_struct *msg)
{
switch (msg->ctrl & LOG_TYPE_MASK) {
case LOG_TYPE_CHAR: {
uint32_t ch;
ch = msg->log_data;
while (ch != 0) {
log_char(ch & 0xFF, msg->source);
ch >>= 8;
}
break;
}
case LOG_TYPE_INTEGER: {
dbg_raw_nro(msg->ctrl, msg->log_data, msg->source);
break;
}
default:
break;
}
if (msg->ctrl & LOG_EOL)
log_eol(msg->source);
}
static uint32_t process_log(void)
{
char *last_msg = log_buf->buff + log_buf->write_pos;
char *buff = log_buf->buff + log_pos;
while (buff != last_msg) {
log_msg((struct logmsg_struct *)buff);
buff += sizeof(struct logmsg_struct);
/* Wrap around */
if ((buff + sizeof(struct logmsg_struct)) >
((char *)log_buf + log_size))
buff = log_buf->buff;
}
return buff - log_buf->buff;
}
static void log_exit(void)
{
union fc_generic fc_log;
memset(&fc_log, 0, sizeof(fc_log));
fc_log.as_in.cmd = MC_FC_NWD_TRACE;
MCDRV_DBG(mcd, "Unregister the trace buffer");
mc_fastcall(&fc_log);
MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
if (fc_log.as_out.ret == 0) {
free_pages((unsigned long)log_buf, get_order(log_size));
log_buf = NULL;
}
}
/* log_worker() - Worker thread processing the log_buf buffer. */
static int log_worker(void *p)
{
int ret = 0;
if (log_buf == NULL) {
ret = -EFAULT;
goto err_kthread;
}
while (!kthread_should_stop()) {
if (log_buf->write_pos == log_pos)
schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
switch (log_buf->version) {
case 2:
log_pos = process_log();
break;
default:
MCDRV_DBG_ERROR(mcd, "Unknown Mobicore log data");
log_pos = log_buf->write_pos;
/*
* Stop the thread as we have no idea what
* happens next
*/
ret = -EFAULT;
goto err_kthread;
}
}
err_kthread:
MCDRV_DBG(mcd, "Logging thread stopped!");
thread_err = ret;
/* Wait until the next kthread_stop() is called; if it was already
* called we just slip through. If there is an error, signal it and
* wait for the stop signal. */
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
log_exit();
return ret;
}
/*
* Wake up the log reader thread
* This should be called from the places where calls into MobiCore have
* generated some logs (e.g. yield, SIQ...)
*/
void mobicore_log_read(void)
{
if (log_thread == NULL || IS_ERR(log_thread))
return;
/* The thread itself is in some error condition so just get
* rid of it */
if (thread_err != 0) {
kthread_stop(log_thread);
log_thread = NULL;
return;
}
wake_up_process(log_thread);
}
/*
* Setup MobiCore kernel log. It assumes it's running on CORE 0!
* The fastcall will complain if that is not the case!
*/
long mobicore_log_setup(void)
{
phys_addr_t phys_log_buf;
union fc_generic fc_log;
struct sched_param param = { .sched_priority = 1 };
long ret;
log_pos = 0;
log_buf = NULL;
log_thread = NULL;
log_line = NULL;
log_line_len = 0;
prev_source = 0;
thread_err = 0;
/* Sanity check for the log size */
if (log_size < PAGE_SIZE)
return -EFAULT;
else
log_size = PAGE_ALIGN(log_size);
log_line = kzalloc(LOG_LINE_SIZE, GFP_KERNEL);
if (IS_ERR(log_line)) {
MCDRV_DBG_ERROR(mcd, "failed to allocate log line!");
return -ENOMEM;
}
log_thread = kthread_create(log_worker, NULL, "mc_log");
if (IS_ERR(log_thread)) {
MCDRV_DBG_ERROR(mcd, "MobiCore log thread creation failed!");
ret = -EFAULT;
goto err_free_line;
}
sched_setscheduler(log_thread, SCHED_IDLE, &param);
/*
* We are going to map this buffer into virtual address space in SWd.
* To reduce complexity there, we use a contiguous buffer.
*/
log_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(log_size));
if (!log_buf) {
MCDRV_DBG_ERROR(mcd, "Failed to get page for logger!");
ret = -ENOMEM;
goto err_stop_kthread;
}
phys_log_buf = virt_to_phys(log_buf);
memset(&fc_log, 0, sizeof(fc_log));
fc_log.as_in.cmd = MC_FC_NWD_TRACE;
fc_log.as_in.param[0] = (uint32_t)phys_log_buf;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
fc_log.as_in.param[1] = (uint32_t)(phys_log_buf >> 32);
#endif
fc_log.as_in.param[2] = log_size;
MCDRV_DBG(mcd, "fc_log virt=%p phys=0x%llX",
log_buf, (u64)phys_log_buf);
mc_fastcall(&fc_log);
MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
/* If the setup failed we must free the memory allocated */
if (fc_log.as_out.ret) {
MCDRV_DBG_ERROR(mcd, "MobiCore shared traces setup failed!");
free_pages((unsigned long)log_buf, get_order(log_size));
log_buf = NULL;
ret = -EIO;
goto err_stop_kthread;
}
set_task_state(log_thread, TASK_INTERRUPTIBLE);
MCDRV_DBG(mcd, "fc_log Logger version %u", log_buf->version);
return 0;
err_stop_kthread:
kthread_stop(log_thread);
log_thread = NULL;
err_free_line:
kfree(log_line);
log_line = NULL;
return ret;
}
/*
* Free kernel log components.
* ATTN: We can't free the log buffer because it's also in use by MobiCore and
* even if the module is unloaded MobiCore is still running.
*/
void mobicore_log_free(void)
{
if (log_thread && !IS_ERR(log_thread)) {
/* We don't really care what the thread returns for exit */
kthread_stop(log_thread);
}
kfree(log_line);
}
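For reference, each record in the shared trace buffer is a logmsg_struct whose ctrl field packs the type, field width and attribute bits defined at the top of this file. A worked, purely illustrative example of one record and how the decoder above renders it (all values are made up):
/* Illustrative record: the signed value -42 printed in decimal, width 4,
 * terminating the line. dbg_raw_nro() renders this as " -42" and log_eol()
 * then flushes the line with dev_info(). */
static const struct logmsg_struct example_record = {
	.ctrl = LOG_TYPE_INTEGER		/* integer payload             */
		| (4 << LOG_LENGTH_SHIFT)	/* minimum field width of 4    */
		| LOG_INTEGER_DECIMAL		/* base 10                     */
		| LOG_INTEGER_SIGNED		/* treat log_data as signed    */
		| LOG_EOL,			/* end of line after printing  */
	.source = 0x001,			/* non-zero: user-space source */
	.log_data = (uint32_t)-42,
};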


@@ -1,30 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LOGGING_H_
#define _MC_LOGGING_H_
/* MobiCore internal trace buffer structure. */
struct mc_trace_buf {
uint32_t version; /* version of trace buffer */
uint32_t length; /* length of allocated buffer(includes header) */
uint32_t write_pos; /* last write position */
char buff[1]; /* start of the log buffer */
};
/* MobiCore internal trace log setup. */
void mobicore_log_read(void);
long mobicore_log_setup(void);
void mobicore_log_free(void);
#endif /* _MC_LOGGING_H_ */

File diff suppressed because it is too large.


@@ -1,152 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MAIN_H_
#define _MC_MAIN_H_
#include <asm/pgtable.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include "public/mc_linux.h"
/* Platform specific settings */
#include "platform.h"
#define MC_VERSION(major, minor) \
(((major & 0x0000ffff) << 16) | (minor & 0x0000ffff))
/* Instance data for MobiCore Daemon and TLCs. */
struct mc_instance {
/* lock for the instance */
struct mutex lock;
/* unique handle */
unsigned int handle;
bool admin;
};
/*
* Contiguous buffer allocated to TLCs.
* These buffers are used as world shared memory (wsm) and shared with
* secure world.
* The virtual kernel address is added for a simpler search algorithm.
*/
struct mc_buffer {
struct list_head list;
/* unique handle */
unsigned int handle;
/* Number of references kept to this buffer */
atomic_t usage;
/* virtual Kernel start address */
void *addr;
/* virtual Userspace start address */
void *uaddr;
/* physical start address */
phys_addr_t phys;
/* order of number of pages */
unsigned int order;
uint32_t len;
struct mc_instance *instance;
};
/* MobiCore Driver Kernel Module context data. */
struct mc_context {
/* MobiCore MCI information */
struct mc_buffer mci_base;
/* MobiCore MCP buffer */
struct mc_mcp_buffer *mcp;
/* event completion */
struct completion isr_comp;
/* isr event counter */
unsigned int evt_counter;
atomic_t isr_counter;
/* ever incrementing counters */
atomic_t buffer_counter;
atomic_t instance_counter;
/* pointer to instance of daemon */
struct mc_instance *daemon_inst;
/* pointer to instance of daemon */
struct task_struct *daemon;
/* General list of contiguous buffers allocated by the kernel */
struct list_head cont_bufs;
/* Lock for the list of contiguous buffers */
struct mutex bufs_lock;
};
struct mc_sleep_mode {
uint16_t sleep_req;
uint16_t ready_to_sleep;
};
/* MobiCore is idle. No scheduling required. */
#define SCHEDULE_IDLE 0
/* MobiCore is non idle, scheduling is required. */
#define SCHEDULE_NON_IDLE 1
/* MobiCore status flags */
struct mc_flags {
/*
* Scheduling hint: if <> SCHEDULE_IDLE, MobiCore should
* be scheduled by the NWd
*/
uint32_t schedule;
/* State of sleep protocol */
struct mc_sleep_mode sleep_mode;
/* Reserved for future use: Must not be interpreted */
uint32_t rfu[2];
};
/* MCP buffer structure */
struct mc_mcp_buffer {
/* MobiCore Flags */
struct mc_flags flags;
uint32_t rfu; /* MCP message buffer - ignore */
};
/* check if caller is MobiCore Daemon */
static inline bool is_daemon(struct mc_instance *instance)
{
if (!instance)
return false;
return instance->admin;
}
/* Initialize a new mobicore API instance object */
struct mc_instance *mc_alloc_instance(void);
/* Release a mobicore instance object and all objects related to it */
int mc_release_instance(struct mc_instance *instance);
/*
* mc_register_wsm_mmu() - Create a MMU table from a virtual memory buffer which
* can be vmalloc or user space virtual memory
*/
int mc_register_wsm_mmu(struct mc_instance *instance,
void *buffer, uint32_t len,
uint32_t *handle, phys_addr_t *phys);
/* Unregister the buffer mapped above */
int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle);
/* Allocate one mc_buffer of contiguous space */
int mc_get_buffer(struct mc_instance *instance,
struct mc_buffer **buffer, unsigned long len);
/* Free the buffer allocated above */
int mc_free_buffer(struct mc_instance *instance, uint32_t handle);
/* Check if the other end of the fd owns instance */
bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd);
/* Test if sleep is possible */
bool mc_sleep_ready(void);
#endif /* _MC_MAIN_H_ */


@@ -1,767 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore Driver Kernel Module.
*
* This module is written as a Linux device driver.
* This driver represents the command proxy on the lowest layer, from the
* secure world to the non secure world, and vice versa.
* This driver is located in the non secure world (Linux).
* This driver offers IOCTL commands, for access to the secure world, and has
* the interface from the secure world to the normal world.
* The access to the driver is possible with a file descriptor,
* which has to be created by the fd = open(/dev/mobicore) command.
*/
#include "main.h"
#include "debug.h"
#include "mem.h"
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#ifdef LPAE_SUPPORT
#define MMU_TYPE_PAGE (3 << 0)
#define MMU_BUFFERABLE (1 << 2) /* AttrIndx[0] */
#define MMU_CACHEABLE (1 << 3) /* AttrIndx[1] */
#define MMU_NS (1 << 5)
#define MMU_AP_RW_ALL (1 << 6) /* AP[2:1], RW, at any privilege level */
#define MMU_EXT_SHARED (3 << 8) /* SH[1:0], inner shareable */
#define MMU_EXT_AF (1 << 10) /* Access Flag */
#define MMU_EXT_NG (1 << 11)
#define MMU_EXT_XN (((uint64_t)1) << 54) /* XN */
#else
#define MMU_TYPE_EXT (3 << 0) /* v5 */
#define MMU_TYPE_SMALL (2 << 0)
#define MMU_BUFFERABLE (1 << 2)
#define MMU_CACHEABLE (1 << 3)
#define MMU_EXT_AP0 (1 << 4)
#define MMU_EXT_AP1 (2 << 4)
#define MMU_EXT_TEX(x) ((x) << 6) /* v5 */
#define MMU_EXT_SHARED (1 << 10) /* v6 */
#define MMU_EXT_NG (1 << 11) /* v6 */
#endif
/* MobiCore memory context data */
struct mc_mem_context mem_ctx;
static inline void release_page(struct page *page)
{
set_bit(PG_dirty, &page->flags);
page_cache_release(page);
}
static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
int pages_no, struct page **pages)
{
int locked_pages;
/* lock user pages, must hold the mmap_sem to do this. */
down_read(&(task->mm->mmap_sem));
locked_pages = __get_user_pages(
task,
task->mm,
(unsigned long)virt_start_page_addr,
pages_no,
FOLL_TOUCH | FOLL_GET | FOLL_WRITE | FOLL_CMA,
pages,
NULL,
NULL);
up_read(&(task->mm->mmap_sem));
/* check if we could lock all pages. */
if (locked_pages != pages_no) {
MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
locked_pages);
if (locked_pages > 0) {
/* release all locked pages. */
release_pages(pages, locked_pages, 0);
}
return -ENOMEM;
}
return 0;
}
/* Get kernel pointer to shared MMU table given a per-process reference */
static void *get_mmu_table_kernel_virt(struct mc_mmu_table *table)
{
if (WARN(!table, "Invalid MMU table"))
return NULL;
if (WARN(!table->set, "Invalid MMU table set"))
return NULL;
if (WARN(!table->set->kernel_virt, "Invalid MMU pointer"))
return NULL;
return &(table->set->kernel_virt->table[table->idx]);
}
/*
* Search the list of used MMU tables and return the one with the handle.
* Assumes the table_lock is taken.
*/
struct mc_mmu_table *find_mmu_table(unsigned int handle)
{
struct mc_mmu_table *table;
list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
if (table->handle == handle)
return table;
}
return NULL;
}
/*
* Allocate a new MMU table store and add its MMU_TABLES_PER_PAGE tables to the
* MMU free tables list. Assumes the table_lock is already taken by the caller.
*/
static int alloc_mmu_table_store(void)
{
unsigned long store;
struct mc_mmu_tables_set *mmutable_set;
struct mc_mmu_table *mmutable, *mmutable2;
struct page *page;
int ret = 0, i;
/* temp list for holding the MMU tables */
LIST_HEAD(temp);
store = get_zeroed_page(GFP_KERNEL);
if (!store)
return -ENOMEM;
/*
* Actually, locking is not necessary, because kernel
* memory is not supposed to get swapped out. But we
* play safe....
*/
page = virt_to_page(store);
set_bit(PG_reserved, &page->flags);
/* add all the descriptors to the free descriptors list */
mmutable_set = kmalloc(sizeof(*mmutable_set), GFP_KERNEL | __GFP_ZERO);
if (mmutable_set == NULL) {
ret = -ENOMEM;
goto free_store;
}
/* initialize */
mmutable_set->kernel_virt = (void *)store;
mmutable_set->page = page;
mmutable_set->phys = virt_to_phys((void *)store);
/* the set is not yet used */
atomic_set(&mmutable_set->used_tables, 0);
/* init add to list. */
INIT_LIST_HEAD(&(mmutable_set->list));
list_add(&mmutable_set->list, &mem_ctx.mmu_tables_sets);
for (i = 0; i < MMU_TABLES_PER_PAGE; i++) {
/* allocate a WSM MMU descriptor */
mmutable = kmalloc(sizeof(*mmutable), GFP_KERNEL | __GFP_ZERO);
if (mmutable == NULL) {
ret = -ENOMEM;
MCDRV_DBG_ERROR(mcd, "out of memory");
/* Free the full temp list and the store in this case */
goto free_temp_list;
}
/* set set reference */
mmutable->set = mmutable_set;
mmutable->idx = i;
mmutable->virt = get_mmu_table_kernel_virt(mmutable);
mmutable->phys = mmutable_set->phys+i*sizeof(struct mmutable);
atomic_set(&mmutable->usage, 0);
/* add to temp list. */
INIT_LIST_HEAD(&mmutable->list);
list_add_tail(&mmutable->list, &temp);
}
/*
* If everything went ok then merge the temp list with the global
* free list
*/
list_splice_tail(&temp, &mem_ctx.free_mmu_tables);
return 0;
free_temp_list:
list_for_each_entry_safe(mmutable, mmutable2, &temp, list) {
kfree(mmutable);
}
list_del(&mmutable_set->list);
free_store:
free_page(store);
return ret;
}
/* Get a unique handle */
static uint32_t get_new_table_handle(void)
{
uint32_t handle;
struct mc_mmu_table *table;
/* assumption mem_ctx.table_lock mutex is locked */
retry:
handle = atomic_inc_return(&mem_ctx.table_counter);
if (handle == 0) {
atomic_set(&mem_ctx.table_counter, 1);
handle = 1;
}
list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
if (table->handle == handle)
goto retry;
}
return handle;
}
/*
* Get a MMU table from the free tables list or allocate a new one and
* initialize it. Assumes the table_lock is already taken.
*/
static struct mc_mmu_table *alloc_mmu_table(struct mc_instance *instance)
{
int ret = 0;
struct mc_mmu_table *table = NULL;
if (list_empty(&mem_ctx.free_mmu_tables)) {
ret = alloc_mmu_table_store();
if (ret) {
MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
return ERR_PTR(-ENOMEM);
}
/* if it's still empty something wrong has happened */
if (list_empty(&mem_ctx.free_mmu_tables)) {
MCDRV_DBG_ERROR(mcd,
"Free list not updated correctly!");
return ERR_PTR(-EFAULT);
}
}
/* get a WSM MMU descriptor */
table = list_first_entry(&mem_ctx.free_mmu_tables,
struct mc_mmu_table, list);
if (table == NULL) {
MCDRV_DBG_ERROR(mcd, "out of memory");
return ERR_PTR(-ENOMEM);
}
/* Move it to the used MMU tables list */
list_move_tail(&table->list, &mem_ctx.mmu_tables);
table->handle = get_new_table_handle();
table->owner = instance;
atomic_inc(&table->set->used_tables);
atomic_inc(&table->usage);
MCDRV_DBG_VERBOSE(mcd,
"chunkPhys=0x%llX, idx=%d",
(u64)table->set->phys, table->idx);
return table;
}
/*
* Frees the object associated with a MMU table. Initially the object is moved
* to the free tables list, but if all the 4 tables of the store are free
* then the store is also released.
* Assumes the table_lock is already taken.
*/
static void free_mmu_table(struct mc_mmu_table *table)
{
struct mc_mmu_tables_set *mmutable_set;
if (WARN(!table, "Invalid table"))
return;
mmutable_set = table->set;
if (WARN(!mmutable_set, "Invalid table set"))
return;
list_move_tail(&table->list, &mem_ctx.free_mmu_tables);
/* if nobody uses this set, we can release it. */
if (atomic_dec_and_test(&mmutable_set->used_tables)) {
struct mc_mmu_table *tmp;
/* remove from list */
list_del(&mmutable_set->list);
/*
* All the MMU tables are in the free list for this set
* so we can just remove them from there
*/
list_for_each_entry_safe(table, tmp, &mem_ctx.free_mmu_tables,
list) {
if (table->set == mmutable_set) {
list_del(&table->list);
kfree(table);
}
} /* end while */
/*
* We shouldn't recover from this since it was some data
* corruption before
*/
BUG_ON(!mmutable_set->page);
clear_bit(PG_reserved, &(mmutable_set->page)->flags);
BUG_ON(!mmutable_set->kernel_virt);
free_page((unsigned long)mmutable_set->kernel_virt);
kfree(mmutable_set);
}
}
/*
* Create an MMU table in a WSM container that has been allocated previously.
* Assumes the table lock is already taken, or that there is no need to take it
* (e.g. when first creating the MMU table the full list is locked).
*
* @task pointer to task owning WSM
* @wsm_buffer user space WSM start
* @wsm_len WSM length
* @table Pointer to MMU table details
*/
static int map_buffer(struct task_struct *task, void *wsm_buffer,
unsigned int wsm_len, struct mc_mmu_table *table)
{
int ret = 0;
unsigned int i, nr_of_pages;
/* start address of the 4 KiB page of wsm_buffer */
void *virt_addr_page;
struct page *page;
struct mmutable *mmutable;
struct page **mmutable_as_array_of_pointers_to_page = NULL;
/* page offset in wsm buffer */
unsigned int offset;
if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
return -EINVAL;
if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
return -EINVAL;
if (WARN(!table, "Invalid mapping table for WSM"))
return -EINVAL;
/* no size > 1Mib supported */
if (wsm_len > SZ_1M) {
MCDRV_DBG_ERROR(mcd, "size > 1 MiB");
return -EINVAL;
}
MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x", wsm_buffer,
wsm_len);
/* calculate page usage */
virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
offset = (unsigned int) (((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
nr_of_pages = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d",
virt_addr_page, nr_of_pages);
/* MMU table can hold max 1MiB in 256 pages. */
if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB");
return -EINVAL;
}
mmutable = table->virt;
#if (defined LPAE_SUPPORT) || !(defined CONFIG_ARM64)
/*
* We use the memory for the MMU table to hold the pointer
* and convert them later. This works, as everything comes
* down to a 32 bit value.
*/
mmutable_as_array_of_pointers_to_page = (struct page **)mmutable;
#else
mmutable_as_array_of_pointers_to_page = kmalloc(
sizeof(struct page *)*nr_of_pages, GFP_KERNEL | __GFP_ZERO);
if (mmutable_as_array_of_pointers_to_page == NULL) {
ret = -ENOMEM;
goto map_buffer_end;
}
#endif
/* Request comes from user space */
if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
/*
* lock user page in memory, so they do not get swapped
* out.
* REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
* function, maybe it is called fast_gup() in some versions.
* handle user process doing a fork().
* Child should not get things.
* http://osdir.com/ml/linux-media/2009-07/msg00813.html
* http://lwn.net/Articles/275808/
*/
ret = lock_pages(task, virt_addr_page, nr_of_pages,
mmutable_as_array_of_pointers_to_page);
if (ret != 0) {
MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed");
goto map_buffer_end;
}
}
/* Request comes from kernel space(cont buffer) */
else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
void *uaddr = wsm_buffer;
for (i = 0; i < nr_of_pages; i++) {
page = virt_to_page(uaddr);
if (!page) {
MCDRV_DBG_ERROR(mcd, "failed to map address");
return -EINVAL;
}
get_page(page);
mmutable_as_array_of_pointers_to_page[i] = page;
uaddr += PAGE_SIZE;
}
}
/* Request comes from kernel space(vmalloc buffer) */
else {
void *uaddr = wsm_buffer;
for (i = 0; i < nr_of_pages; i++) {
page = vmalloc_to_page(uaddr);
if (!page) {
MCDRV_DBG_ERROR(mcd, "failed to map address");
return -EINVAL;
}
get_page(page);
mmutable_as_array_of_pointers_to_page[i] = page;
uaddr += PAGE_SIZE;
}
}
table->pages = nr_of_pages;
/*
* create MMU Table entries.
* used_mmutable->table contains a list of page pointers here.
* For a proper cleanup we have to ensure that the following
* code either works and used_mmutable contains a valid MMU table
* - or fails and used_mmutable->table contains the list of page
* pointers.
* Any mixed contents will make cleanup difficult.
* Fill the table in reverse order as the table is used as input and
* output.
*/
i = MC_ARM_MMU_TABLE_ENTRIES-1;
do {
if (i < nr_of_pages) {
#ifdef LPAE_SUPPORT
uint64_t pte;
#else
uint32_t pte;
#endif
page = mmutable_as_array_of_pointers_to_page[i];
if (!page) {
MCDRV_DBG_ERROR(mcd, "page address is null");
return -EFAULT;
}
/*
* create MMU table entry, see ARM MMU docu for details
* about flags stored in the lowest 12 bits.
* As a side reference, the Article
* "ARM's multiply-mapped memory mess"
* found in the collection at
* http://lwn.net/Articles/409032/
* is also worth reading.
*/
pte = page_to_phys(page);
#ifdef LPAE_SUPPORT
pte |= MMU_EXT_XN
| MMU_EXT_NG
| MMU_EXT_AF
| MMU_AP_RW_ALL
| MMU_NS
| MMU_CACHEABLE | MMU_BUFFERABLE
| MMU_TYPE_PAGE;
#else
pte |= MMU_EXT_AP1 | MMU_EXT_AP0
| MMU_CACHEABLE | MMU_BUFFERABLE
| MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
#endif /* LPAE_SUPPORT */
/*
* Linux uses different mappings for SMP systems (the
* sharing flag is set for the pte). In order not to
* confuse things too much in MobiCore, make sure the
* shared buffers have the same flags.
* This should also be done on the SWd side.
*/
#ifdef CONFIG_SMP
#ifdef LPAE_SUPPORT
pte |= MMU_EXT_SHARED;
#else
pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
#endif /* LPAE_SUPPORT */
#endif /* CONFIG_SMP */
mmutable->table_entries[i] = pte;
MCDRV_DBG_VERBOSE(mcd, "MMU entry %d: 0x%llx, virt %p",
i, (u64)(pte), page);
} else {
/* ensure rest of table is empty */
mmutable->table_entries[i] = 0;
}
} while (i-- != 0);
map_buffer_end:
#if !(defined LPAE_SUPPORT) && (defined CONFIG_ARM64)
kfree(mmutable_as_array_of_pointers_to_page);
#endif
return ret;
}
/*
* Remove a MMU table in a WSM container. Afterwards the container may be
* released. Assumes the table_lock and the lock is taken.
*/
static void unmap_buffers(struct mc_mmu_table *table)
{
struct mmutable *mmutable;
int i;
if (WARN_ON(!table))
return;
/* found the table, now release the resources. */
MCDRV_DBG_VERBOSE(mcd,
"clear table, phys=0x%llX, nr_of_pages=%d, virt=%p",
(u64)table->phys, table->pages, table->virt);
mmutable = table->virt;
/* release all locked user space pages */
for (i = 0; i < table->pages; i++) {
/* convert physical entries from MMU table to page pointers */
struct page *page = pte_page(mmutable->table_entries[i]);
MCDRV_DBG_VERBOSE(mcd, "MMU entry %d: 0x%llx, virt %p", i,
(u64)(mmutable->table_entries[i]), page);
BUG_ON(!page);
release_page(page);
}
/* remember that all pages have been freed */
table->pages = 0;
}
/* Delete a used MMU table. Assumes the table_lock and the lock is taken */
static void unmap_mmu_table(struct mc_mmu_table *table)
{
/* Check if it's not locked by other processes too! */
if (!atomic_dec_and_test(&table->usage))
return;
/* release if NWd and SWd/MC no longer use it. */
unmap_buffers(table);
free_mmu_table(table);
}
int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle)
{
struct mc_mmu_table *table;
int ret = 0;
if (WARN(!instance, "No instance data available"))
return -EFAULT;
mutex_lock(&mem_ctx.table_lock);
table = find_mmu_table(handle);
if (table == NULL) {
MCDRV_DBG_VERBOSE(mcd, "entry not found");
ret = -EINVAL;
goto err_unlock;
}
if (instance == table->owner) {
/* Prevent double free */
table->owner = NULL;
} else if (!is_daemon(instance)) {
MCDRV_DBG_ERROR(mcd, "instance does not own it");
ret = -EPERM;
goto err_unlock;
}
/* free table (if no further locks exist) */
unmap_mmu_table(table);
err_unlock:
mutex_unlock(&mem_ctx.table_lock);
return ret;
}
int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle)
{
int ret = 0;
struct mc_mmu_table *table = NULL;
if (WARN(!instance, "No instance data available"))
return -EFAULT;
mutex_lock(&mem_ctx.table_lock);
table = find_mmu_table(handle);
if (table == NULL) {
MCDRV_DBG_VERBOSE(mcd, "entry not found %u", handle);
ret = -EINVAL;
goto table_err;
}
if (instance != table->owner && !is_daemon(instance)) {
MCDRV_DBG_ERROR(mcd, "instance does no own it");
ret = -EPERM;
goto table_err;
}
/* lock entry */
atomic_inc(&table->usage);
table_err:
mutex_unlock(&mem_ctx.table_lock);
return ret;
}
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
{
int ret = 0;
struct mc_mmu_table *table;
if (WARN(!instance, "No instance data available"))
return ERR_PTR(-EFAULT);
mutex_lock(&mem_ctx.table_lock);
table = alloc_mmu_table(instance);
if (IS_ERR(table)) {
MCDRV_DBG_ERROR(mcd, "alloc_mmu_table() failed");
ret = -ENOMEM;
goto err_no_mem;
}
/* create the MMU page for the WSM */
ret = map_buffer(task, wsm_buffer, wsm_len, table);
if (ret != 0) {
MCDRV_DBG_ERROR(mcd, "map_buffer() failed");
unmap_mmu_table(table);
goto err_no_mem;
}
MCDRV_DBG_VERBOSE(mcd,
"mapped buffer %p to table with handle %d @ 0x%llX",
wsm_buffer, table->handle, (u64)table->phys);
mutex_unlock(&mem_ctx.table_lock);
return table;
err_no_mem:
mutex_unlock(&mem_ctx.table_lock);
return ERR_PTR(ret);
}
phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd)
{
phys_addr_t ret = 0;
struct mc_mmu_table *table = NULL;
mutex_lock(&mem_ctx.table_lock);
table = find_mmu_table(handle);
if (table == NULL) {
MCDRV_DBG_ERROR(mcd, "entry not found %u", handle);
ret = 0;
goto table_err;
}
/* It's safe here not to lock the instance since the owner of
* the table will be cleared only with the table lock taken */
if (!mc_check_owner_fd(table->owner, fd)) {
MCDRV_DBG_ERROR(mcd, "not valid owner %u", handle);
ret = 0;
goto table_err;
}
ret = table->phys;
table_err:
mutex_unlock(&mem_ctx.table_lock);
return ret;
}
void mc_clean_mmu_tables(void)
{
struct mc_mmu_table *table, *tmp;
mutex_lock(&mem_ctx.table_lock);
/* Check if some WSM is orphaned. */
list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
if (table->owner == NULL) {
/*MCDRV_DBG(mcd,
"cleariM MMU: p=0x%llX pages=%d",
(u64)table->phys,
table->pages);*/
unmap_mmu_table(table);
}
}
mutex_unlock(&mem_ctx.table_lock);
}
void mc_clear_mmu_tables(struct mc_instance *instance)
{
struct mc_mmu_table *table, *tmp;
mutex_lock(&mem_ctx.table_lock);
/* Check if some WSM is still in use. */
list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
if (table->owner == instance) {
/*MCDRV_DBG(mcd, "release WSM MMU: p=0x%llX pages=%d",
(u64)table->phys,
table->pages);*/
/* unlock app usage and free or mark it as orphan */
table->owner = NULL;
unmap_mmu_table(table);
}
}
mutex_unlock(&mem_ctx.table_lock);
}
int mc_init_mmu_tables(void)
{
/* init list for WSM MMU chunks. */
INIT_LIST_HEAD(&mem_ctx.mmu_tables_sets);
/* MMU table descriptor list. */
INIT_LIST_HEAD(&mem_ctx.mmu_tables);
/* MMU free table descriptor list. */
INIT_LIST_HEAD(&mem_ctx.free_mmu_tables);
mutex_init(&mem_ctx.table_lock);
atomic_set(&mem_ctx.table_counter, 1);
return 0;
}
void mc_release_mmu_tables(void)
{
struct mc_mmu_table *table;
/* Check if some WSM is still in use. */
list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
WARN(1, "WSM MMU still in use: phys=0x%llX ,nr_of_pages=%d",
(u64)table->phys, table->pages);
}
}
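To make the page accounting in map_buffer() above concrete, the number of MMU entries needed depends only on the page-aligned span of the buffer. A small worked sketch (the offset, length and helper name are made up):
/* Illustrative sketch of the nr_of_pages computation in map_buffer().
 * Example: a buffer starting 0x234 bytes into a page with wsm_len = 10000
 * gives PAGE_ALIGN(0x234 + 10000) / PAGE_SIZE = 3 pages, well below the
 * 1 MiB (MC_ARM_MMU_TABLE_ENTRIES pages) limit of a single MMU table. */
static inline unsigned int example_nr_of_pages(void *wsm_buffer,
	unsigned int wsm_len)
{
	unsigned int offset =
		(unsigned int)(((unsigned long)wsm_buffer) & (~PAGE_MASK));
	return PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
}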


@@ -1,142 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MEM_H_
#define _MC_MEM_H_
#ifdef LPAE_SUPPORT
/*
* Number of page table entries in one MMU table. This is ARM specific, an
* MMU table covers 2 MiB by using 512 entries referring to 4KiB pages each.
*/
#define MC_ARM_MMU_TABLE_ENTRIES 512
/* ARM level 3 (MMU) table with 512 entries. Size: 4k */
struct mmutable {
uint64_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
};
/* There is 1 table in each page. */
#define MMU_TABLES_PER_PAGE 1
#else
/*
* MobiCore specific page tables for world shared memory.
* Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
* MobiCore uses the default ARM format.
*
* Number of page table entries in one MMU table. This is ARM specific, an
* MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
*/
#define MC_ARM_MMU_TABLE_ENTRIES 256
/* ARM level 2 (MMU) table with 256 entries. Size: 1k */
struct mmutable {
uint32_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
};
/* There are 4 tables in each page. */
#define MMU_TABLES_PER_PAGE 4
#endif
/* Store for MMU_TABLES_PER_PAGE MMU tables in one 4 KiB page */
struct mc_mmu_table_store {
struct mmutable table[MMU_TABLES_PER_PAGE];
};
/* Usage and maintenance information about mc_mmu_table_store */
struct mc_mmu_tables_set {
struct list_head list;
/* kernel virtual address */
struct mc_mmu_table_store *kernel_virt;
/* physical address */
phys_addr_t phys;
/* pointer to page struct */
struct page *page;
/* How many pages from this set are used */
atomic_t used_tables;
};
/*
* MMU table allocated to the Daemon or a TLC describing a world shared
* buffer.
* When users map a malloc()ed area into SWd, a MMU table is allocated.
* In addition, the area of maximum 1MB virtual address space is mapped into
* the MMU table and a handle for this table is returned to the user.
*/
struct mc_mmu_table {
struct list_head list;
/* Table lock */
struct mutex lock;
/* handle as communicated to user mode */
unsigned int handle;
/* Number of references kept to this MMU table */
atomic_t usage;
/* owner of this MMU table */
struct mc_instance *owner;
/* set describing where our MMU table is stored */
struct mc_mmu_tables_set *set;
/* index into MMU table set */
unsigned int idx;
/* size of buffer */
unsigned int pages;
/* virtual address*/
void *virt;
/* physical address */
phys_addr_t phys;
};
/* MobiCore Driver Memory context data. */
struct mc_mem_context {
struct mc_instance *daemon_inst;
/* Backing store for MMU tables */
struct list_head mmu_tables_sets;
/* Bookkeeping for used MMU tables */
struct list_head mmu_tables;
/* Bookkeeping for free MMU tables */
struct list_head free_mmu_tables;
/* mutex to synchronize access to above lists */
struct mutex table_lock;
atomic_t table_counter;
};
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
/* Delete all the MMU tables associated with an instance */
void mc_clear_mmu_tables(struct mc_instance *instance);
/* Release all orphaned MMU tables */
void mc_clean_mmu_tables(void);
/* Delete a used MMU table. */
int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle);
/*
* Lock a MMU table - the daemon adds +1 to refcount of the MMU table
* marking it in use by SWD so it doesn't get released when the TLC dies.
*/
int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle);
/* Return the phys address of MMU table. */
phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd);
/* Release all used MMU tables to Linux memory space */
void mc_release_mmu_tables(void);
/* Initialize all MMU tables structure */
int mc_init_mmu_tables(void);
#endif /* _MC_MEM_H_ */
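To make the geometry above concrete: without LPAE a table holds 256 four-byte entries (1 KiB), so four tables share one 4 KiB page and each table maps 256 x 4 KiB = 1 MiB; with LPAE a table holds 512 eight-byte entries (4 KiB, one table per page) and maps 2 MiB. A hedged compile-time sketch of these invariants, assuming the usual 4 KiB page size and the SZ_* size macros:
/* Illustrative compile-time checks of the table geometry described above. */
#include <linux/bug.h>
#include <linux/sizes.h>
static inline void example_mmu_geometry_checks(void)
{
	/* one store of MMU_TABLES_PER_PAGE tables fills at most one page */
	BUILD_BUG_ON(sizeof(struct mc_mmu_table_store) > SZ_4K);
#ifdef LPAE_SUPPORT
	/* 512 entries x 4 KiB mapped per entry = 2 MiB per table */
	BUILD_BUG_ON(MC_ARM_MMU_TABLE_ENTRIES * SZ_4K != 2 * SZ_1M);
#else
	/* 256 entries x 4 KiB mapped per entry = 1 MiB per table */
	BUILD_BUG_ON(MC_ARM_MMU_TABLE_ENTRIES * SZ_4K != SZ_1M);
#endif
}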


@@ -1,421 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore Driver Kernel Module.
*
* This module is written as a Linux device driver.
* This driver represents the command proxy on the lowest layer, from the
* secure world to the non secure world, and vice versa.
* This driver is located in the non secure world (Linux).
* This driver offers IOCTL commands, for access to the secure world, and has
* the interface from the secure world to the normal world.
* The access to the driver is possible with a file descriptor,
* which has to be created by the fd = open(/dev/mobicore) command.
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include "main.h"
#include "fastcall.h"
#include "ops.h"
#include "mem.h"
#include "pm.h"
#include "debug.h"
/* MobiCore context data */
static struct mc_context *ctx;
#ifdef TBASE_CORE_SWITCHER
static uint32_t active_cpu;
#ifdef TEST
/*
* Normal world <t-base core info for testing.
*/
module_param(active_cpu, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(active_cpu, "Active <t-base Core");
#endif
static int mobicore_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
static struct notifier_block mobicore_cpu_notifer = {
.notifier_call = mobicore_cpu_callback,
};
#endif
static inline long smc(union fc_generic *fc)
{
/* If we request sleep, yields must be filtered out as they
* make no sense */
if (ctx->mcp)
if (ctx->mcp->flags.sleep_mode.sleep_req) {
if (fc->as_in.cmd == MC_SMC_N_YIELD)
return MC_FC_RET_ERR_INVALID;
}
return _smc(fc);
}
struct fastcall_work {
#ifdef MC_FASTCALL_WORKER_THREAD
struct kthread_work work;
#else
struct work_struct work;
#endif
void *data;
};
#ifdef MC_FASTCALL_WORKER_THREAD
static void fastcall_work_func(struct kthread_work *work);
#else
static void fastcall_work_func(struct work_struct *work);
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
static struct task_struct *fastcall_thread;
static DEFINE_KTHREAD_WORKER(fastcall_worker);
bool mc_fastcall(void *data)
{
struct fastcall_work fc_work = {
KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
.data = data,
};
if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
return false;
flush_kthread_work(&fc_work.work);
return true;
}
int mc_fastcall_init(struct mc_context *context)
{
int ret = 0;
ctx = context;
fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
"mc_fastcall");
if (IS_ERR(fastcall_thread)) {
ret = PTR_ERR(fastcall_thread);
fastcall_thread = NULL;
MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)", ret);
return ret;
}
wake_up_process(fastcall_thread);
/* this thread MUST run on CPU 0 at startup */
set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
#ifdef TBASE_CORE_SWITCHER
register_cpu_notifier(&mobicore_cpu_notifier);
#endif
return 0;
}
void mc_fastcall_destroy(void)
{
if (!IS_ERR_OR_NULL(fastcall_thread)) {
kthread_stop(fastcall_thread);
fastcall_thread = NULL;
}
}
#else
bool mc_fastcall(void *data)
{
struct fastcall_work work = {
.data = data,
};
INIT_WORK_ONSTACK(&work.work, fastcall_work_func);
if (!schedule_work_on(0, &work.work))
return false;
flush_work(&work.work);
return true;
}
int mc_fastcall_init(struct mc_context *context)
{
ctx = context;
return 0;
};
void mc_fastcall_destroy(void) {};
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
static void fastcall_work_func(struct kthread_work *work)
#else
static void fastcall_work_func(struct work_struct *work)
#endif
{
struct fastcall_work *fc_work =
container_of(work, struct fastcall_work, work);
union fc_generic *fc_generic = fc_work->data;
#ifdef TBASE_CORE_SWITCHER
uint32_t cpu_swap = 0, new_cpu;
uint32_t cpu_id[] = CPU_IDS;
#endif
/* Bail out before taking the crypto clock so it is never leaked */
if (fc_generic == NULL)
return;
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
mc_pm_clock_enable();
#endif
#ifdef TBASE_CORE_SWITCHER
if (fc_generic->as_in.cmd == MC_FC_SWITCH_CORE) {
cpu_swap = 1;
new_cpu = fc_generic->as_in.param[0];
fc_generic->as_in.param[0] = cpu_id[fc_generic->as_in.param[0]];
}
#endif
smc(fc_work->data);
#ifdef TBASE_CORE_SWITCHER
if (cpu_swap) {
if (fc_generic->as_out.ret == 0) {
cpumask_t cpu;
active_cpu = new_cpu;
MCDRV_DBG(mcd, "CoreSwap ok %d -> %d\n",
raw_smp_processor_id(), active_cpu);
cpumask_clear(&cpu);
cpumask_set_cpu(active_cpu, &cpu);
#ifdef MC_FASTCALL_WORKER_THREAD
set_cpus_allowed(fastcall_thread, cpu);
#endif
} else {
MCDRV_DBG(mcd, "CoreSwap failed %d -> %d\n",
raw_smp_processor_id(),
fc_generic->as_in.param[0]);
}
}
#endif
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
mc_pm_clock_disable();
#endif
}
int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
{
int ret = 0;
union mc_fc_info fc_info;
MCDRV_DBG_VERBOSE(mcd, "enter");
memset(&fc_info, 0, sizeof(fc_info));
fc_info.as_in.cmd = MC_FC_INFO;
fc_info.as_in.ext_info_id = ext_info_id;
MCDRV_DBG(mcd, "<- cmd=0x%08x, ext_info_id=0x%08x",
fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
mc_fastcall(&(fc_info.as_generic));
MCDRV_DBG(mcd,
"-> r=0x%08x ret=0x%08x state=0x%08x ext_info=0x%08x",
fc_info.as_out.resp,
fc_info.as_out.ret,
fc_info.as_out.state,
fc_info.as_out.ext_info);
ret = convert_fc_ret(fc_info.as_out.ret);
*state = fc_info.as_out.state;
*ext_info = fc_info.as_out.ext_info;
MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
return ret;
}
#ifdef TBASE_CORE_SWITCHER
uint32_t mc_active_core(void)
{
return active_cpu;
}
int mc_switch_core(uint32_t core_num)
{
int32_t ret = 0;
union mc_fc_swich_core fc_switch_core;
if (!cpu_online(core_num))
return 1;
MCDRV_DBG_VERBOSE(mcd, "enter\n");
memset(&fc_switch_core, 0, sizeof(fc_switch_core));
fc_switch_core.as_in.cmd = MC_FC_SWITCH_CORE;
if (core_num < COUNT_OF_CPUS)
fc_switch_core.as_in.core_id = core_num;
else
fc_switch_core.as_in.core_id = 0;
MCDRV_DBG(mcd,
"<- cmd=0x%08x, core_id=0x%08x\n",
fc_switch_core.as_in.cmd,
fc_switch_core.as_in.core_id);
MCDRV_DBG(mcd,
"<- core_num=0x%08x, active_cpu=0x%08x\n",
core_num, active_cpu);
mc_fastcall(&(fc_switch_core.as_generic));
ret = convert_fc_ret(fc_switch_core.as_out.ret);
MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
return ret;
}
void mc_cpu_offfline(int cpu)
{
if (active_cpu == cpu) {
int i;
/* Choose the first online CPU and switch! */
for_each_online_cpu(i) {
if (i == cpu) {
MCDRV_DBG(mcd, "Skipping CPU %d\n", cpu);
continue;
}
MCDRV_DBG(mcd, "CPU %d is dying, switching to %d\n",
cpu, i);
mc_switch_core(i);
break;
}
} else {
MCDRV_DBG(mcd, "not active CPU, no action taken\n");
}
}
static int mobicore_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
dev_info(mcd, "Cpu %u is going to die\n", cpu);
mc_cpu_offfline(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
dev_info(mcd, "Cpu %u is dead\n", cpu);
break;
}
return NOTIFY_OK;
}
#endif
/* Yield to MobiCore */
int mc_yield(void)
{
int ret = 0;
union fc_generic yield;
MCDRV_DBG_VERBOSE(mcd, "enter");
memset(&yield, 0, sizeof(yield));
yield.as_in.cmd = MC_SMC_N_YIELD;
mc_fastcall(&yield);
ret = convert_fc_ret(yield.as_out.ret);
return ret;
}
/* call common notify */
int mc_nsiq(void)
{
int ret = 0;
union fc_generic nsiq;
MCDRV_DBG_VERBOSE(mcd, "enter");
memset(&nsiq, 0, sizeof(nsiq));
nsiq.as_in.cmd = MC_SMC_N_SIQ;
mc_fastcall(&nsiq);
ret = convert_fc_ret(nsiq.as_out.ret);
return ret;
}
/* call common notify */
int _nsiq(void)
{
int ret = 0;
union fc_generic nsiq;
MCDRV_DBG_VERBOSE(mcd, "enter");
memset(&nsiq, 0, sizeof(nsiq));
nsiq.as_in.cmd = MC_SMC_N_SIQ;
_smc(&nsiq);
ret = convert_fc_ret(nsiq.as_out.ret);
return ret;
}
/* Call the INIT fastcall to setup MobiCore initialization */
int mc_init(phys_addr_t base, uint32_t nq_length,
uint32_t mcp_offset, uint32_t mcp_length)
{
int ret = 0;
union mc_fc_init fc_init;
uint64_t base_addr = (uint64_t)base;
uint32_t base_high = (uint32_t)(base_addr >> 32);
MCDRV_DBG_VERBOSE(mcd, "enter");
memset(&fc_init, 0, sizeof(fc_init));
fc_init.as_in.cmd = MC_FC_INIT;
/* base address of mci buffer 4KB aligned */
fc_init.as_in.base = (uint32_t)base_addr;
/* notification buffer start/length [16:16] [start, length] */
fc_init.as_in.nq_info = ((base_high & 0xFFFF) << 16) |
(nq_length & 0xFFFF);
/* mcp buffer start/length [16:16] [start, length] */
fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
/*
* Set KMOD notification queue to start of MCI
* mciInfo was already set up in mmap
*/
MCDRV_DBG(mcd,
"cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
fc_init.as_in.mcp_info);
mc_fastcall(&fc_init.as_generic);
MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x", fc_init.as_out.resp,
fc_init.as_out.ret);
ret = convert_fc_ret(fc_init.as_out.ret);
MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
return ret;
}
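/*
 * Worked example of the packing above (illustrative values only): with
 * base_high = 0x1 and nq_length = 0x100, nq_info becomes
 * ((0x1 & 0xFFFF) << 16) | (0x100 & 0xFFFF) = 0x00010100; with
 * mcp_offset = 0x400 and mcp_length = 0x200, mcp_info becomes
 * (0x400 << 16) | (0x200 & 0xFFFF) = 0x04000200.
 */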
/* Return MobiCore driver version */
uint32_t mc_get_version(void)
{
MCDRV_DBG(mcd, "MobiCore driver version is %i.%i",
MCDRVMODULEAPI_VERSION_MAJOR,
MCDRVMODULEAPI_VERSION_MINOR);
return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
MCDRVMODULEAPI_VERSION_MINOR);
}

View File

@ -1,37 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_OPS_H_
#define _MC_OPS_H_
#include <linux/workqueue.h>
#include "fastcall.h"
int mc_yield(void);
int mc_nsiq(void);
int _nsiq(void);
uint32_t mc_get_version(void);
int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info);
int mc_init(phys_addr_t base, uint32_t nq_length, uint32_t mcp_offset,
uint32_t mcp_length);
#ifdef TBASE_CORE_SWITCHER
int mc_switch_core(uint32_t core_num);
#endif
bool mc_fastcall(void *data);
int mc_fastcall_init(struct mc_context *context);
void mc_fastcall_destroy(void);
#endif /* _MC_OPS_H_ */

View File

@ -1,55 +0,0 @@
/*
* Header file for the MobiCore Driver Kernel Module,
* its internal structures and defines.
*
* <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
* <-- Copyright Trustonic Limited 2013 -->
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _MC_PLATFORM_H_
#define _MC_PLATFORM_H_
/* MobiCore Interrupt for Qualcomm */
#define MC_INTR_SSIQ 280
/* Use SMC for fastcalls */
#define MC_SMC_FASTCALL
/*--------------- Implementation -------------- */
#include <mach/scm.h>
/* from following file */
#define SCM_SVC_MOBICORE 250
#define SCM_CMD_MOBICORE 1
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len);
static inline int smc_fastcall(void *fc_generic, size_t size)
{
return scm_call(SCM_SVC_MOBICORE, SCM_CMD_MOBICORE,
fc_generic, size,
fc_generic, size);
}
/* Enable mobicore mem traces */
#if defined(DEBUG)
#define MC_MEM_TRACES
#endif
/* Enable the use of vm_unmap instead of the deprecated do_munmap
* and other 3.7 features
*/
#ifndef CONFIG_ARCH_MSM8960
#define MC_VM_UNMAP
#endif
#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8226)
/* Perform clock enable/disable */
#define MC_CRYPTO_CLOCK_MANAGEMENT
#endif
#endif /* _MC_PLATFORM_H_ */

View File

@ -1,130 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Header file of MobiCore Driver Kernel Module Platform
* specific structures
*
* Internal structures of the McDrvModule
*
* Header file the MobiCore Driver Kernel Module,
* its internal structures and defines.
*/
#ifndef _MC_PLATFORM_H_
#define _MC_PLATFORM_H_
/* MobiCore Interrupt for Qualcomm */
#define MC_INTR_SSIQ 280
/* Use SMC for fastcalls */
#define MC_SMC_FASTCALL
/*--------------- Implementation -------------- */
#if defined(CONFIG_ARCH_APQ8084) || defined(CONFIG_ARCH_MSM8916) || \
defined(CONFIG_ARCH_MSM8994) || defined(CONFIG_ARCH_MSM8909)
#include <soc/qcom/scm.h>
#if defined(CONFIG_ARM64) || defined(CONFIG_ARCH_MSM8916)
#include <soc/qcom/qseecomi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <linux/errno.h>
#define SCM_MOBIOS_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) \
| 0x33000000)
#define TZ_EXECUTIVE_EXT_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_4( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#endif
#else
#include <mach/scm.h>
#endif
/* from following file */
#define SCM_SVC_MOBICORE 250
#define SCM_CMD_MOBICORE 1
static inline int smc_fastcall(void *fc_generic, size_t size)
{
#if defined(CONFIG_ARCH_APQ8084) || defined(CONFIG_ARCH_MSM8916) || \
defined(CONFIG_ARCH_MSM8994)
if (is_scm_armv8()) {
struct scm_desc desc = {0};
int ret;
void *scm_buf = NULL;
scm_buf = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
if (!scm_buf)
return -ENOMEM;
memcpy(scm_buf, fc_generic, size);
dmac_flush_range(scm_buf, scm_buf + size);
desc.arginfo = TZ_EXECUTIVE_EXT_ID_PARAM_ID;
desc.args[0] = virt_to_phys(scm_buf);
desc.args[1] = (u32)size;
desc.args[2] = virt_to_phys(scm_buf);
desc.args[3] = (u32)size;
ret = scm_call2(
SCM_MOBIOS_FNID(SCM_SVC_MOBICORE, SCM_CMD_MOBICORE),
&desc);
dmac_flush_range(scm_buf, scm_buf + size);
memcpy(fc_generic, scm_buf, size);
kfree(scm_buf);
return ret;
} else {
#endif
return scm_call(SCM_SVC_MOBICORE, SCM_CMD_MOBICORE,
fc_generic, size,
fc_generic, size);
#if defined(CONFIG_ARCH_APQ8084) || defined(CONFIG_ARCH_MSM8916) || \
defined(CONFIG_ARCH_MSM8994)
}
#endif
}
/* Enable mobicore mem traces */
#define MC_MEM_TRACES
/* Enable the use of vm_unmap instead of the deprecated do_munmap
* and other 3.7 features
*/
#ifndef CONFIG_ARCH_MSM8960
#define MC_VM_UNMAP
#endif
/*
* Perform crypto clock enable/disable
*/
#if !defined(CONFIG_ARCH_MSM8960) && !defined(CONFIG_ARCH_MSM8994)
#define MC_CRYPTO_CLOCK_MANAGEMENT
#endif
#if defined(CONFIG_ARCH_MSM8916) || defined(CONFIG_ARCH_MSM8909)
#define MC_USE_DEVICE_TREE
#endif
#endif /* _MC_PLATFORM_H_ */

View File

@ -1,291 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore Driver Kernel Module.
* This module is written as a Linux device driver.
* This driver represents the command proxy on the lowest layer, from the
* secure world to the non secure world, and vice versa.
* This driver is located in the non secure world (Linux).
* This driver offers IOCTL commands, for access to the secure world, and has
* the interface from the secure world to the normal world.
* The access to the driver is possible with a file descriptor,
* which has to be created by the fd = open(/dev/mobicore) command.
*/
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include "main.h"
#include "pm.h"
#include "fastcall.h"
#include "ops.h"
#include "logging.h"
#include "debug.h"
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
#include <linux/clk.h>
#include <linux/err.h>
struct clk *mc_ce_iface_clk = NULL;
struct clk *mc_ce_core_clk = NULL;
struct clk *mc_ce_bus_clk = NULL;
#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
#if defined(MC_CRYPTO_CLOCK_MANAGEMENT) && defined(MC_USE_DEVICE_TREE)
#include <linux/of.h>
#define QSEE_CE_CLK_100MHZ 100000000
struct clk *mc_ce_core_src_clk = NULL;
#endif /* MC_CRYPTO_CLOCK_MANAGEMENT && MC_USE_DEVICE_TREE */
#ifdef MC_PM_RUNTIME
static struct mc_context *ctx;
static bool sleep_ready(void)
{
if (!ctx->mcp)
return false;
if (!(ctx->mcp->flags.sleep_mode.ready_to_sleep & READY_TO_SLEEP))
return false;
return true;
}
static void mc_suspend_handler(struct work_struct *work)
{
if (!ctx->mcp)
return;
ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
_nsiq();
}
DECLARE_WORK(suspend_work, mc_suspend_handler);
static inline void dump_sleep_params(struct mc_flags *flags)
{
MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
MCDRV_DBG(mcd,
"MobiCore Request Sleep=%d!", flags->sleep_mode.sleep_req);
MCDRV_DBG(mcd,
"MobiCore Sleep Ready=%d!", flags->sleep_mode.ready_to_sleep);
}
static int mc_suspend_notifier(struct notifier_block *nb,
unsigned long event, void *dummy)
{
struct mc_mcp_buffer *mcp = ctx->mcp;
/* We have nothing to say if MobiCore is not initialized */
if (!mcp)
return 0;
#ifdef MC_MEM_TRACES
mobicore_log_read();
#endif /* MC_MEM_TRACES */
switch (event) {
case PM_SUSPEND_PREPARE:
/*
* Make sure we have finished all the work otherwise
* we end up in a race condition
*/
cancel_work_sync(&suspend_work);
/*
* We can't go to sleep if MobiCore is not IDLE
* or not Ready to sleep
*/
dump_sleep_params(&mcp->flags);
if (!sleep_ready()) {
ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
schedule_work_on(0, &suspend_work);
flush_work(&suspend_work);
if (!sleep_ready()) {
dump_sleep_params(&mcp->flags);
ctx->mcp->flags.sleep_mode.sleep_req = 0;
MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
return NOTIFY_BAD;
}
}
break;
case PM_POST_SUSPEND:
MCDRV_DBG(mcd, "Resume MobiCore system!");
ctx->mcp->flags.sleep_mode.sleep_req = 0;
break;
default:
break;
}
return 0;
}
static struct notifier_block mc_notif_block = {
.notifier_call = mc_suspend_notifier,
};
int mc_pm_initialize(struct mc_context *context)
{
int ret = 0;
ctx = context;
ret = register_pm_notifier(&mc_notif_block);
if (ret)
MCDRV_DBG_ERROR(mcd, "device pm register failed");
return ret;
}
int mc_pm_free(void)
{
int ret = unregister_pm_notifier(&mc_notif_block);
if (ret)
MCDRV_DBG_ERROR(mcd, "device pm unregister failed");
return ret;
}
bool mc_pm_sleep_ready(void)
{
if (!ctx)
return true;
return sleep_ready();
}
#endif /* MC_PM_RUNTIME */
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
int mc_pm_clock_initialize(void)
{
int ret = 0;
#ifdef MC_USE_DEVICE_TREE
/* Get core clk src */
mc_ce_core_src_clk = clk_get(mcd, "core_clk_src");
if (IS_ERR(mc_ce_core_src_clk)) {
ret = PTR_ERR(mc_ce_core_src_clk);
MCDRV_DBG_ERROR(mcd,
"cannot get core clock src with error: %d",
ret);
goto error;
} else {
int ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
if (of_property_read_u32(mcd->of_node,
"qcom,ce-opp-freq",
&ce_opp_freq_hz)) {
ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
MCDRV_DBG_ERROR(mcd,
"cannot get ce clock frequency. Using %d",
ce_opp_freq_hz);
}
ret = clk_set_rate(mc_ce_core_src_clk, ce_opp_freq_hz);
if (ret) {
clk_put(mc_ce_core_src_clk);
mc_ce_core_src_clk = NULL;
MCDRV_DBG_ERROR(mcd, "cannot set core clock src rate");
ret = -EIO;
goto error;
}
}
#endif /* MC_USE_DEVICE_TREE */
/* Get core clk */
mc_ce_core_clk = clk_get(mcd, "core_clk");
if (IS_ERR(mc_ce_core_clk)) {
ret = PTR_ERR(mc_ce_core_clk);
MCDRV_DBG_ERROR(mcd, "cannot get core clock");
goto error;
}
/* Get Interface clk */
mc_ce_iface_clk = clk_get(mcd, "iface_clk");
if (IS_ERR(mc_ce_iface_clk)) {
clk_put(mc_ce_core_clk);
ret = PTR_ERR(mc_ce_iface_clk);
MCDRV_DBG_ERROR(mcd, "cannot get iface clock");
goto error;
}
/* Get AXI clk */
mc_ce_bus_clk = clk_get(mcd, "bus_clk");
if (IS_ERR(mc_ce_bus_clk)) {
clk_put(mc_ce_iface_clk);
clk_put(mc_ce_core_clk);
ret = PTR_ERR(mc_ce_bus_clk);
MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock");
goto error;
}
MCDRV_DBG(mcd, "obtained crypto clocks");
return ret;
error:
mc_ce_core_clk = NULL;
mc_ce_iface_clk = NULL;
mc_ce_bus_clk = NULL;
return ret;
}
void mc_pm_clock_finalize(void)
{
if (mc_ce_bus_clk != NULL)
clk_put(mc_ce_bus_clk);
if (mc_ce_iface_clk != NULL)
clk_put(mc_ce_iface_clk);
if (mc_ce_core_clk != NULL)
clk_put(mc_ce_core_clk);
#ifdef MC_USE_DEVICE_TREE
if (mc_ce_core_src_clk != NULL)
clk_put(mc_ce_core_src_clk);
#endif /* MC_USE_DEVICE_TREE */
}
int mc_pm_clock_enable(void)
{
int rc = 0;
rc = clk_prepare_enable(mc_ce_core_clk);
if (rc) {
MCDRV_DBG_ERROR(mcd, "cannot enable clock");
} else {
rc = clk_prepare_enable(mc_ce_iface_clk);
if (rc) {
clk_disable_unprepare(mc_ce_core_clk);
MCDRV_DBG_ERROR(mcd, "cannot enable clock");
} else {
rc = clk_prepare_enable(mc_ce_bus_clk);
if (rc) {
clk_disable_unprepare(mc_ce_iface_clk);
MCDRV_DBG_ERROR(mcd, "cannot enable clock");
}
}
}
return rc;
}
void mc_pm_clock_disable(void)
{
if (mc_ce_iface_clk != NULL)
clk_disable_unprepare(mc_ce_iface_clk);
if (mc_ce_core_clk != NULL)
clk_disable_unprepare(mc_ce_core_clk);
if (mc_ce_bus_clk != NULL)
clk_disable_unprepare(mc_ce_bus_clk);
}
#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */

View File

@ -1,44 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_PM_H_
#define _MC_PM_H_
#include "main.h"
#define NO_SLEEP_REQ 0
#define REQ_TO_SLEEP 1
#define NORMAL_EXECUTION 0
#define READY_TO_SLEEP 1
/* How much time after resume the daemon should backoff */
#define DAEMON_BACKOFF_TIME 500
/* Initialize Power Management */
int mc_pm_initialize(struct mc_context *context);
/* Free all Power Management resources*/
int mc_pm_free(void);
/* Initialize secure crypto clocks */
int mc_pm_clock_initialize(void);
/* Free secure crypto clocks */
void mc_pm_clock_finalize(void);
/* Enable secure crypto clocks */
int mc_pm_clock_enable(void);
/* Disable secure crypto clocks */
void mc_pm_clock_disable(void);
/* Test if sleep is possible */
bool mc_pm_sleep_ready(void);
#endif /* _MC_PM_H_ */

View File

@ -1,88 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Interface to be used by module MobiCoreKernelAPI.
*/
#ifndef _MC_KERNEL_API_H_
#define _MC_KERNEL_API_H_
struct mc_instance;
/*
* mobicore_open() - Initialize a new MobiCore API instance object
*
* Returns a MobiCore Instance or NULL if no allocation was possible.
*/
struct mc_instance *mobicore_open(void);
/*
* mobicore_release() - Release a MobiCore instance object
* @instance: MobiCore instance
*
* Returns 0 if OK or a negative error code
*/
int mobicore_release(struct mc_instance *instance);
/*
* mobicore_allocate_wsm() - Allocate MobiCore WSM
* @instance: instance data for MobiCore Daemon and TLCs
* @requested_size: memory size requested in bytes
* @handle: pointer to handle
* @virt_kernel_addr: returned kernel virtual start address of the buffer
*
* Returns 0 if OK
*/
int mobicore_allocate_wsm(struct mc_instance *instance,
unsigned long requested_size, uint32_t *handle,
void **virt_kernel_addr);
/*
* mobicore_free() - Free a WSM buffer allocated with mobicore_allocate_wsm
* @instance: instance data for MobiCore Daemon and TLCs
* @handle: handle of the buffer
*
* Returns 0 if OK
*/
int mobicore_free_wsm(struct mc_instance *instance, uint32_t handle);
/*
* mobicore_map_vmem() - Map a virtual memory buffer structure to Mobicore
* @instance: instance data for MobiCore Daemon and TLCs
* @addr: address of the buffer (NB it must be kernel virtual!)
* @len: buffer length (in bytes)
* @handle: unique handle
*
* Returns 0 if no error
*/
int mobicore_map_vmem(struct mc_instance *instance, void *addr,
uint32_t len, uint32_t *handle);
/*
* mobicore_unmap_vmem() - Unmap a virtual memory buffer from MobiCore
* @instance: instance data for MobiCore Daemon and TLCs
* @handle: unique handle
*
* Returns 0 if no error
*/
int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
/*
* mobicore_sleep_ready() - Test if mobicore can sleep
*
* Returns true if mobicore can sleep, false if it can't sleep
*/
bool mobicore_sleep_ready(void);
#endif /* _MC_KERNEL_API_H_ */
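/*
 * Illustrative sketch (not part of the original header): how an in-kernel
 * client could drive the API declared above - open an instance, allocate a
 * WSM buffer, map an existing kernel buffer, then tear everything down. The
 * function name and the PAGE_SIZE sizes are examples; <linux/slab.h> is
 * assumed for kmalloc()/kfree().
 */
static int example_kernel_client(void)
{
        struct mc_instance *inst;
        uint32_t wsm_handle, vmem_handle;
        void *wsm_kvaddr, *buf;
        int ret;

        inst = mobicore_open();
        if (!inst)
                return -ENODEV;

        /* Allocate one page of world shared memory */
        ret = mobicore_allocate_wsm(inst, PAGE_SIZE, &wsm_handle, &wsm_kvaddr);
        if (ret)
                goto out_release;

        /* Map an existing kernel virtual buffer to MobiCore */
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_free_wsm;
        }
        ret = mobicore_map_vmem(inst, buf, PAGE_SIZE, &vmem_handle);
        if (ret)
                goto out_free_buf;

        /* ... exchange data with the secure world here ... */

        mobicore_unmap_vmem(inst, vmem_handle);
out_free_buf:
        kfree(buf);
out_free_wsm:
        mobicore_free_wsm(inst, wsm_handle);
out_release:
        mobicore_release(inst);
        return ret;
}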

View File

@ -1,207 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LINUX_H_
#define _MC_LINUX_H_
#include "version.h"
#ifndef __KERNEL__
#include <stdint.h>
#endif
#define MC_ADMIN_DEVNODE "mobicore"
#define MC_USER_DEVNODE "mobicore-user"
/*
* Data exchange structure of the MC_DRV_MODULE_INIT ioctl command.
* INIT request data to SWD
*/
struct mc_ioctl_init {
/* length of notification queue */
uint32_t nq_length;
/* mcp buffer start/length [16:16] [start, length] */
uint32_t mcp_offset;
/* length of mcp buffer */
uint32_t mcp_length;
};
/*
* Data exchange structure of the MC_DRV_MODULE_INFO ioctl command.
* INFO request data to the SWD
*/
struct mc_ioctl_info {
uint32_t ext_info_id; /* extended info ID */
uint32_t state; /* state */
uint32_t ext_info; /* extended info */
};
/*
* Data exchange structure of the MC_IO_MAP_WSM and MC_IO_MAP_MCI commands.
*
* Allocate a contiguous memory buffer for a process.
* The physical address can then be used for later calls to mmap.
* The handle can be used to communicate about this buffer to the Daemon.
* For MC_IO_MAP_MCI command, the reused field indicates that MCI was set up
* already. I.e. Daemon was restarted.
*/
struct mc_ioctl_map {
uint32_t len; /* Buffer length */
uint32_t handle; /* WSM handle */
uint64_t phys_addr; /* physical address of WSM (or 0) */
uint32_t rfu;
bool reused; /* if WSM memory was reused, or new allocated */
};
/*
* Data exchange structure of the MC_IO_REG_WSM command.
*
* Allocates a physical MMU table and maps the buffer into this page.
* Returns the physical address of the MMU table.
* The page alignment will be created and the appropriate pSize and pOffsetMMU
* will be updated to the values actually used.
*
* We assume the 64 bit compatible one to be the default and the
* 32 bit one to be the compat one but we must serve both of them.
*/
struct mc_compat_ioctl_reg_wsm {
uint32_t buffer; /* base address of the virtual address */
uint32_t len; /* size of the virtual address space */
uint32_t pid; /* process id */
uint32_t handle; /* driver handle for locked memory */
uint64_t table_phys; /* physical address of the MMU table */
};
struct mc_ioctl_reg_wsm {
uint64_t buffer; /* base address of the virtual address */
uint32_t len; /* size of the virtual address space */
uint32_t pid; /* process id */
uint32_t handle; /* driver handle for locked memory */
uint64_t table_phys;/* physical address of the MMU table */
};
/*
* Data exchange structure of the MC_IO_RESOLVE_CONT_WSM ioctl command.
*/
struct mc_ioctl_resolv_cont_wsm {
/* driver handle for buffer */
uint32_t handle;
/* length memory */
uint32_t length;
/* base address of memory */
uint64_t phys;
/* fd to owner of the buffer */
int32_t fd;
};
/*
* Data exchange structure of the MC_IO_RESOLVE_WSM ioctl command.
*/
struct mc_ioctl_resolv_wsm {
/* driver handle for buffer */
uint32_t handle;
/* fd to owner of the buffer */
int32_t fd;
/* base address of memory */
uint64_t phys;
};
/*
* defines for the ioctl mobicore driver module function call from user space.
*/
/* MobiCore IOCTL magic number */
#define MC_IOC_MAGIC 'M'
#define MC_IO_INIT _IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_init)
#define MC_IO_INFO _IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_info)
#define MC_IO_VERSION _IOR(MC_IOC_MAGIC, 2, uint32_t)
/*
* ioctl parameter to send the YIELD command to the SWD.
* Only possible in Privileged Mode.
* ioctl(fd, MC_DRV_MODULE_YIELD)
*/
#define MC_IO_YIELD _IO(MC_IOC_MAGIC, 3)
/*
* ioctl parameter to send the NSIQ signal to the SWD.
* Only possible in Privileged Mode
* ioctl(fd, MC_DRV_MODULE_NSIQ)
*/
#define MC_IO_NSIQ _IO(MC_IOC_MAGIC, 4)
/*
* Frees memory that was previously allocated by the driver's mmap
* command. The parameter must be the mmapped address.
* The internal instance data for this address is deleted, and for each
* associated memory page the reserved bit is cleared (ClearPageReserved).
* Usage: ioctl(fd, MC_DRV_MODULE_FREE, &address), with address being of
* type long
*/
#define MC_IO_FREE _IO(MC_IOC_MAGIC, 5)
/*
* Creates a MMU Table of the given base address and the size of the
* data.
* Parameter: mc_ioctl_reg_wsm
*
* Since the encoded ioctl number also includes the size of the structure,
* it is safe to use the same ID (6) for both
*/
#define MC_IO_REG_WSM _IOWR(MC_IOC_MAGIC, 6, struct mc_ioctl_reg_wsm)
#define MC_COMPAT_REG_WSM _IOWR(MC_IOC_MAGIC, 6, \
struct mc_compat_ioctl_reg_wsm)
#define MC_IO_UNREG_WSM _IO(MC_IOC_MAGIC, 7)
#define MC_IO_LOCK_WSM _IO(MC_IOC_MAGIC, 8)
#define MC_IO_UNLOCK_WSM _IO(MC_IOC_MAGIC, 9)
/*
* Allocate contiguous memory for a process for later mapping with mmap.
* MC_IO_MAP_WSM usual operation, pages are registered in
* device structure and freed later.
* MC_IO_MAP_MCI get Instance of MCI, allocates or mmaps
* the MCI to daemon
*/
#define MC_IO_MAP_WSM _IOWR(MC_IOC_MAGIC, 11, struct mc_ioctl_map)
#define MC_IO_MAP_MCI _IOWR(MC_IOC_MAGIC, 12, struct mc_ioctl_map)
/*
* Clean orphaned WSM buffers. Only available to the daemon and should
* only be carried out if the TLC crashes or otherwise calls exit() in
* an unexpected manner.
* The clean is needed together with the lock/unlock mechanism so that the
* daemon has clear control of the mapped buffers and can close a Trustlet
* before releasing all the WSM buffers; otherwise the Trustlet would still
* be able to write to memory areas that may already belong to the kernel
*/
#define MC_IO_CLEAN_WSM _IO(MC_IOC_MAGIC, 14)
/*
* Get MMU phys address of a buffer handle allocated to the user.
* Only available to the daemon.
*/
#define MC_IO_RESOLVE_WSM _IOWR(MC_IOC_MAGIC, 15, \
struct mc_ioctl_resolv_wsm)
/*
* Get the phys address & length of an allocated contiguous buffer.
* Only available to the daemon */
#define MC_IO_RESOLVE_CONT_WSM _IOWR(MC_IOC_MAGIC, 16, \
struct mc_ioctl_resolv_cont_wsm)
/*
* Setup the mem traces when called.
* Only available to the daemon */
#define MC_IO_LOG_SETUP _IO(MC_IOC_MAGIC, 17)
#endif /* _MC_LINUX_H_ */
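/*
 * Illustrative user-space sketch (not part of the original header): querying
 * the driver version through the ioctl interface defined above. The device
 * node path "/dev/" MC_USER_DEVNODE is an assumption about how the node is
 * created; privileged commands such as MC_IO_INIT would go through the admin
 * node instead.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int example_query_version(void)
{
        uint32_t version = 0;
        int fd = open("/dev/" MC_USER_DEVNODE, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, MC_IO_VERSION, &version) == 0)
                printf("MobiCore driver interface version 0x%08x\n", version);
        close(fd);
        return 0;
}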

View File

@ -1,21 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_DRV_VERSION_H_
#define _MC_DRV_VERSION_H_
#define MCDRVMODULEAPI_VERSION_MAJOR 1
#define MCDRVMODULEAPI_VERSION_MINOR 1
#endif /* _MC_DRV_VERSION_H_ */

View File

@ -1,41 +0,0 @@
#
# this makefile is called from the kernel make system
ifeq ($(MODE),release)
ccflags-y += -O2 -DNDEBUG
else # DEBUG
# "-O" is needed to expand inlines
ccflags-y += -O -g3 -DDEBUG
endif # DEBUG/RELEASE
ifdef MOBICORE_CFLAGS
ccflags-y +=$(MOBICORE_CFLAGS)
endif
#Set the extra symbols
ifdef MCDRV_SYMBOLS_FILE
KBUILD_EXTRA_SYMBOLS=$(MCDRV_SYMBOLS_FILE)
endif
ifneq ($(PLATFORM), MSM8974_SURF_STD)
ccflags-y += -DMC_NETLINK_COMPAT_V37
endif
#EXTRA_CFLAGS += -DDEBUG -DDEBUG_VERBOSE
#EXTRA_CFLAGS += -Wno-declaration-after-statement
ccflags-y += -Wno-declaration-after-statement
# add our module to kernel.
obj-m += mcKernelApi.o
mcKernelApi-objs := main.o clientlib.o device.o session.o connection.o
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
Module.markers Module.symvers modules.order
depend .depend dep:
$(CC) $(CFLAGS) -M *.c > .depend
ifeq (.depend,$(wildcard .depend))
include .depend
endif

View File

@ -1,34 +0,0 @@
#!/bin/bash
if [ -z $COMP_PATH_ROOT ]; then
echo "The build environment is not set!"
echo "Trying to source setupDrivers.sh automatically!"
source ../setupDrivers.sh || exit 1
fi
ROOT_PATH=$(dirname $(readlink -f $BASH_SOURCE))
# These folders need to be relative to the kernel dir or absolute!
PLATFORM=EXYNOS_4X12_STD
CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
MOBICORE_DRIVER=$COMP_PATH_MobiCoreDriverMod
MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I$CODE_INCLUDE/include -I$CODE_INCLUDE/public"
MCDRV_SYMBOLS_FILE="$COMP_PATH_ROOT/MobiCoreDriverMod/Locals/Code/Module.symvers"
if [ ! -f $MCDRV_SYMBOLS_FILE ]; then
echo "Please build the Mobicore Driver Module first!"
echo "Otherwise you will see warnings of missing symbols"
fi
# Clean first
make -C $CODE_INCLUDE clean
make -C $LINUX_PATH \
MODE=$MODE \
ARCH=arm \
CROSS_COMPILE=$CROSS_COMPILE \
M=$CODE_INCLUDE \
"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
MCDRV_SYMBOLS_FILE=$MCDRV_SYMBOLS_FILE \
modules

File diff suppressed because it is too large

View File

@ -1,80 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Common data types for use by the MobiCore Kernel API Driver
*/
#ifndef _MC_KAPI_COMMON_H
#define _MC_KAPI_COMMON_H
#include "connection.h"
#include "mcinq.h"
void mcapi_insert_connection(struct connection *connection);
void mcapi_remove_connection(uint32_t seq);
unsigned int mcapi_unique_id(void);
#define MC_DAEMON_PID 0xFFFFFFFF
#define MC_DRV_MOD_DEVNODE_FULLPATH "/dev/mobicore"
/* dummy function helper macro */
#define DUMMY_FUNCTION() do {} while (0)
/* Found in main.c */
extern struct device *mc_kapi;
#define MCDRV_ERROR(dev, txt, ...) \
dev_err(dev, "%s() ### ERROR: " txt, __func__, ##__VA_ARGS__)
#if defined(DEBUG)
/* #define DEBUG_VERBOSE */
#if defined(DEBUG_VERBOSE)
#define MCDRV_DBG_VERBOSE MCDRV_DBG
#else
#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
#endif
#define MCDRV_DBG(dev, txt, ...) \
dev_info(dev, "%s(): " txt, __func__, ##__VA_ARGS__)
#define MCDRV_DBG_WARN(dev, txt, ...) \
dev_warn(dev, "%s() WARNING: " txt, __func__, ##__VA_ARGS__)
#define MCDRV_DBG_ERROR(dev, txt, ...) \
dev_err(dev, "%s() ### ERROR: " txt, __func__, ##__VA_ARGS__)
#define MCDRV_ASSERT(cond) \
do { \
if (unlikely(!(cond))) { \
panic("mc_kernelapi Assertion failed: %s:%d\n", \
__FILE__, __LINE__); \
} \
} while (0)
#elif defined(NDEBUG)
#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
#define MCDRV_DBG(...) DUMMY_FUNCTION()
#define MCDRV_DBG_WARN(...) DUMMY_FUNCTION()
#define MCDRV_DBG_ERROR(...) DUMMY_FUNCTION()
#define MCDRV_ASSERT(...) DUMMY_FUNCTION()
#else
#error "Define DEBUG or NDEBUG"
#endif /* [not] defined(DEBUG_MCMODULE) */
#define assert(expr) MCDRV_ASSERT(expr)
#endif /* _MC_KAPI_COMMON_H */
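/*
 * Illustrative sketch (not part of the original header): intended usage of
 * the logging/assert macros above from kernel API code, always passing the
 * shared mc_kapi device as the first argument. The function and its values
 * are placeholders.
 */
static inline void example_log_usage(const void *buf, int handle)
{
        MCDRV_ASSERT(buf != NULL);
        MCDRV_DBG(mc_kapi, "mapping buffer %p, handle=%d", buf, handle);
        if (handle < 0)
                MCDRV_DBG_ERROR(mc_kapi, "invalid handle %d", handle);
}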

View File

@ -1,203 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/semaphore.h>
#include <linux/time.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include "connection.h"
#include "common.h"
/* Define the initial state of the Data Available Semaphore */
#define SEM_NO_DATA_AVAILABLE 0
struct connection *connection_new(void)
{
struct connection *conn;
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (conn == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return NULL;
}
conn->sequence_magic = mcapi_unique_id();
mutex_init(&conn->data_lock);
sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);
mcapi_insert_connection(conn);
return conn;
}
void connection_cleanup(struct connection *conn)
{
if (!conn)
return;
kfree_skb(conn->skb);
mcapi_remove_connection(conn->sequence_magic);
kfree(conn);
}
bool connection_connect(struct connection *conn, pid_t dest)
{
/* Nothing to connect */
conn->peer_pid = dest;
return true;
}
size_t connection_read_data_msg(struct connection *conn, void *buffer,
uint32_t len)
{
size_t ret = -1;
MCDRV_DBG_VERBOSE(mc_kapi,
"reading connection data %u, connection data left %u",
len, conn->data_len);
/* trying to read more than the remaining data */
if (len > conn->data_len) {
ret = conn->data_len;
memcpy(buffer, conn->data_start, conn->data_len);
conn->data_len = 0;
} else {
ret = len;
memcpy(buffer, conn->data_start, len);
conn->data_len -= len;
conn->data_start += len;
}
if (conn->data_len == 0) {
conn->data_start = NULL;
kfree_skb(conn->skb);
conn->skb = NULL;
}
MCDRV_DBG_VERBOSE(mc_kapi, "read %zu", ret);
return ret;
}
size_t connection_read_datablock(struct connection *conn, void *buffer,
uint32_t len)
{
return connection_read_data(conn, buffer, len, -1);
}
size_t connection_read_data(struct connection *conn, void *buffer, uint32_t len,
int32_t timeout)
{
size_t ret = 0;
MCDRV_ASSERT(buffer != NULL);
MCDRV_ASSERT(conn->socket_descriptor != NULL);
MCDRV_DBG_VERBOSE(mc_kapi, "read data len = %u for PID = %u",
len, conn->sequence_magic);
do {
/*
* Wait until data is available or timeout
* msecs_to_jiffies(-1) -> wait forever for the sem
*/
if (down_timeout(&(conn->data_available_sem),
msecs_to_jiffies(timeout))) {
MCDRV_DBG_VERBOSE(mc_kapi,
"Timeout reading the data sem");
ret = -2;
break;
}
if (mutex_lock_interruptible(&(conn->data_lock))) {
MCDRV_DBG_ERROR(mc_kapi,
"interrupted reading the data sem");
ret = -1;
break;
}
/* Have data, use it */
if (conn->data_len > 0)
ret = connection_read_data_msg(conn, buffer, len);
mutex_unlock(&(conn->data_lock));
/* There is still some data left */
if (conn->data_len > 0)
up(&conn->data_available_sem);
} while (0);
return ret;
}
size_t connection_write_data(struct connection *conn, void *buffer,
uint32_t len)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
int ret = 0;
MCDRV_DBG_VERBOSE(mc_kapi, "buffer length %u from pid %u\n",
len, conn->sequence_magic);
do {
skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
if (!skb) {
ret = -1;
break;
}
nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2,
NLMSG_LENGTH(len), NLM_F_REQUEST);
if (!nlh) {
ret = -1;
kfree_skb(skb);
break;
}
memcpy(NLMSG_DATA(nlh), buffer, len);
/* netlink_unicast frees skb */
netlink_unicast(conn->socket_descriptor, skb,
conn->peer_pid, MSG_DONTWAIT);
ret = len;
} while (0);
return ret;
}
int connection_process(struct connection *conn, struct sk_buff *skb)
{
int ret = 0;
do {
if (mutex_lock_interruptible(&(conn->data_lock))) {
MCDRV_DBG_ERROR(mc_kapi,
"Interrupted getting data semaphore!");
ret = -1;
break;
}
kfree_skb(conn->skb);
/* Get a reference to the incoming skb */
conn->skb = skb_get(skb);
if (conn->skb) {
conn->data_msg = nlmsg_hdr(conn->skb);
conn->data_len = NLMSG_PAYLOAD(conn->data_msg, 0);
conn->data_start = NLMSG_DATA(conn->data_msg);
up(&(conn->data_available_sem));
}
mutex_unlock(&(conn->data_lock));
ret = 0;
} while (0);
return ret;
}

View File

@ -1,61 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_KAPI_CONNECTION_H_
#define _MC_KAPI_CONNECTION_H_
#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <stddef.h>
#include <stdbool.h>
struct connection {
/* Netlink socket */
struct sock *socket_descriptor;
/* Random? magic to match requests/answers */
uint32_t sequence_magic;
struct nlmsghdr *data_msg;
/* How much connection data is left */
uint32_t data_len;
/* Start pointer of remaining data */
void *data_start;
struct sk_buff *skb;
/* Data protection lock */
struct mutex data_lock;
/* Data protection semaphore */
struct semaphore data_available_sem;
/* PID address used for local connection */
pid_t self_pid;
/* Remote PID for connection */
pid_t peer_pid;
/* The list param for using the kernel lists */
struct list_head list;
};
struct connection *connection_new(void);
void connection_cleanup(struct connection *conn);
bool connection_connect(struct connection *conn, pid_t dest);
size_t connection_read_datablock(struct connection *conn, void *buffer,
uint32_t len);
size_t connection_read_data(struct connection *conn, void *buffer,
uint32_t len, int32_t timeout);
size_t connection_write_data(struct connection *conn, void *buffer,
uint32_t len);
int connection_process(struct connection *conn, struct sk_buff *skb);
#endif /* _MC_KAPI_CONNECTION_H_ */
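/*
 * Illustrative sketch (not part of the original header): a request/response
 * round trip over a connection to the daemon. MC_DAEMON_PID comes from
 * common.h and the 2000 ms timeout is an example value; real callers derive
 * both from their protocol.
 */
static int example_daemon_roundtrip(void *req, uint32_t req_len,
                                    void *rsp, uint32_t rsp_len)
{
        struct connection *conn = connection_new();
        size_t got;
        int ret = 0;

        if (!conn)
                return -ENOMEM;

        /* Point the connection at the daemon's netlink PID */
        connection_connect(conn, MC_DAEMON_PID);

        if (connection_write_data(conn, req, req_len) != req_len) {
                ret = -EIO;
                goto out;
        }

        /* Wait up to 2000 ms for the daemon's answer */
        got = connection_read_data(conn, rsp, rsp_len, 2000);
        if (got != rsp_len)
                ret = -EIO;
out:
        connection_cleanup(conn);
        return ret;
}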

View File

@ -1,233 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore client library device management.
*
* Device and Trustlet Session management Functions.
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "mc_kernel_api.h"
#include "public/mobicore_driver_api.h"
#include "device.h"
#include "common.h"
static struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle)
{
struct wsm *wsm;
wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
if (wsm == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return NULL;
}
wsm->virt_addr = virt_addr;
wsm->len = len;
wsm->handle = handle;
return wsm;
}
struct mcore_device_t *mcore_device_create(uint32_t device_id,
struct connection *connection)
{
struct mcore_device_t *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return NULL;
}
dev->device_id = device_id;
dev->connection = connection;
INIT_LIST_HEAD(&dev->session_vector);
INIT_LIST_HEAD(&dev->wsm_mmu_vector);
return dev;
}
void mcore_device_cleanup(struct mcore_device_t *dev)
{
struct session *tmp;
struct wsm *wsm;
struct list_head *pos, *q;
/*
* Delete all session objects. Usually this should not be needed
* as close_device() requires that all sessions have been closed before.
*/
list_for_each_safe(pos, q, &dev->session_vector) {
tmp = list_entry(pos, struct session, list);
list_del(pos);
session_cleanup(tmp);
}
/* Free all allocated WSM descriptors */
list_for_each_safe(pos, q, &dev->wsm_mmu_vector) {
wsm = list_entry(pos, struct wsm, list);
list_del(pos);
kfree(wsm);
}
connection_cleanup(dev->connection);
mcore_device_close(dev);
kfree(dev);
}
bool mcore_device_open(struct mcore_device_t *dev, const char *device_name)
{
dev->instance = mobicore_open();
return dev->instance != NULL;
}
void mcore_device_close(struct mcore_device_t *dev)
{
mobicore_release(dev->instance);
}
bool mcore_device_has_sessions(struct mcore_device_t *dev)
{
return !list_empty(&dev->session_vector);
}
bool mcore_device_create_new_session(struct mcore_device_t *dev,
uint32_t session_id,
struct connection *connection)
{
/* Check if session_id already exists */
if (mcore_device_resolve_session_id(dev, session_id)) {
MCDRV_DBG_ERROR(mc_kapi,
" session %u already exists", session_id);
return false;
}
struct session *session =
session_create(session_id, dev->instance, connection);
if (session == NULL)
return false;
list_add_tail(&(session->list), &(dev->session_vector));
return true;
}
bool mcore_device_remove_session(struct mcore_device_t *dev,
uint32_t session_id)
{
bool ret = false;
struct session *tmp;
struct list_head *pos, *q;
list_for_each_safe(pos, q, &dev->session_vector) {
tmp = list_entry(pos, struct session, list);
if (tmp->session_id == session_id) {
list_del(pos);
session_cleanup(tmp);
ret = true;
break;
}
}
return ret;
}
struct session *mcore_device_resolve_session_id(struct mcore_device_t *dev,
uint32_t session_id)
{
struct session *ret = NULL;
struct session *tmp;
struct list_head *pos;
/* Get session for session_id */
list_for_each(pos, &dev->session_vector) {
tmp = list_entry(pos, struct session, list);
if (tmp->session_id == session_id) {
ret = tmp;
break;
}
}
return ret;
}
struct wsm *mcore_device_allocate_contiguous_wsm(struct mcore_device_t *dev,
uint32_t len)
{
struct wsm *wsm = NULL;
do {
if (len == 0)
break;
/* Allocate shared memory */
void *virt_addr;
uint32_t handle;
int ret = mobicore_allocate_wsm(dev->instance, len, &handle,
&virt_addr);
if (ret != 0)
break;
/* Register (vaddr) with device */
wsm = wsm_create(virt_addr, len, handle);
if (wsm == NULL) {
mobicore_free_wsm(dev->instance, handle);
break;
}
list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
} while (0);
return wsm;
}
bool mcore_device_free_contiguous_wsm(struct mcore_device_t *dev,
struct wsm *wsm)
{
bool ret = false;
struct wsm *tmp;
struct list_head *pos;
list_for_each(pos, &dev->wsm_mmu_vector) {
tmp = list_entry(pos, struct wsm, list);
if (tmp == wsm) {
ret = true;
break;
}
}
if (ret) {
MCDRV_DBG_VERBOSE(mc_kapi,
"freeWsm virt_addr=0x%p, handle=%d",
wsm->virt_addr, wsm->handle);
/* ignore return code */
mobicore_free_wsm(dev->instance, wsm->handle);
list_del(pos);
kfree(wsm);
}
return ret;
}
struct wsm *mcore_device_find_contiguous_wsm(struct mcore_device_t *dev,
void *virt_addr)
{
struct wsm *wsm;
struct list_head *pos;
list_for_each(pos, &dev->wsm_mmu_vector) {
wsm = list_entry(pos, struct wsm, list);
if (virt_addr == wsm->virt_addr)
return wsm;
}
return NULL;
}

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore client library device management.
*
* Device and Trustlet Session management Functions.
*/
#ifndef _MC_KAPI_DEVICE_H_
#define _MC_KAPI_DEVICE_H_
#include <linux/list.h>
#include "connection.h"
#include "session.h"
#include "wsm.h"
struct mcore_device_t {
/* MobiCore Trustlet session associated with the device */
struct list_head session_vector;
struct list_head wsm_mmu_vector; /* WSM L2 or L3 Table */
uint32_t device_id; /* Device identifier */
struct connection *connection; /* The device connection */
struct mc_instance *instance; /* MobiCore Driver instance */
/* The list param for using the kernel lists */
struct list_head list;
};
struct mcore_device_t *mcore_device_create(
uint32_t device_id, struct connection *connection);
void mcore_device_cleanup(struct mcore_device_t *dev);
bool mcore_device_open(struct mcore_device_t *dev, const char *device_name);
void mcore_device_close(struct mcore_device_t *dev);
bool mcore_device_has_sessions(struct mcore_device_t *dev);
bool mcore_device_create_new_session(
struct mcore_device_t *dev, uint32_t session_id,
struct connection *connection);
bool mcore_device_remove_session(
struct mcore_device_t *dev, uint32_t session_id);
struct session *mcore_device_resolve_session_id(
struct mcore_device_t *dev, uint32_t session_id);
struct wsm *mcore_device_allocate_contiguous_wsm(
struct mcore_device_t *dev, uint32_t len);
bool mcore_device_free_contiguous_wsm(
struct mcore_device_t *dev, struct wsm *wsm);
struct wsm *mcore_device_find_contiguous_wsm(
struct mcore_device_t *dev, void *virt_addr);
#endif /* _MC_KAPI_DEVICE_H_ */

View File

@ -1,104 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Notifications inform the MobiCore runtime environment that information is
* pending in a WSM buffer.
*
* The Trustlet Connector (TLC) and the corresponding Trustlet also utilize
* this buffer to notify each other about new data within the
* Trustlet Connector Interface (TCI).
*
* The buffer is set up as a queue, which means that more than one
* notification can be written to the buffer before the switch to the other
* world is performed. Each side therefore facilitates an incoming and an
* outgoing queue for communication with the other side.
*
* Notifications hold the session ID, which is used to reference the
* communication partner in the other world.
* So if, e.g., the TLC in the normal world wants to notify its Trustlet
* about new data in the TCI buffer, it places a notification carrying that
* session ID into its outgoing queue.
*
* Notification queue declarations.
*/
#ifndef _MCINQ_H_
#define _MCINQ_H_
/* Minimum and maximum count of elements in the notification queue */
#define MIN_NQ_ELEM 1 /* Minimum notification queue elements. */
#define MAX_NQ_ELEM 64 /* Maximum notification queue elements. */
/* Minimum notification length (in bytes). */
#define MIN_NQ_LEN (MIN_NQ_ELEM * sizeof(notification))
/* Maximum notification length (in bytes). */
#define MAX_NQ_LEN (MAX_NQ_ELEM * sizeof(notification))
/*
* MCP session ID is used when directly communicating with the MobiCore
* (e.g. for starting and stopping of Trustlets).
*/
#define SID_MCP 0
/* Invalid session id is returned in case of an error. */
#define SID_INVALID 0xffffffff
/* Notification data structure. */
struct notification {
uint32_t session_id; /* Session ID. */
int32_t payload; /* Additional notification info */
};
/*
* Notification payload codes.
* 0 indicated a plain simple notification,
* a positive value is a termination reason from the task,
* a negative value is a termination reason from MobiCore.
* Possible negative values are given below.
*/
enum notification_payload {
/* task terminated, but exit code is invalid */
ERR_INVALID_EXIT_CODE = -1,
/* task terminated due to session end, no exit code available */
ERR_SESSION_CLOSE = -2,
/* task terminated due to invalid operation */
ERR_INVALID_OPERATION = -3,
/* session ID is unknown */
ERR_INVALID_SID = -4,
/* session is not active */
ERR_SID_NOT_ACTIVE = -5
};
/*
* Declaration of the notification queue header.
* Layout as specified in the data structure specification.
*/
struct notification_queue_header {
uint32_t write_cnt; /* Write counter. */
uint32_t read_cnt; /* Read counter. */
uint32_t queue_size; /* Queue size. */
};
/*
* Queue struct which defines a queue object.
* The queue struct is accessed by the queue<operation> type of
* function. The element count must be a power of two, and the exponent
* must be smaller than the bit width of uint32_t (i.e. 32).
*/
struct notification_queue {
/* Queue header. */
struct notification_queue_header hdr;
/* Notification elements. */
struct notification notification[MIN_NQ_ELEM];
};
#endif /* _MCINQ_H_ */
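/*
 * Illustrative sketch (not part of the original header): one plausible way
 * the outgoing queue could be used as a ring buffer, indexing the element
 * array with the counters modulo queue_size. The exact producer/consumer
 * protocol with the secure world is not defined here, so the full check and
 * the assumption that the queue memory holds queue_size elements (not just
 * MIN_NQ_ELEM) are illustrative.
 */
static inline bool example_nq_push(struct notification_queue *nq,
                                   uint32_t session_id, int32_t payload)
{
        struct notification_queue_header *hdr = &nq->hdr;

        /* Full when the writer is a whole queue ahead of the reader */
        if (hdr->write_cnt - hdr->read_cnt >= hdr->queue_size)
                return false;

        nq->notification[hdr->write_cnt % hdr->queue_size] =
                (struct notification){ .session_id = session_id,
                                       .payload = payload };
        hdr->write_cnt++;
        return true;
}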

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MCUUID_H_
#define _MCUUID_H_
#define UUID_TYPE
/* Universally Unique Identifier (UUID) according to ISO/IEC 11578. */
struct mc_uuid_t {
uint8_t value[16]; /* Value of the UUID. */
};
#endif /* _MCUUID_H_ */

View File

@ -1,197 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/netlink.h>
#include <linux/kthread.h>
#include <linux/device.h>
#include <net/sock.h>
#include <linux/list.h>
#include "connection.h"
#include "common.h"
#define MC_DAEMON_NETLINK 17
struct mc_kernelapi_ctx {
struct sock *sk;
struct list_head peers;
atomic_t counter;
};
struct mc_kernelapi_ctx *mod_ctx;
/* Define a MobiCore Kernel API device structure for use with dev_dbg() etc. */
struct device_driver mc_kernel_api_name = {
.name = "mckernelapi"
};
struct device mc_kernel_api_subname = {
.init_name = "", /* Set to 'mcapi' at mcapi_init() time */
.driver = &mc_kernel_api_name
};
struct device *mc_kapi = &mc_kernel_api_subname;
/* get a unique ID */
unsigned int mcapi_unique_id(void)
{
return (unsigned int)atomic_inc_return(&(mod_ctx->counter));
}
static struct connection *mcapi_find_connection(uint32_t seq)
{
struct connection *tmp;
struct list_head *pos;
/* Get session for session_id */
list_for_each(pos, &mod_ctx->peers) {
tmp = list_entry(pos, struct connection, list);
if (tmp->sequence_magic == seq)
return tmp;
}
return NULL;
}
void mcapi_insert_connection(struct connection *connection)
{
list_add_tail(&(connection->list), &(mod_ctx->peers));
connection->socket_descriptor = mod_ctx->sk;
}
void mcapi_remove_connection(uint32_t seq)
{
struct connection *tmp;
struct list_head *pos, *q;
/*
* Find the connection with the matching sequence magic and remove it
* from the peers list.
*/
list_for_each_safe(pos, q, &mod_ctx->peers) {
tmp = list_entry(pos, struct connection, list);
if (tmp->sequence_magic == seq) {
list_del(pos);
break;
}
}
}
static int mcapi_process(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct connection *c;
int seq;
int ret;
seq = nlh->nlmsg_seq;
MCDRV_DBG_VERBOSE(mc_kapi, "nlmsg len %d type %d pid 0x%X seq %d\n",
nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_pid, seq);
do {
c = mcapi_find_connection(seq);
if (!c) {
MCDRV_ERROR(mc_kapi,
"Invalid incoming connection - seq=%u!",
seq);
ret = -1;
break;
}
/* Pass the buffer to the appropriate connection */
connection_process(c, skb);
ret = 0;
} while (false);
return ret;
}
static void mcapi_callback(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
int len = skb->len;
int err = 0;
while (NLMSG_OK(nlh, len)) {
err = mcapi_process(skb, nlh);
/* if err or if this message says it wants a response */
if (err || (nlh->nlmsg_flags & NLM_F_ACK))
netlink_ack(skb, nlh, err);
nlh = NLMSG_NEXT(nlh, len);
}
}
static int __init mcapi_init(void)
{
#if defined MC_NETLINK_COMPAT || defined MC_NETLINK_COMPAT_V37
struct netlink_kernel_cfg cfg = {
.input = mcapi_callback,
};
#endif
dev_set_name(mc_kapi, "mcapi");
dev_info(mc_kapi, "Mobicore API module initialized!\n");
mod_ctx = kzalloc(sizeof(*mod_ctx), GFP_KERNEL);
if (mod_ctx == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return -ENOMEM;
}
#ifdef MC_NETLINK_COMPAT_V37
mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
&cfg);
#elif defined MC_NETLINK_COMPAT
mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
THIS_MODULE, &cfg);
#else
/* start kernel thread */
mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK, 0,
mcapi_callback, NULL, THIS_MODULE);
#endif
if (!mod_ctx->sk) {
MCDRV_ERROR(mc_kapi, "register of receive handler failed");
kfree(mod_ctx);
mod_ctx = NULL;
return -EFAULT;
}
INIT_LIST_HEAD(&mod_ctx->peers);
return 0;
}
static void __exit mcapi_exit(void)
{
dev_info(mc_kapi, "Unloading Mobicore API module.\n");
if (mod_ctx->sk != NULL) {
netlink_kernel_release(mod_ctx->sk);
mod_ctx->sk = NULL;
}
kfree(mod_ctx);
mod_ctx = NULL;
}
module_init(mcapi_init);
module_exit(mcapi_exit);
MODULE_AUTHOR("Trustonic Limited");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MobiCore API driver");

View File

@ -1,399 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MobiCore Driver API.
*
* The MobiCore (MC) Driver API provides access functions to the MobiCore
* runtime environment and the contained Trustlets.
*/
#ifndef _MOBICORE_DRIVER_API_H_
#define _MOBICORE_DRIVER_API_H_
#define __MC_CLIENT_LIB_API
#include "mcuuid.h"
/*
* Return values of MobiCore driver functions.
*/
enum mc_result {
/* Function call succeeded. */
MC_DRV_OK = 0,
/* No notification available. */
MC_DRV_NO_NOTIFICATION = 1,
/* Error during notification on communication level. */
MC_DRV_ERR_NOTIFICATION = 2,
/* Function not implemented. */
MC_DRV_ERR_NOT_IMPLEMENTED = 3,
/* No more resources available. */
MC_DRV_ERR_OUT_OF_RESOURCES = 4,
/* Driver initialization failed. */
MC_DRV_ERR_INIT = 5,
/* Unknown error. */
MC_DRV_ERR_UNKNOWN = 6,
/* The specified device is unknown. */
MC_DRV_ERR_UNKNOWN_DEVICE = 7,
/* The specified session is unknown.*/
MC_DRV_ERR_UNKNOWN_SESSION = 8,
/* The specified operation is not allowed. */
MC_DRV_ERR_INVALID_OPERATION = 9,
/* The response header from the MC is invalid. */
MC_DRV_ERR_INVALID_RESPONSE = 10,
/* Function call timed out. */
MC_DRV_ERR_TIMEOUT = 11,
/* Can not allocate additional memory. */
MC_DRV_ERR_NO_FREE_MEMORY = 12,
/* Free memory failed. */
MC_DRV_ERR_FREE_MEMORY_FAILED = 13,
/* Still some open sessions pending. */
MC_DRV_ERR_SESSION_PENDING = 14,
/* MC daemon not reachable */
MC_DRV_ERR_DAEMON_UNREACHABLE = 15,
/* The device file of the kernel module could not be opened. */
MC_DRV_ERR_INVALID_DEVICE_FILE = 16,
/* Invalid parameter. */
MC_DRV_ERR_INVALID_PARAMETER = 17,
/* Unspecified error from Kernel Module*/
MC_DRV_ERR_KERNEL_MODULE = 18,
/* Error during mapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_MAPPING = 19,
/* Error during unmapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_UNMAPPING = 20,
/* Notification received, exit code available. */
MC_DRV_INFO_NOTIFICATION = 21,
/* Set up of NWd connection failed. */
MC_DRV_ERR_NQ_FAILED = 22
};
/*
* Driver control command.
*/
enum mc_driver_ctrl {
/* Return the driver version */
MC_CTRL_GET_VERSION = 1
};
/*
* Structure of Session Handle, includes the Session ID and the Device ID the
* Session belongs to.
* The session handle will be used for session-based MobiCore communication.
* It will be passed to calls which address a communication end point in the
* MobiCore environment.
*/
struct mc_session_handle {
uint32_t session_id; /* MobiCore session ID */
uint32_t device_id; /* Device ID the session belongs to */
};
/*
* Information structure about additional mapped Bulk buffer between the
* Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
* initialized from a Trustlet Connector by calling mc_map().
* In order to use the memory within a Trustlet the Trustlet Connector has to
* inform the Trustlet with the content of this structure via the TCI.
*/
struct mc_bulk_map {
/* The virtual address of the Bulk buffer with respect to the address space
* of the Trustlet; it already includes a possible offset. */
uint32_t secure_virt_addr;
uint32_t secure_virt_len; /* Length of the mapped Bulk buffer */
};
/* The default device ID */
#define MC_DEVICE_ID_DEFAULT 0
/* Wait infinite for a response of the MC. */
#define MC_INFINITE_TIMEOUT ((int32_t)(-1))
/* Do not wait for a response of the MC. */
#define MC_NO_TIMEOUT 0
/* TCI/DCI must not exceed 1MiB */
#define MC_MAX_TCI_LEN 0x100000
/**
* mc_open_device() - Open a new connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device to be used.
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Initializes all device specific resources required to communicate with a
* MobiCore instance located on the specified device in the system. If the
* device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_OPERATION: device already opened
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon
* MC_DRV_ERR_UNKNOWN_DEVICE: device_id unknown
* MC_DRV_ERR_INVALID_DEVICE_FILE: kernel module under /dev/mobicore
* cannot be opened
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_device(uint32_t device_id);
/**
* mc_close_device() - Close the connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device.
*
* When closing a device, active sessions have to be closed beforehand.
* Resources associated with the device will be released.
* The device may be opened again after it has been closed.
*
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_SESSION_PENDING: a session is still open
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_device(uint32_t device_id);
/**
* mc_open_session() - Open a new session to a Trustlet.
* @session: On success, the session data will be returned
* @uuid: UUID of the Trustlet to be opened
* @tci: TCI buffer for communicating with the Trustlet
* @tci_len: Length of the TCI buffer. Maximum allowed value
* is MC_MAX_TCI_LEN
*
* The Trustlet with the given UUID has to be available in the flash filesystem.
*
* Write MCP open message to buffer and notify MobiCore about the availability
* of a new command.
*
* Waits until MobiCore responds with the new session ID (stored in the MCP
* buffer).
*
* Note that session.device_id has to be the device id of an opened device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
* MC_DRV_ERR_NQ_FAILED: daemon returns an error
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_session(
struct mc_session_handle *session, const struct mc_uuid_t *uuid,
uint8_t *tci, uint32_t tci_len);
/**
* mc_close_session() - Close a Trustlet session.
* @session: Session to be closed.
*
* Closes the specified MobiCore session. The call will block until the
* session has been closed.
*
* Device device_id has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
* MC_DRV_ERR_INVALID_DEVICE_FILE: daemon cannot open Trustlet file
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_session(
struct mc_session_handle *session);
/**
* mc_notify() - Notify a session.
* @session: The session to be notified.
*
* Notifies the session end point about available message data.
* If the session parameter is correct, notify will always succeed.
* Corresponding errors can only be received by mc_wait_notification().
*
* A session has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_notify(struct mc_session_handle *session);
/**
* mc_wait_notification() - Wait for a notification.
* @session: The session the notification should correspond to.
* @timeout: Time in milliseconds to wait
* (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
* MC_INFINITE_TIMEOUT : wait infinitely)
*
* Wait for a notification issued by the MobiCore for a specific session.
* The timeout parameter specifies the number of milliseconds the call will wait
* for a notification.
*
* If the caller passes MC_NO_TIMEOUT (0), the call returns immediately. If the
* timeout value is below 0 (MC_INFINITE_TIMEOUT), the call blocks until a
* notification for the session has been received.
*
* The caller has to trust the other side to send a notification to wake it up
* again.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_TIMEOUT: no notification arrived in time
* MC_DRV_INFO_NOTIFICATION: a problem with the session was
* encountered. Get more details with
* mc_get_session_error_code()
* MC_DRV_ERR_NOTIFICATION: a problem with the socket occurred
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
struct mc_session_handle *session, int32_t timeout);
/**
* mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
* @device_id: The ID of an opened device to retrieve the WSM from.
* @align: The alignment (number of pages) of the memory block
* (e.g. 0x00000001 for 4 KiB).
* @len: Length of the block in bytes.
* @wsm: Virtual address of the world shared memory block.
* @wsm_flags: Platform specific flags describing the memory to
* be allocated.
*
* The MC driver allocates a contiguous block of memory which can be used as
* WSM.
* This implies that the allocated memory is aligned according to the
* alignment parameter.
*
* Always returns a buffer of size WSM_SIZE aligned to 4 KiB.
*
* The align and wsm_flags parameters are currently ignored.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_NO_FREE_MEMORY: no more contiguous memory is
* available in this size or for this
* process
*/
__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
uint32_t device_id,
uint32_t align,
uint32_t len,
uint8_t **wsm,
uint32_t wsm_flags
);
/**
* mc_free_wsm() - Free a block of world shared memory (WSM).
* @device_id: The ID to which the given address belongs
* @wsm: Address of WSM block to be freed
*
* The MC driver will free a block of world shared memory (WSM) previously
* allocated with mc_malloc_wsm(). The caller has to assure that the address
* handed over to the driver is a valid WSM address.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: when device id is invalid
* MC_DRV_ERR_FREE_MEMORY_FAILED: on failure
*/
__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(uint32_t device_id,
uint8_t *wsm);
/**
* mc_map() - Map additional bulk buffer between a Trustlet Connector (TLC)
* and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is mapped to the
* session specified in the sessionHandle
* @buf: Virtual address of a memory portion (relative to TLC)
* to be shared with the Trustlet, already includes a
* possible offset!
* @len: length of buffer block in bytes.
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd).
*
* Memory allocated in user space of the TLC can be mapped as additional
* communication channel (besides TCI) to the Trustlet. Limitations of the
* Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
* chunk size of 1 MiB each.
*
* It is up to the application layer (TLC) to inform the Trustlet
* about the additional mapped bulk memory.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
* MC_DRV_ERR_BULK_MAPPING: buf is already used as a bulk buffer or
* when registering the buffer failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_map(
struct mc_session_handle *session, void *buf, uint32_t len,
struct mc_bulk_map *map_info);
/**
* mc_unmap() - Remove additional mapped bulk buffer between Trustlet Connector
* (TLC) and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is unmapped from the
* session specified in the sessionHandle.
* @buf: Virtual address of a memory portion (relative to TLC)
* shared with the TL, already includes a possible offset!
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd)
*
* The bulk buffer will immediately be unmapped from the session context.
*
* The application layer (TLC) must inform the TL about unmapping of the
* additional bulk memory before calling mc_unmap!
*
* The clientlib currently ignores the len field in map_info.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
* MC_DRV_ERR_BULK_UNMAPPING: buf was not registered earlier
* or when unregistering failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_unmap(
struct mc_session_handle *session, void *buf,
struct mc_bulk_map *map_info);
/**
* mc_get_session_error_code() - Get additional error information of the last
* error that occurred on a session.
* @session: Session handle with information of the device_id and
* the session_id
* @last_error: >0 Trustlet has terminated itself with this value,
* <0 Trustlet is dead because of an error within the
* MobiCore (e.g. Kernel exception). See also MCI
* definition.
*
* After the request the stored error code will be deleted.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
struct mc_session_handle *session, int32_t *last_error);
#endif /* _MOBICORE_DRIVER_API_H_ */
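To make the call flow documented above concrete, here is a hedged usage sketch for a kernel client of this API. Only the declarations in this header are taken as given; the UUID value, the TCI contents and the surrounding module are placeholders.

#include <linux/string.h>
#include <linux/types.h>
#include "public/mobicore_driver_api.h"

/* Hypothetical caller; the UUID and the TCI protocol are placeholders. */
static enum mc_result run_trustlet_command(uint8_t *tci, uint32_t tci_len)
{
        struct mc_session_handle session;
        struct mc_uuid_t uuid;
        enum mc_result ret;
        int32_t last_err;

        memset(&uuid, 0, sizeof(uuid));       /* fill in the Trustlet UUID here */
        memset(&session, 0, sizeof(session));
        session.device_id = MC_DEVICE_ID_DEFAULT;

        ret = mc_open_device(MC_DEVICE_ID_DEFAULT);
        if (ret != MC_DRV_OK)
                return ret;
        ret = mc_open_session(&session, &uuid, tci, tci_len);
        if (ret != MC_DRV_OK)
                goto close_device;

        /* The command itself has already been written into the TCI. */
        ret = mc_notify(&session);
        if (ret == MC_DRV_OK)
                ret = mc_wait_notification(&session, MC_INFINITE_TIMEOUT);
        if (ret == MC_DRV_INFO_NOTIFICATION)
                (void)mc_get_session_error_code(&session, &last_err);

        (void)mc_close_session(&session);
close_device:
        (void)mc_close_device(MC_DEVICE_ID_DEFAULT);
        return ret;
}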

View File

@ -1,250 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MOBICORE_DRIVER_CMD_H_
#define _MOBICORE_DRIVER_CMD_H_
#include "mcuuid.h"
enum mc_drv_cmd_t {
MC_DRV_CMD_PING = 0,
MC_DRV_CMD_GET_INFO = 1,
MC_DRV_CMD_OPEN_DEVICE = 2,
MC_DRV_CMD_CLOSE_DEVICE = 3,
MC_DRV_CMD_NQ_CONNECT = 4,
MC_DRV_CMD_OPEN_SESSION = 5,
MC_DRV_CMD_CLOSE_SESSION = 6,
MC_DRV_CMD_NOTIFY = 7,
MC_DRV_CMD_MAP_BULK_BUF = 8,
MC_DRV_CMD_UNMAP_BULK_BUF = 9
};
enum mc_drv_rsp_t {
MC_DRV_RSP_OK = 0,
MC_DRV_RSP_FAILED = 1,
MC_DRV_RSP_DEVICE_NOT_OPENED = 2,
MC_DRV_RSP_DEVICE_ALREADY_OPENED = 3,
MC_DRV_RSP_COMMAND_NOT_ALLOWED = 4,
MC_DRV_INVALID_DEVICE_NAME = 5,
MC_DRV_RSP_MAP_BULK_ERRO = 6,
MC_DRV_RSP_TRUSTLET_NOT_FOUND = 7,
MC_DRV_RSP_PAYLOAD_LENGTH_ERROR = 8,
};
struct mc_drv_command_header_t {
uint32_t command_id;
};
struct mc_drv_response_header_t {
uint32_t response_id;
};
#define MC_DEVICE_ID_DEFAULT 0 /* The default device ID */
struct mc_drv_cmd_open_device_payload_t {
uint32_t device_id;
};
struct mc_drv_cmd_open_device_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_open_device_payload_t payload;
};
struct mc_drv_rsp_open_device_payload_t {
/* empty */
};
struct mc_drv_rsp_open_device_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_open_device_payload_t payload;
};
struct mc_drv_cmd_close_device_t {
struct mc_drv_command_header_t header;
/*
* no payload here because close has none.
* If we used an empty payload struct, C++ would still give it a nonzero size,
* so write(cmd, sizeof(cmd)) would push too much into the socket.
*/
};
struct mc_drv_rsp_close_device_payload_t {
/* empty */
};
struct mc_drv_rsp_close_device_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_close_device_payload_t payload;
};
struct mc_drv_cmd_open_session_payload_t {
uint32_t device_id;
struct mc_uuid_t uuid;
uint32_t tci;
uint32_t handle;
uint32_t len;
};
struct mc_drv_cmd_open_session_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_open_session_payload_t payload;
};
struct mc_drv_rsp_open_session_payload_t {
uint32_t session_id;
uint32_t device_session_id;
uint32_t session_magic;
};
struct mc_drv_rsp_open_session_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_open_session_payload_t payload;
};
struct mc_drv_cmd_close_session_payload_t {
uint32_t session_id;
};
struct mc_drv_cmd_close_session_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_close_session_payload_t payload;
};
struct mc_drv_rsp_close_session_payload_t {
/* empty */
};
struct mc_drv_rsp_close_session_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_close_session_payload_t payload;
};
struct mc_drv_cmd_notify_payload_t {
uint32_t session_id;
};
struct mc_drv_cmd_notify_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_notify_payload_t payload;
};
struct mc_drv_rsp_notify_payload_t {
/* empty */
};
struct mc_drv_rsp_notify_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_notify_payload_t payload;
};
struct mc_drv_cmd_map_bulk_mem_payload_t {
uint32_t session_id;
uint32_t handle;
uint32_t rfu;
uint32_t offset_payload;
uint32_t len_bulk_mem;
};
struct mc_drv_cmd_map_bulk_mem_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_map_bulk_mem_payload_t payload;
};
struct mc_drv_rsp_map_bulk_mem_payload_t {
uint32_t session_id;
uint32_t secure_virtual_adr;
};
struct mc_drv_rsp_map_bulk_mem_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_map_bulk_mem_payload_t payload;
};
struct mc_drv_cmd_unmap_bulk_mem_payload_t {
uint32_t session_id;
uint32_t handle;
uint32_t secure_virtual_adr;
uint32_t len_bulk_mem;
};
struct mc_drv_cmd_unmap_bulk_mem_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_unmap_bulk_mem_payload_t payload;
};
struct mc_drv_rsp_unmap_bulk_mem_payload_t {
uint32_t response_id;
uint32_t session_id;
};
struct mc_drv_rsp_unmap_bulk_mem_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_unmap_bulk_mem_payload_t payload;
};
struct mc_drv_cmd_nqconnect_payload_t {
uint32_t device_id;
uint32_t session_id;
uint32_t device_session_id;
uint32_t session_magic; /* Random data */
};
struct mc_drv_cmd_nqconnect_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_nqconnect_payload_t payload;
};
struct mc_drv_rsp_nqconnect_payload_t {
/* empty; */
};
struct mc_drv_rsp_nqconnect_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_nqconnect_payload_t payload;
};
union mc_drv_command_t {
struct mc_drv_command_header_t header;
struct mc_drv_cmd_open_device_t mc_drv_cmd_open_device;
struct mc_drv_cmd_close_device_t mc_drv_cmd_close_device;
struct mc_drv_cmd_open_session_t mc_drv_cmd_open_session;
struct mc_drv_cmd_close_session_t mc_drv_cmd_close_session;
struct mc_drv_cmd_nqconnect_t mc_drv_cmd_nqconnect;
struct mc_drv_cmd_notify_t mc_drv_cmd_notify;
struct mc_drv_cmd_map_bulk_mem_t mc_drv_cmd_map_bulk_mem;
struct mc_drv_cmd_unmap_bulk_mem_t mc_drv_cmd_unmap_bulk_mem;
};
union mc_drv_response_t {
struct mc_drv_response_header_t header;
struct mc_drv_rsp_open_device_t mc_drv_rsp_open_device;
struct mc_drv_rsp_close_device_t mc_drv_rsp_close_device;
struct mc_drv_rsp_open_session_t mc_drv_rsp_open_session;
struct mc_drv_rsp_close_session_t mc_drv_rsp_close_session;
struct mc_drv_rsp_nqconnect_t mc_drv_rsp_nqconnect;
struct mc_drv_rsp_notify_t mc_drv_rsp_notify;
struct mc_drv_rsp_map_bulk_mem_t mc_drv_rsp_map_bulk_mem;
struct mc_drv_rsp_unmap_bulk_mem_t mc_drv_rsp_unmap_bulk_mem;
};
#endif /* _MOBICORE_DRIVER_CMD_H_ */
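As an illustration of the wire format above, a hedged sketch of framing an open-session command and reading back the typed response. The daemon_write()/daemon_read() helpers are placeholders for whatever socket I/O carries the daemon connection, the header name and the meaning of the handle field are assumptions, and only the structures and enums from this header are taken as given.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "mobicore_driver_cmd.h"   /* assumed name of the header above */

/* Placeholder transport helpers, assumed to exist in the surrounding code. */
int daemon_write(const void *buf, size_t len);
int daemon_read(void *buf, size_t len);

static int open_session_over_socket(const struct mc_uuid_t *uuid,
                                    uint32_t tci_handle, uint32_t tci_len,
                                    uint32_t *session_id)
{
        struct mc_drv_cmd_open_session_t cmd;
        struct mc_drv_rsp_open_session_t rsp;

        memset(&cmd, 0, sizeof(cmd));
        cmd.header.command_id = MC_DRV_CMD_OPEN_SESSION;
        cmd.payload.device_id = MC_DEVICE_ID_DEFAULT;
        cmd.payload.uuid = *uuid;
        cmd.payload.handle = tci_handle;   /* assumption: handle of the registered TCI buffer */
        cmd.payload.len = tci_len;

        if (daemon_write(&cmd, sizeof(cmd)) < 0)
                return -1;
        if (daemon_read(&rsp, sizeof(rsp)) < 0)
                return -1;
        if (rsp.header.response_id != MC_DRV_RSP_OK)
                return -1;

        *session_id = rsp.payload.session_id;
        return 0;
}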

View File

@ -1,208 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "mc_kernel_api.h"
#include "public/mobicore_driver_api.h"
#include "session.h"
struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
void *virt_addr, uint32_t len, uint32_t handle)
{
struct bulk_buffer_descriptor *desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (desc == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return NULL;
}
desc->virt_addr = virt_addr;
desc->len = len;
desc->handle = handle;
return desc;
}
struct session *session_create(
uint32_t session_id, void *instance, struct connection *connection)
{
struct session *session;
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (session == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
return NULL;
}
session->session_id = session_id;
session->instance = instance;
session->notification_connection = connection;
session->session_info.last_error = SESSION_ERR_NO;
session->session_info.state = SESSION_STATE_INITIAL;
INIT_LIST_HEAD(&(session->bulk_buffer_descriptors));
return session;
}
void session_cleanup(struct session *session)
{
struct bulk_buffer_descriptor *bulk_buf_descr;
struct list_head *pos, *q;
/* Unmap still mapped buffers */
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
bulk_buf_descr =
list_entry(pos, struct bulk_buffer_descriptor, list);
MCDRV_DBG_VERBOSE(mc_kapi,
"handle= %d",
bulk_buf_descr->handle);
/* ignore any error, as we cannot do anything in this case. */
int ret = mobicore_unmap_vmem(session->instance,
bulk_buf_descr->handle);
if (ret != 0)
MCDRV_DBG_ERROR(mc_kapi,
"mobicore_unmap_vmem failed: %d", ret);
list_del(pos);
kfree(bulk_buf_descr);
}
/* Finally delete notification connection */
connection_cleanup(session->notification_connection);
kfree(session);
}
void session_set_error_info(struct session *session, int32_t err)
{
session->session_info.last_error = err;
}
int32_t session_get_last_err(struct session *session)
{
return session->session_info.last_error;
}
struct bulk_buffer_descriptor *session_add_bulk_buf(struct session *session,
void *buf, uint32_t len)
{
struct bulk_buffer_descriptor *bulk_buf_descr = NULL;
struct bulk_buffer_descriptor *tmp;
struct list_head *pos;
/*
* Search bulk buffer descriptors for existing vAddr
* At the moment a virtual address can only be added one time
*/
list_for_each(pos, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
if (tmp->virt_addr == buf)
return NULL;
}
do {
/*
* Prepare the interface structure for memory registration in
* Kernel Module
*/
uint32_t handle;
int ret = mobicore_map_vmem(session->instance, buf, len,
&handle);
if (ret != 0) {
MCDRV_DBG_ERROR(mc_kapi,
"mobicore_map_vmem failed, ret=%d",
ret);
break;
}
MCDRV_DBG_VERBOSE(mc_kapi, "handle=%d", handle);
/* Create new descriptor */
bulk_buf_descr =
bulk_buffer_descriptor_create(buf, len, handle);
if (bulk_buf_descr == NULL) {
/* Discard the returned value */
(void)mobicore_unmap_vmem(session->instance, handle);
break;
}
/* Add to vector of descriptors */
list_add_tail(&(bulk_buf_descr->list),
&(session->bulk_buffer_descriptors));
} while (0);
return bulk_buf_descr;
}
bool session_remove_bulk_buf(struct session *session, void *virt_addr)
{
bool ret = true;
struct bulk_buffer_descriptor *bulk_buf = NULL;
struct bulk_buffer_descriptor *tmp;
struct list_head *pos, *q;
MCDRV_DBG_VERBOSE(mc_kapi, "Virtual Address = 0x%p",
virt_addr);
/* Search and remove bulk buffer descriptor */
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
if (tmp->virt_addr == virt_addr) {
bulk_buf = tmp;
list_del(pos);
break;
}
}
if (bulk_buf == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
ret = false;
} else {
MCDRV_DBG_VERBOSE(mc_kapi, "Wsm handle=%d",
bulk_buf->handle);
/* ignore any error, as we cannot do anything */
int ret = mobicore_unmap_vmem(session->instance,
bulk_buf->handle);
if (ret != 0)
MCDRV_DBG_ERROR(mc_kapi,
"mobicore_unmap_vmem failed: %d", ret);
kfree(bulk_buf);
}
return ret;
}
uint32_t session_find_bulk_buf(struct session *session, void *virt_addr)
{
struct bulk_buffer_descriptor *tmp;
struct list_head *pos, *q;
MCDRV_DBG_VERBOSE(mc_kapi, "Virtual Address = 0x%p",
virt_addr);
/* Search and return buffer descriptor handle */
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
if (tmp->virt_addr == virt_addr)
return tmp->handle;
}
return 0;
}
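A short, hedged sketch of how the bulk-buffer helpers above might be driven when sharing a buffer with a Trustlet; the session, the buffer and the TCI signalling around it are placeholders, and the header name is assumed.

#include "session.h"   /* assumed name of the header declaring these helpers */

/* Hypothetical caller of the session bulk-buffer helpers above. */
static int share_buffer_with_trustlet(struct session *session,
                                      void *buf, uint32_t len)
{
        struct bulk_buffer_descriptor *desc;

        desc = session_add_bulk_buf(session, buf, len);  /* maps buf via mobicore_map_vmem() */
        if (desc == NULL)
                return -1;      /* already mapped, or mapping failed */

        /* ... hand desc->handle to the Trustlet via the TCI ... */

        if (!session_remove_bulk_buf(session, buf))      /* unmaps and frees the descriptor */
                return -1;
        return 0;
}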

View File

@ -1,146 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_KAPI_SESSION_H_
#define _MC_KAPI_SESSION_H_
#include "common.h"
#include <linux/list.h>
#include "connection.h"
struct bulk_buffer_descriptor {
void *virt_addr; /* The VA of the Bulk buffer */
uint32_t len; /* Length of the Bulk buffer */
uint32_t handle;
/* The list param for using the kernel lists*/
struct list_head list;
};
struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
void *virt_addr,
uint32_t len,
uint32_t handle
);
/*
* Session states.
* Not used at the moment.
*/
enum session_state {
SESSION_STATE_INITIAL,
SESSION_STATE_OPEN,
SESSION_STATE_TRUSTLET_DEAD
};
#define SESSION_ERR_NO 0 /* No session error */
/*
* Session information structure.
* The information structure is used to hold the state of the session, which
* will limit further actions for the session.
* Also the last error code will be stored till it's read.
*/
struct session_information {
enum session_state state; /* Session state */
int32_t last_error; /* Last error of session */
};
struct session {
struct mc_instance *instance;
/* Descriptors of additional bulk buffer of a session */
struct list_head bulk_buffer_descriptors;
/* Information about session */
struct session_information session_info;
uint32_t session_id;
struct connection *notification_connection;
/* The list param for using the kernel lists */
struct list_head list;
};
struct session *session_create(
uint32_t session_id,
void *instance,
struct connection *connection
);
void session_cleanup(struct session *session);
/*
* session_add_bulk_buf() - Add address information of additional bulk
* buffer memory to session and register virtual
* memory in kernel module
* @session: Session information structure
* @buf: The virtual address of bulk buffer.
* @len: Length of bulk buffer.
*
* The virtual address can only be added once. If the virtual
* address already exists, NULL is returned.
*
* On success the actual Bulk buffer descriptor with all address information
* is returned, NULL if an error occurs.
*/
struct bulk_buffer_descriptor *session_add_bulk_buf(
struct session *session, void *buf, uint32_t len);
/*
* session_remove_bulk_buf() - Remove address information of additional bulk
* buffer memory from session and unregister
* virtual memory in kernel module
* @session: Session information structure
* @buf: The virtual address of the bulk buffer
*
* Returns true on success
*/
bool session_remove_bulk_buf(struct session *session, void *buf);
/*
* session_find_bulk_buf() - Find the handle of the bulk buffer for this
* session
*
* @session: Session information structure
* @buf: The virtual address of bulk buffer.
*
* On success the actual Bulk buffer handle is returned, 0
* if an error occurs.
*/
uint32_t session_find_bulk_buf(struct session *session, void *virt_addr);
/*
* session_set_error_info() - Set additional error information of the last
* error that occurred.
* @session: Session information structure
* @err: The actual error
*/
void session_set_error_info(struct session *session, int32_t err);
/*
* session_get_last_err() - Get additional error information of the last
* error that occurred.
* @session: Session information structure
*
* After the request, the information is set to SESSION_ERR_NO.
*
* Returns the last stored error code or SESSION_ERR_NO
*/
int32_t session_get_last_err(struct session *session);
#endif /* _MC_KAPI_SESSION_H_ */
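For orientation, a hedged sketch of the session lifecycle these declarations describe: create, query the stored error, clean up. The instance and connection arguments are assumed to come from the device and connection code elsewhere in this kernel API, and the header name is an assumption.

#include "session.h"   /* assumed name of this header */

/* Hypothetical lifecycle sketch; instance and connection are created elsewhere. */
static int probe_session_health(uint32_t session_id,
                                struct mc_instance *instance,
                                struct connection *conn)
{
        struct session *s = session_create(session_id, instance, conn);
        int32_t last_err;

        if (s == NULL)
                return -1;

        /* ... the session is used; a notification handler may record a
         * Trustlet error with session_set_error_info() ... */

        last_err = session_get_last_err(s);     /* SESSION_ERR_NO when healthy */

        session_cleanup(s);     /* also unmaps any still-registered bulk buffers */
        return last_err == SESSION_ERR_NO ? 0 : -1;
}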

View File

@ -1,30 +0,0 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* World shared memory definitions.
*/
#ifndef _MC_KAPI_WSM_H_
#define _MC_KAPI_WSM_H_
#include "common.h"
#include <linux/list.h>
struct wsm {
void *virt_addr;
uint32_t len;
uint32_t handle;
struct list_head list;
};
#endif /* _MC_KAPI_WSM_H_ */
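struct wsm is only a tracked allocation plus a list node; the following hedged sketch shows how a device context might keep such blocks on a kernel list. The owning list, the helper names and the header name are assumptions.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "wsm.h"   /* assumed name of the header defining struct wsm above */

/* Hypothetical bookkeeping around struct wsm as defined above. */
static struct wsm *wsm_track(struct list_head *wsm_list,
                             void *virt_addr, uint32_t len, uint32_t handle)
{
        struct wsm *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (w == NULL)
                return NULL;
        w->virt_addr = virt_addr;
        w->len = len;
        w->handle = handle;
        list_add_tail(&w->list, wsm_list);
        return w;
}

static struct wsm *wsm_find(struct list_head *wsm_list, uint32_t handle)
{
        struct wsm *w;

        list_for_each_entry(w, wsm_list, list)
                if (w->handle == handle)
                        return w;
        return NULL;
}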

View File

@ -1,6 +0,0 @@
MobiCore is an operating system shipped with TZBSP on MSM chipsets.
MobiCore consists of several components in the secure world (TrustZone)
and the non-secure world (Linux kernel, Android user space). The
MobiCore driver communicates with the MobiCore kernel that runs in
TrustZone.

View File

@ -1,15 +0,0 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
"t-base-QC-MSM8974-Android-301C-V001-76_76"

View File

@ -1,19 +0,0 @@
#!/bin/bash
export COMP_PATH_ROOT=$(dirname $(readlink -f $BASH_SOURCE)) #set this to the absolute path of the folder containing this file
# This part has to be set by the customer
# To be set, absolute path of kernel folder
export LINUX_PATH=
# To be set, absolute path! CROSS_COMPILE variable needed by the kernel, e.g. /home/user/arm-2009q3/bin/arm-none-linux-gnueabi-
export CROSS_COMPILE=
# To be set, build mode debug or release
export MODE=debug
# To be set, the absolute path to the Linux Android NDK
export NDK_PATH=
# Global variables needed by build scripts
export COMP_PATH_Logwrapper=$COMP_PATH_ROOT/Logwrapper/Out
export COMP_PATH_MobiCore=$COMP_PATH_ROOT/MobiCore/Out
export COMP_PATH_MobiCoreDriverMod=$COMP_PATH_ROOT/mobicore_driver/Out
export COMP_PATH_MobiCoreDriverLib=$COMP_PATH_ROOT/daemon/Out
export COMP_PATH_AndroidNdkLinux=$NDK_PATH