msm: ipa: Remove usage of stack memory

When stack memory is provided to HW as part of descriptor
it can lead to cache alignment issues. Make changes to
use heap memory wherever applicable.

Change-Id: I666f98cf2ec45a4743db0ab7bc6d2df821cce84a
Acked-by: Chaitanya Pratapa <cpratapa@qti.qualcomm.com>
Signed-off-by: Sridhar Ancha <sancha@codeaurora.org>
This commit is contained in:
Sridhar Ancha 2016-04-25 21:04:46 +05:30 committed by syphyr
parent cb68db0577
commit f290807c52
3 changed files with 287 additions and 113 deletions

View File

@ -1392,7 +1392,7 @@ bail:
static int ipa_init_smem_region(int memory_region_size,
int memory_region_offset)
{
struct ipa_hw_imm_cmd_dma_shared_mem cmd;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc;
struct ipa_mem_buffer mem;
int rc;
@ -1401,7 +1401,6 @@ static int ipa_init_smem_region(int memory_region_size,
return 0;
memset(&desc, 0, sizeof(desc));
memset(&cmd, 0, sizeof(cmd));
memset(&mem, 0, sizeof(mem));
mem.size = memory_region_size;
@ -1413,13 +1412,22 @@ static int ipa_init_smem_region(int memory_region_size,
}
memset(mem.base, 0, mem.size);
cmd.size = mem.size;
cmd.system_addr = mem.phys_base;
cmd.local_addr = ipa_ctx->smem_restricted_bytes +
cmd = kzalloc(sizeof(*cmd),
GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
cmd->size = mem.size;
cmd->system_addr = mem.phys_base;
cmd->local_addr = ipa_ctx->smem_restricted_bytes +
memory_region_offset;
desc.opcode = IPA_DMA_SHARED_MEM;
desc.pyld = &cmd;
desc.len = sizeof(cmd);
desc.pyld = cmd;
desc.len = sizeof(*cmd);
desc.type = IPA_IMM_CMD_DESC;
rc = ipa_send_cmd(1, &desc);
@ -1428,6 +1436,8 @@ static int ipa_init_smem_region(int memory_region_size,
rc = -EFAULT;
}
kfree(cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
mem.phys_base);
@ -1962,7 +1972,7 @@ int _ipa_init_sram_v2(void)
{
u32 *ipa_sram_mmio;
unsigned long phys_addr;
struct ipa_hw_imm_cmd_dma_shared_mem cmd = {0};
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc = {0};
struct ipa_mem_buffer mem;
int rc = 0;
@ -2002,11 +2012,19 @@ int _ipa_init_sram_v2(void)
}
memset(mem.base, 0, mem.size);
cmd.size = mem.size;
cmd.system_addr = mem.phys_base;
cmd.local_addr = IPA_STATUS_CLEAR_OFST;
cmd = kzalloc(sizeof(*cmd),
GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
cmd->size = mem.size;
cmd->system_addr = mem.phys_base;
cmd->local_addr = IPA_STATUS_CLEAR_OFST;
desc.opcode = IPA_DMA_SHARED_MEM;
desc.pyld = &cmd;
desc.pyld = (void *)cmd;
desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
desc.type = IPA_IMM_CMD_DESC;
@ -2015,6 +2033,8 @@ int _ipa_init_sram_v2(void)
rc = -EFAULT;
}
kfree(cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@ -2103,7 +2123,7 @@ int _ipa_init_hdr_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local cmd;
struct ipa_hdr_init_local *cmd = NULL;
int rc = 0;
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@ -2115,13 +2135,20 @@ int _ipa_init_hdr_v2(void)
}
memset(mem.base, 0, mem.size);
cmd.hdr_table_src_addr = mem.phys_base;
cmd.size_hdr_table = mem.size;
cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
cmd->hdr_table_src_addr = mem.phys_base;
cmd->size_hdr_table = mem.size;
cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(modem_hdr_ofst);
desc.opcode = IPA_HDR_INIT_LOCAL;
desc.pyld = &cmd;
desc.pyld = (void *)cmd;
desc.len = sizeof(struct ipa_hdr_init_local);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
@ -2131,6 +2158,8 @@ int _ipa_init_hdr_v2(void)
rc = -EFAULT;
}
kfree(cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@ -2139,8 +2168,8 @@ int _ipa_init_hdr_v2_5(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local cmd = { 0 };
struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd = { 0 };
struct ipa_hdr_init_local *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@ -2151,25 +2180,35 @@ int _ipa_init_hdr_v2_5(void)
}
memset(mem.base, 0, mem.size);
cmd.hdr_table_src_addr = mem.phys_base;
cmd.size_hdr_table = mem.size;
cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
mem.phys_base);
return -ENOMEM;
}
memset(cmd, 0, sizeof(struct ipa_hdr_init_local));
cmd->hdr_table_src_addr = mem.phys_base;
cmd->size_hdr_table = mem.size;
cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(modem_hdr_ofst);
desc.opcode = IPA_HDR_INIT_LOCAL;
desc.pyld = &cmd;
desc.pyld = (void *)cmd;
desc.len = sizeof(struct ipa_hdr_init_local);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa_send_cmd(1, &desc)) {
IPAERR("fail to send immediate command\n");
dma_free_coherent(ipa_ctx->pdev,
mem.size, mem.base,
kfree(cmd);
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
mem.phys_base);
return -EFAULT;
}
kfree(cmd);
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
@ -2183,18 +2222,29 @@ int _ipa_init_hdr_v2_5(void)
memset(mem.base, 0, mem.size);
memset(&desc, 0, sizeof(desc));
dma_cmd.system_addr = mem.phys_base;
dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
if (dma_cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
dma_free_coherent(ipa_ctx->pdev,
mem.size,
mem.base,
mem.phys_base);
return -ENOMEM;
}
dma_cmd->system_addr = mem.phys_base;
dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
dma_cmd.size = mem.size;
dma_cmd->size = mem.size;
desc.opcode = IPA_DMA_SHARED_MEM;
desc.pyld = &dma_cmd;
desc.pyld = (void *)dma_cmd;
desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa_send_cmd(1, &desc)) {
IPAERR("fail to send immediate command\n");
kfree(dma_cmd);
dma_free_coherent(ipa_ctx->pdev,
mem.size,
mem.base,
@ -2204,8 +2254,9 @@ int _ipa_init_hdr_v2_5(void)
ipa_write_reg(ipa_ctx->mmio,
IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST,
dma_cmd.local_addr);
dma_cmd->local_addr);
kfree(dma_cmd);
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return 0;
@ -2221,7 +2272,7 @@ int _ipa_init_rt4_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_routing_init v4_cmd;
struct ipa_ip_v4_routing_init *v4_cmd = NULL;
u32 *entry;
int i;
int rc = 0;
@ -2246,15 +2297,22 @@ int _ipa_init_rt4_v2(void)
entry++;
}
v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 routing init command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
desc.opcode = IPA_IP_V4_ROUTING_INIT;
v4_cmd.ipv4_rules_addr = mem.phys_base;
v4_cmd.size_ipv4_rules = mem.size;
v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
v4_cmd->ipv4_rules_addr = mem.phys_base;
v4_cmd->size_ipv4_rules = mem.size;
v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_rt_ofst);
IPADBG("putting Routing IPv4 rules to phys 0x%x",
v4_cmd.ipv4_addr);
v4_cmd->ipv4_addr);
desc.pyld = &v4_cmd;
desc.pyld = (void *)v4_cmd;
desc.len = sizeof(struct ipa_ip_v4_routing_init);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
@ -2264,6 +2322,8 @@ int _ipa_init_rt4_v2(void)
rc = -EFAULT;
}
kfree(v4_cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@ -2272,7 +2332,7 @@ int _ipa_init_rt6_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_routing_init v6_cmd;
struct ipa_ip_v6_routing_init *v6_cmd = NULL;
u32 *entry;
int i;
int rc = 0;
@ -2297,15 +2357,22 @@ int _ipa_init_rt6_v2(void)
entry++;
}
v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 routing init command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
desc.opcode = IPA_IP_V6_ROUTING_INIT;
v6_cmd.ipv6_rules_addr = mem.phys_base;
v6_cmd.size_ipv6_rules = mem.size;
v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
v6_cmd->ipv6_rules_addr = mem.phys_base;
v6_cmd->size_ipv6_rules = mem.size;
v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_rt_ofst);
IPADBG("putting Routing IPv6 rules to phys 0x%x",
v6_cmd.ipv6_addr);
v6_cmd->ipv6_addr);
desc.pyld = &v6_cmd;
desc.pyld = (void *)v6_cmd;
desc.len = sizeof(struct ipa_ip_v6_routing_init);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
@ -2315,6 +2382,8 @@ int _ipa_init_rt6_v2(void)
rc = -EFAULT;
}
kfree(v6_cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@ -2323,7 +2392,7 @@ int _ipa_init_flt4_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_filter_init v4_cmd;
struct ipa_ip_v4_filter_init *v4_cmd = NULL;
u32 *entry;
int i;
int rc = 0;
@ -2346,15 +2415,22 @@ int _ipa_init_flt4_v2(void)
entry++;
}
v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 filter init command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
desc.opcode = IPA_IP_V4_FILTER_INIT;
v4_cmd.ipv4_rules_addr = mem.phys_base;
v4_cmd.size_ipv4_rules = mem.size;
v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
v4_cmd->ipv4_rules_addr = mem.phys_base;
v4_cmd->size_ipv4_rules = mem.size;
v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_flt_ofst);
IPADBG("putting Filtering IPv4 rules to phys 0x%x",
v4_cmd.ipv4_addr);
v4_cmd->ipv4_addr);
desc.pyld = &v4_cmd;
desc.pyld = (void *)v4_cmd;
desc.len = sizeof(struct ipa_ip_v4_filter_init);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
@ -2364,6 +2440,8 @@ int _ipa_init_flt4_v2(void)
rc = -EFAULT;
}
kfree(v4_cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@ -2372,7 +2450,7 @@ int _ipa_init_flt6_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_filter_init v6_cmd;
struct ipa_ip_v6_filter_init *v6_cmd = NULL;
u32 *entry;
int i;
int rc = 0;
@ -2395,15 +2473,22 @@ int _ipa_init_flt6_v2(void)
entry++;
}
v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 filter init command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
desc.opcode = IPA_IP_V6_FILTER_INIT;
v6_cmd.ipv6_rules_addr = mem.phys_base;
v6_cmd.size_ipv6_rules = mem.size;
v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
v6_cmd->ipv6_rules_addr = mem.phys_base;
v6_cmd->size_ipv6_rules = mem.size;
v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_flt_ofst);
IPADBG("putting Filtering IPv6 rules to phys 0x%x",
v6_cmd.ipv6_addr);
v6_cmd->ipv6_addr);
desc.pyld = &v6_cmd;
desc.pyld = (void *)v6_cmd;
desc.len = sizeof(struct ipa_ip_v6_filter_init);
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
@ -2413,6 +2498,8 @@ int _ipa_init_flt6_v2(void)
rc = -EFAULT;
}
kfree(v6_cmd);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}

View File

@ -266,8 +266,8 @@ int __ipa_commit_hdr_v2(void)
{
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_system cmd;
struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd;
struct ipa_hdr_init_system *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
int rc = -EFAULT;
if (ipa_generate_hdr_hw_tbl(&mem)) {
@ -279,14 +279,21 @@ int __ipa_commit_hdr_v2(void)
if (mem.size > IPA_MEM_PART(apps_hdr_size)) {
IPAERR("tbl too big, needed %d avail %d\n", mem.size,
IPA_MEM_PART(apps_hdr_size));
goto end;
goto fail_send_cmd;
} else {
dma_cmd.system_addr = mem.phys_base;
dma_cmd.size = mem.size;
dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
if (dma_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
dma_cmd->system_addr = mem.phys_base;
dma_cmd->size = mem.size;
dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
desc.opcode = IPA_DMA_SHARED_MEM;
desc.pyld = &dma_cmd;
desc.pyld = (void *)dma_cmd;
desc.len =
sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
}
@ -294,11 +301,17 @@ int __ipa_commit_hdr_v2(void)
if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
IPAERR("tbl too big, needed %d avail %d\n", mem.size,
IPA_MEM_PART(apps_hdr_size_ddr));
goto end;
goto fail_send_cmd;
} else {
cmd.hdr_table_addr = mem.phys_base;
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd == NULL) {
IPAERR("fail to alloc hdr init cmd\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
cmd->hdr_table_addr = mem.phys_base;
desc.opcode = IPA_HDR_INIT_SYSTEM;
desc.pyld = &cmd;
desc.pyld = (void *)cmd;
desc.len = sizeof(struct ipa_hdr_init_system);
}
}
@ -311,6 +324,10 @@ int __ipa_commit_hdr_v2(void)
else
rc = 0;
kfree(dma_cmd);
kfree(cmd);
fail_send_cmd:
if (ipa_ctx->hdr_tbl_lcl) {
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
mem.phys_base);
@ -322,6 +339,9 @@ int __ipa_commit_hdr_v2(void)
ipa_ctx->hdr_mem.base,
ipa_ctx->hdr_mem.phys_base);
ipa_ctx->hdr_mem = mem;
} else {
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
mem.phys_base);
}
}
@ -335,10 +355,10 @@ int __ipa_commit_hdr_v2_5(void)
struct ipa_mem_buffer hdr_mem;
struct ipa_mem_buffer ctx_mem;
struct ipa_mem_buffer aligned_ctx_mem;
struct ipa_hdr_init_system hdr_init_cmd = {0};
struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
struct ipa_register_write reg_write_cmd = {0};
struct ipa_hdr_init_system *hdr_init_cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
struct ipa_register_write *reg_write_cmd = NULL;
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@ -361,15 +381,21 @@ int __ipa_commit_hdr_v2_5(void)
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
IPA_MEM_PART(apps_hdr_size));
goto end;
goto fail_send_cmd1;
} else {
dma_cmd_hdr.system_addr = hdr_mem.phys_base;
dma_cmd_hdr.size = hdr_mem.size;
dma_cmd_hdr.local_addr =
dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
if (dma_cmd_hdr == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
goto fail_send_cmd1;
}
dma_cmd_hdr->system_addr = hdr_mem.phys_base;
dma_cmd_hdr->size = hdr_mem.size;
dma_cmd_hdr->local_addr =
ipa_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
desc[0].opcode = IPA_DMA_SHARED_MEM;
desc[0].pyld = &dma_cmd_hdr;
desc[0].pyld = (void *)dma_cmd_hdr;
desc[0].len =
sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
}
@ -377,11 +403,18 @@ int __ipa_commit_hdr_v2_5(void)
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
IPA_MEM_PART(apps_hdr_size_ddr));
goto end;
goto fail_send_cmd1;
} else {
hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
GFP_ATOMIC);
if (hdr_init_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
goto fail_send_cmd1;
}
hdr_init_cmd->hdr_table_addr = hdr_mem.phys_base;
desc[0].opcode = IPA_HDR_INIT_SYSTEM;
desc[0].pyld = &hdr_init_cmd;
desc[0].pyld = (void *)hdr_init_cmd;
desc[0].len = sizeof(struct ipa_hdr_init_system);
}
}
@ -395,15 +428,22 @@ int __ipa_commit_hdr_v2_5(void)
IPAERR("tbl too big needed %d avail %d\n",
aligned_ctx_mem.size,
proc_ctx_size);
goto end;
goto fail_send_cmd1;
} else {
dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
dma_cmd_ctx.size = aligned_ctx_mem.size;
dma_cmd_ctx.local_addr =
dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
GFP_ATOMIC);
if (dma_cmd_ctx == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
goto fail_send_cmd1;
}
dma_cmd_ctx->system_addr = aligned_ctx_mem.phys_base;
dma_cmd_ctx->size = aligned_ctx_mem.size;
dma_cmd_ctx->local_addr =
ipa_ctx->smem_restricted_bytes +
proc_ctx_ofst;
desc[1].opcode = IPA_DMA_SHARED_MEM;
desc[1].pyld = &dma_cmd_ctx;
desc[1].pyld = (void *)dma_cmd_ctx;
desc[1].len =
sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
}
@ -413,15 +453,23 @@ int __ipa_commit_hdr_v2_5(void)
IPAERR("tbl too big, needed %d avail %d\n",
aligned_ctx_mem.size,
proc_ctx_size_ddr);
goto end;
goto fail_send_cmd1;
} else {
reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
reg_write_cmd.value = aligned_ctx_mem.phys_base;
reg_write_cmd.value_mask =
reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
GFP_ATOMIC);
if (reg_write_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
goto fail_send_cmd1;
}
reg_write_cmd->offset =
IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
reg_write_cmd->value = aligned_ctx_mem.phys_base;
reg_write_cmd->value_mask =
~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
desc[1].pyld = &reg_write_cmd;
desc[1].pyld = (void *)reg_write_cmd;
desc[1].opcode = IPA_REGISTER_WRITE;
desc[1].len = sizeof(reg_write_cmd);
desc[1].len = sizeof(*reg_write_cmd);
}
}
desc[1].type = IPA_IMM_CMD_DESC;
@ -432,22 +480,16 @@ int __ipa_commit_hdr_v2_5(void)
else
rc = 0;
if (ipa_ctx->hdr_tbl_lcl) {
dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, hdr_mem.base,
hdr_mem.phys_base);
} else {
if (!rc) {
if (ipa_ctx->hdr_mem.phys_base)
dma_free_coherent(ipa_ctx->pdev,
ipa_ctx->hdr_mem.size,
ipa_ctx->hdr_mem.base,
ipa_ctx->hdr_mem.phys_base);
ipa_ctx->hdr_mem = hdr_mem;
}
}
fail_send_cmd1:
kfree(dma_cmd_hdr);
kfree(hdr_init_cmd);
kfree(dma_cmd_ctx);
kfree(reg_write_cmd);
if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, ctx_mem.base,
dma_free_coherent(ipa_ctx->pdev, ctx_mem.size,
ctx_mem.base,
ctx_mem.phys_base);
} else {
if (!rc) {
@ -457,9 +499,31 @@ int __ipa_commit_hdr_v2_5(void)
ipa_ctx->hdr_proc_ctx_mem.base,
ipa_ctx->hdr_proc_ctx_mem.phys_base);
ipa_ctx->hdr_proc_ctx_mem = ctx_mem;
} else {
dma_free_coherent(ipa_ctx->pdev, ctx_mem.size,
ctx_mem.base,
ctx_mem.phys_base);
}
}
if (ipa_ctx->hdr_tbl_lcl) {
dma_free_coherent(ipa_ctx->pdev, hdr_mem.size,
hdr_mem.base,
hdr_mem.phys_base);
} else {
if (!rc) {
if (ipa_ctx->hdr_mem.phys_base)
dma_free_coherent(ipa_ctx->pdev,
ipa_ctx->hdr_mem.size,
ipa_ctx->hdr_mem.base,
ipa_ctx->hdr_mem.phys_base);
ipa_ctx->hdr_mem = hdr_mem;
} else {
dma_free_coherent(ipa_ctx->pdev, hdr_mem.size,
hdr_mem.base,
hdr_mem.phys_base);
}
}
end:
return rc;
}

View File

@ -697,8 +697,8 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
struct ipa_desc desc[2];
struct ipa_mem_buffer body;
struct ipa_mem_buffer head;
struct ipa_hw_imm_cmd_dma_shared_mem cmd1 = {0};
struct ipa_hw_imm_cmd_dma_shared_mem cmd2 = {0};
struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
u16 avail;
u32 num_modem_rt_index;
int rc = 0;
@ -748,34 +748,52 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
goto fail_send_cmd;
}
cmd1.size = head.size;
cmd1.system_addr = head.phys_base;
cmd1.local_addr = local_addr1;
cmd1 = kmalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
GFP_KERNEL);
if (cmd1 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
goto fail_send_cmd;
}
memset(cmd1, 0, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem));
cmd1->size = head.size;
cmd1->system_addr = head.phys_base;
cmd1->local_addr = local_addr1;
desc[0].opcode = IPA_DMA_SHARED_MEM;
desc[0].pyld = &cmd1;
desc[0].pyld = (void *)cmd1;
desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
desc[0].type = IPA_IMM_CMD_DESC;
if (lcl) {
cmd2.size = body.size;
cmd2.system_addr = body.phys_base;
cmd2.local_addr = local_addr2;
cmd2 = kmalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
GFP_KERNEL);
if (cmd2 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
goto fail_send_cmd1;
}
memset(cmd2, 0, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem));
cmd2->size = body.size;
cmd2->system_addr = body.phys_base;
cmd2->local_addr = local_addr2;
desc[1].opcode = IPA_DMA_SHARED_MEM;
desc[1].pyld = &cmd2;
desc[1].pyld = (void *)cmd2;
desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
desc[1].type = IPA_IMM_CMD_DESC;
if (ipa_send_cmd(2, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
goto fail_send_cmd;
goto fail_send_cmd2;
}
} else {
if (ipa_send_cmd(1, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
goto fail_send_cmd;
goto fail_send_cmd1;
}
}
@ -786,6 +804,11 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
}
__ipa_reap_sys_rt_tbls(ip);
fail_send_cmd2:
kfree(cmd2);
fail_send_cmd1:
kfree(cmd1);
fail_send_cmd:
dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
if (body.size)