mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-07 04:09:21 +00:00
[S390] Fix section mismatch warnings.
This fixes the last remaining section mismatch warnings in s390
architecture code. It also reveals a real bug introduced by... me
with git commit 2069e978d5
("[S390] sparsemem vmemmap: initialize memmap.")
Calling the generic vmemmap_alloc_block() function to get initialized
memory is a nice idea, however that function is __meminit annotated
and therefore the function might be gone if we try to call it later.
This can happen if a DCSS segment gets added.
So basically revert the patch and clear the memmap explicitly to fix
the original bug.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent
bebd9a455b
commit
67060d9c1f
3 changed files with 15 additions and 7 deletions
|
@@ -1089,7 +1089,7 @@ out:
|
|||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
|
||||
int smp_rescan_cpus(void)
|
||||
int __ref smp_rescan_cpus(void)
|
||||
{
|
||||
cpumask_t newcpus;
|
||||
int cpu;
|
||||
|
|
|
@@ -27,12 +27,19 @@ struct memory_segment {
|
|||
|
||||
static LIST_HEAD(mem_segs);
|
||||
|
||||
static pud_t *vmem_pud_alloc(void)
|
||||
static void __ref *vmem_alloc_pages(unsigned int order)
|
||||
{
|
||||
if (slab_is_available())
|
||||
return (void *)__get_free_pages(GFP_KERNEL, order);
|
||||
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
|
||||
}
|
||||
|
||||
static inline pud_t *vmem_pud_alloc(void)
|
||||
{
|
||||
pud_t *pud = NULL;
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
|
||||
pud = vmem_alloc_pages(2);
|
||||
if (!pud)
|
||||
return NULL;
|
||||
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
|
||||
|
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
|
|||
return pud;
|
||||
}
|
||||
|
||||
static pmd_t *vmem_pmd_alloc(void)
|
||||
static inline pmd_t *vmem_pmd_alloc(void)
|
||||
{
|
||||
pmd_t *pmd = NULL;
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
|
||||
pmd = vmem_alloc_pages(2);
|
||||
if (!pmd)
|
||||
return NULL;
|
||||
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
|
||||
|
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
|
|||
if (pte_none(*pt_dir)) {
|
||||
unsigned long new_page;
|
||||
|
||||
new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
|
||||
new_page =__pa(vmem_alloc_pages(0));
|
||||
if (!new_page)
|
||||
goto out;
|
||||
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
|
||||
*pt_dir = pte;
|
||||
}
|
||||
}
|
||||
memset(start, 0, nr * sizeof(struct page));
|
||||
ret = 0;
|
||||
out:
|
||||
flush_tlb_kernel_range(start_addr, end_addr);
|
||||
|
|
|
@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
|
|||
put_online_cpus();
|
||||
}
|
||||
|
||||
static void sclp_cpu_change_notify(struct work_struct *work)
|
||||
static void __ref sclp_cpu_change_notify(struct work_struct *work)
|
||||
{
|
||||
smp_rescan_cpus();
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue