Diffstat (limited to 'arch/tile/mm')
-rw-r--r-- | arch/tile/mm/init.c | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index bfb3127b4df9..a092e393bd20 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,8 +254,8 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * Everything else that isn't data or bss is heap, so mark it
 	 * with the initial heap home (hash-for-home, or this cpu).  This
 	 * includes any addresses after the loaded image and any address before
-	 * _einitdata, since we already captured the case of text before
-	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
+	 * __init_end, since we already captured the case of text before
+	 * _sinittext, and __pa(einittext) is approximately __pa(__init_begin).
 	 *
 	 * All the LOWMEM pages that we mark this way will get their
 	 * struct page homecache properly marked later, in set_page_homes().
@@ -263,7 +263,7 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * homes, but with a zero free_time we don't have to actually
 	 * do a flush action the first time we use them, either.
 	 */
-	if (address >= (ulong) _end || address < (ulong) _einitdata)
+	if (address >= (ulong) _end || address < (ulong) __init_end)
 		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
 	/* Use hash-for-home if requested for data/bss. */
@@ -632,7 +632,7 @@ int devmem_is_allowed(unsigned long pagenr)
 {
 	return pagenr < kaddr_to_pfn(_end) &&
 		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
-		  pagenr < kaddr_to_pfn(_einitdata)) &&
+		  pagenr < kaddr_to_pfn(__init_end)) &&
 		!(pagenr >= kaddr_to_pfn(_sinittext) ||
 		  pagenr <= kaddr_to_pfn(_einittext-1));
 }
@@ -975,8 +975,8 @@ void free_initmem(void)
 
 	/* Free the data pages that we won't use again after init. */
 	free_init_pages("unused kernel data",
-			(unsigned long)_sinitdata,
-			(unsigned long)_einitdata);
+			(unsigned long)__init_begin,
+			(unsigned long)__init_end);
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
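The patch replaces the tile-specific _sinitdata/_einitdata section markers with the generic __init_begin/__init_end linker symbols. As a minimal sketch (not part of this patch), the snippet below shows how those generic symbols are typically declared and consumed; __init_begin and __init_end come from the kernel linker script and are declared in include/asm-generic/sections.h, while init_section_bytes() is a hypothetical helper used only for illustration.

/*
 * Sketch only: the generic init-section bounds are exported by the
 * linker script and declared in include/asm-generic/sections.h.
 */
extern char __init_begin[], __init_end[];	/* bounds of the .init sections */

/* Hypothetical helper: number of bytes spanned by the init sections. */
static inline unsigned long init_section_bytes(void)
{
	return (unsigned long)__init_end - (unsigned long)__init_begin;
}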