From e929a633ab1af1fe958e347e5cf32e082d300191 Mon Sep 17 00:00:00 2001 From: yjiao Date: Mon, 1 Jun 2015 12:37:49 -0400 Subject: [PATCH 01/29] Push my initial work for uploading memory manager --- arch/lib/Kconfig | 17 ++++-- arch/lib/Makefile | 3 +- arch/lib/defconfig | 1 + arch/lib/fs.c | 3 + arch/lib/glue.c | 8 ++- arch/lib/include/asm/atomic.h | 5 +- arch/lib/sysctl.c | 8 +++ include/linux/bootmem.h | 6 ++ init/main.c | 4 ++ mm/Makefile | 2 +- mm/bootmem.c | 10 +++- mm/page_alloc.c | 108 ++++++++++++++++++++++++++++++++++ mm/slib.c | 30 ++++++---- 13 files changed, 184 insertions(+), 21 deletions(-) diff --git a/arch/lib/Kconfig b/arch/lib/Kconfig index eef92af0e711..b61d0ac24d09 100644 --- a/arch/lib/Kconfig +++ b/arch/lib/Kconfig @@ -61,9 +61,6 @@ config BASE_FULL config SELECT_MEMORY_MODEL def_bool n -config FLAT_NODE_MEM_MAP - def_bool n - config PAGEFLAGS_EXTENDED def_bool n @@ -121,4 +118,16 @@ source "crypto/Kconfig" source "lib/Kconfig" config SLIB - def_bool y \ No newline at end of file + def_bool y + +config INIT_DEBUG_ALWAYS + def_bool y + +config HAVE_MEMBLOCK + def_bool y + +config DEBUG_INFO + def_bool y + +config FLAT_NODE_MEM_MAP + def_bool y diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 47bd08ce6e1a..0fa5e249174d 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -99,7 +99,8 @@ kernel/time/_to_keep=time.o kernel/rcu_to_keep=rcu/srcu.o rcu/pdate.o rcu/tiny.o kernel/locking_to_keep=locking/mutex.o kernel/bpf_to_keep=bpf/core.o -mm/_to_keep=util.o list_lru.o slib.o +mm/_to_keep=util.o list_lru.o slib.o page_alloc.o memblock.o mmzone.o slib_env.o \ +bootmem.o crypto/_to_keep=aead.o ahash.o shash.o api.o algapi.o cipher.o compress.o proc.o \ crc32c_generic.o drivers/base/_to_keep=class.o core.o bus.o dd.o driver.o devres.o module.o map.o diff --git a/arch/lib/defconfig b/arch/lib/defconfig index 9307e6f7eb41..b0ad77cc9c32 100644 --- a/arch/lib/defconfig +++ b/arch/lib/defconfig @@ -651,3 +651,4 @@ CONFIG_NLATTR=y # CONFIG_CORDIC is not set # CONFIG_DDR is not set # CONFIG_ARCH_HAS_SG_CHAIN is not set + diff --git a/arch/lib/fs.c b/arch/lib/fs.c index 33efe5f1da32..75bb1c9a5f78 100644 --- a/arch/lib/fs.c +++ b/arch/lib/fs.c @@ -64,7 +64,10 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, return -ENOSYS; } +#if 0 unsigned int nr_free_buffer_pages(void) { return 65535; } +#endif + diff --git a/arch/lib/glue.c b/arch/lib/glue.c index 93f72d1f5568..19eabe7303e1 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -37,9 +37,15 @@ unsigned long avenrun[3]; /* defined in mm/page_alloc.c, used in net/xfrm/xfrm_hash.c */ int hashdist = HASHDIST_DEFAULT; /* defined in mm/page_alloc.c */ -struct pglist_data __refdata contig_page_data; +//struct pglist_data __refdata contig_page_data; /* defined in linux/mmzone.h mm/memory.c */ struct page *mem_map = 0; +unsigned long max_mapnr; +unsigned long highest_memmap_pfn __read_mostly; + +/* vmscan */ +unsigned long vm_total_pages; + /* used during boot. 
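
[Review note] The contig_page_data stub can be dropped here because the arch/lib Makefile hunk above now links the real mm/bootmem.c, which provides the definition. Under the flat memory model this series enables (CONFIG_FLAT_NODE_MEM_MAP=y), every node lookup collapses onto that single instance; for reference, the mapping in include/linux/mmzone.h is:

	extern struct pglist_data contig_page_data;
	#define NODE_DATA(nid)		(&contig_page_data)

so the NODE_DATA(0)->bdata that slib_env.c uses later resolves to contig_page_data.bdata.
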
*/ struct tvec_base boot_tvec_bases; /* used by sysinfo in kernel/timer.c */ diff --git a/arch/lib/include/asm/atomic.h b/arch/lib/include/asm/atomic.h index 444a9535c97b..f72c3a8ca48c 100644 --- a/arch/lib/include/asm/atomic.h +++ b/arch/lib/include/asm/atomic.h @@ -13,7 +13,10 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } #define atomic64_read(v) (*(volatile long *)&(v)->counter) -void atomic64_add(long i, atomic64_t *v); +static inline void atomic64_add(long i, atomic64_t *v) +{ + v->counter += i; +} static inline void atomic64_sub(long i, atomic64_t *v) { v->counter -= i; diff --git a/arch/lib/sysctl.c b/arch/lib/sysctl.c index 5f08f9f97103..bda173a6ce48 100644 --- a/arch/lib/sysctl.c +++ b/arch/lib/sysctl.c @@ -111,7 +111,11 @@ int dirty_background_ratio = 10; unsigned int dirty_expire_interval = 30 * 100; unsigned int dirty_writeback_interval = 5 * 100; unsigned long dirty_background_bytes = 0; + +#if 0 int percpu_pagelist_fraction = 0; +#endif + int panic_timeout = 0; int panic_on_oops = 0; int printk_delay_msec = 0; @@ -122,7 +126,11 @@ DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); int pid_max = PID_MAX_DEFAULT; int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; + +#if 0 int min_free_kbytes = 1024; +#endif + int max_threads = 100; int laptop_mode = 0; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 0995c2de8162..6bb277570624 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -37,6 +37,9 @@ typedef struct bootmem_data { extern bootmem_data_t bootmem_node_data[]; #endif + +extern void link_bootmem(bootmem_data_t *bdata); + extern unsigned long bootmem_bootmap_pages(unsigned long); extern unsigned long init_bootmem_node(pg_data_t *pgdat, @@ -55,6 +58,9 @@ extern void free_bootmem_node(pg_data_t *pgdat, extern void free_bootmem(unsigned long physaddr, unsigned long size); extern void free_bootmem_late(unsigned long physaddr, unsigned long size); + + + /* * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, * the architecture-specific code should honor this). diff --git a/init/main.c b/init/main.c index 2115055faeac..b77ba6b450ce 100644 --- a/init/main.c +++ b/init/main.c @@ -782,10 +782,14 @@ int __init_or_module do_one_initcall(initcall_t fn) if (initcall_blacklisted(fn)) return -EPERM; +#ifndef CONFIG_INIT_DEBUG_ALWAYS if (initcall_debug) ret = do_one_initcall_debug(fn); else ret = fn(); +#else + ret = do_one_initcall_debug(fn); +#endif msgbuf[0] = 0; diff --git a/mm/Makefile b/mm/Makefile index 7d8314f95ce3..2f41f0024bc9 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SLOB) += slob.o -obj-$(CONFIG_SLIB) += slib.o +obj-$(CONFIG_SLIB) += slib.o slib_env.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_KSM) += ksm.o obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o diff --git a/mm/bootmem.c b/mm/bootmem.c index 477be696511d..7121b1228a6f 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -48,7 +48,7 @@ static int __init bootmem_debug_setup(char *buf) early_param("bootmem_debug", bootmem_debug_setup); #define bdebug(fmt, args...) 
({ \ - if (unlikely(bootmem_debug)) \ + if (unlikely(!bootmem_debug)) \ printk(KERN_INFO \ "bootmem::%s " fmt, \ __func__, ## args); \ @@ -75,7 +75,7 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages) /* * link bdata in order */ -static void __init link_bootmem(bootmem_data_t *bdata) +void __init link_bootmem(bootmem_data_t *bdata) { bootmem_data_t *ent; @@ -174,6 +174,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) struct page *page; unsigned long *map, start, end, pages, count = 0; + printk("In %s node_bootmem_map %p\n", __func__, bdata->node_bootmem_map); + if (!bdata->node_bootmem_map) return 0; @@ -276,11 +278,13 @@ unsigned long __init free_all_bootmem(void) reset_all_zones_managed_pages(); + list_for_each_entry(bdata, &bdata_list, list) total_pages += free_all_bootmem_core(bdata); totalram_pages += total_pages; - + printk("I am %s\n", __func__); + return total_pages; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2fd31aebef30..2bf193807995 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -62,11 +62,14 @@ #include #include + #include #include #include #include "internal.h" +#include "slib_env.h" + /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) @@ -110,7 +113,11 @@ EXPORT_SYMBOL(node_states); /* Protect totalram_pages and zone->managed_pages */ static DEFINE_SPINLOCK(managed_page_count_lock); +#if 0 unsigned long totalram_pages __read_mostly; +#endif + + unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; /* @@ -122,6 +129,8 @@ unsigned long totalcma_pages __read_mostly; unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; + + gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; #ifdef CONFIG_PM_SLEEP @@ -167,6 +176,8 @@ int pageblock_order __read_mostly; static void __free_pages_ok(struct page *page, unsigned int order); + +#if 0 /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) @@ -190,6 +201,7 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { #endif 32, }; +#endif EXPORT_SYMBOL(totalram_pages); @@ -1464,6 +1476,7 @@ void drain_all_pages(struct zone *zone) * cpu to drain that CPU pcps and on_each_cpu_mask * disables preemption as part of its processing */ + for_each_online_cpu(cpu) { struct per_cpu_pageset *pcp; struct zone *z; @@ -2834,6 +2847,37 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, return page; } +static void print_buddy_freelist(void) +{ + struct zone *zone; + unsigned int order, t; + struct list_head *curr; + unsigned long pfn; + int i = 0; + + + for_each_zone(zone) { + printk(KERN_INFO "I am zone %s %lu\n", zone->name, zone->present_pages); + if (zone->present_pages == 0) + goto out; + + for_each_migratetype_order(order, t) { + list_for_each(curr, &zone->free_area[order].free_list[t]) { + pfn = page_to_pfn(list_entry(curr, struct page, lru)); + + printk(KERN_INFO "%lu %d %d %d\n",pfn, order, t, i); + i++; + } + } + } +out: + printk(KERN_INFO "Totoal free page: %d\n", i); +} + + + +static int mem_initialized; + /* * This is the 'heart' of the zoned buddy allocator. 
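
[Review note] Two observations on the hunks above. First, the bdebug() change inverts its test (bootmem_debug -> !bootmem_debug), so the bootmem trace now fires precisely when bootmem_debug is NOT requested on the command line; presumably a temporary bring-up aid, but it should be reverted once the debug printks go. Second, in print_buddy_freelist(): "Totoal" should read "Total", and since the function walks every entry of every zone's free lists under printk, each call costs time proportional to the number of free pages.
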
*/ @@ -2852,6 +2896,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, .migratetype = gfpflags_to_migratetype(gfp_mask), }; + if (mem_initialized == 0) + init_memory_system(); + + print_buddy_freelist(); + + gfp_mask &= gfp_allowed_mask; lockdep_trace_alloc(gfp_mask); @@ -2861,6 +2911,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return NULL; + printk(KERN_INFO "I am %s\n", __func__); + /* * Check the zones suitable for the gfp_mask contain at least one * valid zone. It's possible to have an empty zonelist as a result @@ -2914,10 +2966,14 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; + + printk(KERN_INFO "I am %s\n", __func__); + return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); + /* * Common helper functions. */ @@ -2938,6 +2994,7 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) } EXPORT_SYMBOL(__get_free_pages); + unsigned long get_zeroed_page(gfp_t gfp_mask) { return __get_free_pages(gfp_mask | __GFP_ZERO, 0); @@ -2956,6 +3013,7 @@ void __free_pages(struct page *page, unsigned int order) EXPORT_SYMBOL(__free_pages); + void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { @@ -2966,6 +3024,8 @@ void free_pages(unsigned long addr, unsigned int order) EXPORT_SYMBOL(free_pages); + + /* * Page Fragment: * An arbitrary-length arbitrary-offset area of memory which resides @@ -3128,6 +3188,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) return (void *)addr; } + /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. * @size: the number of bytes to allocate @@ -3151,6 +3212,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) } EXPORT_SYMBOL(alloc_pages_exact); + /** * alloc_pages_exact_nid - allocate an exact number of physically-contiguous * pages on a node. @@ -3191,6 +3253,7 @@ void free_pages_exact(void *virt, size_t size) } EXPORT_SYMBOL(free_pages_exact); + /** * nr_free_zone_pages - count number of pages beyond high watermark * @offset: The zone index of the highest zone @@ -3220,16 +3283,29 @@ static unsigned long nr_free_zone_pages(int offset) return sum; } + + + /** * nr_free_buffer_pages - count number of pages beyond high watermark * * nr_free_buffer_pages() counts the number of pages which are beyond the high * watermark within ZONE_DMA and ZONE_NORMAL. 
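
[Review note] In the __alloc_pages_nodemask() hunk above, nothing ever sets mem_initialized, so the guard never closes and init_memory_system() plus the full free-list dump re-run on every allocation. Assuming the intent is one-shot initialization, a minimal sketch of the fix:

	if (mem_initialized == 0) {
		init_memory_system();
		mem_initialized = 1;	/* missing in the patch as posted */
	}

print_buddy_freelist() could then sit behind the same guard (or behind bootmem_debug) so steady-state allocations stay quiet.
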
*/ +#if 0 unsigned long nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } + +#else +unsigned long nr_free_buffer_pages(void) +{ + return 65535; +} + +#endif + EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /** @@ -3249,6 +3325,7 @@ static inline void show_node(struct zone *zone) printk("Node %d ", zone_to_nid(zone)); } +#if 0 void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages; @@ -3262,6 +3339,8 @@ void si_meminfo(struct sysinfo *val) EXPORT_SYMBOL(si_meminfo); +#endif + #ifdef CONFIG_NUMA void si_meminfo_node(struct sysinfo *val, int nid) { @@ -5094,6 +5173,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) { + printk("I am %s %d\n", __func__, pgdat->node_spanned_pages); + /* Skip empty nodes */ if (!pgdat->node_spanned_pages) return; @@ -5640,6 +5721,7 @@ void free_highmem_page(struct page *page) } #endif +#if 0 void __init mem_init_print_info(const char *str) { @@ -5694,6 +5776,9 @@ void __init mem_init_print_info(const char *str) str ? ", " : "", str ? str : ""); } +#endif + + /** * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved @@ -5889,6 +5974,7 @@ static void __setup_per_zone_wmarks(void) calculate_totalreserve_pages(); } + /** * setup_per_zone_wmarks - called when min_free_kbytes changes * or when memory is hot-{added|removed} @@ -5903,6 +5989,7 @@ void setup_per_zone_wmarks(void) mutex_unlock(&zonelists_mutex); } + /* * The inactive anon list should be small enough that the VM never has to * do too much work, but large enough that each inactive page has a chance @@ -5988,14 +6075,19 @@ int __meminit init_per_zone_wmark_min(void) pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", new_min_free_kbytes, user_min_free_kbytes); } + +#if 0 setup_per_zone_wmarks(); refresh_zone_stat_thresholds(); setup_per_zone_lowmem_reserve(); setup_per_zone_inactive_ratio(); +#endif return 0; } module_init(init_per_zone_wmark_min) +#if 0 + /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes @@ -6017,6 +6109,8 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, return 0; } +#endif + #ifdef CONFIG_NUMA int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -6051,6 +6145,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, } #endif +#if 0 /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() @@ -6068,6 +6163,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, return 0; } + + /* * percpu_pagelist_fraction - changes the pcp->high for each zone on each * cpu. 
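
[Review note] With setup_per_zone_wmarks() and setup_per_zone_lowmem_reserve() compiled out above, every zone keeps watermark[WMARK_MIN..WMARK_HIGH] at zero and an all-zero lowmem_reserve[]. Assuming the stock __zone_watermark_ok() logic, the check then degenerates to "succeed if a free block exists":

	/* mark == 0 and lowmem_reserve[] == 0 reduce the test to: */
	free_pages - ((1 << order) - 1) > 0

That is defensible for a library build with no kswapd or direct reclaim to drive, but it does mean the min/low/high machinery is inert here.
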
It is the fraction of total pages in each zone that a hot per cpu @@ -6111,8 +6208,14 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, return ret; } + +#endif + +#if 0 int hashdist = HASHDIST_DEFAULT; +#endif + #ifdef CONFIG_NUMA static int __init set_hashdist(char *str) { @@ -6124,6 +6227,8 @@ static int __init set_hashdist(char *str) __setup("hashdist=", set_hashdist); #endif +#if 0 + /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 @@ -6222,6 +6327,9 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } +#endif + + /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, unsigned long pfn) diff --git a/mm/slib.c b/mm/slib.c index 974c8aed0275..f72e0a1025d6 100644 --- a/mm/slib.c +++ b/mm/slib.c @@ -113,6 +113,8 @@ void kmem_cache_free(struct kmem_cache *cache, void *p) kfree(p); } +#if 0 + struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask) @@ -142,14 +144,10 @@ void __free_pages(struct page *page, unsigned int order) lib_free(page); } -void put_page(struct page *page) -{ - if (atomic_dec_and_test(&page->_count)) - lib_free(page); -} -unsigned long get_zeroed_page(gfp_t gfp_mask) +void free_pages(unsigned long addr, unsigned int order) { - return __get_free_pages(gfp_mask | __GFP_ZERO, 0); + if (addr != 0) + kfree((void *)addr); } void *alloc_pages_exact(size_t size, gfp_t gfp_mask) @@ -157,6 +155,11 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) return alloc_pages(gfp_mask, get_order(size)); } +unsigned long get_zeroed_page(gfp_t gfp_mask) +{ + return __get_free_pages(gfp_mask | __GFP_ZERO, 0); +} + unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { int size = (1 << order) * PAGE_SIZE; @@ -164,12 +167,16 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) return (unsigned long)p; } -void free_pages(unsigned long addr, unsigned int order) + +#endif + +void put_page(struct page *page) { - if (addr != 0) - kfree((void *)addr); + if (atomic_dec_and_test(&page->_count)) + lib_free(page); } + void *vmalloc(unsigned long size) { return lib_malloc(size); @@ -201,9 +208,12 @@ void free_percpu(void __percpu *ptr) { kfree(ptr); } +/* void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal) { return kzalloc(size, GFP_KERNEL); } +*/ + From c50cd207654ee7988a077c6f93f5e5fea29b6e7d Mon Sep 17 00:00:00 2001 From: yjiao Date: Mon, 1 Jun 2015 12:42:31 -0400 Subject: [PATCH 02/29] add slib_env to use page_alloc.c --- mm/slib_env.c | 283 ++++++++++++++++++++++++++++++++++++++++++++++++++ mm/slib_env.h | 17 +++ 2 files changed, 300 insertions(+) create mode 100644 mm/slib_env.c create mode 100644 mm/slib_env.h diff --git a/mm/slib_env.c b/mm/slib_env.c new file mode 100644 index 000000000000..03d1f14bb7a2 --- /dev/null +++ b/mm/slib_env.c @@ -0,0 +1,283 @@ +#include +#include +#include +#include +#include +#include +#include "slib_env.h" + + +static inline void +free_memmap(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *start_pg, *end_pg; + phys_addr_t pg, pgend; + + /* + * Convert start_pfn/end_pfn to a struct page pointer. + */ + start_pg = pfn_to_page(start_pfn - 1) + 1; + end_pg = pfn_to_page(end_pfn - 1) + 1; + + /* + * Convert to physical addresses, and + * round start upwards and end downwards. 
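
[Review note] The slib.c movement above is the pivot of the series: the trivial lib_malloc()-backed stand-ins for __alloc_pages_nodemask(), __free_pages(), __get_free_pages() and friends are compiled out, so page-level allocations now reach the real buddy allocator, while object-level allocations stay on the host allocator. Roughly, after this patch:

	kmalloc() / kmem_cache_alloc()     ->  lib_malloc()	(unchanged)
	alloc_pages() / __get_free_pages() ->  __alloc_pages_nodemask()
						in mm/page_alloc.c, fed by the
						memblock arena built below

put_page() stays in slib.c because releasing a struct page still goes through lib_free().
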
+ */ + pg = PAGE_ALIGN(__pa(start_pg)); + pgend = __pa(end_pg) & PAGE_MASK; + + /* + * If there are free pages between these, + * free the section of the memmap array. + */ + if (pg < pgend) + memblock_free_early(pg, pgend - pg); +} + + +/* + * The mem_map array can get very big. Free the unused area of the memory map. + */ +static void __init free_unused_memmap(void) +{ + unsigned long start, prev_end = 0; + struct memblock_region *reg; + + /* + * This relies on each bank being in address order. + * The banks are sorted previously in bootmem_init(). + */ + for_each_memblock(memory, reg) { + start = memblock_region_memory_base_pfn(reg); + + /* + * Align down here since the VM subsystem insists that the + * memmap entries are valid from the bank start aligned to + * MAX_ORDER_NR_PAGES. + */ + + start = round_down(start, MAX_ORDER_NR_PAGES); + + /* + * If we had a previous bank, and there is a space + * between the current bank and the previous, free it. + */ + if (prev_end && prev_end < start) + free_memmap(prev_end, start); + + /* + * Align up here since the VM subsystem insists that the + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. + */ + prev_end = ALIGN(memblock_region_memory_end_pfn(reg), + MAX_ORDER_NR_PAGES); + } +} + +#ifdef CONFIG_HIGHMEM +static inline void free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} +#endif + +static void __init free_highpages(void) +{ +#ifdef CONFIG_HIGHMEM + unsigned long max_low = max_low_pfn; + struct memblock_region *mem, *res; + + /* set highmem page free */ + for_each_memblock(memory, mem) { + unsigned long start = memblock_region_memory_base_pfn(mem); + unsigned long end = memblock_region_memory_end_pfn(mem); + + /* Ignore complete lowmem entries */ + if (end <= max_low) + continue; + + /* Truncate partial highmem entries */ + if (start < max_low) + start = max_low; + + /* Find and exclude any reserved regions */ + for_each_memblock(reserved, res) { + unsigned long res_start, res_end; + + res_start = memblock_region_reserved_base_pfn(res); + res_end = memblock_region_reserved_end_pfn(res); + + if (res_end < start) + continue; + if (res_start < start) + res_start = start; + if (res_start > end) + res_start = end; + if (res_end > end) + res_end = end; + if (res_start != start) + free_area_high(start, res_start); + start = res_end; + if (start == end) + break; + } + + /* And now free anything which remains */ + if (start < end) + free_area_high(start, end); + } +#endif +} + + +/* + * mem_init() marks the free areas in the mem_map and tells us how much + * memory is free. This is done after various parts of the system have + * claimed their memory after the kernel image. + */ +void __init mem_init(void) +{ + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); + + /* this will put all unused low memory onto the freelists */ + free_unused_memmap(); + free_all_bootmem(); + + free_highpages(); + + //mem_init_print_info(NULL); + +} + +static void __init zone_sizes_init(unsigned long min, unsigned long max_low, + unsigned long max_high) +{ + unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; + int i; + + /* + * initialise the zones. 
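
[Review note] The zone sizes filled in just below are hard-coded rather than derived from memblock: zone_size[0] = 194560 pages is 194560 * 4 KiB = 760 MiB of lowmem (the usual ARM split for PAGE_OFFSET 0xC0000000), and zone_size[1] = 329728 pages adds another 1288 MiB of highmem, i.e. 524288 pages = 2 GiB in total. setup_arch() below registers only 1 GiB with memblock, so the zones span twice the memory that actually exists; deriving both figures from the same source would avoid the mismatch.
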
+ */ + memset(zone_size, 0, sizeof(zone_size)); + memset(zhole_size, 0, sizeof(zhole_size)); + + zone_size[0] = 194560; + zone_size[1] = 329728; + + free_area_init_node(0, zone_size, min, zhole_size); +} + +void __init arm_memblock_init(void) +{ + memblock_dump_all(); +} + + +int __init arm_add_memory(u64 start, u64 size) +{ + u64 aligned_start; + + /* + * Ensure that start/size are aligned to a page boundary. + * Size is rounded down, start is rounded up. + */ + aligned_start = PAGE_ALIGN(start); + if (aligned_start > start + size) + size = 0; + else + size -= aligned_start - start; + + + if (aligned_start < PHYS_OFFSET) { + if (aligned_start + size <= PHYS_OFFSET) { + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n", + aligned_start, aligned_start + size); + return -EINVAL; + } + + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n", + aligned_start, (u64)PHYS_OFFSET); + + size -= PHYS_OFFSET - aligned_start; + aligned_start = PHYS_OFFSET; + } + + start = aligned_start; + size = size & ~(phys_addr_t)(PAGE_SIZE - 1); + + /* + * Check whether this memory region has non-zero size or + * invalid node number. + */ + if (size == 0) + return -EINVAL; + + memblock_add(start, size); + return 0; +} + +void __init bootmem_init(void) +{ + unsigned long min, max_low, max_high; + + min = 0; + max_low = 194560; + max_high = 524288; + + + zone_sizes_init(min, max_low, max_high); + + /* + * This doesn't seem to be used by the Linux memory manager any + * more, but is used by ll_rw_block. If we can get rid of it, we + * also get rid of some of the stuff above as well. + */ + min_low_pfn = min; + max_low_pfn = max_low; + max_pfn = max_high; +} + + +void __init paging_init(void) +{ + bootmem_init(); +} + + + +void __init setup_arch(char **cmd) +{ + arm_add_memory(0, 1024 * 1024 * 1024 * 1); + arm_memblock_init(); + paging_init(); +} + + +/* + * Set up kernel memory allocators + */ +static void __init mm_init(void) +{ + link_bootmem(NODE_DATA(0)->bdata); + mem_init(); +} + +void __init init_memory_system(void) +{ + setup_arch(NULL); + page_alloc_init(); + build_all_zonelists(NULL, NULL); + mm_init(); +} + +void test(void) +{ + pg_data_t *pgdat = NODE_DATA(nid); + printk("I am printk: %p, %p, %d, %p\n", pgdat->node_zones, + pgdat->node_zonelists, + pgdat->nr_zones, + pgdat->bdata); +} + diff --git a/mm/slib_env.h b/mm/slib_env.h new file mode 100644 index 000000000000..630bf527999b --- /dev/null +++ b/mm/slib_env.h @@ -0,0 +1,17 @@ +#include +#include + +#define PHYS_OFFSET 0xC0000000 + +/* +char *_text, *_stext, *_etext; +char *_data, *_sdata, *_edata; +char *__bss_start, *__bss_stop; +char *_sinittext, *_einittext; +char *_end; +char *__per_cpu_load, *__per_cpu_start, *__per_cpu_end; +char *__kprobes_text_start, *__kprobes_text_end; +char *__entry_text_start, *__entry_text_end; +*/ + +void __init init_memory_system(void); From f0f45abaf3de1b93a41835411a08c9b32d4da2c3 Mon Sep 17 00:00:00 2001 From: yjiao Date: Mon, 1 Jun 2015 22:05:24 -0400 Subject: [PATCH 03/29] Add headers from arm/include/asm and try to initilize buddy system freelists --- arch/lib/Kconfig | 33 +- arch/lib/Makefile | 2 +- arch/lib/glue.c | 16 + arch/lib/include/asm/barrier.h | 94 ++- arch/lib/include/asm/cache.h | 28 + arch/lib/include/asm/cachetype.h | 59 ++ arch/lib/include/asm/glue-proc.h | 264 ++++++++ arch/lib/include/asm/glue.h | 25 + arch/lib/include/asm/highmem.h | 77 +++ arch/lib/include/asm/kmap_types.h | 9 + arch/lib/include/asm/memory.h | 359 ++++++++++ arch/lib/include/asm/mmu.h | 40 ++ 
arch/lib/include/asm/outercache.h | 149 +++++ arch/lib/include/asm/page.h | 175 ++++- arch/lib/include/asm/pgtable-2level-hwdef.h | 95 +++ arch/lib/include/asm/pgtable-2level-types.h | 67 ++ arch/lib/include/asm/pgtable-2level.h | 198 ++++++ arch/lib/include/asm/pgtable-hwdef.h | 19 + arch/lib/include/asm/pgtable.h | 371 ++++++++++- arch/lib/include/asm/proc-fns.h | 160 +++++ arch/lib/include/asm/processor.h | 2 +- arch/lib/include/asm/thread_info.h | 5 + arch/lib/include/asm/tlbflush.h | 686 ++++++++++++++++++++ arch/lib/sysctl.c | 11 + include/linux/bootmem.h | 3 - mm/memblock.c | 2 +- mm/page_alloc.c | 16 +- mm/slib_env.c | 98 ++- mm/slib_env.h | 58 +- 29 files changed, 3051 insertions(+), 70 deletions(-) create mode 100644 arch/lib/include/asm/cache.h create mode 100644 arch/lib/include/asm/cachetype.h create mode 100644 arch/lib/include/asm/glue-proc.h create mode 100644 arch/lib/include/asm/glue.h create mode 100644 arch/lib/include/asm/highmem.h create mode 100644 arch/lib/include/asm/kmap_types.h create mode 100644 arch/lib/include/asm/memory.h create mode 100644 arch/lib/include/asm/mmu.h create mode 100644 arch/lib/include/asm/outercache.h create mode 100644 arch/lib/include/asm/pgtable-2level-hwdef.h create mode 100644 arch/lib/include/asm/pgtable-2level-types.h create mode 100644 arch/lib/include/asm/pgtable-2level.h create mode 100644 arch/lib/include/asm/pgtable-hwdef.h create mode 100644 arch/lib/include/asm/proc-fns.h create mode 100644 arch/lib/include/asm/tlbflush.h diff --git a/arch/lib/Kconfig b/arch/lib/Kconfig index b61d0ac24d09..cb5c768c2ff2 100644 --- a/arch/lib/Kconfig +++ b/arch/lib/Kconfig @@ -25,7 +25,7 @@ config MODULES option modules config MMU - def_bool n + def_bool y config FPU def_bool n @@ -131,3 +131,34 @@ config DEBUG_INFO config FLAT_NODE_MEM_MAP def_bool y + +config NO_BOOTMEM + def_bool y + +config HIGHMEM + def_bool y + +config CPU_COPY_V6 + def_bool y + +config CPU_TLB_V7 + def_bool y + +config PAGE_OFFSET + hex + default 0xC0000000 + +config PHYS_OFFSET + hex + default 0x00000000 + +config ARM_L1_CACHE_SHIFT + int + default 6 + +config PGTABLE_LEVELS + int + default 2 + +config HAVE_ARCH_PFN_VALID + def_bool y diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 0fa5e249174d..d2979cfbace7 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -100,7 +100,7 @@ kernel/rcu_to_keep=rcu/srcu.o rcu/pdate.o rcu/tiny.o kernel/locking_to_keep=locking/mutex.o kernel/bpf_to_keep=bpf/core.o mm/_to_keep=util.o list_lru.o slib.o page_alloc.o memblock.o mmzone.o slib_env.o \ -bootmem.o +nobootmem.o highmem.o crypto/_to_keep=aead.o ahash.o shash.o api.o algapi.o cipher.o compress.o proc.o \ crc32c_generic.o drivers/base/_to_keep=class.o core.o bus.o dd.o driver.o devres.o module.o map.o diff --git a/arch/lib/glue.c b/arch/lib/glue.c index 19eabe7303e1..e23f4e6331e4 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "sim-assert.h" #include "sim.h" @@ -42,6 +43,7 @@ int hashdist = HASHDIST_DEFAULT; struct page *mem_map = 0; unsigned long max_mapnr; unsigned long highest_memmap_pfn __read_mostly; +int randomize_va_space = 0; /* vmscan */ unsigned long vm_total_pages; @@ -70,6 +72,11 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); + +/* arm/mmu.c */ +pgprot_t pgprot_kernel; + + struct backing_dev_info noop_backing_dev_info = { .name = 
"noop", .capabilities = 0, @@ -293,3 +300,12 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { } + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + lib_assert(false); + return 0; +} + diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h index 47adcc659b50..d2f81e6b8c1c 100644 --- a/arch/lib/include/asm/barrier.h +++ b/arch/lib/include/asm/barrier.h @@ -1,8 +1,90 @@ -#include +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#include + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#else +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() do { dsb(st); outer_sync(); } while (0) +#define dma_rmb() dmb(osh) +#define dma_wmb() dmb(oshst) +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() dmb(ish) +#define smp_rmb() smp_mb() +#define smp_wmb() dmb(ishst) +#endif -#undef smp_store_release #define smp_store_release(p, v) \ - do { \ - smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ - } while (0) +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) + +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/lib/include/asm/cache.h b/arch/lib/include/asm/cache.h new file mode 100644 index 000000000000..75fe66bc02b4 --- 
/dev/null +++ b/arch/lib/include/asm/cache.h @@ -0,0 +1,28 @@ +/* + * arch/arm/include/asm/cache.h + */ +#ifndef __ASMARM_CACHE_H +#define __ASMARM_CACHE_H + +#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +/* + * Memory returned by kmalloc() may be used for DMA, so we must make + * sure that all such allocations are cache aligned. Otherwise, + * unrelated code may cause parts of the buffer to be read into the + * cache before the transfer is done, causing old data to be seen by + * the CPU. + */ +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +/* + * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. + */ +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +#define ARCH_SLAB_MINALIGN 8 +#endif + +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +#endif diff --git a/arch/lib/include/asm/cachetype.h b/arch/lib/include/asm/cachetype.h new file mode 100644 index 000000000000..7ea78144ae22 --- /dev/null +++ b/arch/lib/include/asm/cachetype.h @@ -0,0 +1,59 @@ +#ifndef __ASM_ARM_CACHETYPE_H +#define __ASM_ARM_CACHETYPE_H + +#define CACHEID_VIVT (1 << 0) +#define CACHEID_VIPT_NONALIASING (1 << 1) +#define CACHEID_VIPT_ALIASING (1 << 2) +#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING) +#define CACHEID_ASID_TAGGED (1 << 3) +#define CACHEID_VIPT_I_ALIASING (1 << 4) +#define CACHEID_PIPT (1 << 5) + +extern unsigned int cacheid; + +#define cache_is_vivt() cacheid_is(CACHEID_VIVT) +#define cache_is_vipt() cacheid_is(CACHEID_VIPT) +#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING) +#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING) +#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED) +#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING) +#define icache_is_pipt() cacheid_is(CACHEID_PIPT) + +/* + * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture + * Mask out support which will never be present on newer CPUs. + * - v6+ is never VIVT + * - v7+ VIPT never aliases on D-side + */ +#if __LINUX_ARM_ARCH__ >= 7 +#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\ + CACHEID_ASID_TAGGED |\ + CACHEID_VIPT_I_ALIASING |\ + CACHEID_PIPT) +#elif __LINUX_ARM_ARCH__ >= 6 +#define __CACHEID_ARCH_MIN (~CACHEID_VIVT) +#else +#define __CACHEID_ARCH_MIN (~0) +#endif + +/* + * Mask out support which isn't configured + */ +#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) +#define __CACHEID_ALWAYS (CACHEID_VIVT) +#define __CACHEID_NEVER (~CACHEID_VIVT) +#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) +#define __CACHEID_ALWAYS (0) +#define __CACHEID_NEVER (CACHEID_VIVT) +#else +#define __CACHEID_ALWAYS (0) +#define __CACHEID_NEVER (0) +#endif + +static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask) +{ + return (__CACHEID_ALWAYS & mask) | + (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid); +} + +#endif diff --git a/arch/lib/include/asm/glue-proc.h b/arch/lib/include/asm/glue-proc.h new file mode 100644 index 000000000000..74be7c22035a --- /dev/null +++ b/arch/lib/include/asm/glue-proc.h @@ -0,0 +1,264 @@ +/* + * arch/arm/include/asm/glue-proc.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
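
[Review note] cachetype.h above is a straight copy of the ARM original, and its cacheid_is() trick matters for the code this series pulls in: __CACHEID_ARCH_MIN and __CACHEID_NEVER are compile-time masks, so a query that cannot hold on the configured architecture folds to constant 0 and the guarded code is discarded. For example, on a hypothetical v7 build:

	if (cache_is_vivt())		/* constant-folds to 0 when __LINUX_ARM_ARCH__ >= 7 */
		flush_cache_all();	/* compiled out */

which is what makes flush_cache_kmaps() in the highmem.h added later in this patch free on modern cores.
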
+ */ +#ifndef ASM_GLUE_PROC_H +#define ASM_GLUE_PROC_H + +#include + +/* + * Work out if we need multiple CPU support + */ +#undef MULTI_CPU +#undef CPU_NAME + +/* + * CPU_NAME - the prefix for CPU related functions + */ + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm720 +# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm740 +# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm9tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm920 +# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm922 +# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_fa526 +# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm925 +# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm926 +# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm940 +# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm946 +# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa110 +# endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e +# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 +# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk +# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon +# endif +#endif + +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 +# endif +#endif + +#ifdef CONFIG_CPU_V7M +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7m +# endif +#endif + +#ifdef CONFIG_CPU_PJ4B +# ifdef CPU_NAME +# undef MULTI_CPU +# define 
MULTI_CPU +# else +# define CPU_NAME cpu_pj4b +# endif +#endif + +#ifdef CONFIG_CPU_V7 +/* + * Cortex-A9 needs a different suspend/resume function, so we need + * multiple CPU support for ARMv7 anyway. + */ +# undef MULTI_CPU +# define MULTI_CPU +#endif + +#ifndef MULTI_CPU +#define cpu_proc_init __glue(CPU_NAME,_proc_init) +#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) +#define cpu_reset __glue(CPU_NAME,_reset) +#define cpu_do_idle __glue(CPU_NAME,_do_idle) +#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) +#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) +#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#define cpu_suspend_size __glue(CPU_NAME,_suspend_size) +#define cpu_do_suspend __glue(CPU_NAME,_do_suspend) +#define cpu_do_resume __glue(CPU_NAME,_do_resume) +#endif + +#endif diff --git a/arch/lib/include/asm/glue.h b/arch/lib/include/asm/glue.h new file mode 100644 index 000000000000..fbf71d75ec83 --- /dev/null +++ b/arch/lib/include/asm/glue.h @@ -0,0 +1,25 @@ +/* + * arch/arm/include/asm/glue.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file provides the glue to stick the processor-specific bits + * into the kernel in an efficient manner. The idea is to use branches + * when we're only targeting one class of TLB, or indirect calls + * when we're targeting multiple classes of TLBs. + */ +#ifdef __KERNEL__ + +#ifdef __STDC__ +#define ____glue(name,fn) name##fn +#else +#define ____glue(name,fn) name/**/fn +#endif +#define __glue(name,fn) ____glue(name,fn) + +#endif diff --git a/arch/lib/include/asm/highmem.h b/arch/lib/include/asm/highmem.h new file mode 100644 index 000000000000..734bb437710b --- /dev/null +++ b/arch/lib/include/asm/highmem.h @@ -0,0 +1,77 @@ +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#if 1 +#include + +#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE) +#define LAST_PKMAP PTRS_PER_PTE +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +#define kmap_prot PAGE_KERNEL + +#define flush_cache_kmaps() \ + do { \ + if (cache_is_vivt()) \ + flush_cache_all(); \ + } while (0) + +extern pte_t *pkmap_page_table; +extern pte_t *fixmap_page_table; + +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); + +/* + * The reason for kmap_high_get() is to ensure that the currently kmap'd + * page usage count does not decrease to zero while we're using its + * existing virtual mapping in an atomic context. With a VIVT cache this + * is essential to do, but with a VIPT cache this is only an optimization + * so not to pay the price of establishing a second mapping if an existing + * one can be used. However, on platforms without hardware TLB maintenance + * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since + * the locking involved must also disable IRQs which is incompatible with + * the IPI mechanism used by global TLB operations. 
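
[Review note] glue-proc.h resolves the processor hooks at preprocessing time. With exactly one CPU class configured, say CONFIG_CPU_V6, CPU_NAME becomes cpu_v6 and the accessors paste into direct calls:

	cpu_do_idle()  ->  __glue(cpu_v6, _do_idle)  ->  cpu_v6_do_idle()

whereas any second class (or CONFIG_CPU_V7) defines MULTI_CPU and the same names dispatch through the processor vector declared in the new proc-fns.h. Note that the library Kconfig in this patch selects only CPU_COPY_V6 and CPU_TLB_V7, not a CPU core proper, so neither branch has a real implementation behind it; these cpu_* entry points presumably must never be reached in the library build.
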
+ */ +#define ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6) +#undef ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT) +#error "The sum of features in your kernel config cannot be supported together" +#endif +#endif + +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + +#ifdef ARCH_NEEDS_KMAP_HIGH_GET +extern void *kmap_high_get(struct page *page); +#else +static inline void *kmap_high_get(struct page *page) +{ + return NULL; +} +#endif + +/* + * The following functions are already defined by + * when CONFIG_HIGHMEM is not set. + */ +#ifdef CONFIG_HIGHMEM +extern void *kmap(struct page *page); +extern void kunmap(struct page *page); +extern void *kmap_atomic(struct page *page); +extern void __kunmap_atomic(void *kvaddr); +extern void *kmap_atomic_pfn(unsigned long pfn); +extern struct page *kmap_atomic_to_page(const void *ptr); +#endif + +#endif + +#endif diff --git a/arch/lib/include/asm/kmap_types.h b/arch/lib/include/asm/kmap_types.h new file mode 100644 index 000000000000..83eb2f772911 --- /dev/null +++ b/arch/lib/include/asm/kmap_types.h @@ -0,0 +1,9 @@ +#ifndef __ARM_KMAP_TYPES_H +#define __ARM_KMAP_TYPES_H + +/* + * This is the "bare minimum". AIO seems to require this. + */ +#define KM_TYPE_NR 16 + +#endif diff --git a/arch/lib/include/asm/memory.h b/arch/lib/include/asm/memory.h new file mode 100644 index 000000000000..184def0e1652 --- /dev/null +++ b/arch/lib/include/asm/memory.h @@ -0,0 +1,359 @@ +/* + * arch/arm/include/asm/memory.h + * + * Copyright (C) 2000-2002 Russell King + * modification for nommu, Hyok S. Choi, 2004 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Note: this file should not be included by non-asm/.h files + */ +#ifndef __ASM_ARM_MEMORY_H +#define __ASM_ARM_MEMORY_H + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_NEED_MACH_MEMORY_H +#include +#endif + +/* + * Allow for constants defined here to be used from assembly code + * by prepending the UL suffix only with actual C code compilation. + */ +#define UL(x) _AC(x, UL) + +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) + +#ifdef CONFIG_MMU + +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area + */ +#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) +#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) + +/* + * The maximum size of a 26-bit user space task. + */ +#define TASK_SIZE_26 (UL(1) << 26) + +/* + * The module space lives between the addresses given by TASK_SIZE + * and PAGE_OFFSET - it must be within 32MB of the kernel text. + */ +#ifndef CONFIG_THUMB2_KERNEL +#define MODULES_VADDR (PAGE_OFFSET - SZ_16M) +#else +/* smaller range for Thumb-2 symbols relocation (2^24)*/ +#define MODULES_VADDR (PAGE_OFFSET - SZ_8M) +#endif + +#if TASK_SIZE > MODULES_VADDR +#error Top of user space clashes with start of module space +#endif + +/* + * The highmem pkmap virtual space shares the end of the module area. + */ +#ifdef CONFIG_HIGHMEM +#define MODULES_END (PAGE_OFFSET - PMD_SIZE) +#else +#define MODULES_END (PAGE_OFFSET) +#endif + +/* + * The XIP kernel gets mapped at the bottom of the module vm area. 
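
[Review note] Together with the Kconfig defaults added in this patch (PAGE_OFFSET 0xC0000000, HIGHMEM=y) and 2-level page tables (PMD_SIZE = 2 MiB), the constants above and in highmem.h pin down the classic ARM split; a worked map, assuming no Thumb-2:

	TASK_SIZE     = 0xBF000000	/* PAGE_OFFSET - 16 MiB */
	MODULES_VADDR = 0xBF000000
	MODULES_END   = 0xBFE00000	/* PAGE_OFFSET - PMD_SIZE, since HIGHMEM */
	PKMAP_BASE    = 0xBFE00000	/* LAST_PKMAP = 512 slots = 2 MiB */
	PAGE_OFFSET   = 0xC0000000	/* start of the lowmem direct map */

How much of this address arithmetic is meaningful when the "kernel" is a user-space library is an open question, but the headers are at least self-consistent.
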
+ * Since we use sections to map it, this macro replaces the physical address + * with its virtual address while keeping offset from the base section. + */ +#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) + +/* + * Allow 16MB-aligned ioremap pages + */ +#define IOREMAP_MAX_ORDER 24 + +#else /* CONFIG_MMU */ + +/* + * The limitation of user task size can grow up to the end of free ram region. + * It is difficult to define and perhaps will never meet the original meaning + * of this define that was meant to. + * Fortunately, there is no reference for this in noMMU mode, for now. + */ +#define TASK_SIZE UL(0xffffffff) + +#ifndef TASK_UNMAPPED_BASE +#define TASK_UNMAPPED_BASE UL(0x00000000) +#endif + +#ifndef END_MEM +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) +#endif + +/* + * The module can be at any place in ram in nommu mode. + */ +#define MODULES_END (END_MEM) +#define MODULES_VADDR PAGE_OFFSET + +#define XIP_VIRT_ADDR(physaddr) (physaddr) + +#endif /* !CONFIG_MMU */ + +/* + * We fix the TCM memories max 32 KiB ITCM resp DTCM at these + * locations + */ +#ifdef CONFIG_HAVE_TCM +#define ITCM_OFFSET UL(0xfffe0000) +#define DTCM_OFFSET UL(0xfffe8000) +#endif + +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) + +/* + * Convert a page to/from a physical address + */ +#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) +#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) + +/* + * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed + * around in head.S and proc-*.S are shifted by this amount, in order to + * leave spare high bits for systems with physical address extension. This + * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but + * gives us about 38-bits or so. + */ +#ifdef CONFIG_ARM_LPAE +#define ARCH_PGD_SHIFT L1_CACHE_SHIFT +#else +#define ARCH_PGD_SHIFT 0 +#endif +#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) + +/* + * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical + * memory. This is used for XIP and NoMMU kernels, and on platforms that don't + * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use + * PLAT_PHYS_OFFSET and not PHYS_OFFSET. + */ +#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) + +#ifndef __ASSEMBLY__ + +/* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. + * + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0. + */ +#if defined(__virt_to_phys) +#define PHYS_OFFSET PLAT_PHYS_OFFSET +#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) + +#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT) + +/* + * Constants used to force the right instruction encodings and shifts + * so that all we need to do is modify the 8-bit constant field. 
+ */ +#define __PV_BITS_31_24 0x81000000 +#define __PV_BITS_7_0 0x81 + +extern unsigned long __pv_phys_pfn_offset; +extern u64 __pv_offset; +extern void fixup_pv_table(const void *, unsigned long); +extern const void *__pv_table_begin, *__pv_table_end; + +#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT) +#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset) + +#define virt_to_pfn(kaddr) \ + ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ + PHYS_PFN_OFFSET) + +#define __pv_stub(from,to,instr,type) \ + __asm__("@ __pv_stub\n" \ + "1: " instr " %0, %1, %2\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (to) \ + : "r" (from), "I" (type)) + +#define __pv_stub_mov_hi(t) \ + __asm__ volatile("@ __pv_stub_mov\n" \ + "1: mov %R0, %1\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (t) \ + : "I" (__PV_BITS_7_0)) + +#define __pv_add_carry_stub(x, y) \ + __asm__ volatile("@ __pv_add_carry_stub\n" \ + "1: adds %Q0, %1, %2\n" \ + " adc %R0, %R0, #0\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "+r" (y) \ + : "r" (x), "I" (__PV_BITS_31_24) \ + : "cc") + +static inline phys_addr_t __virt_to_phys(unsigned long x) +{ + phys_addr_t t; + + if (sizeof(phys_addr_t) == 4) { + __pv_stub(x, t, "add", __PV_BITS_31_24); + } else { + __pv_stub_mov_hi(t); + __pv_add_carry_stub(x, t); + } + return t; +} + +static inline unsigned long __phys_to_virt(phys_addr_t x) +{ + unsigned long t; + + /* + * 'unsigned long' cast discard upper word when + * phys_addr_t is 64 bit, and makes sure that inline + * assembler expression receives 32 bit argument + * in place where 'r' 32 bit operand is expected. + */ + __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24); + return t; +} + +#else + +#define PHYS_OFFSET PLAT_PHYS_OFFSET +#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) + +static inline phys_addr_t __virt_to_phys(unsigned long x) +{ + return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; +} + +static inline unsigned long __phys_to_virt(phys_addr_t x) +{ + return x - PHYS_OFFSET + PAGE_OFFSET; +} + +#define virt_to_pfn(kaddr) \ + ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ + PHYS_PFN_OFFSET) + +#endif + +/* + * These are *only* valid on the kernel direct mapped RAM memory. + * Note: Drivers should NOT use these. They are the wrong + * translation for translating DMA addresses. Use the driver + * DMA support - see dma-mapping.h. + */ +#define virt_to_phys virt_to_phys +static inline phys_addr_t virt_to_phys(const volatile void *x) +{ + return __virt_to_phys((unsigned long)(x)); +} + +#define phys_to_virt phys_to_virt +static inline void *phys_to_virt(phys_addr_t x) +{ + return (void *)__phys_to_virt(x); +} + +/* + * Drivers should NOT use these either. + */ +#define __pa(x) __virt_to_phys((unsigned long)(x)) +#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); + +/* + * These are for systems that have a hardware interconnect supported alias of + * physical memory for idmap purposes. Most cases should leave these + * untouched. 
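
[Review note] Since the library Kconfig sets CONFIG_PHYS_OFFSET to 0 and nothing selects CONFIG_ARM_PATCH_PHYS_VIRT, the plain fallback above is the one in effect, and the conversions are simple offset arithmetic:

	__virt_to_phys(0xC0100000) == 0xC0100000 - 0xC0000000 + 0 == 0x00100000
	virt_to_pfn(0xC0100000)    == 0x00100000 >> 12            == 0x100

This disagrees with the PHYS_OFFSET of 0xC0000000 that patch 02's slib_env.h hard-codes; with that value, arm_add_memory(0, 1 GiB) takes the "aligned_start + size <= PHYS_OFFSET" branch and rejects the whole bank with -EINVAL. The two definitions need to be reconciled (the diffstat shows slib_env.h reworked in this patch, so that may be where the fix lands).
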
+ */ +static inline phys_addr_t __virt_to_idmap(unsigned long x) +{ + if (arch_virt_to_idmap) + return arch_virt_to_idmap(x); + else + return __virt_to_phys(x); +} + +#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x)) + +/* + * Virtual <-> DMA view memory address translations + * Again, these are *only* valid on the kernel direct mapped RAM + * memory. Use of these is *deprecated* (and that doesn't mean + * use the __ prefixed forms instead.) See dma-mapping.h. + */ +#ifndef __virt_to_bus +#define __virt_to_bus __virt_to_phys +#define __bus_to_virt __phys_to_virt +#define __pfn_to_bus(x) __pfn_to_phys(x) +#define __bus_to_pfn(x) __phys_to_pfn(x) +#endif + +#ifdef CONFIG_VIRT_TO_BUS +#define virt_to_bus virt_to_bus +static inline __deprecated unsigned long virt_to_bus(void *x) +{ + return __virt_to_bus((unsigned long)x); +} + +#define bus_to_virt bus_to_virt +static inline __deprecated void *bus_to_virt(unsigned long x) +{ + return (void *)__bus_to_virt(x); +} +#endif + +/* + * Conversion between a struct page and a physical address. + * + * page_to_pfn(page) convert a struct page * to a PFN number + * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * + * + * virt_to_page(k) convert a _valid_ virtual address to struct page * + * virt_addr_valid(k) indicates whether a virtual address is valid + */ +#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET + +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) +#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ + && pfn_valid(virt_to_pfn(kaddr))) + +#endif + +#include + +#endif diff --git a/arch/lib/include/asm/mmu.h b/arch/lib/include/asm/mmu.h new file mode 100644 index 000000000000..a5b47421059d --- /dev/null +++ b/arch/lib/include/asm/mmu.h @@ -0,0 +1,40 @@ +#ifndef __ARM_MMU_H +#define __ARM_MMU_H + +#ifdef CONFIG_MMU + +typedef struct { +#ifdef CONFIG_CPU_HAS_ASID + atomic64_t id; +#else + int switch_pending; +#endif + unsigned int vmalloc_seq; + unsigned long sigpage; +#ifdef CONFIG_VDSO + unsigned long vdso; +#endif +} mm_context_t; + +#ifdef CONFIG_CPU_HAS_ASID +#define ASID_BITS 8 +#define ASID_MASK ((~0ULL) << ASID_BITS) +#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK)) +#else +#define ASID(mm) (0) +#endif + +#else + +/* + * From nommu.h: + * Copyright (C) 2002, David McCullough + * modified for 2.6 by Hyok S. Choi + */ +typedef struct { + unsigned long end_brk; +} mm_context_t; + +#endif + +#endif diff --git a/arch/lib/include/asm/outercache.h b/arch/lib/include/asm/outercache.h new file mode 100644 index 000000000000..563b92fc2f41 --- /dev/null +++ b/arch/lib/include/asm/outercache.h @@ -0,0 +1,149 @@ +/* + * arch/arm/include/asm/outercache.h + * + * Copyright (C) 2010 ARM Ltd. + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_OUTERCACHE_H +#define __ASM_OUTERCACHE_H + +#include + +struct l2x0_regs; + +struct outer_cache_fns { + void (*inv_range)(unsigned long, unsigned long); + void (*clean_range)(unsigned long, unsigned long); + void (*flush_range)(unsigned long, unsigned long); + void (*flush_all)(void); + void (*disable)(void); +#ifdef CONFIG_OUTER_CACHE_SYNC + void (*sync)(void); +#endif + void (*resume)(void); + + /* This is an ARM L2C thing */ + void (*write_sec)(unsigned long, unsigned); + void (*configure)(const struct l2x0_regs *); +}; + +extern struct outer_cache_fns outer_cache; + +#ifdef CONFIG_OUTER_CACHE +/** + * outer_inv_range - invalidate range of outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ +static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) +{ + if (outer_cache.inv_range) + outer_cache.inv_range(start, end); +} + +/** + * outer_clean_range - clean dirty outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ +static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) +{ + if (outer_cache.clean_range) + outer_cache.clean_range(start, end); +} + +/** + * outer_flush_range - clean and invalidate outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ +static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) +{ + if (outer_cache.flush_range) + outer_cache.flush_range(start, end); +} + +/** + * outer_flush_all - clean and invalidate all cache lines in the outer cache + * + * Note: depending on implementation, this may not be atomic - it must + * only be called with interrupts disabled and no other active outer + * cache masters. + * + * It is intended that this function is only used by implementations + * needing to override the outer_cache.disable() method due to security. + * (Some implementations perform this as a clean followed by an invalidate.) + */ +static inline void outer_flush_all(void) +{ + if (outer_cache.flush_all) + outer_cache.flush_all(); +} + +/** + * outer_disable - clean, invalidate and disable the outer cache + * + * Disable the outer cache, ensuring that any data contained in the outer + * cache is pushed out to lower levels of system memory. The note and + * conditions above concerning outer_flush_all() applies here. + */ +extern void outer_disable(void); + +/** + * outer_resume - restore the cache configuration and re-enable outer cache + * + * Restore any configuration that the cache had when previously enabled, + * and re-enable the outer cache. 
+ */ +static inline void outer_resume(void) +{ + if (outer_cache.resume) + outer_cache.resume(); +} + +#else + +static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) +{ } +static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) +{ } +static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) +{ } +static inline void outer_flush_all(void) { } +static inline void outer_disable(void) { } +static inline void outer_resume(void) { } + +#endif + +#ifdef CONFIG_OUTER_CACHE_SYNC +/** + * outer_sync - perform a sync point for outer cache + * + * Ensure that all outer cache operations are complete and any store + * buffers are drained. + */ +static inline void outer_sync(void) +{ + if (outer_cache.sync) + outer_cache.sync(); +} +#else +static inline void outer_sync(void) +{ } +#endif + +#endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/lib/include/asm/page.h b/arch/lib/include/asm/page.h index 8c0aa7437374..4355f0ec44d6 100644 --- a/arch/lib/include/asm/page.h +++ b/arch/lib/include/asm/page.h @@ -1,14 +1,173 @@ -#ifndef _ASM_SIM_PAGE_H -#define _ASM_SIM_PAGE_H +/* + * arch/arm/include/asm/page.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASMARM_PAGE_H +#define _ASMARM_PAGE_H -typedef struct {} pud_t; +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) -#define THREAD_ORDER 1 -#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) +#ifndef __ASSEMBLY__ -#define WANT_PAGE_VIRTUAL 1 +#ifndef CONFIG_MMU + +#include + +#else + +#include + +/* + * User Space Model + * ================ + * + * This section selects the correct set of functions for dealing with + * page-based copying and clearing for user space for the particular + * processor(s) we're building for. 
+ * + * We have the following to choose from: + * v4wt - ARMv4 with writethrough cache, without minicache + * v4wb - ARMv4 with writeback cache, without minicache + * v4_mc - ARMv4 with minicache + * xscale - Xscale + * xsc3 - XScalev3 + */ +#undef _USER +#undef MULTI_USER + +#ifdef CONFIG_CPU_COPY_V4WT +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER v4wt +# endif +#endif + +#ifdef CONFIG_CPU_COPY_V4WB +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER v4wb +# endif +#endif + +#ifdef CONFIG_CPU_COPY_FEROCEON +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER feroceon +# endif +#endif + +#ifdef CONFIG_CPU_COPY_FA +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER fa +# endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER v4_mc +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER xscale_mc +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef _USER +# define MULTI_USER 1 +# else +# define _USER xsc3_mc +# endif +#endif + +#ifdef CONFIG_CPU_COPY_V6 +# define MULTI_USER 1 +#endif + +#if !defined(_USER) && !defined(MULTI_USER) +#error Unknown user operations model +#endif + +struct page; +struct vm_area_struct; + +struct cpu_user_fns { + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); + void (*cpu_copy_user_highpage)(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +}; + +#ifdef MULTI_USER +extern struct cpu_user_fns cpu_user; + +#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage +#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage + +#else + +#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage) +#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) + +extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); +extern void __cpu_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#endif + +#define clear_user_highpage(page,vaddr) \ + __cpu_clear_user_highpage(page, vaddr) + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define copy_user_highpage(to,from,vaddr,vma) \ + __cpu_copy_user_highpage(to, from, vaddr, vma) + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) +extern void copy_page(void *to, const void *from); + +#ifdef CONFIG_KUSER_HELPERS +#define __HAVE_ARCH_GATE_AREA 1 +#endif + +#ifdef CONFIG_ARM_LPAE +#include +#else +#include +#endif + +#endif /* CONFIG_MMU */ + +typedef struct page *pgtable_t; + +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +extern int pfn_valid(unsigned long); +#endif + +#include + +#endif /* !__ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#include #include -#endif /* _ASM_SIM_PAGE_H */ +#endif diff --git a/arch/lib/include/asm/pgtable-2level-hwdef.h b/arch/lib/include/asm/pgtable-2level-hwdef.h new file mode 100644 index 000000000000..5e68278e953e --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1,95 @@ +/* + * arch/arm/include/asm/pgtable-2level-hwdef.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H +#define _ASM_PGTABLE_2LEVEL_HWDEF_H + +/* + * Hardware page table definitions. + * + * + Level 1 descriptor (PMD) + * - common + */ +#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) +#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ +#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) +#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ +/* + * - section + */ +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ +#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ +#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10) +#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */ +#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */ +#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */ +#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ +#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ +#define PMD_SECT_AF (_AT(pmdval_t, 0)) + +#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) +#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) +#define PMD_SECT_WT (PMD_SECT_CACHEABLE) +#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) +#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) + +/* + * - coarse table (not used) + */ + +/* + * + Level 2 descriptor (PTE) + * - common + */ +#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) +#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0) +#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0) +#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */ +#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) +#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) + +/* + * - extended small page/tiny page + */ +#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ +#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) +#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) +#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) +#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4) +#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) +#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) +#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) +#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */ +#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */ +#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */ +#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */ +#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */ + +/* + * - small page + */ +#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4) +#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4) +#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4) +#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4) +#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4) + +#define PHYS_MASK (~0UL) + +#endif diff --git a/arch/lib/include/asm/pgtable-2level-types.h b/arch/lib/include/asm/pgtable-2level-types.h new file mode 100644 index 000000000000..66cb5b0e89c5 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-types.h @@ -0,0 +1,67 @@ +/* + * arch/arm/include/asm/pgtable-2level-types.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * 
it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H +#define _ASM_PGTABLE_2LEVEL_TYPES_H + +#include + +typedef u32 pteval_t; +typedef u32 pmdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pmdval_t pgd[2]; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd[0]) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. while these make it easier on the compiler + */ +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pmdval_t pgd_t[2]; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) ((x)[0]) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/lib/include/asm/pgtable-2level.h b/arch/lib/include/asm/pgtable-2level.h new file mode 100644 index 000000000000..bfd662e49a25 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level.h @@ -0,0 +1,198 @@ +/* + * arch/arm/include/asm/pgtable-2level.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_H +#define _ASM_PGTABLE_2LEVEL_H + +#define __PAGETABLE_PMD_FOLDED + +/* + * Hardware-wise, we have a two level page table structure, where the first + * level has 4096 entries, and the second level has 256 entries. Each entry + * is one 32-bit word. Most of the bits in the second level entry are used + * by hardware, and there aren't any "accessed" and "dirty" bits. + * + * Linux on the other hand has a three level page table structure, which can + * be wrapped to fit a two level page table structure easily - using the PGD + * and PTE only. However, Linux also expects one "PTE" table per page, and + * at least a "dirty" bit. + * + * Therefore, we tweak the implementation slightly - we tell Linux that we + * have 2048 entries in the first level, each of which is 8 bytes (iow, two + * hardware pointers to the second level.) The second level contains two + * hardware PTE tables arranged contiguously, preceded by Linux versions + * which contain the state information Linux needs. We, therefore, end up + * with 512 entries in the "PTE" level. 
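+ *
+ * (A quick sanity check of the arithmetic, ours rather than the
+ * original author's: 2048 first-level entries of 2MB each cover the
+ * full 4GB address space, and each second-level table maps
+ * 512 * 4KB = 2MB, which is where the PMD_SHIFT/PGDIR_SHIFT value of
+ * 21 below comes from.)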
+ *
+ * This leads to the page tables having the following layout:
+ *
+ *    pgd             pte
+ * |        |
+ * +--------+
+ * |        |       +------------+ +0
+ * +- - - - +       | Linux pt 0 |
+ * |        |       +------------+ +1024
+ * +--------+ +0    | Linux pt 1 |
+ * |        |-----> +------------+ +2048
+ * +- - - - + +4    |  h/w pt 0  |
+ * |        |-----> +------------+ +3072
+ * +--------+ +8    |  h/w pt 1  |
+ * |        |       +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set.  Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE.  Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the translation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE		512
+#define PTRS_PER_PMD		1
+#define PTRS_PER_PGD		2048
+
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT		21
+#define PGDIR_SHIFT		21
+
+#define PMD_SIZE		(1UL << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT		20
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT	24
+#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
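+ *
+ * (An observation from the code rather than from the original comment:
+ * pmd_page_vaddr() in pgtable.h masks the pmd value with PAGE_MASK, so
+ * the pte_offset_*() helpers land on the Linux copy, and it is
+ * set_pte_ext() that updates the matching h/w entry as well.)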
+ */ +#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ +#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) +#define L_PTE_USER (_AT(pteval_t, 1) << 8) +#define L_PTE_XN (_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ +#define L_PTE_NONE (_AT(pteval_t, 1) << 11) + +/* + * These are the memory types, defined to be compatible with + * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + */ +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */ +#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) + +#ifndef __ASSEMBLY__ + +/* + * The "pud_xxx()" functions here are trivial when the pmd is folded into + * the pud: the pud entry is never bad, always exists, and can't be set or + * cleared. + */ +#define pud_none(pud) (0) +#define pud_bad(pud) (0) +#define pud_present(pud) (1) +#define pud_clear(pudp) do { } while (0) +#define set_pud(pud,pudp) do { } while (0) + +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ + return (pmd_t *)pud; +} + +#define pmd_large(pmd) (pmd_val(pmd) & 2) +#define pmd_bad(pmd) (pmd_val(pmd) & 2) + +#define copy_pmd(pmdpd,pmdps) \ + do { \ + pmdpd[0] = pmdps[0]; \ + pmdpd[1] = pmdps[1]; \ + flush_pmd_entry(pmdpd); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + pmdp[0] = __pmd(0); \ + pmdp[1] = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +/* we don't need complex calculations here as the pmd is folded into the pgd */ +#define pmd_addr_end(addr,end) (end) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +#define pte_special(pte) (0) +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +/* + * We don't have huge page support for short descriptors, for the moment + * define empty stubs for use by pin_page_for_write. + */ +#define pmd_hugewillfault(pmd) (0) +#define pmd_thp_or_huge(pmd) (0) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/lib/include/asm/pgtable-hwdef.h b/arch/lib/include/asm/pgtable-hwdef.h new file mode 100644 index 000000000000..8426229ba292 --- /dev/null +++ b/arch/lib/include/asm/pgtable-hwdef.h @@ -0,0 +1,19 @@ +/* + * arch/arm/include/asm/pgtable-hwdef.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASMARM_PGTABLE_HWDEF_H +#define _ASMARM_PGTABLE_HWDEF_H + +#ifdef CONFIG_ARM_LPAE +#include +#else +#include +#endif + +#endif diff --git a/arch/lib/include/asm/pgtable.h b/arch/lib/include/asm/pgtable.h index ce599c852795..f40354198bad 100644 --- a/arch/lib/include/asm/pgtable.h +++ b/arch/lib/include/asm/pgtable.h @@ -1,30 +1,363 @@ -#ifndef _ASM_SIM_PGTABLE_H -#define _ASM_SIM_PGTABLE_H +/* + * arch/arm/include/asm/pgtable.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASMARM_PGTABLE_H +#define _ASMARM_PGTABLE_H -#define PAGE_KERNEL ((pgprot_t) {0 }) +#include +#include -#define arch_start_context_switch(prev) do {} while (0) +#ifndef CONFIG_MMU -#define kern_addr_valid(addr)(1) -#define pte_file(pte)(1) -/* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 5) & 0x1f) -#define __swp_offset(x) ((x).val >> 11) -#define __swp_entry(type, offset) \ - ((swp_entry_t) {((type) << 5) | ((offset) << 11) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) {pte_val((pte)) }) -#define __swp_entry_to_pte(x) ((pte_t) {(x).val }) -#define pmd_page(pmd) (struct page *)(pmd_val(pmd) & PAGE_MASK) -#define pgtable_cache_init() do { } while (0) +#include +#include -static inline int pte_swp_soft_dirty(pte_t pte) +#else + +#include +#include +#include + + +#include + +#ifdef CONFIG_ARM_LPAE +#include +#else +#include +#endif + +/* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be a 8MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. ;) + */ +#define VMALLOC_OFFSET (8*1024*1024) +#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) +#define VMALLOC_END 0xff000000UL + +#define LIBRARY_TEXT_START 0x0c000000 + +#ifndef __ASSEMBLY__ +extern void __pte_error(const char *file, int line, pte_t); +extern void __pmd_error(const char *file, int line, pmd_t); +extern void __pgd_error(const char *file, int line, pgd_t); + +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) +#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) + +/* + * This is the lowest virtual address we can permit any user space + * mapping to be mapped at. This is particularly important for + * non-high vector CPUs. + */ +#define FIRST_USER_ADDRESS (PAGE_SIZE * 2) + +/* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). + */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* + * The pgprot_* and protection_map entries will be fixed up in runtime + * to include the cachable and bufferable bits based on memory policy, + * as well as any architecture dependent bits like global/ASID and SMP + * shared mapping bits. 
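+ *
+ * (In the stock ARM tree this fixup is performed at boot by
+ * build_mem_type_table() in arch/arm/mm/mmu.c; whether this library
+ * port runs that path as well is an assumption left open here.)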
+ */ +#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG + +extern pgprot_t pgprot_user; +extern pgprot_t pgprot_kernel; +extern pgprot_t pgprot_hyp_device; +extern pgprot_t pgprot_s2; +extern pgprot_t pgprot_s2_device; + +#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) + +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE) +#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) +#define PAGE_KERNEL_EXEC pgprot_kernel +#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) +#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) +#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) +#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY) + +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) + +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define pgprot_noncached(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#define pgprot_writecombine(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) + +#define pgprot_stronglyordered(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#else +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) +#endif + +#endif /* __ASSEMBLY__ */ + +/* + * The table below defines the page protection levels that we insert into our + * Linux page table version. These get translated into the best that the + * architecture can perform. 
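+ * (The __Pxxx entries below cover private, copy-on-write mappings and
+ * the __Sxxx entries shared mappings; the three digits are the read,
+ * write and execute bits of the requested protection.)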
Note that on most ARM hardware: + * 1) We cannot do execute protection + * 2) If we could do execute protection, then read is implied + * 3) write implies read permissions + */ +#define __P000 __PAGE_NONE +#define __P001 __PAGE_READONLY +#define __P010 __PAGE_COPY +#define __P011 __PAGE_COPY +#define __P100 __PAGE_READONLY_EXEC +#define __P101 __PAGE_READONLY_EXEC +#define __P110 __PAGE_COPY_EXEC +#define __P111 __PAGE_COPY_EXEC + +#define __S000 __PAGE_NONE +#define __S001 __PAGE_READONLY +#define __S010 __PAGE_SHARED +#define __S011 __PAGE_SHARED +#define __S100 __PAGE_READONLY_EXEC +#define __S101 __PAGE_READONLY_EXEC +#define __S110 __PAGE_SHARED_EXEC +#define __S111 __PAGE_SHARED_EXEC + +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern struct page *empty_zero_page; +#define ZERO_PAGE(vaddr) (empty_zero_page) + + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* to find an entry in a page-table-directory */ +#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) + +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (pmd_val(pmd)) + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ + return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); +} + +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) + +#ifndef CONFIG_HIGHPTE +#define __pte_map(pmd) pmd_page_vaddr(*(pmd)) +#define __pte_unmap(pte) do { } while (0) +#else +#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) +#define __pte_unmap(pte) kunmap_atomic(pte) +#endif + +#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + +#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) + +#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) +#define pte_unmap(pte) __pte_unmap(pte) + +#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) +#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) + +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) + +#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) + +#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \ + : !!(pte_val(pte) & (val))) +#define pte_isclear(pte, val) (!(pte_val(pte) & (val))) + +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT)) +#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID)) +#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? 
pte_present(pte) : pte_valid(pte)) +#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY)) +#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG)) +#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN)) + +#define pte_valid_user(pte) \ + (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte)) + +#if __LINUX_ARM_ARCH__ < 6 +static inline void __sync_icache_dcache(pte_t pteval) +{ +} +#else +extern void __sync_icache_dcache(pte_t pteval); +#endif + +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_valid_user(pteval)) { + if (!pte_special(pteval)) + __sync_icache_dcache(pteval); + ext |= PTE_EXT_NG; + } + + set_pte_ext(ptep, pteval, ext); +} + +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) &= ~pgprot_val(prot); + return pte; +} + +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) |= pgprot_val(prot); + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkexec(pte_t pte) { - return 0; + return clear_pte_bit(pte, __pgprot(L_PTE_XN)); } -static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +static inline pte_t pte_mknexec(pte_t pte) { + return set_pte_bit(pte, __pgprot(L_PTE_XN)); +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | + L_PTE_NONE | L_PTE_VALID; + pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } -#endif /* _ASM_SIM_PGTABLE_H */ +/* + * Encode and decode a swap entry. Swap entries are stored in the Linux + * page tables as follows: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <--------------- offset ------------------------> < type -> 0 0 + * + * This gives us up to 31 swap files and 128GB per swap file. Note that + * the offset field is always non-zero. + */ +#define __SWP_TYPE_SHIFT 2 +#define __SWP_TYPE_BITS 5 +#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) + +/* + * It is an error for the kernel to have more swap files than we can + * encode in the PTEs. This ensures that we know when MAX_SWAPFILES + * is increased beyond what we presently support. 
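+ *
+ * (Working the figures above through, as a check of ours: the 5 type
+ * bits give the 31 usable swap files quoted, and the remaining
+ * 32 - 7 = 25 offset bits cover 2^25 pages, i.e. 2^25 * 4KB = 128GB
+ * per swap file.)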
+ */ +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +/* FIXME: this is not correct */ +#define kern_addr_valid(addr) (1) + +#include + +/* + * We provide our own arch_get_unmapped_area to cope with VIPT caches. + */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#define pgtable_cache_init() do { } while (0) + +#endif /* !__ASSEMBLY__ */ + +#endif /* CONFIG_MMU */ + +#endif /* _ASMARM_PGTABLE_H */ diff --git a/arch/lib/include/asm/proc-fns.h b/arch/lib/include/asm/proc-fns.h new file mode 100644 index 000000000000..5324c1112f3a --- /dev/null +++ b/arch/lib/include/asm/proc-fns.h @@ -0,0 +1,160 @@ +/* + * arch/arm/include/asm/proc-fns.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_PROCFNS_H +#define __ASM_PROCFNS_H + +#ifdef __KERNEL__ + +#include +#include + +#ifndef __ASSEMBLY__ + +struct mm_struct; + +/* + * Don't change this structure - ASM code relies on it. + */ +extern struct processor { + /* MISC + * get data abort address/flags + */ + void (*_data_abort)(unsigned long pc); + /* + * Retrieve prefetch fault address + */ + unsigned long (*_prefetch_abort)(unsigned long lr); + /* + * Set up any processor specifics + */ + void (*_proc_init)(void); + /* + * Disable any processor specifics + */ + void (*_proc_fin)(void); + /* + * Special stuff for a reset + */ + void (*reset)(unsigned long addr) __attribute__((noreturn)); + /* + * Idle the processor + */ + int (*_do_idle)(void); + /* + * Processor architecture specific + */ + /* + * clean a virtual address range from the + * D-cache without flushing the cache. + */ + void (*dcache_clean_area)(void *addr, int size); + + /* + * Set the page table + */ + void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); + /* + * Set a possibly extended PTE. Non-extended PTEs should + * ignore 'ext'. 
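+	 *
+	 * (As the set_pte_at() wrapper in pgtable.h shows, 'ext' typically
+	 * carries hardware-only bits such as PTE_EXT_NG for user mappings.)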
+ */ +#ifdef CONFIG_ARM_LPAE + void (*set_pte_ext)(pte_t *ptep, pte_t pte); +#else + void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); +#endif + + /* Suspend/resume */ + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); +} processor; + +#ifndef MULTI_CPU +extern void cpu_proc_init(void); +extern void cpu_proc_fin(void); +extern int cpu_do_idle(void); +extern void cpu_dcache_clean_area(void *, int); +extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); +#ifdef CONFIG_ARM_LPAE +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); +#else +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); +#endif +extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); + +/* These three are private to arch/arm/kernel/suspend.c */ +extern void cpu_do_suspend(void *); +extern void cpu_do_resume(void *); +#else +#define cpu_proc_init processor._proc_init +#define cpu_proc_fin processor._proc_fin +#define cpu_reset processor.reset +#define cpu_do_idle processor._do_idle +#define cpu_dcache_clean_area processor.dcache_clean_area +#define cpu_set_pte_ext processor.set_pte_ext +#define cpu_do_switch_mm processor.switch_mm + +/* These three are private to arch/arm/kernel/suspend.c */ +#define cpu_do_suspend processor.do_suspend +#define cpu_do_resume processor.do_resume +#endif + +extern void cpu_resume(void); + +#include + +#ifdef CONFIG_MMU + +#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) + +#ifdef CONFIG_ARM_LPAE + +#define cpu_get_ttbr(nr) \ + ({ \ + u64 ttbr; \ + __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ + : "=r" (ttbr)); \ + ttbr; \ + }) + +#define cpu_set_ttbr(nr, val) \ + do { \ + u64 ttbr = val; \ + __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ + : : "r" (ttbr)); \ + } while (0) + +#define cpu_get_pgd() \ + ({ \ + u64 pg = cpu_get_ttbr(0); \ + pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ + (pgd_t *)phys_to_virt(pg); \ + }) +#else +#define cpu_get_pgd() \ + ({ \ + unsigned long pg; \ + __asm__("mrc p15, 0, %0, c2, c0, 0" \ + : "=r" (pg) : : "cc"); \ + pg &= ~0x3fff; \ + (pgd_t *)phys_to_virt(pg); \ + }) +#endif + +#else /*!CONFIG_MMU */ + +#define cpu_switch_mm(pgd,mm) { } + +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* __ASM_PROCFNS_H */ diff --git a/arch/lib/include/asm/processor.h b/arch/lib/include/asm/processor.h index 4ac2e89127a0..cb14ac560b56 100644 --- a/arch/lib/include/asm/processor.h +++ b/arch/lib/include/asm/processor.h @@ -9,7 +9,7 @@ struct thread_struct {}; # define current_text_addr() ({ __label__ _l; _l: &&_l; }) -#define TASK_SIZE ((~(long)0)) +//#define TASK_SIZE ((~(long)0)) #define thread_saved_pc(x) (unsigned long)0 #define task_pt_regs(t) NULL diff --git a/arch/lib/include/asm/thread_info.h b/arch/lib/include/asm/thread_info.h index ec316c613041..c8fc65efd435 100644 --- a/arch/lib/include/asm/thread_info.h +++ b/arch/lib/include/asm/thread_info.h @@ -33,4 +33,9 @@ static inline bool test_and_clear_restore_sigmask(void) return true; } + +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_START_SP (THREAD_SIZE - 8) + #endif /* _ASM_SIM_THREAD_INFO_H */ diff --git a/arch/lib/include/asm/tlbflush.h b/arch/lib/include/asm/tlbflush.h new file mode 100644 index 000000000000..def9e570199f --- /dev/null +++ b/arch/lib/include/asm/tlbflush.h @@ -0,0 +1,686 @@ +/* + * arch/arm/include/asm/tlbflush.h + * + * Copyright (C) 1999-2003 Russell King + * + * This program is free software; you can redistribute 
it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASMARM_TLBFLUSH_H +#define _ASMARM_TLBFLUSH_H + +#ifdef CONFIG_MMU + +#include + +#define TLB_V4_U_PAGE (1 << 1) +#define TLB_V4_D_PAGE (1 << 2) +#define TLB_V4_I_PAGE (1 << 3) +#define TLB_V6_U_PAGE (1 << 4) +#define TLB_V6_D_PAGE (1 << 5) +#define TLB_V6_I_PAGE (1 << 6) + +#define TLB_V4_U_FULL (1 << 9) +#define TLB_V4_D_FULL (1 << 10) +#define TLB_V4_I_FULL (1 << 11) +#define TLB_V6_U_FULL (1 << 12) +#define TLB_V6_D_FULL (1 << 13) +#define TLB_V6_I_FULL (1 << 14) + +#define TLB_V6_U_ASID (1 << 16) +#define TLB_V6_D_ASID (1 << 17) +#define TLB_V6_I_ASID (1 << 18) + +#define TLB_V6_BP (1 << 19) + +/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ +#define TLB_V7_UIS_PAGE (1 << 20) +#define TLB_V7_UIS_FULL (1 << 21) +#define TLB_V7_UIS_ASID (1 << 22) +#define TLB_V7_UIS_BP (1 << 23) + +#define TLB_BARRIER (1 << 28) +#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ +#define TLB_DCLEAN (1 << 30) +#define TLB_WB (1 << 31) + +/* + * MMU TLB Model + * ============= + * + * We have the following to choose from: + * v4 - ARMv4 without write buffer + * v4wb - ARMv4 with write buffer without I TLB flush entry instruction + * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction + * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) + * fa - Faraday (v4 with write buffer with UTLB) + * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction + * v7wbi - identical to v6wbi + */ +#undef _TLB +#undef MULTI_TLB + +#ifdef CONFIG_SMP_ON_UP +#define MULTI_TLB 1 +#endif + +#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) + +#ifdef CONFIG_CPU_TLB_V4WT +# define v4_possible_flags v4_tlb_flags +# define v4_always_flags v4_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4 +# endif +#else +# define v4_possible_flags 0 +# define v4_always_flags (-1UL) +#endif + +#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ + TLB_V4_U_FULL | TLB_V4_U_PAGE) + +#ifdef CONFIG_CPU_TLB_FA +# define fa_possible_flags fa_tlb_flags +# define fa_always_flags fa_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB fa +# endif +#else +# define fa_possible_flags 0 +# define fa_always_flags (-1UL) +#endif + +#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ + TLB_V4_I_FULL | TLB_V4_D_FULL | \ + TLB_V4_I_PAGE | TLB_V4_D_PAGE) + +#ifdef CONFIG_CPU_TLB_V4WBI +# define v4wbi_possible_flags v4wbi_tlb_flags +# define v4wbi_always_flags v4wbi_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4wbi +# endif +#else +# define v4wbi_possible_flags 0 +# define v4wbi_always_flags (-1UL) +#endif + +#define fr_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \ + TLB_V4_I_FULL | TLB_V4_D_FULL | \ + TLB_V4_I_PAGE | TLB_V4_D_PAGE) + +#ifdef CONFIG_CPU_TLB_FEROCEON +# define fr_possible_flags fr_tlb_flags +# define fr_always_flags fr_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4wbi +# endif +#else +# define fr_possible_flags 0 +# define fr_always_flags (-1UL) +#endif + +#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \ + TLB_V4_I_FULL | TLB_V4_D_FULL | \ + TLB_V4_D_PAGE) + +#ifdef CONFIG_CPU_TLB_V4WB +# define v4wb_possible_flags v4wb_tlb_flags +# define v4wb_always_flags v4wb_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4wb +# endif +#else +# define v4wb_possible_flags 0 +# define v4wb_always_flags (-1UL) +#endif + +#define v6wbi_tlb_flags 
(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ + TLB_V6_I_FULL | TLB_V6_D_FULL | \ + TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ + TLB_V6_I_ASID | TLB_V6_D_ASID | \ + TLB_V6_BP) + +#ifdef CONFIG_CPU_TLB_V6 +# define v6wbi_possible_flags v6wbi_tlb_flags +# define v6wbi_always_flags v6wbi_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v6wbi +# endif +#else +# define v6wbi_possible_flags 0 +# define v6wbi_always_flags (-1UL) +#endif + +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \ + TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ + TLB_V7_UIS_ASID | TLB_V7_UIS_BP) +#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ + TLB_V6_U_FULL | TLB_V6_U_PAGE | \ + TLB_V6_U_ASID | TLB_V6_BP) + +#ifdef CONFIG_CPU_TLB_V7 + +# ifdef CONFIG_SMP_ON_UP +# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up) +# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up) +# elif defined(CONFIG_SMP) +# define v7wbi_possible_flags v7wbi_tlb_flags_smp +# define v7wbi_always_flags v7wbi_tlb_flags_smp +# else +# define v7wbi_possible_flags v7wbi_tlb_flags_up +# define v7wbi_always_flags v7wbi_tlb_flags_up +# endif +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v7wbi +# endif +#else +# define v7wbi_possible_flags 0 +# define v7wbi_always_flags (-1UL) +#endif + +#ifndef _TLB +#error Unknown TLB model +#endif + +#ifndef __ASSEMBLY__ + +#include + +struct cpu_tlb_fns { + void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); + void (*flush_kern_range)(unsigned long, unsigned long); + unsigned long tlb_flags; +}; + +/* + * Select the calling method + */ +#ifdef MULTI_TLB + +#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range +#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range + +#else + +#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range) +#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range) + +extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); +extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); + +#endif + +extern struct cpu_tlb_fns cpu_tlb; + +#define __cpu_tlb_flags cpu_tlb.tlb_flags + +/* + * TLB Management + * ============== + * + * The arch/arm/mm/tlb-*.S files implement these methods. + * + * The TLB specific code is expected to perform whatever tests it + * needs to determine if it should invalidate the TLB for each + * call. Start addresses are inclusive and end addresses are + * exclusive; it is safe to round these addresses down. + * + * flush_tlb_all() + * + * Invalidate the entire TLB. + * + * flush_tlb_mm(mm) + * + * Invalidate all TLB entries in a particular address + * space. + * - mm - mm_struct describing address space + * + * flush_tlb_range(mm,start,end) + * + * Invalidate a range of TLB entries in the specified + * address space. + * - mm - mm_struct describing address space + * - start - start address (may not be aligned) + * - end - end address (exclusive, may not be aligned) + * + * flush_tlb_page(vaddr,vma) + * + * Invalidate the specified page in the specified address range. + * - vaddr - virtual address (may not be aligned) + * - vma - vma_struct describing address range + * + * flush_kern_tlb_page(kaddr) + * + * Invalidate the TLB entry for the specified page. The address + * will be in the kernels virtual memory space. Current uses + * only require the D-TLB to be invalidated. 
+ *	- kaddr - Kernel virtual memory address
+ */
+
+/*
+ * We optimise the code below by:
+ *  - building a set of TLB flags that might be set in __cpu_tlb_flags
+ *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
+ *  - if we're going to need __cpu_tlb_flags, access it once and only once
+ *
+ * This allows us to build optimal assembly for the single-CPU type case,
+ * and as close to optimal given the compiler constraints for multi-CPU
+ * case.  We could do better for the multi-CPU case if the compiler
+ * implemented the "%?" method, but this has been discontinued due to too
+ * many people getting it wrong.
+ */
+#define possible_tlb_flags	(v4_possible_flags | \
+				 v4wbi_possible_flags | \
+				 fr_possible_flags | \
+				 v4wb_possible_flags | \
+				 fa_possible_flags | \
+				 v6wbi_possible_flags | \
+				 v7wbi_possible_flags)
+
+#define always_tlb_flags	(v4_always_flags & \
+				 v4wbi_always_flags & \
+				 fr_always_flags & \
+				 v4wb_always_flags & \
+				 fa_always_flags & \
+				 v6wbi_always_flags & \
+				 v7wbi_always_flags)
+
+#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
+
+#define __tlb_op(f, insnarg, arg) \
+	do { \
+		if (always_tlb_flags & (f)) \
+			asm("mcr " insnarg \
+			    : : "r" (arg) : "cc"); \
+		else if (possible_tlb_flags & (f)) \
+			asm("tst %1, %2\n\t" \
+			    "mcrne " insnarg \
+			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
+			    : "cc"); \
+	} while (0)
+
+#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
+#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
+
+static inline void __local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
+	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
+	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(nsh);
+		isb();
+	}
+}
+
+static inline void __flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(ish);
+		isb();
+	}
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int zero = 0;
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
+			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
+			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
+		}
+	}
+
+	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
+	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
+	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_mm(mm);
+	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_mm(mm);
+#ifdef 
CONFIG_ARM_ERRATA_720789 + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0); +#else + tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); +#endif + + if (tlb_flag(TLB_BARRIER)) + dsb(ish); +} + +static inline void +__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); + if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) + asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); + } + + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); +} + +static inline void +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_page(vma, uaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); + + if (tlb_flag(TLB_BARRIER)) + dsb(nsh); +} + +static inline void +__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + + if (tlb_flag(TLB_WB)) + dsb(ishst); + + __local_flush_tlb_page(vma, uaddr); +#ifdef CONFIG_ARM_ERRATA_720789 + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); +#else + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr); +#endif + + if (tlb_flag(TLB_BARRIER)) + dsb(ish); +} + +static inline void __local_flush_tlb_kernel_page(unsigned long kaddr) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); + tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); + tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); + if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) + asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); + + tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); + tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); + tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); +} + +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(nshst); + + __local_flush_tlb_kernel_page(kaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); + + if (tlb_flag(TLB_BARRIER)) { + dsb(nsh); + isb(); + } +} + +static inline void __flush_tlb_kernel_page(unsigned long kaddr) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + kaddr &= PAGE_MASK; + + if (tlb_flag(TLB_WB)) + dsb(ishst); + + __local_flush_tlb_kernel_page(kaddr); + tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); + + if (tlb_flag(TLB_BARRIER)) { + dsb(ish); + isb(); + } +} + +/* + * Branch predictor maintenance is paired with full TLB invalidation, so + * there is no need for any barriers here. 
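+ *
+ * (For reference: the CP15 operation "c7, c5, 6" used below is BPIALL,
+ * and "c7, c1, 6" is its inner-shareable variant, BPIALLIS.)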
+ */ +static inline void __local_flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_V6_BP)) + asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + +static inline void local_flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + __local_flush_bp_all(); + if (tlb_flag(TLB_V7_UIS_BP)) + asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + +static inline void __flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + __local_flush_bp_all(); + if (tlb_flag(TLB_V7_UIS_BP)) + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); +} + +/* + * flush_pmd_entry + * + * Flush a PMD entry (word aligned, or double-word aligned) to + * RAM if the TLB for the CPU we are running on requires this. + * This is typically used when we are creating PMD entries. + * + * clean_pmd_entry + * + * Clean (but don't drain the write buffer) if the CPU requires + * these operations. This is typically used when we are removing + * PMD entries. + */ +static inline void flush_pmd_entry(void *pmd) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); + + if (tlb_flag(TLB_WB)) + dsb(ishst); +} + +static inline void clean_pmd_entry(void *pmd) +{ + const unsigned int __tlb_flag = __cpu_tlb_flags; + + tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); + tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); +} + +#undef tlb_op +#undef tlb_flag +#undef always_tlb_flags +#undef possible_tlb_flags + +/* + * Convert calls to our calling convention. + */ +#define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) +#define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) + +#ifndef CONFIG_SMP +#define flush_tlb_all local_flush_tlb_all +#define flush_tlb_mm local_flush_tlb_mm +#define flush_tlb_page local_flush_tlb_page +#define flush_tlb_kernel_page local_flush_tlb_kernel_page +#define flush_tlb_range local_flush_tlb_range +#define flush_tlb_kernel_range local_flush_tlb_kernel_range +#define flush_bp_all local_flush_bp_all +#else +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); +extern void flush_tlb_kernel_page(unsigned long kaddr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); +#endif + +/* + * If PG_dcache_clean is not set for the page, we need to ensure that any + * cache entries for the kernels virtual memory range are written + * back to the page. On ARMv6 and later, the cache coherency is handled via + * the set_pte_at() function. 
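+ *
+ * (Hence the empty inline stub below when __LINUX_ARM_ARCH__ >= 6.)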
+ */ +#if __LINUX_ARM_ARCH__ < 6 +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); +#else +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} +#endif + +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +#endif + +#elif defined(CONFIG_SMP) /* !CONFIG_MMU */ + +#ifndef __ASSEMBLY__ + +#include + +static inline void local_flush_tlb_all(void) { } +static inline void local_flush_tlb_mm(struct mm_struct *mm) { } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { } +static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { } +static inline void local_flush_bp_all(void) { } + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); +extern void flush_tlb_kernel_page(unsigned long kaddr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); +#endif /* __ASSEMBLY__ */ + +#endif + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_ARM_ERRATA_798181 +extern void erratum_a15_798181_init(void); +#else +static inline void erratum_a15_798181_init(void) {} +#endif +extern bool (*erratum_a15_798181_handler)(void); + +static inline bool erratum_a15_798181(void) +{ + if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) && + erratum_a15_798181_handler)) + return erratum_a15_798181_handler(); + return false; +} +#endif + +#endif diff --git a/arch/lib/sysctl.c b/arch/lib/sysctl.c index bda173a6ce48..ecf0dcedf563 100644 --- a/arch/lib/sysctl.c +++ b/arch/lib/sysctl.c @@ -11,9 +11,17 @@ #include #include #include +#include #include "sim-assert.h" #include "sim-types.h" +int mmap_min_addr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + lib_assert(false); + return 0; +} + int drop_caches_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { @@ -112,6 +120,9 @@ unsigned int dirty_expire_interval = 30 * 100; unsigned int dirty_writeback_interval = 5 * 100; unsigned long dirty_background_bytes = 0; +int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; +unsigned long dac_mmap_min_addr = 4096; + #if 0 int percpu_pagelist_fraction = 0; #endif diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 6bb277570624..a246d02ef592 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -37,9 +37,6 @@ typedef struct bootmem_data { extern bootmem_data_t bootmem_node_data[]; #endif - -extern void link_bootmem(bootmem_data_t *bdata); - extern unsigned long bootmem_bootmap_pages(unsigned long); extern unsigned long init_bootmem_node(pg_data_t *pgdat, diff --git a/mm/memblock.c b/mm/memblock.c index 9318b567ed79..bbf9bf8658eb 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -50,7 +50,7 @@ struct memblock memblock __initdata_memblock = { .current_limit = MEMBLOCK_ALLOC_ANYWHERE, }; -int memblock_debug __initdata_memblock; +int memblock_debug __initdata_memblock = 1; #ifdef CONFIG_MOVABLE_NODE bool movable_node_enabled __initdata_memblock = false; #endif diff --git a/mm/page_alloc.c 
b/mm/page_alloc.c index 2bf193807995..cbd84db43ee0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -62,7 +62,6 @@
 #include
 #include
-
 #include
 #include
 #include
@@ -2868,10 +2867,12 @@ static void print_buddy_freelist(void)
 			printk(KERN_INFO "%lu %d %d %d\n",pfn, order, t, i);
 			i++;
 		}
+
+		printk(KERN_INFO "Total free page: %d\n", i);
 		}
 	}
  out:
-	printk(KERN_INFO "Totoal free page: %d\n", i);
+	printk(KERN_INFO "Total free page2: %d\n", i);
@@ -2901,7 +2902,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	print_buddy_freelist();
-
 	gfp_mask &= gfp_allowed_mask;
 	lockdep_trace_alloc(gfp_mask);
@@ -2918,12 +2918,16 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	 * valid zone. It's possible to have an empty zonelist as a result
 	 * of __GFP_THISNODE and a memoryless node
 	 */
-	if (unlikely(!zonelist->_zonerefs->zone))
+	if (unlikely(!zonelist->_zonerefs->zone)) {
+		printk(KERN_INFO "I am %s\n", __func__);
 		return NULL;
+	}
 	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
+	printk(KERN_INFO "I am %s\n", __func__);
+
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
@@ -2967,7 +2971,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		goto retry_cpuset;
-	printk(KERN_INFO "I am %s\n", __func__);
+	printk(KERN_INFO "Done: I am %s %p\n", __func__, page);
 	return page;
 }
@@ -5173,7 +5177,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 {
-	printk("I am %s %d\n", __func__, pgdat->node_spanned_pages);
+	printk("I am %s %lu\n", __func__, pgdat->node_spanned_pages);
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
diff --git a/mm/slib_env.c b/mm/slib_env.c
index 03d1f14bb7a2..0787fdffb1c4 100644
--- a/mm/slib_env.c
+++ b/mm/slib_env.c
@@ -6,6 +6,7 @@
 #include
 #include "slib_env.h"
+struct meminfo meminfo;
 static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
@@ -189,15 +190,14 @@ int __init arm_add_memory(u64 start, u64 size)
 	else
 		size -= aligned_start - start;
-
 	if (aligned_start < PHYS_OFFSET) {
 		if (aligned_start + size <= PHYS_OFFSET) {
-			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+			pr_info("Ignoring memory below PHYS_OFFSET1: 0x%08llx-0x%08llx\n",
 				aligned_start, aligned_start + size);
 			return -EINVAL;
 		}
-		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+		pr_info("Ignoring memory below PHYS_OFFSET2: 0x%08llx-0x%08llx\n",
 			aligned_start, (u64)PHYS_OFFSET);
 		size -= PHYS_OFFSET - aligned_start;
@@ -207,6 +207,8 @@
 	start = aligned_start;
 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
+	printk("I am %s start:%llu, size:%llu\n", __func__, start, size);
+
 	/*
 	 * Check whether this memory region has non-zero size or
 	 * invalid node number. 
@@ -218,14 +220,79 @@ int __init arm_add_memory(u64 start, u64 size) return 0; } +static void __init find_limits(unsigned long *min, unsigned long *max_low, + unsigned long *max_high) +{ + struct meminfo *mi = &meminfo; + int i; + + /* This assumes the meminfo array is properly sorted */ + *min = bank_pfn_start(&mi->bank[0]); + for_each_bank (i, mi) + if (mi->bank[i].highmem) + break; + *max_low = bank_pfn_end(&mi->bank[i - 1]); + *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); +} + +static void __init arm_bootmem_init(unsigned long start_pfn, + unsigned long end_pfn) +{ + struct memblock_region *reg; + unsigned int boot_pages; + phys_addr_t bitmap; + pg_data_t *pgdat; + + /* + * Allocate the bootmem bitmap page. This must be in a region + * of memory which has already been mapped. + */ + boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); + + bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, + __pfn_to_phys(end_pfn)); + + /* + * Initialise the bootmem allocator, handing the + * memory banks over to bootmem. + */ + node_set_online(0); + pgdat = NODE_DATA(0); + init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); + + /* Free the lowmem regions from memblock into bootmem. */ + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + if (end >= end_pfn) + end = end_pfn; + if (start >= end) + break; + + free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); + } + + /* Reserve the lowmem memblock reserved regions in bootmem. */ + for_each_memblock(reserved, reg) { + unsigned long start = memblock_region_reserved_base_pfn(reg); + unsigned long end = memblock_region_reserved_end_pfn(reg); + + if (end >= end_pfn) + end = end_pfn; + if (start >= end) + break; + reserve_bootmem(__pfn_to_phys(start), + (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); + } +} + + void __init bootmem_init(void) { unsigned long min, max_low, max_high; - min = 0; - max_low = 194560; - max_high = 524288; - + find_limits(&min, &max_low, &max_high); zone_sizes_init(min, max_low, max_high); @@ -246,10 +313,13 @@ void __init paging_init(void) } - void __init setup_arch(char **cmd) { - arm_add_memory(0, 1024 * 1024 * 1024 * 1); + int ret; + ret = arm_add_memory(0, 1024 * 1024 * 1024 * 1); + if (ret) + printk("arm_add_memory failed in %s\n", __func__); + arm_memblock_init(); paging_init(); } @@ -260,7 +330,6 @@ void __init setup_arch(char **cmd) */ static void __init mm_init(void) { - link_bootmem(NODE_DATA(0)->bdata); mem_init(); } @@ -275,9 +344,10 @@ void __init init_memory_system(void) void test(void) { pg_data_t *pgdat = NODE_DATA(nid); - printk("I am printk: %p, %p, %d, %p\n", pgdat->node_zones, - pgdat->node_zonelists, - pgdat->nr_zones, - pgdat->bdata); + alloc_pages(GFP_KERNEL, 1); + + //printk("I am printk: %p, %p, %d\n", pgdat->node_zones, + // pgdat->node_zonelists, + // pgdat->nr_zones); } diff --git a/mm/slib_env.h b/mm/slib_env.h index 630bf527999b..d58840422688 100644 --- a/mm/slib_env.h +++ b/mm/slib_env.h @@ -1,17 +1,55 @@ #include #include -#define PHYS_OFFSET 0xC0000000 + +/* From arm/include/asm/memory.h */ +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) + + /* -char *_text, *_stext, *_etext; -char *_data, *_sdata, *_edata; -char *__bss_start, *__bss_stop; -char *_sinittext, *_einittext; -char *_end; -char *__per_cpu_load, *__per_cpu_start, *__per_cpu_end; -char 
*__kprobes_text_start, *__kprobes_text_end; -char *__entry_text_start, *__entry_text_end; -*/ + * Memory map description: from arm/include/asm/setup.h + */ +#ifdef CONFIG_ARM_NR_BANKS +#define NR_BANKS CONFIG_ARM_NR_BANKS +#else +#define NR_BANKS 16 +#endif + +struct membank { + phys_addr_t start; + unsigned long size; + unsigned int highmem; +}; + +struct meminfo { + int nr_banks; + struct membank bank[NR_BANKS]; +}; + +extern struct meminfo meminfo; + +#define for_each_bank(iter,mi) \ + for (iter = 0; iter < (mi)->nr_banks; iter++) + +#define bank_pfn_start(bank) __phys_to_pfn((bank)->start) +#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) +#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT) +#define bank_phys_start(bank) (bank)->start +#define bank_phys_end(bank) ((bank)->start + (bank)->size) +#define bank_phys_size(bank) (bank)->size + + + +/* From arm/mm/mmu.c */ +//pgprot_t pgprot_user; +//pgprot_t pgprot_kernel; +//pgprot_t pgprot_hyp_device; +//pgprot_t pgprot_s2; +//pgprot_t pgprot_s2_device; + void __init init_memory_system(void); + + From ed2c3feb305b8ae6bb72f13855882bb8c1749ac3 Mon Sep 17 00:00:00 2001 From: yjiao Date: Tue, 2 Jun 2015 12:21:38 -0400 Subject: [PATCH 04/29] recover init/main.c --- init/main.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/init/main.c b/init/main.c index b77ba6b450ce..2115055faeac 100644 --- a/init/main.c +++ b/init/main.c @@ -782,14 +782,10 @@ int __init_or_module do_one_initcall(initcall_t fn) if (initcall_blacklisted(fn)) return -EPERM; -#ifndef CONFIG_INIT_DEBUG_ALWAYS if (initcall_debug) ret = do_one_initcall_debug(fn); else ret = fn(); -#else - ret = do_one_initcall_debug(fn); -#endif msgbuf[0] = 0; From dfe766d5c7c01f267a0c6e635da22b0b03609061 Mon Sep 17 00:00:00 2001 From: yjiao Date: Tue, 2 Jun 2015 18:15:27 -0400 Subject: [PATCH 05/29] add WANT_PAGE_VIRTUAL to use page->virtual --- arch/lib/include/asm/page.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/lib/include/asm/page.h b/arch/lib/include/asm/page.h index 4355f0ec44d6..eadfa319a847 100644 --- a/arch/lib/include/asm/page.h +++ b/arch/lib/include/asm/page.h @@ -15,6 +15,9 @@ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) +#define WANT_PAGE_VIRTUAL 1 + + #ifndef __ASSEMBLY__ #ifndef CONFIG_MMU From 63b48b1383065bd5762402cb239e7ff53a129780 Mon Sep 17 00:00:00 2001 From: yjiao Date: Wed, 3 Jun 2015 13:21:16 -0400 Subject: [PATCH 06/29] a working version done --- arch/lib/Makefile | 2 +- arch/lib/glue.c | 1 + arch/lib/include/asm/highmem.h | 2 - mm/page_alloc.c | 150 +++++++++++++++++++-------- mm/slib_env.c | 178 ++++++++++++++++++++++++++++++--- 5 files changed, 277 insertions(+), 56 deletions(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index d2979cfbace7..4f6abbbe35a8 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -156,7 +156,7 @@ define cmd_lib_bounds echo "#define GENERATED_BOUNDS_H"; \ echo ""; \ echo "#define NR_PAGEFLAGS (__NR_PAGEFLAGS)"; \ - echo "#define MAX_NR_ZONES (__MAX_NR_ZONES)"; \ + echo "#define MAX_NR_ZONES 3"; \ echo ""; \ echo "#endif /* GENERATED_BOUNDS_H */") > $@ endef diff --git a/arch/lib/glue.c b/arch/lib/glue.c index e23f4e6331e4..756681000644 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -309,3 +309,4 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, return 0; } + diff --git a/arch/lib/include/asm/highmem.h b/arch/lib/include/asm/highmem.h index 734bb437710b..130b1dd3411e 100644 --- a/arch/lib/include/asm/highmem.h +++ 
b/arch/lib/include/asm/highmem.h @@ -1,7 +1,6 @@ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H -#if 1 #include #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE) @@ -74,4 +73,3 @@ extern struct page *kmap_atomic_to_page(const void *ptr); #endif -#endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cbd84db43ee0..a0ac6b9e1d45 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -117,6 +117,37 @@ unsigned long totalram_pages __read_mostly; #endif +static void print_buddy_freelist(void) +{ + struct zone *zone; + unsigned int order, t; + struct list_head *curr; + unsigned long pfn; + int i = 0; + + for_each_zone(zone) { + printk(KERN_INFO "I am zone %s %lu\n", zone->name, zone->present_pages); + if (zone->present_pages == 0) + goto out; + + for_each_migratetype_order(order, t) { + list_for_each(curr, &zone->free_area[order].free_list[t]) { + pfn = page_to_pfn(list_entry(curr, struct page, lru)); + + printk(KERN_INFO "%lu %d %d %d\n",pfn, order, t, i); + i++; + } + + } + } +out: + printk(KERN_INFO "Totoal free page2: %d\n", i); +} + + + + + unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; /* @@ -932,7 +963,10 @@ static inline void expand(struct zone *zone, struct page *page, set_page_guard(zone, &page[size], high, migratetype); continue; } + + list_add(&page[size].lru, &area->free_list[migratetype]); + printk("I am %s, pfn: %lu\n", __func__, page_to_pfn(&page[size])); area->nr_free++; set_page_order(&page[size], high); } @@ -985,8 +1019,9 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); - if (gfp_flags & __GFP_ZERO) + if (gfp_flags & __GFP_ZERO) { prep_zero_page(page, order, gfp_flags); + } if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); @@ -1022,9 +1057,13 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, if (list_empty(&area->free_list[migratetype])) continue; + printk("I am %s: mt: %d order:%d\n", __func__, migratetype, order); page = list_entry(area->free_list[migratetype].next, struct page, lru); + printk("I am %s: pfn %lu lru:%p\n", __func__, page_to_pfn(page), &page->lru); + printk("next:%p, prev:%p\n", (&page->lru)->next, (&page->lru)->prev); list_del(&page->lru); + printk("I am %s, %d\n", __func__, __LINE__); rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); @@ -1303,6 +1342,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, struct page *page; retry_reserve: + printk("I am %s: %d\n", __func__, __LINE__); page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { @@ -1323,6 +1363,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, } } + printk("I am %s: %d\n", __func__, __LINE__); trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } @@ -1358,6 +1399,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, else list_add_tail(&page->lru, list); list = &page->lru; + if (is_migrate_cma(get_freepage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); @@ -1723,6 +1765,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { + printk("I am %s: %d\n", __func__, __LINE__); pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, migratetype, cold); @@ -1738,6 +1781,7 @@ struct page 
*buffered_rmqueue(struct zone *preferred_zone, list_del(&page->lru); pcp->count--; } else { + if (unlikely(gfp_flags & __GFP_NOFAIL)) { /* * __GFP_NOFAIL is not to be used in new code. @@ -1760,11 +1804,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, get_freepage_migratetype(page)); } + + printk("I am %s: %d\n", __func__, __LINE__); __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) set_bit(ZONE_FAIR_DEPLETED, &zone->flags); + printk("I am %s: %d\n", __func__, __LINE__); + printk("I am %s: pfn: %lu\n", __func__, page_to_pfn(page)); __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); @@ -2232,13 +2280,22 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, } try_this_zone: + + printk("I am %s: %d\n", __func__, __LINE__); page = buffered_rmqueue(ac->preferred_zone, zone, order, gfp_mask, ac->migratetype); + printk("I am %s: %d\n", __func__, __LINE__); + printk("page:%lu\n", page_to_pfn(page)); if (page) { + printk("I am %s: %d\n", __func__, __LINE__); if (prep_new_page(page, order, gfp_mask, alloc_flags)) goto try_this_zone; + + printk("I am %s: %d\n", __func__, __LINE__); return page; } + + this_zone_full: if (IS_ENABLED(CONFIG_NUMA) && zlc_active) zlc_mark_zone_full(zonelist, z); @@ -2271,6 +2328,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, if (zonelist_rescan) goto zonelist_scan; + + printk("I am %s: %d\n", __func__, __LINE__); + return NULL; } @@ -2846,39 +2906,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, return page; } -static void print_buddy_freelist(void) -{ - struct zone *zone; - unsigned int order, t; - struct list_head *curr; - unsigned long pfn; - int i = 0; - - - for_each_zone(zone) { - printk(KERN_INFO "I am zone %s %lu\n", zone->name, zone->present_pages); - if (zone->present_pages == 0) - goto out; - - for_each_migratetype_order(order, t) { - list_for_each(curr, &zone->free_area[order].free_list[t]) { - pfn = page_to_pfn(list_entry(curr, struct page, lru)); - - printk(KERN_INFO "%lu %d %d %d\n",pfn, order, t, i); - i++; - } - - printk(KERN_INFO "Totoal free page: %d\n", i); - } - } -out: - printk(KERN_INFO "Totoal free page2: %d\n", i); -} - static int mem_initialized; +extern char *total_ram; + /* * This is the 'heart' of the zoned buddy allocator. */ @@ -2897,10 +2930,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, .migratetype = gfpflags_to_migratetype(gfp_mask), }; - if (mem_initialized == 0) + if (mem_initialized == 0) { init_memory_system(); - - print_buddy_freelist(); + print_buddy_freelist(); + mem_initialized = 1; + } gfp_mask &= gfp_allowed_mask; @@ -2911,22 +2945,22 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return NULL; - printk(KERN_INFO "I am %s\n", __func__); + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); /* * Check the zones suitable for the gfp_mask contain at least one * valid zone. 
It's possible to have an empty zonelist as a result * of __GFP_THISNODE and a memoryless node */ if (unlikely(!zonelist->_zonerefs->zone)) { - printk(KERN_INFO "I am %s\n", __func__); + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); return NULL; } if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; - printk(KERN_INFO "I am %s\n", __func__); + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); @@ -2937,13 +2971,20 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, ac.nodemask ? : &cpuset_current_mems_allowed, &ac.preferred_zone); + + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); if (!ac.preferred_zone) goto out; + + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); /* First allocation attempt */ alloc_mask = gfp_mask|__GFP_HARDWALL; page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); + + printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); + if (unlikely(!page)) { /* * Runtime PM, block IO and its error handling path @@ -2970,8 +3011,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; + printk(KERN_INFO "Done: I am %s %lu\n", __func__, page_to_pfn(page)); + printk(KERN_INFO "Done: I am %s %lx\n", __func__, page_to_pfn(page)); + page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); - printk(KERN_INFO "Done: I am %s %p\n", __func__, page); + print_buddy_freelist(); return page; } @@ -2984,7 +3028,7 @@ EXPORT_SYMBOL(__alloc_pages_nodemask); unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; - + printk("I am %s\n", __func__); /* * __get_free_pages() returns a 32-bit address, which cannot represent * a highmem page @@ -2994,6 +3038,7 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) page = alloc_pages(gfp_mask, order); if (!page) return 0; + printk("I am %s\n", __func__); return (unsigned long) page_address(page); } EXPORT_SYMBOL(__get_free_pages); @@ -3001,6 +3046,8 @@ EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { + + printk("I am %s\n", __func__); return __get_free_pages(gfp_mask | __GFP_ZERO, 0); } EXPORT_SYMBOL(get_zeroed_page); @@ -3017,9 +3064,11 @@ void __free_pages(struct page *page, unsigned int order) EXPORT_SYMBOL(__free_pages); - +#if 0 void free_pages(unsigned long addr, unsigned int order) { + printk("I am %s\n", __func__); + if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); __free_pages(virt_to_page((void *)addr), order); @@ -3027,7 +3076,19 @@ void free_pages(unsigned long addr, unsigned int order) } EXPORT_SYMBOL(free_pages); +#else +void free_pages(unsigned long addr, unsigned int order) +{ + unsigned long pfn = addr - (unsigned long) total_ram; + pfn = pfn >> PAGE_SHIFT; + if (addr != 0) { + __free_pages(pfn_to_page(pfn), order); + } +} + +EXPORT_SYMBOL(free_pages); +#endif /* @@ -3211,6 +3272,8 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) unsigned int order = get_order(size); unsigned long addr; + printk("I am %s\n", __func__); + addr = __get_free_pages(gfp_mask, order); return make_alloc_exact(addr, order, size); } @@ -4357,6 +4420,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, highest_memmap_pfn = end_pfn - 1; 
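/*
 * [Editor's note: illustrative sketch, not part of the surrounding diff.]
 * In this library build all "RAM" is a single lib_malloc() arena named
 * total_ram, so the pfn <-> virtual translation behind page->virtual,
 * free_pages() and kmap_atomic() in these patches is a plain linear
 * offset. A minimal sketch, assuming the patch's total_ram and PAGE_SHIFT:
 *
 *	static inline void *lib_pfn_to_virt(unsigned long pfn)
 *	{
 *		return (void *)(total_ram + (pfn << PAGE_SHIFT));
 *	}
 *
 *	static inline unsigned long lib_virt_to_pfn(const void *addr)
 *	{
 *		return ((const char *)addr - total_ram) >> PAGE_SHIFT;
 *	}
 */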
z = &NODE_DATA(nid)->node_zones[zone]; + + printk("I am %s zone name: %s, start pfn: %lu, end_pfn:%lu\n", + __func__, z->name, start_pfn, end_pfn); + + for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s @@ -4371,6 +4439,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, } page = pfn_to_page(pfn); set_page_links(page, zone, nid, pfn); + mminit_verify_page_links(page, zone, nid, pfn); init_page_count(page); page_mapcount_reset(page); @@ -4396,6 +4465,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, set_pageblock_migratetype(page, MIGRATE_MOVABLE); INIT_LIST_HEAD(&page->lru); + #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. */ if (!is_highmem_idx(zone)) @@ -5158,6 +5228,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone->zone_pgdat = pgdat; zone_pcp_init(zone); + printk("I am %s size:%lu\n", __func__, size); + /* For bootup, initialized properly in watermark setup */ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); diff --git a/mm/slib_env.c b/mm/slib_env.c index 0787fdffb1c4..5fc33df36c4b 100644 --- a/mm/slib_env.c +++ b/mm/slib_env.c @@ -4,9 +4,22 @@ #include #include #include +#include +#include + +#include #include "slib_env.h" +#include "sim.h" +#include "sim-assert.h" + struct meminfo meminfo; +static void * __initdata vmalloc_min = + (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); + +phys_addr_t arm_lowmem_limit __initdata = 0; + +unsigned int cacheid __read_mostly; static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) @@ -90,11 +103,20 @@ static void __init free_highpages(void) unsigned long max_low = max_low_pfn; struct memblock_region *mem, *res; + printk("max_low_pfn:%lu\n", max_low_pfn); + printk("min_low_pfn:%lu\n", min_low_pfn); + printk("max_pfn:%lu\n", max_pfn); + + /* set highmem page free */ for_each_memblock(memory, mem) { unsigned long start = memblock_region_memory_base_pfn(mem); unsigned long end = memblock_region_memory_end_pfn(mem); + printk("start:%lu\n", start); + printk("end:%lu\n", end); + + /* Ignore complete lowmem entries */ if (end <= max_low) continue; @@ -156,20 +178,48 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, unsigned long max_high) { unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; - int i; + struct memblock_region *reg; /* * initialise the zones. */ memset(zone_size, 0, sizeof(zone_size)); - memset(zhole_size, 0, sizeof(zhole_size)); - zone_size[0] = 194560; - zone_size[1] = 329728; + /* + * The memory size has already been determined. If we need + * to do anything fancy with the allocation of this memory + * to the zones, now is the time to do it. + */ + zone_size[0] = max_low - min; +#ifdef CONFIG_HIGHMEM + zone_size[ZONE_HIGHMEM] = max_high - max_low; +#endif + + /* + * Calculate the size of the holes. 
+ * holes = node_size - sum(bank_sizes) + */ + memcpy(zhole_size, zone_size, sizeof(zhole_size)); + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + if (start < max_low) { + unsigned long low_end = min(end, max_low); + zhole_size[0] -= low_end - start; + } +#ifdef CONFIG_HIGHMEM + if (end > max_low) { + unsigned long high_start = max(start, max_low); + zhole_size[ZONE_HIGHMEM] -= end - high_start; + } +#endif + } free_area_init_node(0, zone_size, min, zhole_size); } + void __init arm_memblock_init(void) { memblock_dump_all(); @@ -220,21 +270,17 @@ int __init arm_add_memory(u64 start, u64 size) return 0; } + static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) { - struct meminfo *mi = &meminfo; - int i; - - /* This assumes the meminfo array is properly sorted */ - *min = bank_pfn_start(&mi->bank[0]); - for_each_bank (i, mi) - if (mi->bank[i].highmem) - break; - *max_low = bank_pfn_end(&mi->bank[i - 1]); - *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); + *max_low = PFN_DOWN(memblock_get_current_limit()); + *min = PFN_UP(memblock_start_of_DRAM()); + *max_high = PFN_DOWN(memblock_end_of_DRAM()); } + + static void __init arm_bootmem_init(unsigned long start_pfn, unsigned long end_pfn) { @@ -294,6 +340,11 @@ void __init bootmem_init(void) find_limits(&min, &max_low, &max_high); + printk("min:%lu\n", min); + printk("max_low:%lu\n", max_low); + printk("max_high:%lu\n", max_high); + + zone_sizes_init(min, max_low, max_high); /* @@ -313,6 +364,90 @@ void __init paging_init(void) } +void __init sanity_check_meminfo(void) +{ + phys_addr_t memblock_limit = 0; + int highmem = 0; + phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + phys_addr_t block_start = reg->base; + phys_addr_t block_end = reg->base + reg->size; + phys_addr_t size_limit = reg->size; + + if (reg->base >= vmalloc_limit) + highmem = 1; + else + size_limit = vmalloc_limit - reg->base; + + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { + + if (highmem) { + pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", + &block_start, &block_end); + memblock_remove(reg->base, reg->size); + continue; + } + + if (reg->size > size_limit) { + phys_addr_t overlap_size = reg->size - size_limit; + + pr_notice("Truncating RAM at %pa-%pa to -%pa", + &block_start, &block_end, &vmalloc_limit); + memblock_remove(vmalloc_limit, overlap_size); + block_end = vmalloc_limit; + } + } + + if (!highmem) { + if (block_end > arm_lowmem_limit) { + if (reg->size > size_limit) + arm_lowmem_limit = vmalloc_limit; + else + arm_lowmem_limit = block_end; + } + + /* + * Find the first non-section-aligned page, and point + * memblock_limit at it. This relies on rounding the + * limit down to be section-aligned, which happens at + * the end of this function. + * + * With this algorithm, the start or end of almost any + * bank can be non-section-aligned. The only exception + * is that the start of the bank 0 must be section- + * aligned, since otherwise memory would need to be + * allocated when mapping the start of bank 0, which + * occurs before any free memory is mapped. 
+ */ + if (!memblock_limit) { + if (!IS_ALIGNED(block_start, SECTION_SIZE)) + memblock_limit = block_start; + else if (!IS_ALIGNED(block_end, SECTION_SIZE)) + memblock_limit = arm_lowmem_limit; + } + + } + } + + high_memory = __va(arm_lowmem_limit - 1) + 1; + + /* + * Round the memblock limit down to a section size. This + * helps to ensure that we will allocate memory from the + * last full section, which should be mapped. + */ + if (memblock_limit) + memblock_limit = round_down(memblock_limit, SECTION_SIZE); + if (!memblock_limit) + memblock_limit = arm_lowmem_limit; + + memblock_set_current_limit(memblock_limit); +} + +char *total_ram = NULL; + void __init setup_arch(char **cmd) { int ret; @@ -320,11 +455,26 @@ void __init setup_arch(char **cmd) if (ret) printk("arm_add_memory failed in %s\n", __func__); + total_ram = lib_malloc(1024 * 1024 * 1024 * 1); + if (total_ram == NULL) + printk("Alloc memory failed in %s\n", __func__); + + sanity_check_meminfo(); arm_memblock_init(); paging_init(); } +void *kmap_atomic(struct page *page) +{ + return (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); +} + +void __kunmap_atomic(void *kvaddr) +{ + +} + /* * Set up kernel memory allocators */ From 13c94a14e338635cace836af69f195a246b5bde9 Mon Sep 17 00:00:00 2001 From: yjiao Date: Wed, 3 Jun 2015 14:14:04 -0400 Subject: [PATCH 07/29] delete printk debug messages --- mm/highmem.c | 4 ++++ mm/page_alloc.c | 51 +++---------------------------------------------- 2 files changed, 7 insertions(+), 48 deletions(-) diff --git a/mm/highmem.c b/mm/highmem.c index 123bcd3ed4f2..ce66f2baf2bd 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -164,6 +164,10 @@ struct page *kmap_to_page(void *vaddr) } EXPORT_SYMBOL(kmap_to_page); +#if 1 +#define cache_is_vivt() 0 +#endif + static void flush_all_zero_pkmaps(void) { int i; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a0ac6b9e1d45..7b6406bd5846 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -966,7 +966,6 @@ static inline void expand(struct zone *zone, struct page *page, list_add(&page[size].lru, &area->free_list[migratetype]); - printk("I am %s, pfn: %lu\n", __func__, page_to_pfn(&page[size])); area->nr_free++; set_page_order(&page[size], high); } @@ -1057,13 +1056,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, if (list_empty(&area->free_list[migratetype])) continue; - printk("I am %s: mt: %d order:%d\n", __func__, migratetype, order); page = list_entry(area->free_list[migratetype].next, struct page, lru); - printk("I am %s: pfn %lu lru:%p\n", __func__, page_to_pfn(page), &page->lru); - printk("next:%p, prev:%p\n", (&page->lru)->next, (&page->lru)->prev); list_del(&page->lru); - printk("I am %s, %d\n", __func__, __LINE__); rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); @@ -1342,7 +1337,6 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, struct page *page; retry_reserve: - printk("I am %s: %d\n", __func__, __LINE__); page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { @@ -1363,7 +1357,6 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, } } - printk("I am %s: %d\n", __func__, __LINE__); trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } @@ -1765,7 +1758,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { - printk("I am %s: %d\n", __func__,
__LINE__); pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, migratetype, cold); @@ -1804,15 +1796,11 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, get_freepage_migratetype(page)); } - - printk("I am %s: %d\n", __func__, __LINE__); __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) set_bit(ZONE_FAIR_DEPLETED, &zone->flags); - printk("I am %s: %d\n", __func__, __LINE__); - printk("I am %s: pfn: %lu\n", __func__, page_to_pfn(page)); __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); @@ -2280,26 +2268,17 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, } try_this_zone: - - printk("I am %s: %d\n", __func__, __LINE__); page = buffered_rmqueue(ac->preferred_zone, zone, order, gfp_mask, ac->migratetype); - printk("I am %s: %d\n", __func__, __LINE__); - printk("page:%lu\n", page_to_pfn(page)); if (page) { - printk("I am %s: %d\n", __func__, __LINE__); if (prep_new_page(page, order, gfp_mask, alloc_flags)) goto try_this_zone; - - printk("I am %s: %d\n", __func__, __LINE__); return page; } - - this_zone_full: if (IS_ENABLED(CONFIG_NUMA) && zlc_active) zlc_mark_zone_full(zonelist, z); - } + } /* * The first pass makes sure allocations are spread fairly within the @@ -2327,10 +2306,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, if (zonelist_rescan) goto zonelist_scan; - - - printk("I am %s: %d\n", __func__, __LINE__); - return NULL; } @@ -2945,23 +2920,18 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return NULL; - - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); /* * Check the zones suitable for the gfp_mask contain at least one * valid zone. It's possible to have an empty zonelist as a result * of __GFP_THISNODE and a memoryless node */ if (unlikely(!zonelist->_zonerefs->zone)) { - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); return NULL; } if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); - retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); @@ -2972,19 +2942,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, ac.nodemask ? 
: &cpuset_current_mems_allowed, &ac.preferred_zone); - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); if (!ac.preferred_zone) goto out; - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); /* First allocation attempt */ alloc_mask = gfp_mask|__GFP_HARDWALL; page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); - printk(KERN_INFO "I am %s line:%d\n", __func__, __LINE__); - if (unlikely(!page)) { /* * Runtime PM, block IO and its error handling path @@ -3011,11 +2977,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; +#if 1 printk(KERN_INFO "Done: I am %s %lu\n", __func__, page_to_pfn(page)); - printk(KERN_INFO "Done: I am %s %lx\n", __func__, page_to_pfn(page)); page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); - - print_buddy_freelist(); +#endif return page; } @@ -3028,7 +2993,6 @@ EXPORT_SYMBOL(__alloc_pages_nodemask); unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; - printk("I am %s\n", __func__); /* * __get_free_pages() returns a 32-bit address, which cannot represent * a highmem page @@ -3038,7 +3002,6 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) page = alloc_pages(gfp_mask, order); if (!page) return 0; - printk("I am %s\n", __func__); return (unsigned long) page_address(page); } EXPORT_SYMBOL(__get_free_pages); @@ -3046,8 +3009,6 @@ EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { - - printk("I am %s\n", __func__); return __get_free_pages(gfp_mask | __GFP_ZERO, 0); } EXPORT_SYMBOL(get_zeroed_page); @@ -3272,8 +3233,6 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) unsigned int order = get_order(size); unsigned long addr; - printk("I am %s\n", __func__); - addr = __get_free_pages(gfp_mask, order); return make_alloc_exact(addr, order, size); } @@ -5228,8 +5187,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone->zone_pgdat = pgdat; zone_pcp_init(zone); - printk("I am %s size:%lu\n", __func__, size); - /* For bootup, initialized properly in watermark setup */ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); @@ -5249,8 +5206,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) { - printk("I am %s %lu\n", __func__, pgdat->node_spanned_pages); - /* Skip empty nodes */ if (!pgdat->node_spanned_pages) return; From abebc87ed2b17c00c264722e2c98c8aca6dbfba0 Mon Sep 17 00:00:00 2001 From: yjiao Date: Thu, 18 Jun 2015 01:04:17 -0400 Subject: [PATCH 08/29] add a tiny change --- arch/lib/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 4f6abbbe35a8..6f78302f2c86 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -96,7 +96,7 @@ comma= , kernel/_to_keep=notifier.o params.o sysctl.o \ rwsem.o semaphore.o kfifo.o cred.o user.o groups.o ksysfs.o kernel/time/_to_keep=time.o -kernel/rcu_to_keep=rcu/srcu.o rcu/pdate.o rcu/tiny.o +kernel/rcu_to_keep=rcu/srcu.o rcu/update.o rcu/tiny.o kernel/locking_to_keep=locking/mutex.o kernel/bpf_to_keep=bpf/core.o mm/_to_keep=util.o list_lru.o slib.o page_alloc.o memblock.o mmzone.o slib_env.o \ @@ -128,7 +128,8 @@ quiet_cmd_objsmk = OBJS-MK $@ done > $@ $(ARCH_DIR)/objs.mk: $(ARCH_DIR)/Makefile.print $(srctree)/.config $(ARCH_DIR)/Makefile 
- +$(call if_changed,objsmk) + +$(call if_changed,objsmk); \ + echo $(srctree) quiet_cmd_linker = GEN $@ cmd_linker = ld -shared --verbose | ./$^ > $@ From 05cd7d3083dca9b5a85bfdd93511ff9914ce94ad Mon Sep 17 00:00:00 2001 From: yjiao Date: Thu, 18 Jun 2015 01:56:06 -0400 Subject: [PATCH 09/29] try to make no impact on upstream kernel --- arch/lib/include/sim.h | 2 ++ arch/lib/lib.c | 2 ++ mm/page_alloc.c | 36 ++++++++++++++---------------------- 3 files changed, 18 insertions(+), 22 deletions(-) diff --git a/arch/lib/include/sim.h b/arch/lib/include/sim.h index b30d7e878325..0cc54dbbde2d 100644 --- a/arch/lib/include/sim.h +++ b/arch/lib/include/sim.h @@ -14,6 +14,8 @@ #include "sim-types.h" +extern void init_memory_system(void); + /* API called from within linux kernel. Forwards to SimImported. */ int lib_vprintf(const char *str, va_list args); void *lib_malloc(unsigned long size); diff --git a/arch/lib/lib.c b/arch/lib/lib.c index 52d638e48938..ed5349448b07 100644 --- a/arch/lib/lib.c +++ b/arch/lib/lib.c @@ -168,6 +168,8 @@ void lib_init(struct SimExported *exported, const struct SimImported *imported, pr_notice("%s", linux_banner); + init_memory_system(); + rcu_init(); /* in drivers/base/core.c (called normally by drivers/base/init.c) */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7b6406bd5846..ca455f1a490b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -112,7 +112,7 @@ EXPORT_SYMBOL(node_states); /* Protect totalram_pages and zone->managed_pages */ static DEFINE_SPINLOCK(managed_page_count_lock); -#if 0 +#ifndef CONFIG_LIB unsigned long totalram_pages __read_mostly; #endif @@ -207,7 +207,7 @@ int pageblock_order __read_mostly; static void __free_pages_ok(struct page *page, unsigned int order); -#if 0 +#ifndef CONFIG_LIB /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) @@ -2881,11 +2881,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, return page; } - - -static int mem_initialized; - +#ifdef CONFIG_LIB extern char *total_ram; +#endif /* * This is the 'heart' of the zoned buddy allocator. @@ -2905,12 +2903,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, .migratetype = gfpflags_to_migratetype(gfp_mask), }; - if (mem_initialized == 0) { - init_memory_system(); - print_buddy_freelist(); - mem_initialized = 1; - } - gfp_mask &= gfp_allowed_mask; lockdep_trace_alloc(gfp_mask); @@ -2977,7 +2969,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; -#if 1 +#ifdef CONFIG_LIB printk(KERN_INFO "Done: I am %s %lu\n", __func__, page_to_pfn(page)); page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); #endif @@ -3025,7 +3017,7 @@ void __free_pages(struct page *page, unsigned int order) EXPORT_SYMBOL(__free_pages); -#if 0 +#ifndef CONFIG_LIB void free_pages(unsigned long addr, unsigned int order) { printk("I am %s\n", __func__); @@ -3318,7 +3310,7 @@ static unsigned long nr_free_zone_pages(int offset) * nr_free_buffer_pages() counts the number of pages which are beyond the high * watermark within ZONE_DMA and ZONE_NORMAL. 
*/ -#if 0 +#ifndef CONFIG_LIB unsigned long nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); @@ -3351,7 +3343,7 @@ static inline void show_node(struct zone *zone) printk("Node %d ", zone_to_nid(zone)); } -#if 0 +#ifndef CONFIG_LIB void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages; @@ -5752,7 +5744,7 @@ void free_highmem_page(struct page *page) } #endif -#if 0 +#ifndef CONFIG_LIB void __init mem_init_print_info(const char *str) { @@ -6107,7 +6099,7 @@ int __meminit init_per_zone_wmark_min(void) new_min_free_kbytes, user_min_free_kbytes); } -#if 0 +#ifndef CONFIG_LIB setup_per_zone_wmarks(); refresh_zone_stat_thresholds(); setup_per_zone_lowmem_reserve(); @@ -6117,7 +6109,7 @@ int __meminit init_per_zone_wmark_min(void) } module_init(init_per_zone_wmark_min) -#if 0 +#ifndef CONFIG_LIB /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so @@ -6176,7 +6168,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, } #endif -#if 0 +#ifndef CONFIG_LIB /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() @@ -6242,7 +6234,7 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, #endif -#if 0 +#ifndef CONFIG_LIB int hashdist = HASHDIST_DEFAULT; #endif @@ -6258,7 +6250,7 @@ static int __init set_hashdist(char *str) __setup("hashdist=", set_hashdist); #endif -#if 0 +#ifndef CONFIG_LIB /* * allocate a large system hash table from bootmem From fbe7dd382c9ec7e656e79ecf02a9d3688d570b45 Mon Sep 17 00:00:00 2001 From: yjiao Date: Thu, 18 Jun 2015 13:33:34 -0400 Subject: [PATCH 10/29] solve OPT=no compiler crash --- arch/lib/include/asm/barrier.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h index d2f81e6b8c1c..e9c4f70573cb 100644 --- a/arch/lib/include/asm/barrier.h +++ b/arch/lib/include/asm/barrier.h @@ -63,6 +63,7 @@ #define smp_wmb() dmb(ishst) #endif +#ifndef CONFIG_LIB #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ @@ -78,6 +79,23 @@ do { \ ___p1; \ }) +#else +#define smp_store_release(p, v) \ +do { \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#endif + + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) From 883afb41177902e6a2314b2afe3355fe79f5a5bd Mon Sep 17 00:00:00 2001 From: yjiao Date: Thu, 18 Jun 2015 16:40:04 -0400 Subject: [PATCH 11/29] solve undefined symbol for init_mm --- arch/lib/glue.c | 2 ++ mm/highmem.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/lib/glue.c b/arch/lib/glue.c index 756681000644..5b5658024a95 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -33,6 +33,8 @@ struct kernel_param; struct super_block; struct tvec_base {}; +struct mm_struct init_mm; + /* defined in sched.c, used in net/sched/em_meta.c */ unsigned long avenrun[3]; /* defined in mm/page_alloc.c, used in net/xfrm/xfrm_hash.c */ diff --git a/mm/highmem.c b/mm/highmem.c index ce66f2baf2bd..61922d6309ed 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -164,7 +164,7 @@ struct page *kmap_to_page(void *vaddr) } EXPORT_SYMBOL(kmap_to_page); -#if 1 +#ifdef CONFIG_LIB #define cache_is_vivt() 0 #endif From 6064bf2e15b0d9c845c7820ada396ae219e140d1 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 
2015 14:25:19 -0400 Subject: [PATCH 12/29] test --- test | 1 + 1 file changed, 1 insertion(+) create mode 100644 test diff --git a/test b/test new file mode 100644 index 000000000000..8ce43a3d893f --- /dev/null +++ b/test @@ -0,0 +1 @@ +test git From 305f67bcc2ef8ac361f466d59935e9dca3c0bae3 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 14:29:41 -0400 Subject: [PATCH 13/29] add build.sh --- build.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100755 build.sh diff --git a/build.sh b/build.sh new file mode 100755 index 000000000000..c783573fb1ad --- /dev/null +++ b/build.sh @@ -0,0 +1,2 @@ +make defconfig ARCH=lib +make library ARCH=lib From 7a2bd8dfb88a3391ae13efeaae1e6c8e761c08e8 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 14:32:40 -0400 Subject: [PATCH 14/29] remove test --- test | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test diff --git a/test b/test deleted file mode 100644 index 8ce43a3d893f..000000000000 --- a/test +++ /dev/null @@ -1 +0,0 @@ -test git From 56bc5550f8224243f747f8b014a87acb26426464 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 14:51:36 -0400 Subject: [PATCH 15/29] delete build.sh --- build.sh | 2 -- 1 file changed, 2 deletions(-) delete mode 100755 build.sh diff --git a/build.sh b/build.sh deleted file mode 100755 index c783573fb1ad..000000000000 --- a/build.sh +++ /dev/null @@ -1,2 +0,0 @@ -make defconfig ARCH=lib -make library ARCH=lib From 8f1788e50adedd372c8cd158f63803efe6f80b93 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 15:54:19 -0400 Subject: [PATCH 16/29] add #endif --- mm/page_alloc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 28d42a3de52c..0ece8b356840 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6515,6 +6515,7 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, mutex_unlock(&pcp_batch_high_lock); return ret; } +#endif #ifdef CONFIG_NUMA int hashdist = HASHDIST_DEFAULT; From f583c91516ad63cb6004366142af460764727b7e Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 22:21:25 -0400 Subject: [PATCH 17/29] solve some unreferenced symbol problems --- arch/lib/Makefile | 2 +- arch/lib/glue.c | 27 +++++++++++++++++++++++++++ arch/lib/sysctl.c | 3 --- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 0243cd629615..6f34ae290bfc 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -100,7 +100,7 @@ kernel/rcu_to_keep=rcu/srcu.o rcu/update.o rcu/tiny.o kernel/locking_to_keep=locking/mutex.o kernel/bpf_to_keep=bpf/core.o mm/_to_keep=util.o list_lru.o slib.o page_alloc.o memblock.o mmzone.o slib_env.o \ -nobootmem.o highmem.o +nobootmem.o highmem.o oom_kill.o crypto/_to_keep=aead.o ahash.o shash.o api.o algapi.o cipher.o compress.o proc.o \ crc32c_generic.o drivers/base/_to_keep=class.o core.o bus.o dd.o driver.o devres.o module.o map.o diff --git a/arch/lib/glue.c b/arch/lib/glue.c index a3e9980029ae..80b7d7ce3fd2 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -62,6 +62,25 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); +/* memory.c */ +unsigned long highest_memmap_pfn __read_mostly; +unsigned long max_mapnr; + +/* + * Randomize the address space (stacks, mmaps, brk, etc.).
+ * + * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, + * as ancient (libc5 based) binaries can segfault. ) + */ +int randomize_va_space __read_mostly = +#ifdef CONFIG_COMPAT_BRK + 1; +#else + 2; +#endif + +/* vmscan.c */ +unsigned long vm_total_pages; /* arm/mmu.c */ pgprot_t pgprot_kernel; @@ -299,4 +318,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, return 0; } +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +extern int memblock_is_memory(phys_addr_t addr); + +int pfn_valid(unsigned long pfn) +{ + return memblock_is_memory(__pfn_to_phys(pfn)); +} +#endif diff --git a/arch/lib/sysctl.c b/arch/lib/sysctl.c index ecf0dcedf563..8d1aa5b5d351 100644 --- a/arch/lib/sysctl.c +++ b/arch/lib/sysctl.c @@ -101,9 +101,6 @@ int sched_rt_handler(struct ctl_table *table, int write, int sysctl_overcommit_memory = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio = 50; -int sysctl_panic_on_oom = 0; -int sysctl_oom_dump_tasks = 0; -int sysctl_oom_kill_allocating_task = 0; int sysctl_nr_trim_pages = 0; int sysctl_drop_caches = 0; int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES - 1] = { 32 }; From ebcdbffc17f2028bda01ed0a6cf0a01edbd54105 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Tue, 1 Sep 2015 23:35:51 -0400 Subject: [PATCH 18/29] change barrier.h --- arch/lib/include/asm/barrier.h | 20 +------------------- mm/slib_env.h | 7 +++++-- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h index e9c4f70573cb..6c2327e1c732 100644 --- a/arch/lib/include/asm/barrier.h +++ b/arch/lib/include/asm/barrier.h @@ -63,7 +63,6 @@ #define smp_wmb() dmb(ishst) #endif -#ifndef CONFIG_LIB #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ @@ -79,27 +78,10 @@ do { \ ___p1; \ }) -#else -#define smp_store_release(p, v) \ -do { \ - smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ - smp_mb(); \ - ___p1; \ -}) - -#endif - - #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) #define smp_mb__before_atomic() smp_mb() #define smp_mb__after_atomic() smp_mb() diff --git a/mm/slib_env.h b/mm/slib_env.h index d58840422688..f227d00414d5 100644 --- a/mm/slib_env.h +++ b/mm/slib_env.h @@ -1,3 +1,7 @@ +#ifndef SLIB_ENV_H +#define SLIB_ENV_H + + #include #include @@ -49,7 +53,6 @@ extern struct meminfo meminfo; //pgprot_t pgprot_s2; //pgprot_t pgprot_s2_device; - void __init init_memory_system(void); - +#endif From 93273fdbd6a656b01b7276063b77e17d3e9b712c Mon Sep 17 00:00:00 2001 From: jyizheng Date: Wed, 2 Sep 2015 12:24:01 -0400 Subject: [PATCH 19/29] disable some compile time check for atomic variables --- arch/lib/include/asm/barrier.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h index 6c2327e1c732..dab38c62c9a3 100644 --- a/arch/lib/include/asm/barrier.h +++ b/arch/lib/include/asm/barrier.h @@ -63,6 +63,7 @@ #define smp_wmb() dmb(ishst) #endif +#ifndef CONFIG_LIB #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ @@ -77,6 +78,20 @@ do { \ smp_mb(); \ ___p1; \ }) +#else +#define smp_store_release(p, v) \ +do { \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = 
ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) +#endif /* CONFIG_LIB */ #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) From 71adfe18066b4696916a270cf960272a2c9b0ebf Mon Sep 17 00:00:00 2001 From: jyizheng Date: Thu, 3 Sep 2015 01:24:32 -0400 Subject: [PATCH 20/29] Do cleanup according to Hajime's suggestions. Check here for details: https://github.com/libos-nuse/net-next-nuse/pull/44#discussion_r32902447 --- arch/lib/Kconfig | 3 --- arch/lib/defconfig | 1 - arch/lib/glue.c | 2 -- 3 files changed, 6 deletions(-) diff --git a/arch/lib/Kconfig b/arch/lib/Kconfig index cb5c768c2ff2..34a7f59168ad 100644 --- a/arch/lib/Kconfig +++ b/arch/lib/Kconfig @@ -120,9 +120,6 @@ source "lib/Kconfig" config SLIB def_bool y -config INIT_DEBUG_ALWAYS - def_bool y - config HAVE_MEMBLOCK def_bool y diff --git a/arch/lib/defconfig b/arch/lib/defconfig index 734b836f82b1..7fab6d9c3930 100644 --- a/arch/lib/defconfig +++ b/arch/lib/defconfig @@ -653,4 +653,3 @@ CONFIG_NLATTR=y # CONFIG_CORDIC is not set # CONFIG_DDR is not set # CONFIG_ARCH_HAS_SG_CHAIN is not set - diff --git a/arch/lib/glue.c b/arch/lib/glue.c index 80b7d7ce3fd2..d98eabbd92c9 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -36,8 +36,6 @@ struct mm_struct init_mm; /* defined in sched.c, used in net/sched/em_meta.c */ unsigned long avenrun[3]; -/* defined in mm/page_alloc.c */ -//struct pglist_data __refdata contig_page_data; /* defined in linux/mmzone.h mm/memory.c */ struct page *mem_map = 0; /* used by sysinfo in kernel/timer.c */ From ff865bc328db46d9a4c1cbba16e6378d6e36eb99 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Thu, 3 Sep 2015 01:31:58 -0400 Subject: [PATCH 21/29] get rid of useless #if 0 code --- arch/lib/sysctl.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/lib/sysctl.c b/arch/lib/sysctl.c index 8d1aa5b5d351..595fe0005651 100644 --- a/arch/lib/sysctl.c +++ b/arch/lib/sysctl.c @@ -120,10 +120,6 @@ unsigned long dirty_background_bytes = 0; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long dac_mmap_min_addr = 4096; -#if 0 -int percpu_pagelist_fraction = 0; -#endif - int panic_timeout = 0; int panic_on_oops = 0; int printk_delay_msec = 0; @@ -135,10 +131,6 @@ int pid_max = PID_MAX_DEFAULT; int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; -#if 0 -int min_free_kbytes = 1024; -#endif - int max_threads = 100; int laptop_mode = 0; From 58602f8a03d98a27c6337405766f495e160d4f36 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Thu, 3 Sep 2015 02:27:12 -0400 Subject: [PATCH 22/29] Resolve Hajime's comments. Check here: https://github.com/libos-nuse/net-next-nuse/pull/47/files#r38614406 --- arch/lib/glue.c | 3 ++ include/linux/bootmem.h | 2 -- mm/bootmem.c | 6 ++-- mm/highmem.c | 4 --- mm/memblock.c | 2 +- mm/page_alloc.c | 47 +++-------------------------- mm/slib.c | 67 ----------------------------------------- mm/slib_env.c | 23 ++++++-------- 8 files changed, 20 insertions(+), 134 deletions(-) diff --git a/arch/lib/glue.c b/arch/lib/glue.c index d98eabbd92c9..e1bfe2f22b38 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -64,6 +64,9 @@ const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); unsigned long highest_memmap_pfn __read_mostly; unsigned long max_mapnr; +/* highmem.c */ +#define cache_is_vivt() 0 + /* * Randomize the address space (stacks, mmaps, brk, etc.).
* diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index e8f98f7ac18c..d2f2d4f90f0a 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -56,8 +56,6 @@ extern void free_bootmem(unsigned long physaddr, unsigned long size); extern void free_bootmem_late(unsigned long physaddr, unsigned long size); - - /* * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, * the architecture-specific code should honor this). diff --git a/mm/bootmem.c b/mm/bootmem.c index 09c3d8b1bdcc..04b5e284a3fb 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -48,7 +48,7 @@ static int __init bootmem_debug_setup(char *buf) early_param("bootmem_debug", bootmem_debug_setup); #define bdebug(fmt, args...) ({ \ - if (unlikely(!bootmem_debug)) \ + if (unlikely(bootmem_debug)) \ printk(KERN_INFO \ "bootmem::%s " fmt, \ __func__, ## args); \ @@ -75,7 +75,7 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages) /* * link bdata in order */ -void __init link_bootmem(bootmem_data_t *bdata) +static void __init link_bootmem(bootmem_data_t *bdata) { bootmem_data_t *ent; @@ -174,8 +174,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) struct page *page; unsigned long *map, start, end, pages, cur, count = 0; - printk("In %s node_bootmem_map %p\n", __func__, bdata->node_bootmem_map); - if (!bdata->node_bootmem_map) return 0; diff --git a/mm/highmem.c b/mm/highmem.c index 61922d6309ed..123bcd3ed4f2 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -164,10 +164,6 @@ struct page *kmap_to_page(void *vaddr) } EXPORT_SYMBOL(kmap_to_page); -#ifdef CONFIG_LIB -#define cache_is_vivt() 0 -#endif - static void flush_all_zero_pkmaps(void) { int i; diff --git a/mm/memblock.c b/mm/memblock.c index a67aeccd4640..87108e77e476 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -50,7 +50,7 @@ struct memblock memblock __initdata_memblock = { .current_limit = MEMBLOCK_ALLOC_ANYWHERE, }; -int memblock_debug __initdata_memblock = 1; +int memblock_debug __initdata_memblock; #ifdef CONFIG_MOVABLE_NODE bool movable_node_enabled __initdata_memblock = false; #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 470b428d96e7..c2802fe7b857 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -161,7 +161,6 @@ unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; - gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; #ifdef CONFIG_PM_SLEEP @@ -1318,8 +1317,6 @@ static inline void expand(struct zone *zone, struct page *page, set_page_guard(zone, &page[size], high, migratetype); continue; } - - list_add(&page[size].lru, &area->free_list[migratetype]); area->nr_free++; set_page_order(&page[size], high); @@ -1754,7 +1751,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, else list_add_tail(&page->lru, list); list = &page->lru; - if (is_migrate_cma(get_freepage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); @@ -1872,7 +1868,6 @@ void drain_all_pages(struct zone *zone) * cpu to drain that CPU pcps and on_each_cpu_mask * disables preemption as part of its processing */ - for_each_online_cpu(cpu) { struct per_cpu_pageset *pcp; struct zone *z; @@ -2139,7 +2134,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, list_del(&page->lru); pcp->count--; } else { - if (unlikely(gfp_flags & __GFP_NOFAIL)) { /* * __GFP_NOFAIL is not to be used in new code. 
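[Editor's note] The buffered_rmqueue() context above shows the per-CPU page cache: order-0 allocations are served from pcp->lists[migratetype], which rmqueue_bulk() refills in pcp->batch chunks so the common case avoids the zone lock. A minimal sketch of that refill-then-pop pattern, with hypothetical names and a fixed capacity rather than the kernel's structures:

#include <stddef.h>

#define PCP_CAPACITY 32

struct pcp_list {
	int count;
	int batch;		/* refill chunk size, like pcp->batch; <= PCP_CAPACITY */
	void *pages[PCP_CAPACITY];
};

/* Pop from the cache; refill one batch from the slow path when empty. */
static void *pcp_alloc(struct pcp_list *pcp, void *(*slow_alloc)(void))
{
	if (pcp->count == 0) {
		while (pcp->count < pcp->batch) {
			void *page = slow_alloc();

			if (!page)
				break;
			pcp->pages[pcp->count++] = page;
		}
	}
	return pcp->count ? pcp->pages[--pcp->count] : NULL;
}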
@@ -2644,7 +2638,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, this_zone_full: if (IS_ENABLED(CONFIG_NUMA) && zlc_active) zlc_mark_zone_full(zonelist, z); - } + } /* * The first pass makes sure allocations are spread fairly within the @@ -2672,6 +2666,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, if (zonelist_rescan) goto zonelist_scan; + return NULL; } @@ -3243,9 +3238,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, * valid zone. It's possible to have an empty zonelist as a result * of __GFP_THISNODE and a memoryless node */ - if (unlikely(!zonelist->_zonerefs->zone)) { + if (unlikely(!zonelist->_zonerefs->zone)) return NULL; - } if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; @@ -3259,16 +3253,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, ac.nodemask ? : &cpuset_current_mems_allowed, &ac.preferred_zone); - if (!ac.preferred_zone) goto out; - ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); /* First allocation attempt */ alloc_mask = gfp_mask|__GFP_HARDWALL; page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); - if (unlikely(!page)) { /* * Runtime PM, block IO and its error handling path @@ -3299,18 +3290,17 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, printk(KERN_INFO "Done: I am %s %lu\n", __func__, page_to_pfn(page)); page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); #endif - return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); - /* * Common helper functions. */ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; + /* * __get_free_pages() returns a 32-bit address, which cannot represent * a highmem page @@ -3346,14 +3336,11 @@ EXPORT_SYMBOL(__free_pages); #ifndef CONFIG_LIB void free_pages(unsigned long addr, unsigned int order) { - printk("I am %s\n", __func__); - if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); __free_pages(virt_to_page((void *)addr), order); } } - EXPORT_SYMBOL(free_pages); #else void free_pages(unsigned long addr, unsigned int order) @@ -3361,15 +3348,13 @@ void free_pages(unsigned long addr, unsigned int order) unsigned long pfn = addr - (unsigned long) total_ram; pfn = pfn >> PAGE_SHIFT; - if (addr != 0) { + if (pfn != 0) { __free_pages(pfn_to_page(pfn), order); } } - EXPORT_SYMBOL(free_pages); #endif - /* * Page Fragment: * An arbitrary-length arbitrary-offset area of memory which resides @@ -3532,7 +3517,6 @@ static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) return (void *)addr; } - /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. * @size: the number of bytes to allocate @@ -3556,7 +3540,6 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) } EXPORT_SYMBOL(alloc_pages_exact); - /** * alloc_pages_exact_nid - allocate an exact number of physically-contiguous * pages on a node. 
@@ -3597,7 +3580,6 @@ void free_pages_exact(void *virt, size_t size) } EXPORT_SYMBOL(free_pages_exact); - /** * nr_free_zone_pages - count number of pages beyond high watermark * @offset: The zone index of the highest zone @@ -3627,9 +3609,6 @@ static unsigned long nr_free_zone_pages(int offset) return sum; } - - - /** * nr_free_buffer_pages - count number of pages beyond high watermark * @@ -3641,15 +3620,12 @@ unsigned long nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } - #else unsigned long nr_free_buffer_pages(void) { return 65535; } - #endif - EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /** @@ -3680,9 +3656,7 @@ void si_meminfo(struct sysinfo *val) val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } - EXPORT_SYMBOL(si_meminfo); - #endif #ifdef CONFIG_NUMA @@ -6041,7 +6015,6 @@ void free_highmem_page(struct page *page) #endif #ifndef CONFIG_LIB - void __init mem_init_print_info(const char *str) { unsigned long physpages, codesize, datasize, rosize, bss_size; @@ -6094,10 +6067,8 @@ void __init mem_init_print_info(const char *str) #endif str ? ", " : "", str ? str : ""); } - #endif - /** * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved @@ -6293,7 +6264,6 @@ static void __setup_per_zone_wmarks(void) calculate_totalreserve_pages(); } - /** * setup_per_zone_wmarks - called when min_free_kbytes changes * or when memory is hot-{added|removed} @@ -6308,7 +6278,6 @@ void setup_per_zone_wmarks(void) mutex_unlock(&zonelists_mutex); } - /* * The inactive anon list should be small enough that the VM never has to * do too much work, but large enough that each inactive page has a chance @@ -6406,7 +6375,6 @@ int __meminit init_per_zone_wmark_min(void) module_init(init_per_zone_wmark_min) #ifndef CONFIG_LIB - /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes @@ -6427,7 +6395,6 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, } return 0; } - #endif #ifdef CONFIG_NUMA @@ -6482,8 +6449,6 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, return 0; } - - /* * percpu_pagelist_fraction - changes the pcp->high for each zone on each * cpu. It is the fraction of total pages in each zone that a hot per cpu @@ -6640,10 +6605,8 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } - #endif - /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, unsigned long pfn) diff --git a/mm/slib.c b/mm/slib.c index f72e0a1025d6..4c597f3bce02 100644 --- a/mm/slib.c +++ b/mm/slib.c @@ -113,70 +113,12 @@ void kmem_cache_free(struct kmem_cache *cache, void *p) kfree(p); } -#if 0 - -struct page * -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, - struct zonelist *zonelist, nodemask_t *nodemask) -{ - void *p; - struct page *page; - unsigned long pointer; - - /* typically, called from networking code by alloc_page or */ - /* directly with an order = 0. 
*/ - if (order) - return NULL; - p = lib_malloc(sizeof(struct page) + (1 << PAGE_SHIFT)); - page = (struct page *)p; - - atomic_set(&page->_count, 1); - page->flags = 0; - pointer = (unsigned long)page; - pointer += sizeof(struct page); - page->virtual = (void *)pointer; - return page; -} -void __free_pages(struct page *page, unsigned int order) -{ - /* typically, called from networking code by __free_page */ - lib_assert(order == 0); - lib_free(page); -} - -void free_pages(unsigned long addr, unsigned int order) -{ - if (addr != 0) - kfree((void *)addr); -} - -void *alloc_pages_exact(size_t size, gfp_t gfp_mask) -{ - return alloc_pages(gfp_mask, get_order(size)); -} - -unsigned long get_zeroed_page(gfp_t gfp_mask) -{ - return __get_free_pages(gfp_mask | __GFP_ZERO, 0); -} - -unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) -{ - int size = (1 << order) * PAGE_SIZE; - void *p = kmalloc(size, gfp_mask); - - return (unsigned long)p; -} - -#endif - void put_page(struct page *page) { if (atomic_dec_and_test(&page->_count)) lib_free(page); } - void *vmalloc(unsigned long size) { return lib_malloc(size); @@ -208,12 +150,3 @@ void free_percpu(void __percpu *ptr) { kfree(ptr); } -/* -void *__alloc_bootmem_nopanic(unsigned long size, - unsigned long align, - unsigned long goal) -{ - return kzalloc(size, GFP_KERNEL); -} -*/ - diff --git a/mm/slib_env.c b/mm/slib_env.c index 5fc33df36c4b..03dbde825937 100644 --- a/mm/slib_env.c +++ b/mm/slib_env.c @@ -1,3 +1,12 @@ +/* + * Library Slab Allocator (SLIB) + * + * Copyright (c) 2015 INRIA, Hajime Tazaki + * + * Author: Mathieu Lacage + * Hajime Tazaki + */ + #include #include #include @@ -12,7 +21,6 @@ #include "sim.h" #include "sim-assert.h" - struct meminfo meminfo; static void * __initdata vmalloc_min = (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); @@ -48,7 +56,6 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) memblock_free_early(pg, pgend - pg); } - /* * The mem_map array can get very big. Free the unused area of the memory map. */ @@ -154,7 +161,6 @@ static void __init free_highpages(void) #endif } - /* * mem_init() marks the free areas in the mem_map and tells us how much * memory is free. 
This is done after various parts of the system have @@ -219,13 +225,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, free_area_init_node(0, zone_size, min, zhole_size); } - void __init arm_memblock_init(void) { memblock_dump_all(); } - int __init arm_add_memory(u64 start, u64 size) { u64 aligned_start; @@ -270,7 +274,6 @@ int __init arm_add_memory(u64 start, u64 size) return 0; } - static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) { @@ -279,8 +282,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low, *max_high = PFN_DOWN(memblock_end_of_DRAM()); } - - static void __init arm_bootmem_init(unsigned long start_pfn, unsigned long end_pfn) { @@ -333,7 +334,6 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } } - void __init bootmem_init(void) { unsigned long min, max_low, max_high; @@ -357,13 +357,11 @@ void __init bootmem_init(void) max_pfn = max_high; } - void __init paging_init(void) { bootmem_init(); } - void __init sanity_check_meminfo(void) { phys_addr_t memblock_limit = 0; @@ -427,7 +425,6 @@ void __init sanity_check_meminfo(void) else if (!IS_ALIGNED(block_end, SECTION_SIZE)) memblock_limit = arm_lowmem_limit; } - } } @@ -464,7 +461,6 @@ void __init setup_arch(char **cmd) paging_init(); } - void *kmap_atomic(struct page *page) { return (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); @@ -500,4 +496,3 @@ void test(void) // pgdat->node_zonelists, // pgdat->nr_zones); } - From 53d0f17d214d2b5d4f47ed3ff382ed77aa823bb6 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Thu, 3 Sep 2015 02:47:51 -0400 Subject: [PATCH 23/29] add cache_is_vivt back to highmem.c again to resolve some warnings --- arch/lib/glue.c | 4 ---- mm/highmem.c | 7 ++++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/arch/lib/glue.c b/arch/lib/glue.c index e1bfe2f22b38..80bd04a18f45 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -24,7 +24,6 @@ #include "sim.h" #include "lib.h" - struct pipe_buffer; struct file; struct pipe_inode_info; @@ -64,9 +63,6 @@ const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); unsigned long highest_memmap_pfn __read_mostly; unsigned long max_mapnr; -/* highmem.c */ -#define cache_is_vivt() 0 - /* * Randomize the address space (stacks, mmaps, brk, etc.). * diff --git a/mm/highmem.c b/mm/highmem.c index 123bcd3ed4f2..4693485ad9c2 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -29,11 +29,14 @@ #include #include - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif +#ifdef CONFIG_LIB +#define cache_is_vivt() 0 +#endif + /* * Virtual_count is not a pure "count". 
* 0 means that it is not mapped, and has not been mapped @@ -107,7 +110,6 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); - EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); unsigned int nr_free_highpages (void) @@ -293,7 +295,6 @@ void *kmap_high(struct page *page) unlock_kmap(); return (void*) vaddr; } - EXPORT_SYMBOL(kmap_high); #ifdef ARCH_NEEDS_KMAP_HIGH_GET From 396eca8381e719c75d64ba64931a032c5f3dbb97 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Sun, 6 Sep 2015 20:48:51 -0400 Subject: [PATCH 24/29] resolve pull request comments --- arch/lib/Makefile | 1 - arch/lib/fs.c | 8 -------- arch/lib/include/asm/processor.h | 4 +--- include/linux/bootmem.h | 1 - mm/bootmem.c | 4 +--- mm/highmem.c | 1 + mm/slib_env.c | 3 +-- mm/slib_env.h | 13 ++++++++----- 8 files changed, 12 insertions(+), 23 deletions(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index feb37fe84f3b..0365948f04cc 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -129,7 +129,6 @@ quiet_cmd_objsmk = OBJS-MK $@ $(ARCH_DIR)/objs.mk: $(ARCH_DIR)/Makefile.print $(srctree)/.config $(ARCH_DIR)/Makefile +$(call if_changed,objsmk); \ - echo $(srctree) quiet_cmd_linker = GEN $@ cmd_linker = ld -shared --verbose | ./$^ > $@ diff --git a/arch/lib/fs.c b/arch/lib/fs.c index 75bb1c9a5f78..7757a1687c86 100644 --- a/arch/lib/fs.c +++ b/arch/lib/fs.c @@ -63,11 +63,3 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, { return -ENOSYS; } - -#if 0 -unsigned int nr_free_buffer_pages(void) -{ - return 65535; -} -#endif - diff --git a/arch/lib/include/asm/processor.h b/arch/lib/include/asm/processor.h index cb14ac560b56..15293c89e86b 100644 --- a/arch/lib/include/asm/processor.h +++ b/arch/lib/include/asm/processor.h @@ -7,9 +7,7 @@ struct thread_struct {}; #define cpu_relax_lowlatency() cpu_relax() #define KSTK_ESP(tsk) (0) -# define current_text_addr() ({ __label__ _l; _l: &&_l; }) - -//#define TASK_SIZE ((~(long)0)) +#define current_text_addr() ({ __label__ _l; _l: &&_l; }) #define thread_saved_pc(x) (unsigned long)0 #define task_pt_regs(t) NULL diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index d2f2d4f90f0a..f589222bfa87 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -55,7 +55,6 @@ extern void free_bootmem_node(pg_data_t *pgdat, extern void free_bootmem(unsigned long physaddr, unsigned long size); extern void free_bootmem_late(unsigned long physaddr, unsigned long size); - /* * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, * the architecture-specific code should honor this). diff --git a/mm/bootmem.c b/mm/bootmem.c index 04b5e284a3fb..ae9d08f1f0e3 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -48,7 +48,7 @@ static int __init bootmem_debug_setup(char *buf) early_param("bootmem_debug", bootmem_debug_setup); #define bdebug(fmt, args...) 
({ \ - if (unlikely(bootmem_debug)) \ + if (unlikely(bootmem_debug)) \ printk(KERN_INFO \ "bootmem::%s " fmt, \ __func__, ## args); \ @@ -277,12 +277,10 @@ unsigned long __init free_all_bootmem(void) reset_all_zones_managed_pages(); - list_for_each_entry(bdata, &bdata_list, list) total_pages += free_all_bootmem_core(bdata); totalram_pages += total_pages; - printk("I am %s\n", __func__); return total_pages; } diff --git a/mm/highmem.c b/mm/highmem.c index 4693485ad9c2..b606040d5db9 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -110,6 +110,7 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); unsigned int nr_free_highpages (void) diff --git a/mm/slib_env.c b/mm/slib_env.c index 03dbde825937..8a97fd3ece61 100644 --- a/mm/slib_env.c +++ b/mm/slib_env.c @@ -3,8 +3,7 @@ * * Copyright (c) 2015 INRIA, Hajime Tazaki * - * Author: Mathieu Lacage - * Hajime Tazaki + * Author: Yizheng Jiao */ #include diff --git a/mm/slib_env.h b/mm/slib_env.h index f227d00414d5..e67103ec7326 100644 --- a/mm/slib_env.h +++ b/mm/slib_env.h @@ -1,17 +1,22 @@ +/* + * Library Slab Allocator (SLIB) + * + * Copyright (c) 2015 INRIA, Hajime Tazaki + * + * Author: Yizheng Jiao + */ + #ifndef SLIB_ENV_H #define SLIB_ENV_H - #include #include - /* From arm/include/asm/memory.h */ #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Memory map description: from arm/include/asm/setup.h */ @@ -44,8 +49,6 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size - - /* From arm/mm/mmu.c */ //pgprot_t pgprot_user; //pgprot_t pgprot_kernel; From 357371565cc6e39368771039a5837e47289be169 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Sun, 6 Sep 2015 23:49:10 -0400 Subject: [PATCH 25/29] 1) Define MAX_NR_ZONES as __MAX_NR_ZONES in arch/lib/Makefile 2) Use symbolic links in arch/lib/include 3) Resolve ERRORs reported by checkpatch.pl --- arch/lib/Kconfig | 2 +- arch/lib/Makefile | 2 +- arch/lib/glue.c | 3 - arch/lib/include/asm/barrier.h | 106 +-- arch/lib/include/asm/cache.h | 29 +- arch/lib/include/asm/cachetype.h | 60 +- arch/lib/include/asm/elf.h | 11 +- arch/lib/include/asm/glue-proc.h | 265 +------- arch/lib/include/asm/hardirq.h | 9 +- arch/lib/include/asm/highmem.h | 76 +-- arch/lib/include/asm/kmap_types.h | 10 +- arch/lib/include/asm/memory.h | 360 +--------- arch/lib/include/asm/mmu.h | 41 +- arch/lib/include/asm/outercache.h | 150 +---- arch/lib/include/asm/page-arm.h | 1 + arch/lib/include/asm/page.h | 166 +---- arch/lib/include/asm/pgtable-2level-hwdef.h | 96 +-- arch/lib/include/asm/pgtable-2level-types.h | 68 +- arch/lib/include/asm/pgtable-2level.h | 199 +----- arch/lib/include/asm/pgtable-hwdef.h | 20 +- arch/lib/include/asm/pgtable.h | 364 +---------- arch/lib/include/asm/proc-fns.h | 161 +---- arch/lib/include/asm/tlbflush.h | 687 +------------------- arch/lib/lib.h | 4 + mm/bootmem.c | 1 - mm/page_alloc.c | 34 +- mm/slib_env.c | 49 +- mm/slib_env.h | 27 +- 28 files changed, 69 insertions(+), 2932 deletions(-) mode change 100644 => 120000 arch/lib/include/asm/barrier.h mode change 100644 => 120000 arch/lib/include/asm/cache.h mode change 100644 => 120000 arch/lib/include/asm/cachetype.h mode change 100644 => 120000 arch/lib/include/asm/elf.h mode change 100644 => 120000 arch/lib/include/asm/glue-proc.h mode change 100644 =>
120000 arch/lib/include/asm/hardirq.h mode change 100644 => 120000 arch/lib/include/asm/highmem.h mode change 100644 => 120000 arch/lib/include/asm/kmap_types.h mode change 100644 => 120000 arch/lib/include/asm/memory.h mode change 100644 => 120000 arch/lib/include/asm/mmu.h mode change 100644 => 120000 arch/lib/include/asm/outercache.h create mode 120000 arch/lib/include/asm/page-arm.h mode change 100644 => 120000 arch/lib/include/asm/pgtable-2level-hwdef.h mode change 100644 => 120000 arch/lib/include/asm/pgtable-2level-types.h mode change 100644 => 120000 arch/lib/include/asm/pgtable-2level.h mode change 100644 => 120000 arch/lib/include/asm/pgtable-hwdef.h mode change 100644 => 120000 arch/lib/include/asm/pgtable.h mode change 100644 => 120000 arch/lib/include/asm/proc-fns.h mode change 100644 => 120000 arch/lib/include/asm/tlbflush.h diff --git a/arch/lib/Kconfig b/arch/lib/Kconfig index 34a7f59168ad..351dd99a65be 100644 --- a/arch/lib/Kconfig +++ b/arch/lib/Kconfig @@ -154,7 +154,7 @@ config ARM_L1_CACHE_SHIFT default 6 config PGTABLE_LEVELS - int + int default 2 config HAVE_ARCH_PFN_VALID diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 0365948f04cc..b9a74506ce71 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -156,7 +156,7 @@ define cmd_lib_bounds echo "#define GENERATED_BOUNDS_H"; \ echo ""; \ echo "#define NR_PAGEFLAGS (__NR_PAGEFLAGS)"; \ - echo "#define MAX_NR_ZONES 3"; \ + echo "#define MAX_NR_ZONES __MAX_NR_ZONES"; \ echo ""; \ echo "#endif /* GENERATED_BOUNDS_H */") > $@ endef diff --git a/arch/lib/glue.c b/arch/lib/glue.c index 80bd04a18f45..11ce23998d62 100644 --- a/arch/lib/glue.c +++ b/arch/lib/glue.c @@ -316,11 +316,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, } #ifdef CONFIG_HAVE_ARCH_PFN_VALID -extern int memblock_is_memory(phys_addr_t addr); - int pfn_valid(unsigned long pfn) { return memblock_is_memory(__pfn_to_phys(pfn)); } #endif - diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h deleted file mode 100644 index dab38c62c9a3..000000000000 --- a/arch/lib/include/asm/barrier.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef __ASM_BARRIER_H -#define __ASM_BARRIER_H - -#ifndef __ASSEMBLY__ -#include - -#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); - -#if __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define sev() __asm__ __volatile__ ("sev" : : : "memory") -#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") -#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") -#endif - -#if __LINUX_ARM_ARCH__ >= 7 -#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") -#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") -#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") -#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ - : : "r" (0) : "memory") -#elif defined(CONFIG_CPU_FA526) -#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb(x) __asm__ __volatile__ ("" : : : "memory") -#else -#define isb(x) __asm__ __volatile__ ("" : : : "memory") -#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, 
c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb(x) __asm__ __volatile__ ("" : : : "memory") -#endif - -#ifdef CONFIG_ARCH_HAS_BARRIERS -#include -#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dsb() -#define wmb() do { dsb(st); outer_sync(); } while (0) -#define dma_rmb() dmb(osh) -#define dma_wmb() dmb(oshst) -#else -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define dma_rmb() barrier() -#define dma_wmb() barrier() -#endif - -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else -#define smp_mb() dmb(ish) -#define smp_rmb() smp_mb() -#define smp_wmb() dmb(ishst) -#endif - -#ifndef CONFIG_LIB -#define smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - ___p1; \ -}) -#else -#define smp_store_release(p, v) \ -do { \ - smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ - smp_mb(); \ - ___p1; \ -}) -#endif /* CONFIG_LIB */ - -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() - -#endif /* !__ASSEMBLY__ */ -#endif /* __ASM_BARRIER_H */ diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h new file mode 120000 index 000000000000..472ea0ae7c94 --- /dev/null +++ b/arch/lib/include/asm/barrier.h @@ -0,0 +1 @@ +../../../arm/include/asm/barrier.h \ No newline at end of file diff --git a/arch/lib/include/asm/cache.h b/arch/lib/include/asm/cache.h deleted file mode 100644 index 75fe66bc02b4..000000000000 --- a/arch/lib/include/asm/cache.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * arch/arm/include/asm/cache.h - */ -#ifndef __ASMARM_CACHE_H -#define __ASMARM_CACHE_H - -#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - -/* - * Memory returned by kmalloc() may be used for DMA, so we must make - * sure that all such allocations are cache aligned. Otherwise, - * unrelated code may cause parts of the buffer to be read into the - * cache before the transfer is done, causing old data to be seen by - * the CPU. - */ -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES - -/* - * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. 
- */ -#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) -#define ARCH_SLAB_MINALIGN 8 -#endif - -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) - -#endif diff --git a/arch/lib/include/asm/cache.h b/arch/lib/include/asm/cache.h new file mode 120000 index 000000000000..b186a8c20991 --- /dev/null +++ b/arch/lib/include/asm/cache.h @@ -0,0 +1 @@ +../../../arm/include/asm/cache.h \ No newline at end of file diff --git a/arch/lib/include/asm/cachetype.h b/arch/lib/include/asm/cachetype.h deleted file mode 100644 index 7ea78144ae22..000000000000 --- a/arch/lib/include/asm/cachetype.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef __ASM_ARM_CACHETYPE_H -#define __ASM_ARM_CACHETYPE_H - -#define CACHEID_VIVT (1 << 0) -#define CACHEID_VIPT_NONALIASING (1 << 1) -#define CACHEID_VIPT_ALIASING (1 << 2) -#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING) -#define CACHEID_ASID_TAGGED (1 << 3) -#define CACHEID_VIPT_I_ALIASING (1 << 4) -#define CACHEID_PIPT (1 << 5) - -extern unsigned int cacheid; - -#define cache_is_vivt() cacheid_is(CACHEID_VIVT) -#define cache_is_vipt() cacheid_is(CACHEID_VIPT) -#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING) -#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING) -#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED) -#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING) -#define icache_is_pipt() cacheid_is(CACHEID_PIPT) - -/* - * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture - * Mask out support which will never be present on newer CPUs. - * - v6+ is never VIVT - * - v7+ VIPT never aliases on D-side - */ -#if __LINUX_ARM_ARCH__ >= 7 -#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\ - CACHEID_ASID_TAGGED |\ - CACHEID_VIPT_I_ALIASING |\ - CACHEID_PIPT) -#elif __LINUX_ARM_ARCH__ >= 6 -#define __CACHEID_ARCH_MIN (~CACHEID_VIVT) -#else -#define __CACHEID_ARCH_MIN (~0) -#endif - -/* - * Mask out support which isn't configured - */ -#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) -#define __CACHEID_ALWAYS (CACHEID_VIVT) -#define __CACHEID_NEVER (~CACHEID_VIVT) -#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) -#define __CACHEID_ALWAYS (0) -#define __CACHEID_NEVER (CACHEID_VIVT) -#else -#define __CACHEID_ALWAYS (0) -#define __CACHEID_NEVER (0) -#endif - -static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask) -{ - return (__CACHEID_ALWAYS & mask) | - (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid); -} - -#endif diff --git a/arch/lib/include/asm/cachetype.h b/arch/lib/include/asm/cachetype.h new file mode 120000 index 000000000000..5a58837efbbd --- /dev/null +++ b/arch/lib/include/asm/cachetype.h @@ -0,0 +1 @@ +../../../arm/include/asm/cachetype.h \ No newline at end of file diff --git a/arch/lib/include/asm/elf.h b/arch/lib/include/asm/elf.h deleted file mode 100644 index a7396c9d4225..000000000000 --- a/arch/lib/include/asm/elf.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_SIM_ELF_H -#define _ASM_SIM_ELF_H - -#if defined(CONFIG_64BIT) -#define ELF_CLASS ELFCLASS64 -#else -#define ELF_CLASS ELFCLASS32 -#endif - -#endif /* _ASM_SIM_ELF_H */ diff --git a/arch/lib/include/asm/elf.h b/arch/lib/include/asm/elf.h new file mode 120000 index 000000000000..55f48b91b219 --- /dev/null +++ b/arch/lib/include/asm/elf.h @@ -0,0 +1 @@ +../../../arm/include/asm/elf.h \ No newline at end of file diff --git a/arch/lib/include/asm/glue-proc.h b/arch/lib/include/asm/glue-proc.h deleted 
file mode 100644 index 74be7c22035a..000000000000 --- a/arch/lib/include/asm/glue-proc.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * arch/arm/include/asm/glue-proc.h - * - * Copyright (C) 1997-1999 Russell King - * Copyright (C) 2000 Deep Blue Solutions Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef ASM_GLUE_PROC_H -#define ASM_GLUE_PROC_H - -#include - -/* - * Work out if we need multiple CPU support - */ -#undef MULTI_CPU -#undef CPU_NAME - -/* - * CPU_NAME - the prefix for CPU related functions - */ - -#ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif -#endif - -#ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif -#endif - -#ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif -#endif - -#ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif -#endif - -#ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif -#endif - -#ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif -#endif - -#ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm926 -# endif -#endif - -#ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif -#endif - -#ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif -#endif - -#ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif -#endif - -#ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif -#endif - -#ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif -#endif - -#ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif -#endif - -#ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif -#endif - -#ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU 
-# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif -#endif - -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif -#endif - -#ifdef CONFIG_CPU_V7M -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7m -# endif -#endif - -#ifdef CONFIG_CPU_PJ4B -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_pj4b -# endif -#endif - -#ifdef CONFIG_CPU_V7 -/* - * Cortex-A9 needs a different suspend/resume function, so we need - * multiple CPU support for ARMv7 anyway. - */ -# undef MULTI_CPU -# define MULTI_CPU -#endif - -#ifndef MULTI_CPU -#define cpu_proc_init __glue(CPU_NAME,_proc_init) -#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) -#define cpu_reset __glue(CPU_NAME,_reset) -#define cpu_do_idle __glue(CPU_NAME,_do_idle) -#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) -#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) -#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) -#define cpu_suspend_size __glue(CPU_NAME,_suspend_size) -#define cpu_do_suspend __glue(CPU_NAME,_do_suspend) -#define cpu_do_resume __glue(CPU_NAME,_do_resume) -#endif - -#endif diff --git a/arch/lib/include/asm/glue-proc.h b/arch/lib/include/asm/glue-proc.h new file mode 120000 index 000000000000..dd3e9c28772d --- /dev/null +++ b/arch/lib/include/asm/glue-proc.h @@ -0,0 +1 @@ +../../../arm/include/asm/glue-proc.h \ No newline at end of file diff --git a/arch/lib/include/asm/hardirq.h b/arch/lib/include/asm/hardirq.h deleted file mode 100644 index 47d47f95a793..000000000000 --- a/arch/lib/include/asm/hardirq.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_SIM_HARDIRQ_H -#define _ASM_SIM_HARDIRQ_H - -extern unsigned int interrupt_pending; - -#define local_softirq_pending() (interrupt_pending) - -#endif /* _ASM_SIM_HARDIRQ_H */ diff --git a/arch/lib/include/asm/hardirq.h b/arch/lib/include/asm/hardirq.h new file mode 120000 index 000000000000..09b5f09acbf1 --- /dev/null +++ b/arch/lib/include/asm/hardirq.h @@ -0,0 +1 @@ +../../../arm/include/asm/hardirq.h \ No newline at end of file diff --git a/arch/lib/include/asm/highmem.h b/arch/lib/include/asm/highmem.h deleted file mode 100644 index 130b1dd3411e..000000000000 --- a/arch/lib/include/asm/highmem.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _ASM_HIGHMEM_H -#define _ASM_HIGHMEM_H - -#include - -#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE) -#define LAST_PKMAP PTRS_PER_PTE -#define LAST_PKMAP_MASK (LAST_PKMAP - 1) -#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) -#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) - -#define kmap_prot PAGE_KERNEL - -#define flush_cache_kmaps() \ - do { \ - if (cache_is_vivt()) \ - flush_cache_all(); \ - } while (0) - -extern pte_t *pkmap_page_table; -extern pte_t *fixmap_page_table; - -extern void *kmap_high(struct page *page); -extern void kunmap_high(struct page *page); - -/* - * The reason for kmap_high_get() is to ensure that the currently kmap'd - * page usage count does not decrease to zero while we're using its - * existing virtual mapping in an atomic context. With a VIVT cache this - * is essential to do, but with a VIPT cache this is only an optimization - * so not to pay the price of establishing a second mapping if an existing - * one can be used. 
However, on platforms without hardware TLB maintenance - * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since - * the locking involved must also disable IRQs which is incompatible with - * the IPI mechanism used by global TLB operations. - */ -#define ARCH_NEEDS_KMAP_HIGH_GET -#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6) -#undef ARCH_NEEDS_KMAP_HIGH_GET -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT) -#error "The sum of features in your kernel config cannot be supported together" -#endif -#endif - -/* - * Needed to be able to broadcast the TLB invalidation for kmap. - */ -#ifdef CONFIG_ARM_ERRATA_798181 -#undef ARCH_NEEDS_KMAP_HIGH_GET -#endif - -#ifdef ARCH_NEEDS_KMAP_HIGH_GET -extern void *kmap_high_get(struct page *page); -#else -static inline void *kmap_high_get(struct page *page) -{ - return NULL; -} -#endif - -/* - * The following functions are already defined by - * when CONFIG_HIGHMEM is not set. - */ -#ifdef CONFIG_HIGHMEM -extern void *kmap(struct page *page); -extern void kunmap(struct page *page); -extern void *kmap_atomic(struct page *page); -extern void __kunmap_atomic(void *kvaddr); -extern void *kmap_atomic_pfn(unsigned long pfn); -extern struct page *kmap_atomic_to_page(const void *ptr); -#endif - -#endif - diff --git a/arch/lib/include/asm/highmem.h b/arch/lib/include/asm/highmem.h new file mode 120000 index 000000000000..3b7bc556a3a5 --- /dev/null +++ b/arch/lib/include/asm/highmem.h @@ -0,0 +1 @@ +../../../arm/include/asm/highmem.h \ No newline at end of file diff --git a/arch/lib/include/asm/kmap_types.h b/arch/lib/include/asm/kmap_types.h deleted file mode 100644 index 83eb2f772911..000000000000 --- a/arch/lib/include/asm/kmap_types.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ARM_KMAP_TYPES_H -#define __ARM_KMAP_TYPES_H - -/* - * This is the "bare minimum". AIO seems to require this. - */ -#define KM_TYPE_NR 16 - -#endif diff --git a/arch/lib/include/asm/kmap_types.h b/arch/lib/include/asm/kmap_types.h new file mode 120000 index 000000000000..c36c0bb637f1 --- /dev/null +++ b/arch/lib/include/asm/kmap_types.h @@ -0,0 +1 @@ +../../../arm/include/asm/kmap_types.h \ No newline at end of file diff --git a/arch/lib/include/asm/memory.h b/arch/lib/include/asm/memory.h deleted file mode 100644 index 184def0e1652..000000000000 --- a/arch/lib/include/asm/memory.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - * arch/arm/include/asm/memory.h - * - * Copyright (C) 2000-2002 Russell King - * modification for nommu, Hyok S. Choi, 2004 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Note: this file should not be included by non-asm/.h files - */ -#ifndef __ASM_ARM_MEMORY_H -#define __ASM_ARM_MEMORY_H - -#include -#include -#include -#include - -#include - -#ifdef CONFIG_NEED_MACH_MEMORY_H -#include -#endif - -/* - * Allow for constants defined here to be used from assembly code - * by prepending the UL suffix only with actual C code compilation. - */ -#define UL(x) _AC(x, UL) - -/* PAGE_OFFSET - the virtual address of the start of the kernel image */ -#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) - -#ifdef CONFIG_MMU - -/* - * TASK_SIZE - the maximum size of a user space task. 
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area - */ -#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) -#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) - -/* - * The maximum size of a 26-bit user space task. - */ -#define TASK_SIZE_26 (UL(1) << 26) - -/* - * The module space lives between the addresses given by TASK_SIZE - * and PAGE_OFFSET - it must be within 32MB of the kernel text. - */ -#ifndef CONFIG_THUMB2_KERNEL -#define MODULES_VADDR (PAGE_OFFSET - SZ_16M) -#else -/* smaller range for Thumb-2 symbols relocation (2^24)*/ -#define MODULES_VADDR (PAGE_OFFSET - SZ_8M) -#endif - -#if TASK_SIZE > MODULES_VADDR -#error Top of user space clashes with start of module space -#endif - -/* - * The highmem pkmap virtual space shares the end of the module area. - */ -#ifdef CONFIG_HIGHMEM -#define MODULES_END (PAGE_OFFSET - PMD_SIZE) -#else -#define MODULES_END (PAGE_OFFSET) -#endif - -/* - * The XIP kernel gets mapped at the bottom of the module vm area. - * Since we use sections to map it, this macro replaces the physical address - * with its virtual address while keeping offset from the base section. - */ -#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) - -/* - * Allow 16MB-aligned ioremap pages - */ -#define IOREMAP_MAX_ORDER 24 - -#else /* CONFIG_MMU */ - -/* - * The limitation of user task size can grow up to the end of free ram region. - * It is difficult to define and perhaps will never meet the original meaning - * of this define that was meant to. - * Fortunately, there is no reference for this in noMMU mode, for now. - */ -#define TASK_SIZE UL(0xffffffff) - -#ifndef TASK_UNMAPPED_BASE -#define TASK_UNMAPPED_BASE UL(0x00000000) -#endif - -#ifndef END_MEM -#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) -#endif - -/* - * The module can be at any place in ram in nommu mode. - */ -#define MODULES_END (END_MEM) -#define MODULES_VADDR PAGE_OFFSET - -#define XIP_VIRT_ADDR(physaddr) (physaddr) - -#endif /* !CONFIG_MMU */ - -/* - * We fix the TCM memories max 32 KiB ITCM resp DTCM at these - * locations - */ -#ifdef CONFIG_HAVE_TCM -#define ITCM_OFFSET UL(0xfffe0000) -#define DTCM_OFFSET UL(0xfffe8000) -#endif - -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - -/* - * Convert a page to/from a physical address - */ -#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) -#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) - -/* - * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed - * around in head.S and proc-*.S are shifted by this amount, in order to - * leave spare high bits for systems with physical address extension. This - * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but - * gives us about 38-bits or so. - */ -#ifdef CONFIG_ARM_LPAE -#define ARCH_PGD_SHIFT L1_CACHE_SHIFT -#else -#define ARCH_PGD_SHIFT 0 -#endif -#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) - -/* - * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical - * memory. This is used for XIP and NoMMU kernels, and on platforms that don't - * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use - * PLAT_PHYS_OFFSET and not PHYS_OFFSET. - */ -#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) - -#ifndef __ASSEMBLY__ - -/* - * Physical vs virtual RAM address space conversion. 
These are - * private definitions which should NOT be used outside memory.h - * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. - * - * PFNs are used to describe any physical page; this means - * PFN 0 == physical address 0. - */ -#if defined(__virt_to_phys) -#define PHYS_OFFSET PLAT_PHYS_OFFSET -#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) - -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) - -#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT) - -/* - * Constants used to force the right instruction encodings and shifts - * so that all we need to do is modify the 8-bit constant field. - */ -#define __PV_BITS_31_24 0x81000000 -#define __PV_BITS_7_0 0x81 - -extern unsigned long __pv_phys_pfn_offset; -extern u64 __pv_offset; -extern void fixup_pv_table(const void *, unsigned long); -extern const void *__pv_table_begin, *__pv_table_end; - -#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT) -#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset) - -#define virt_to_pfn(kaddr) \ - ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ - PHYS_PFN_OFFSET) - -#define __pv_stub(from,to,instr,type) \ - __asm__("@ __pv_stub\n" \ - "1: " instr " %0, %1, %2\n" \ - " .pushsection .pv_table,\"a\"\n" \ - " .long 1b\n" \ - " .popsection\n" \ - : "=r" (to) \ - : "r" (from), "I" (type)) - -#define __pv_stub_mov_hi(t) \ - __asm__ volatile("@ __pv_stub_mov\n" \ - "1: mov %R0, %1\n" \ - " .pushsection .pv_table,\"a\"\n" \ - " .long 1b\n" \ - " .popsection\n" \ - : "=r" (t) \ - : "I" (__PV_BITS_7_0)) - -#define __pv_add_carry_stub(x, y) \ - __asm__ volatile("@ __pv_add_carry_stub\n" \ - "1: adds %Q0, %1, %2\n" \ - " adc %R0, %R0, #0\n" \ - " .pushsection .pv_table,\"a\"\n" \ - " .long 1b\n" \ - " .popsection\n" \ - : "+r" (y) \ - : "r" (x), "I" (__PV_BITS_31_24) \ - : "cc") - -static inline phys_addr_t __virt_to_phys(unsigned long x) -{ - phys_addr_t t; - - if (sizeof(phys_addr_t) == 4) { - __pv_stub(x, t, "add", __PV_BITS_31_24); - } else { - __pv_stub_mov_hi(t); - __pv_add_carry_stub(x, t); - } - return t; -} - -static inline unsigned long __phys_to_virt(phys_addr_t x) -{ - unsigned long t; - - /* - * 'unsigned long' cast discard upper word when - * phys_addr_t is 64 bit, and makes sure that inline - * assembler expression receives 32 bit argument - * in place where 'r' 32 bit operand is expected. - */ - __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24); - return t; -} - -#else - -#define PHYS_OFFSET PLAT_PHYS_OFFSET -#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) - -static inline phys_addr_t __virt_to_phys(unsigned long x) -{ - return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; -} - -static inline unsigned long __phys_to_virt(phys_addr_t x) -{ - return x - PHYS_OFFSET + PAGE_OFFSET; -} - -#define virt_to_pfn(kaddr) \ - ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ - PHYS_PFN_OFFSET) - -#endif - -/* - * These are *only* valid on the kernel direct mapped RAM memory. - * Note: Drivers should NOT use these. They are the wrong - * translation for translating DMA addresses. Use the driver - * DMA support - see dma-mapping.h. - */ -#define virt_to_phys virt_to_phys -static inline phys_addr_t virt_to_phys(const volatile void *x) -{ - return __virt_to_phys((unsigned long)(x)); -} - -#define phys_to_virt phys_to_virt -static inline void *phys_to_virt(phys_addr_t x) -{ - return (void *)__phys_to_virt(x); -} - -/* - * Drivers should NOT use these either. 
- */ -#define __pa(x) __virt_to_phys((unsigned long)(x)) -#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) - -extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); - -/* - * These are for systems that have a hardware interconnect supported alias of - * physical memory for idmap purposes. Most cases should leave these - * untouched. - */ -static inline phys_addr_t __virt_to_idmap(unsigned long x) -{ - if (arch_virt_to_idmap) - return arch_virt_to_idmap(x); - else - return __virt_to_phys(x); -} - -#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x)) - -/* - * Virtual <-> DMA view memory address translations - * Again, these are *only* valid on the kernel direct mapped RAM - * memory. Use of these is *deprecated* (and that doesn't mean - * use the __ prefixed forms instead.) See dma-mapping.h. - */ -#ifndef __virt_to_bus -#define __virt_to_bus __virt_to_phys -#define __bus_to_virt __phys_to_virt -#define __pfn_to_bus(x) __pfn_to_phys(x) -#define __bus_to_pfn(x) __phys_to_pfn(x) -#endif - -#ifdef CONFIG_VIRT_TO_BUS -#define virt_to_bus virt_to_bus -static inline __deprecated unsigned long virt_to_bus(void *x) -{ - return __virt_to_bus((unsigned long)x); -} - -#define bus_to_virt bus_to_virt -static inline __deprecated void *bus_to_virt(unsigned long x) -{ - return (void *)__bus_to_virt(x); -} -#endif - -/* - * Conversion between a struct page and a physical address. - * - * page_to_pfn(page) convert a struct page * to a PFN number - * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * - * - * virt_to_page(k) convert a _valid_ virtual address to struct page * - * virt_addr_valid(k) indicates whether a virtual address is valid - */ -#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET - -#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) -#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ - && pfn_valid(virt_to_pfn(kaddr))) - -#endif - -#include - -#endif diff --git a/arch/lib/include/asm/memory.h b/arch/lib/include/asm/memory.h new file mode 120000 index 000000000000..af1ba14762a0 --- /dev/null +++ b/arch/lib/include/asm/memory.h @@ -0,0 +1 @@ +../../../arm/include/asm/memory.h \ No newline at end of file diff --git a/arch/lib/include/asm/mmu.h b/arch/lib/include/asm/mmu.h deleted file mode 100644 index a5b47421059d..000000000000 --- a/arch/lib/include/asm/mmu.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __ARM_MMU_H -#define __ARM_MMU_H - -#ifdef CONFIG_MMU - -typedef struct { -#ifdef CONFIG_CPU_HAS_ASID - atomic64_t id; -#else - int switch_pending; -#endif - unsigned int vmalloc_seq; - unsigned long sigpage; -#ifdef CONFIG_VDSO - unsigned long vdso; -#endif -} mm_context_t; - -#ifdef CONFIG_CPU_HAS_ASID -#define ASID_BITS 8 -#define ASID_MASK ((~0ULL) << ASID_BITS) -#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK)) -#else -#define ASID(mm) (0) -#endif - -#else - -/* - * From nommu.h: - * Copyright (C) 2002, David McCullough - * modified for 2.6 by Hyok S. 
Choi - */ -typedef struct { - unsigned long end_brk; -} mm_context_t; - -#endif - -#endif diff --git a/arch/lib/include/asm/mmu.h b/arch/lib/include/asm/mmu.h new file mode 120000 index 000000000000..51afcdf84514 --- /dev/null +++ b/arch/lib/include/asm/mmu.h @@ -0,0 +1 @@ +../../../arm/include/asm/mmu.h \ No newline at end of file diff --git a/arch/lib/include/asm/outercache.h b/arch/lib/include/asm/outercache.h deleted file mode 100644 index 563b92fc2f41..000000000000 --- a/arch/lib/include/asm/outercache.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * arch/arm/include/asm/outercache.h - * - * Copyright (C) 2010 ARM Ltd. - * Written by Catalin Marinas - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __ASM_OUTERCACHE_H -#define __ASM_OUTERCACHE_H - -#include - -struct l2x0_regs; - -struct outer_cache_fns { - void (*inv_range)(unsigned long, unsigned long); - void (*clean_range)(unsigned long, unsigned long); - void (*flush_range)(unsigned long, unsigned long); - void (*flush_all)(void); - void (*disable)(void); -#ifdef CONFIG_OUTER_CACHE_SYNC - void (*sync)(void); -#endif - void (*resume)(void); - - /* This is an ARM L2C thing */ - void (*write_sec)(unsigned long, unsigned); - void (*configure)(const struct l2x0_regs *); -}; - -extern struct outer_cache_fns outer_cache; - -#ifdef CONFIG_OUTER_CACHE -/** - * outer_inv_range - invalidate range of outer cache lines - * @start: starting physical address, inclusive - * @end: end physical address, exclusive - */ -static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) -{ - if (outer_cache.inv_range) - outer_cache.inv_range(start, end); -} - -/** - * outer_clean_range - clean dirty outer cache lines - * @start: starting physical address, inclusive - * @end: end physical address, exclusive - */ -static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) -{ - if (outer_cache.clean_range) - outer_cache.clean_range(start, end); -} - -/** - * outer_flush_range - clean and invalidate outer cache lines - * @start: starting physical address, inclusive - * @end: end physical address, exclusive - */ -static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) -{ - if (outer_cache.flush_range) - outer_cache.flush_range(start, end); -} - -/** - * outer_flush_all - clean and invalidate all cache lines in the outer cache - * - * Note: depending on implementation, this may not be atomic - it must - * only be called with interrupts disabled and no other active outer - * cache masters. - * - * It is intended that this function is only used by implementations - * needing to override the outer_cache.disable() method due to security. - * (Some implementations perform this as a clean followed by an invalidate.) 
- */ -static inline void outer_flush_all(void) -{ - if (outer_cache.flush_all) - outer_cache.flush_all(); -} - -/** - * outer_disable - clean, invalidate and disable the outer cache - * - * Disable the outer cache, ensuring that any data contained in the outer - * cache is pushed out to lower levels of system memory. The note and - * conditions above concerning outer_flush_all() applies here. - */ -extern void outer_disable(void); - -/** - * outer_resume - restore the cache configuration and re-enable outer cache - * - * Restore any configuration that the cache had when previously enabled, - * and re-enable the outer cache. - */ -static inline void outer_resume(void) -{ - if (outer_cache.resume) - outer_cache.resume(); -} - -#else - -static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) -{ } -static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) -{ } -static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) -{ } -static inline void outer_flush_all(void) { } -static inline void outer_disable(void) { } -static inline void outer_resume(void) { } - -#endif - -#ifdef CONFIG_OUTER_CACHE_SYNC -/** - * outer_sync - perform a sync point for outer cache - * - * Ensure that all outer cache operations are complete and any store - * buffers are drained. - */ -static inline void outer_sync(void) -{ - if (outer_cache.sync) - outer_cache.sync(); -} -#else -static inline void outer_sync(void) -{ } -#endif - -#endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/lib/include/asm/outercache.h b/arch/lib/include/asm/outercache.h new file mode 120000 index 000000000000..eb3562b3e00a --- /dev/null +++ b/arch/lib/include/asm/outercache.h @@ -0,0 +1 @@ +../../../arm/include/asm/outercache.h \ No newline at end of file diff --git a/arch/lib/include/asm/page-arm.h b/arch/lib/include/asm/page-arm.h new file mode 120000 index 000000000000..db56b3d5b0cb --- /dev/null +++ b/arch/lib/include/asm/page-arm.h @@ -0,0 +1 @@ +../../../arm/include/asm/page.h \ No newline at end of file diff --git a/arch/lib/include/asm/page.h b/arch/lib/include/asm/page.h index eadfa319a847..faa4a5ed2266 100644 --- a/arch/lib/include/asm/page.h +++ b/arch/lib/include/asm/page.h @@ -7,170 +7,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef _ASMARM_PAGE_H -#define _ASMARM_PAGE_H - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) +#ifndef _ASM_LIB_PAGE_H +#define _ASM_LIB_PAGE_H #define WANT_PAGE_VIRTUAL 1 - - -#ifndef __ASSEMBLY__ - -#ifndef CONFIG_MMU - -#include - -#else - -#include - -/* - * User Space Model - * ================ - * - * This section selects the correct set of functions for dealing with - * page-based copying and clearing for user space for the particular - * processor(s) we're building for. 
- * - * We have the following to choose from: - * v4wt - ARMv4 with writethrough cache, without minicache - * v4wb - ARMv4 with writeback cache, without minicache - * v4_mc - ARMv4 with minicache - * xscale - Xscale - * xsc3 - XScalev3 - */ -#undef _USER -#undef MULTI_USER - -#ifdef CONFIG_CPU_COPY_V4WT -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER v4wt -# endif -#endif - -#ifdef CONFIG_CPU_COPY_V4WB -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER v4wb -# endif -#endif - -#ifdef CONFIG_CPU_COPY_FEROCEON -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER feroceon -# endif -#endif - -#ifdef CONFIG_CPU_COPY_FA -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER fa -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER v4_mc -# endif -#endif - -#ifdef CONFIG_CPU_XSCALE -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER xscale_mc -# endif -#endif - -#ifdef CONFIG_CPU_XSC3 -# ifdef _USER -# define MULTI_USER 1 -# else -# define _USER xsc3_mc -# endif -#endif - -#ifdef CONFIG_CPU_COPY_V6 -# define MULTI_USER 1 -#endif - -#if !defined(_USER) && !defined(MULTI_USER) -#error Unknown user operations model -#endif - -struct page; -struct vm_area_struct; - -struct cpu_user_fns { - void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); - void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma); -}; - -#ifdef MULTI_USER -extern struct cpu_user_fns cpu_user; - -#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage -#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage - -#else - -#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage) -#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) - -extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); -extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma); -#endif - -#define clear_user_highpage(page,vaddr) \ - __cpu_clear_user_highpage(page, vaddr) - -#define __HAVE_ARCH_COPY_USER_HIGHPAGE -#define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr, vma) - -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) -extern void copy_page(void *to, const void *from); - -#ifdef CONFIG_KUSER_HELPERS -#define __HAVE_ARCH_GATE_AREA 1 -#endif - -#ifdef CONFIG_ARM_LPAE -#include -#else -#include -#endif - -#endif /* CONFIG_MMU */ - -typedef struct page *pgtable_t; - -#ifdef CONFIG_HAVE_ARCH_PFN_VALID -extern int pfn_valid(unsigned long); -#endif - -#include - -#endif /* !__ASSEMBLY__ */ - -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#include +#include #endif diff --git a/arch/lib/include/asm/pgtable-2level-hwdef.h b/arch/lib/include/asm/pgtable-2level-hwdef.h deleted file mode 100644 index 5e68278e953e..000000000000 --- a/arch/lib/include/asm/pgtable-2level-hwdef.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * arch/arm/include/asm/pgtable-2level-hwdef.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H -#define _ASM_PGTABLE_2LEVEL_HWDEF_H - -/* - * Hardware page table definitions. - * - * + Level 1 descriptor (PMD) - * - common - */ -#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) -#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) -#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) -#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) -#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ -#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) -#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) -#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ -/* - * - section - */ -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ -#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) -#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) -#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ -#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10) -#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */ -#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */ -#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */ -#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ -#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ -#define PMD_SECT_AF (_AT(pmdval_t, 0)) - -#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) -#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) -#define PMD_SECT_WT (PMD_SECT_CACHEABLE) -#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) -#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) - -/* - * - coarse table (not used) - */ - -/* - * + Level 2 descriptor (PTE) - * - common - */ -#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) -#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) -#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0) -#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0) -#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */ -#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) -#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) - -/* - * - extended small page/tiny page - */ -#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ -#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) -#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) -#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) -#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4) -#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) -#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) -#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) -#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */ -#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */ -#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */ -#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */ -#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */ - -/* - * - small page - */ -#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4) -#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4) -#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4) -#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4) -#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4) - -#define PHYS_MASK (~0UL) - -#endif diff --git a/arch/lib/include/asm/pgtable-2level-hwdef.h b/arch/lib/include/asm/pgtable-2level-hwdef.h new file mode 120000 index 000000000000..a3062387ce04 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level-hwdef.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-2level-types.h b/arch/lib/include/asm/pgtable-2level-types.h 
deleted file mode 100644 index 66cb5b0e89c5..000000000000 --- a/arch/lib/include/asm/pgtable-2level-types.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * arch/arm/include/asm/pgtable-2level-types.h - * - * Copyright (C) 1995-2003 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H -#define _ASM_PGTABLE_2LEVEL_TYPES_H - -#include - -typedef u32 pteval_t; -typedef u32 pmdval_t; - -#undef STRICT_MM_TYPECHECKS - -#ifdef STRICT_MM_TYPECHECKS -/* - * These are used to make use of C type-checking.. - */ -typedef struct { pteval_t pte; } pte_t; -typedef struct { pmdval_t pmd; } pmd_t; -typedef struct { pmdval_t pgd[2]; } pgd_t; -typedef struct { pteval_t pgprot; } pgprot_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pgd_val(x) ((x).pgd[0]) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#else -/* - * .. while these make it easier on the compiler - */ -typedef pteval_t pte_t; -typedef pmdval_t pmd_t; -typedef pmdval_t pgd_t[2]; -typedef pteval_t pgprot_t; - -#define pte_val(x) (x) -#define pmd_val(x) (x) -#define pgd_val(x) ((x)[0]) -#define pgprot_val(x) (x) - -#define __pte(x) (x) -#define __pmd(x) (x) -#define __pgprot(x) (x) - -#endif /* STRICT_MM_TYPECHECKS */ - -#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/lib/include/asm/pgtable-2level-types.h b/arch/lib/include/asm/pgtable-2level-types.h new file mode 120000 index 000000000000..41d09767ed1c --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level-types.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level-types.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-2level.h b/arch/lib/include/asm/pgtable-2level.h deleted file mode 100644 index bfd662e49a25..000000000000 --- a/arch/lib/include/asm/pgtable-2level.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * arch/arm/include/asm/pgtable-2level.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASM_PGTABLE_2LEVEL_H -#define _ASM_PGTABLE_2LEVEL_H - -#define __PAGETABLE_PMD_FOLDED - -/* - * Hardware-wise, we have a two level page table structure, where the first - * level has 4096 entries, and the second level has 256 entries. Each entry - * is one 32-bit word. Most of the bits in the second level entry are used - * by hardware, and there aren't any "accessed" and "dirty" bits. - * - * Linux on the other hand has a three level page table structure, which can - * be wrapped to fit a two level page table structure easily - using the PGD - * and PTE only. However, Linux also expects one "PTE" table per page, and - * at least a "dirty" bit. 
- * - * Therefore, we tweak the implementation slightly - we tell Linux that we - * have 2048 entries in the first level, each of which is 8 bytes (iow, two - * hardware pointers to the second level.) The second level contains two - * hardware PTE tables arranged contiguously, preceded by Linux versions - * which contain the state information Linux needs. We, therefore, end up - * with 512 entries in the "PTE" level. - * - * This leads to the page tables having the following layout: - * - * pgd pte - * | | - * +--------+ - * | | +------------+ +0 - * +- - - - + | Linux pt 0 | - * | | +------------+ +1024 - * +--------+ +0 | Linux pt 1 | - * | |-----> +------------+ +2048 - * +- - - - + +4 | h/w pt 0 | - * | |-----> +------------+ +3072 - * +--------+ +8 | h/w pt 1 | - * | | +------------+ +4096 - * - * See L_PTE_xxx below for definitions of bits in the "Linux pt", and - * PTE_xxx for definitions of bits appearing in the "h/w pt". - * - * PMD_xxx definitions refer to bits in the first level page table. - * - * The "dirty" bit is emulated by only granting hardware write permission - * iff the page is marked "writable" and "dirty" in the Linux PTE. This - * means that a write to a clean page will cause a permission fault, and - * the Linux MM layer will mark the page dirty via handle_pte_fault(). - * For the hardware to notice the permission change, the TLB entry must - * be flushed, and ptep_set_access_flags() does that for us. - * - * The "accessed" or "young" bit is emulated by a similar method; we only - * allow accesses to the page if the "young" bit is set. Accesses to the - * page will cause a fault, and handle_pte_fault() will set the young bit - * for us as long as the page is marked present in the corresponding Linux - * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is - * up to date. - * - * However, when the "young" bit is cleared, we deny access to the page - * by clearing the hardware PTE. Currently Linux does not flush the TLB - * for us in this case, which means the TLB will retain the transation - * until either the TLB entry is evicted under pressure, or a context - * switch which changes the user space mapping occurs. - */ -#define PTRS_PER_PTE 512 -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 2048 - -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) -#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) -#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) - -/* - * PMD_SHIFT determines the size of the area a second-level page table can map - * PGDIR_SHIFT determines what a third-level page table entry can map - */ -#define PMD_SHIFT 21 -#define PGDIR_SHIFT 21 - -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) - -/* - * section address mask and size definitions. - */ -#define SECTION_SHIFT 20 -#define SECTION_SIZE (1UL << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - -/* - * ARMv6 supersection address mask and size definitions. - */ -#define SUPERSECTION_SHIFT 24 -#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT) -#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1)) - -#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) - -/* - * "Linux" PTE definitions. - * - * We keep two sets of PTEs - the hardware and the linux version. - * This allows greater flexibility in the way we map the Linux bits - * onto the hardware tables, and allows us to have YOUNG and DIRTY - * bits. 
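A quick sanity check of the geometry described above: with 4 kB pages, 512-entry PTE levels and 2 MB pgd slots, a virtual address splits into an 11-bit pgd index and a 9-bit pte index. A standalone sketch using the constants quoted in this header (the example address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	21	/* each pgd slot maps 1 << 21 = 2 MB */
#define PTRS_PER_PTE	512

int main(void)
{
	unsigned long addr = 0xc0123456UL;
	unsigned long pgd_index = addr >> PGDIR_SHIFT;
	unsigned long pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	/* prints: pgd_index=0x600 pte_index=0x123 */
	printf("pgd_index=%#lx pte_index=%#lx\n", pgd_index, pte_index);
	return 0;
}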
- * - * The PTE table pointer refers to the hardware entries; the "Linux" - * entries are stored 1024 bytes below. - */ -#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ -#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) -#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) -#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) -#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) -#define L_PTE_USER (_AT(pteval_t, 1) << 8) -#define L_PTE_XN (_AT(pteval_t, 1) << 9) -#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ -#define L_PTE_NONE (_AT(pteval_t, 1) << 11) - -/* - * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB - */ -#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ -#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ -#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ -#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ -#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ -#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ -#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ -#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ -#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */ -#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) - -#ifndef __ASSEMBLY__ - -/* - * The "pud_xxx()" functions here are trivial when the pmd is folded into - * the pud: the pud entry is never bad, always exists, and can't be set or - * cleared. - */ -#define pud_none(pud) (0) -#define pud_bad(pud) (0) -#define pud_present(pud) (1) -#define pud_clear(pudp) do { } while (0) -#define set_pud(pud,pudp) do { } while (0) - -static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) -{ - return (pmd_t *)pud; -} - -#define pmd_large(pmd) (pmd_val(pmd) & 2) -#define pmd_bad(pmd) (pmd_val(pmd) & 2) - -#define copy_pmd(pmdpd,pmdps) \ - do { \ - pmdpd[0] = pmdps[0]; \ - pmdpd[1] = pmdps[1]; \ - flush_pmd_entry(pmdpd); \ - } while (0) - -#define pmd_clear(pmdp) \ - do { \ - pmdp[0] = __pmd(0); \ - pmdp[1] = __pmd(0); \ - clean_pmd_entry(pmdp); \ - } while (0) - -/* we don't need complex calculations here as the pmd is folded into the pgd */ -#define pmd_addr_end(addr,end) (end) - -#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) -#define pte_special(pte) (0) -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - -/* - * We don't have huge page support for short descriptors, for the moment - * define empty stubs for use by pin_page_for_write. 
- */ -#define pmd_hugewillfault(pmd) (0) -#define pmd_thp_or_huge(pmd) (0) - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/lib/include/asm/pgtable-2level.h b/arch/lib/include/asm/pgtable-2level.h new file mode 120000 index 000000000000..c95b6b679a30 --- /dev/null +++ b/arch/lib/include/asm/pgtable-2level.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-2level.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable-hwdef.h b/arch/lib/include/asm/pgtable-hwdef.h deleted file mode 100644 index 8426229ba292..000000000000 --- a/arch/lib/include/asm/pgtable-hwdef.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * arch/arm/include/asm/pgtable-hwdef.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASMARM_PGTABLE_HWDEF_H -#define _ASMARM_PGTABLE_HWDEF_H - -#ifdef CONFIG_ARM_LPAE -#include -#else -#include -#endif - -#endif diff --git a/arch/lib/include/asm/pgtable-hwdef.h b/arch/lib/include/asm/pgtable-hwdef.h new file mode 120000 index 000000000000..fabe180c1494 --- /dev/null +++ b/arch/lib/include/asm/pgtable-hwdef.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable-hwdef.h \ No newline at end of file diff --git a/arch/lib/include/asm/pgtable.h b/arch/lib/include/asm/pgtable.h deleted file mode 100644 index f40354198bad..000000000000 --- a/arch/lib/include/asm/pgtable.h +++ /dev/null @@ -1,363 +0,0 @@ -/* - * arch/arm/include/asm/pgtable.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASMARM_PGTABLE_H -#define _ASMARM_PGTABLE_H - -#include -#include - -#ifndef CONFIG_MMU - -#include -#include - -#else - -#include -#include -#include - - -#include - -#ifdef CONFIG_ARM_LPAE -#include -#else -#include -#endif - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_END 0xff000000UL - -#define LIBRARY_TEXT_START 0x0c000000 - -#ifndef __ASSEMBLY__ -extern void __pte_error(const char *file, int line, pte_t); -extern void __pmd_error(const char *file, int line, pmd_t); -extern void __pgd_error(const char *file, int line, pgd_t); - -#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) -#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) -#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) - -/* - * This is the lowest virtual address we can permit any user space - * mapping to be mapped at. This is particularly important for - * non-high vector CPUs. - */ -#define FIRST_USER_ADDRESS (PAGE_SIZE * 2) - -/* - * Use TASK_SIZE as the ceiling argument for free_pgtables() and - * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd - * page shared between user and kernel). 
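The VMALLOC_START expression quoted above is worth unpacking: adding VMALLOC_OFFSET and then masking down to an 8 MB boundary leaves an unmapped guard hole (up to 8 MB) between high_memory and the vmalloc area, so stray out-of-bounds accesses fault. A standalone sketch with an arbitrary example value for high_memory:

#include <stdio.h>

#define VMALLOC_OFFSET	(8UL * 1024 * 1024)

int main(void)
{
	unsigned long high_memory = 0xef800000UL;	/* example only */
	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1);

	/* prints: VMALLOC_START=0xf0000000 hole=8388608 */
	printf("VMALLOC_START=%#lx hole=%lu\n",
	       vmalloc_start, vmalloc_start - high_memory);
	return 0;
}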
- */ -#ifdef CONFIG_ARM_LPAE -#define USER_PGTABLES_CEILING TASK_SIZE -#endif - -/* - * The pgprot_* and protection_map entries will be fixed up in runtime - * to include the cachable and bufferable bits based on memory policy, - * as well as any architecture dependent bits like global/ASID and SMP - * shared mapping bits. - */ -#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG - -extern pgprot_t pgprot_user; -extern pgprot_t pgprot_kernel; -extern pgprot_t pgprot_hyp_device; -extern pgprot_t pgprot_s2; -extern pgprot_t pgprot_s2_device; - -#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) - -#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE) -#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) -#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) -#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) -#define PAGE_KERNEL_EXEC pgprot_kernel -#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) -#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) -#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY) - -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) -#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) -#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) -#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) - -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - -#define pgprot_noncached(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) - -#define pgprot_writecombine(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) - -#define pgprot_stronglyordered(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) - -#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE -#define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) -#define __HAVE_PHYS_MEM_ACCESS_PROT -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#else -#define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) -#endif - -#endif /* __ASSEMBLY__ */ - -/* - * The table below defines the page protection levels that we insert into our - * Linux page table version. These get translated into the best that the - * architecture can perform. 
Note that on most ARM hardware: - * 1) We cannot do execute protection - * 2) If we could do execute protection, then read is implied - * 3) write implies read permissions - */ -#define __P000 __PAGE_NONE -#define __P001 __PAGE_READONLY -#define __P010 __PAGE_COPY -#define __P011 __PAGE_COPY -#define __P100 __PAGE_READONLY_EXEC -#define __P101 __PAGE_READONLY_EXEC -#define __P110 __PAGE_COPY_EXEC -#define __P111 __PAGE_COPY_EXEC - -#define __S000 __PAGE_NONE -#define __S001 __PAGE_READONLY -#define __S010 __PAGE_SHARED -#define __S011 __PAGE_SHARED -#define __S100 __PAGE_READONLY_EXEC -#define __S101 __PAGE_READONLY_EXEC -#define __S110 __PAGE_SHARED_EXEC -#define __S111 __PAGE_SHARED_EXEC - -#ifndef __ASSEMBLY__ -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -extern struct page *empty_zero_page; -#define ZERO_PAGE(vaddr) (empty_zero_page) - - -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; - -/* to find an entry in a page-table-directory */ -#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) - -#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) - -#define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_present(pmd) (pmd_val(pmd)) - -static inline pte_t *pmd_page_vaddr(pmd_t pmd) -{ - return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); -} - -#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) - -#ifndef CONFIG_HIGHPTE -#define __pte_map(pmd) pmd_page_vaddr(*(pmd)) -#define __pte_unmap(pte) do { } while (0) -#else -#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) -#define __pte_unmap(pte) kunmap_atomic(pte) -#endif - -#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) - -#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) - -#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) -#define pte_unmap(pte) __pte_unmap(pte) - -#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) -#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) - -#define pte_page(pte) pfn_to_page(pte_pfn(pte)) -#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) - -#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) - -#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \ - : !!(pte_val(pte) & (val))) -#define pte_isclear(pte, val) (!(pte_val(pte) & (val))) - -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT)) -#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID)) -#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? 
pte_present(pte) : pte_valid(pte)) -#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY)) -#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG)) -#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN)) - -#define pte_valid_user(pte) \ - (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte)) - -#if __LINUX_ARM_ARCH__ < 6 -static inline void __sync_icache_dcache(pte_t pteval) -{ -} -#else -extern void __sync_icache_dcache(pte_t pteval); -#endif - -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - unsigned long ext = 0; - - if (addr < TASK_SIZE && pte_valid_user(pteval)) { - if (!pte_special(pteval)) - __sync_icache_dcache(pteval); - ext |= PTE_EXT_NG; - } - - set_pte_ext(ptep, pteval, ext); -} - -static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) -{ - pte_val(pte) &= ~pgprot_val(prot); - return pte; -} - -static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) -{ - pte_val(pte) |= pgprot_val(prot); - return pte; -} - -static inline pte_t pte_wrprotect(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); -} - -static inline pte_t pte_mkwrite(pte_t pte) -{ - return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY)); -} - -static inline pte_t pte_mkclean(pte_t pte) -{ - return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY)); -} - -static inline pte_t pte_mkdirty(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(L_PTE_DIRTY)); -} - -static inline pte_t pte_mkold(pte_t pte) -{ - return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG)); -} - -static inline pte_t pte_mkyoung(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(L_PTE_YOUNG)); -} - -static inline pte_t pte_mkexec(pte_t pte) -{ - return clear_pte_bit(pte, __pgprot(L_PTE_XN)); -} - -static inline pte_t pte_mknexec(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(L_PTE_XN)); -} - -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | - L_PTE_NONE | L_PTE_VALID; - pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); - return pte; -} - -/* - * Encode and decode a swap entry. Swap entries are stored in the Linux - * page tables as follows: - * - * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 - * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset ------------------------> < type -> 0 0 - * - * This gives us up to 31 swap files and 128GB per swap file. Note that - * the offset field is always non-zero. - */ -#define __SWP_TYPE_SHIFT 2 -#define __SWP_TYPE_BITS 5 -#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) -#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) - -#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) -#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) -#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) - -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) - -/* - * It is an error for the kernel to have more swap files than we can - * encode in the PTEs. This ensures that we know when MAX_SWAPFILES - * is increased beyond what we presently support. 
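A worked example of the swap-entry packing described above may help: with a 5-bit type field at bit 2 and the offset stored above it, type 3 with offset 0x1234 encodes to 0x91a0c, and bits [1:0] stay zero so a swap entry can never be mistaken for a present PTE. Standalone sketch (the type/offset values are arbitrary):

#include <stdio.h>

#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << __SWP_TYPE_SHIFT) |
			    (offset << __SWP_OFFSET_SHIFT);

	/* prints: val=0x91a0c type=3 offset=0x1234 */
	printf("val=%#lx type=%lu offset=%#lx\n", val,
	       (val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK,
	       val >> __SWP_OFFSET_SHIFT);
	return 0;
}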
- */ -#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) - -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -/* FIXME: this is not correct */ -#define kern_addr_valid(addr) (1) - -#include - -/* - * We provide our own arch_get_unmapped_area to cope with VIPT caches. - */ -#define HAVE_ARCH_UNMAPPED_AREA -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN - -#define pgtable_cache_init() do { } while (0) - -#endif /* !__ASSEMBLY__ */ - -#endif /* CONFIG_MMU */ - -#endif /* _ASMARM_PGTABLE_H */ diff --git a/arch/lib/include/asm/pgtable.h b/arch/lib/include/asm/pgtable.h new file mode 120000 index 000000000000..26b97b4ba905 --- /dev/null +++ b/arch/lib/include/asm/pgtable.h @@ -0,0 +1 @@ +../../../arm/include/asm/pgtable.h \ No newline at end of file diff --git a/arch/lib/include/asm/proc-fns.h b/arch/lib/include/asm/proc-fns.h deleted file mode 100644 index 5324c1112f3a..000000000000 --- a/arch/lib/include/asm/proc-fns.h +++ /dev/null @@ -1,160 +0,0 @@ -/* - * arch/arm/include/asm/proc-fns.h - * - * Copyright (C) 1997-1999 Russell King - * Copyright (C) 2000 Deep Blue Solutions Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PROCFNS_H -#define __ASM_PROCFNS_H - -#ifdef __KERNEL__ - -#include -#include - -#ifndef __ASSEMBLY__ - -struct mm_struct; - -/* - * Don't change this structure - ASM code relies on it. - */ -extern struct processor { - /* MISC - * get data abort address/flags - */ - void (*_data_abort)(unsigned long pc); - /* - * Retrieve prefetch fault address - */ - unsigned long (*_prefetch_abort)(unsigned long lr); - /* - * Set up any processor specifics - */ - void (*_proc_init)(void); - /* - * Disable any processor specifics - */ - void (*_proc_fin)(void); - /* - * Special stuff for a reset - */ - void (*reset)(unsigned long addr) __attribute__((noreturn)); - /* - * Idle the processor - */ - int (*_do_idle)(void); - /* - * Processor architecture specific - */ - /* - * clean a virtual address range from the - * D-cache without flushing the cache. - */ - void (*dcache_clean_area)(void *addr, int size); - - /* - * Set the page table - */ - void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); - /* - * Set a possibly extended PTE. Non-extended PTEs should - * ignore 'ext'. 
- */ -#ifdef CONFIG_ARM_LPAE - void (*set_pte_ext)(pte_t *ptep, pte_t pte); -#else - void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); -#endif - - /* Suspend/resume */ - unsigned int suspend_size; - void (*do_suspend)(void *); - void (*do_resume)(void *); -} processor; - -#ifndef MULTI_CPU -extern void cpu_proc_init(void); -extern void cpu_proc_fin(void); -extern int cpu_do_idle(void); -extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); -#ifdef CONFIG_ARM_LPAE -extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); -#else -extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); -#endif -extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); - -/* These three are private to arch/arm/kernel/suspend.c */ -extern void cpu_do_suspend(void *); -extern void cpu_do_resume(void *); -#else -#define cpu_proc_init processor._proc_init -#define cpu_proc_fin processor._proc_fin -#define cpu_reset processor.reset -#define cpu_do_idle processor._do_idle -#define cpu_dcache_clean_area processor.dcache_clean_area -#define cpu_set_pte_ext processor.set_pte_ext -#define cpu_do_switch_mm processor.switch_mm - -/* These three are private to arch/arm/kernel/suspend.c */ -#define cpu_do_suspend processor.do_suspend -#define cpu_do_resume processor.do_resume -#endif - -extern void cpu_resume(void); - -#include - -#ifdef CONFIG_MMU - -#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) - -#ifdef CONFIG_ARM_LPAE - -#define cpu_get_ttbr(nr) \ - ({ \ - u64 ttbr; \ - __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ - : "=r" (ttbr)); \ - ttbr; \ - }) - -#define cpu_set_ttbr(nr, val) \ - do { \ - u64 ttbr = val; \ - __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ - : : "r" (ttbr)); \ - } while (0) - -#define cpu_get_pgd() \ - ({ \ - u64 pg = cpu_get_ttbr(0); \ - pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ - (pgd_t *)phys_to_virt(pg); \ - }) -#else -#define cpu_get_pgd() \ - ({ \ - unsigned long pg; \ - __asm__("mrc p15, 0, %0, c2, c0, 0" \ - : "=r" (pg) : : "cc"); \ - pg &= ~0x3fff; \ - (pgd_t *)phys_to_virt(pg); \ - }) -#endif - -#else /*!CONFIG_MMU */ - -#define cpu_switch_mm(pgd,mm) { } - -#endif - -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ -#endif /* __ASM_PROCFNS_H */ diff --git a/arch/lib/include/asm/proc-fns.h b/arch/lib/include/asm/proc-fns.h new file mode 120000 index 000000000000..18838a47591c --- /dev/null +++ b/arch/lib/include/asm/proc-fns.h @@ -0,0 +1 @@ +../../../arm/include/asm/proc-fns.h \ No newline at end of file diff --git a/arch/lib/include/asm/tlbflush.h b/arch/lib/include/asm/tlbflush.h deleted file mode 100644 index def9e570199f..000000000000 --- a/arch/lib/include/asm/tlbflush.h +++ /dev/null @@ -1,686 +0,0 @@ -/* - * arch/arm/include/asm/tlbflush.h - * - * Copyright (C) 1999-2003 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
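The MULTI_CPU machinery above is the classic single-kernel-image dispatch trick: the same call site either binds directly to one CPU's implementation at build time, or indirects through the processor method table filled in at probe time. A minimal standalone model of the pattern (v7_do_idle and the one-slot struct are invented for illustration):

#include <stdio.h>

struct processor {
	void (*_do_idle)(void);
};

static void v7_do_idle(void) { puts("wfi"); }

#ifdef MULTI_CPU
static struct processor processor;	/* filled in at CPU probe time */
#define cpu_do_idle processor._do_idle
#else
#define cpu_do_idle v7_do_idle		/* direct call, no indirection */
#endif

int main(void)
{
#ifdef MULTI_CPU
	processor._do_idle = v7_do_idle;
#endif
	cpu_do_idle();	/* same call site either way */
	return 0;
}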
- */ -#ifndef _ASMARM_TLBFLUSH_H -#define _ASMARM_TLBFLUSH_H - -#ifdef CONFIG_MMU - -#include - -#define TLB_V4_U_PAGE (1 << 1) -#define TLB_V4_D_PAGE (1 << 2) -#define TLB_V4_I_PAGE (1 << 3) -#define TLB_V6_U_PAGE (1 << 4) -#define TLB_V6_D_PAGE (1 << 5) -#define TLB_V6_I_PAGE (1 << 6) - -#define TLB_V4_U_FULL (1 << 9) -#define TLB_V4_D_FULL (1 << 10) -#define TLB_V4_I_FULL (1 << 11) -#define TLB_V6_U_FULL (1 << 12) -#define TLB_V6_D_FULL (1 << 13) -#define TLB_V6_I_FULL (1 << 14) - -#define TLB_V6_U_ASID (1 << 16) -#define TLB_V6_D_ASID (1 << 17) -#define TLB_V6_I_ASID (1 << 18) - -#define TLB_V6_BP (1 << 19) - -/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ -#define TLB_V7_UIS_PAGE (1 << 20) -#define TLB_V7_UIS_FULL (1 << 21) -#define TLB_V7_UIS_ASID (1 << 22) -#define TLB_V7_UIS_BP (1 << 23) - -#define TLB_BARRIER (1 << 28) -#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ -#define TLB_DCLEAN (1 << 30) -#define TLB_WB (1 << 31) - -/* - * MMU TLB Model - * ============= - * - * We have the following to choose from: - * v4 - ARMv4 without write buffer - * v4wb - ARMv4 with write buffer without I TLB flush entry instruction - * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction - * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) - * fa - Faraday (v4 with write buffer with UTLB) - * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction - * v7wbi - identical to v6wbi - */ -#undef _TLB -#undef MULTI_TLB - -#ifdef CONFIG_SMP_ON_UP -#define MULTI_TLB 1 -#endif - -#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) - -#ifdef CONFIG_CPU_TLB_V4WT -# define v4_possible_flags v4_tlb_flags -# define v4_always_flags v4_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4 -# endif -#else -# define v4_possible_flags 0 -# define v4_always_flags (-1UL) -#endif - -#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ - TLB_V4_U_FULL | TLB_V4_U_PAGE) - -#ifdef CONFIG_CPU_TLB_FA -# define fa_possible_flags fa_tlb_flags -# define fa_always_flags fa_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB fa -# endif -#else -# define fa_possible_flags 0 -# define fa_always_flags (-1UL) -#endif - -#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ - TLB_V4_I_FULL | TLB_V4_D_FULL | \ - TLB_V4_I_PAGE | TLB_V4_D_PAGE) - -#ifdef CONFIG_CPU_TLB_V4WBI -# define v4wbi_possible_flags v4wbi_tlb_flags -# define v4wbi_always_flags v4wbi_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4wbi -# endif -#else -# define v4wbi_possible_flags 0 -# define v4wbi_always_flags (-1UL) -#endif - -#define fr_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \ - TLB_V4_I_FULL | TLB_V4_D_FULL | \ - TLB_V4_I_PAGE | TLB_V4_D_PAGE) - -#ifdef CONFIG_CPU_TLB_FEROCEON -# define fr_possible_flags fr_tlb_flags -# define fr_always_flags fr_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4wbi -# endif -#else -# define fr_possible_flags 0 -# define fr_always_flags (-1UL) -#endif - -#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \ - TLB_V4_I_FULL | TLB_V4_D_FULL | \ - TLB_V4_D_PAGE) - -#ifdef CONFIG_CPU_TLB_V4WB -# define v4wb_possible_flags v4wb_tlb_flags -# define v4wb_always_flags v4wb_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4wb -# endif -#else -# define v4wb_possible_flags 0 -# define v4wb_always_flags (-1UL) -#endif - -#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ - TLB_V6_I_FULL | TLB_V6_D_FULL | \ - TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ - TLB_V6_I_ASID | 
TLB_V6_D_ASID | \ - TLB_V6_BP) - -#ifdef CONFIG_CPU_TLB_V6 -# define v6wbi_possible_flags v6wbi_tlb_flags -# define v6wbi_always_flags v6wbi_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v6wbi -# endif -#else -# define v6wbi_possible_flags 0 -# define v6wbi_always_flags (-1UL) -#endif - -#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \ - TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ - TLB_V7_UIS_ASID | TLB_V7_UIS_BP) -#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ - TLB_V6_U_FULL | TLB_V6_U_PAGE | \ - TLB_V6_U_ASID | TLB_V6_BP) - -#ifdef CONFIG_CPU_TLB_V7 - -# ifdef CONFIG_SMP_ON_UP -# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up) -# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up) -# elif defined(CONFIG_SMP) -# define v7wbi_possible_flags v7wbi_tlb_flags_smp -# define v7wbi_always_flags v7wbi_tlb_flags_smp -# else -# define v7wbi_possible_flags v7wbi_tlb_flags_up -# define v7wbi_always_flags v7wbi_tlb_flags_up -# endif -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v7wbi -# endif -#else -# define v7wbi_possible_flags 0 -# define v7wbi_always_flags (-1UL) -#endif - -#ifndef _TLB -#error Unknown TLB model -#endif - -#ifndef __ASSEMBLY__ - -#include - -struct cpu_tlb_fns { - void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); - void (*flush_kern_range)(unsigned long, unsigned long); - unsigned long tlb_flags; -}; - -/* - * Select the calling method - */ -#ifdef MULTI_TLB - -#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range -#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range - -#else - -#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range) -#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range) - -extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); -extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); - -#endif - -extern struct cpu_tlb_fns cpu_tlb; - -#define __cpu_tlb_flags cpu_tlb.tlb_flags - -/* - * TLB Management - * ============== - * - * The arch/arm/mm/tlb-*.S files implement these methods. - * - * The TLB specific code is expected to perform whatever tests it - * needs to determine if it should invalidate the TLB for each - * call. Start addresses are inclusive and end addresses are - * exclusive; it is safe to round these addresses down. - * - * flush_tlb_all() - * - * Invalidate the entire TLB. - * - * flush_tlb_mm(mm) - * - * Invalidate all TLB entries in a particular address - * space. - * - mm - mm_struct describing address space - * - * flush_tlb_range(mm,start,end) - * - * Invalidate a range of TLB entries in the specified - * address space. - * - mm - mm_struct describing address space - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - * flush_tlb_page(vaddr,vma) - * - * Invalidate the specified page in the specified address range. - * - vaddr - virtual address (may not be aligned) - * - vma - vma_struct describing address range - * - * flush_kern_tlb_page(kaddr) - * - * Invalidate the TLB entry for the specified page. The address - * will be in the kernels virtual memory space. Current uses - * only require the D-TLB to be invalidated. 
- * - kaddr - Kernel virtual memory address - */ - -/* - * We optimise the code below by: - * - building a set of TLB flags that might be set in __cpu_tlb_flags - * - building a set of TLB flags that will always be set in __cpu_tlb_flags - * - if we're going to need __cpu_tlb_flags, access it once and only once - * - * This allows us to build optimal assembly for the single-CPU type case, - * and as close to optimal given the compiler constrants for multi-CPU - * case. We could do better for the multi-CPU case if the compiler - * implemented the "%?" method, but this has been discontinued due to too - * many people getting it wrong. - */ -#define possible_tlb_flags (v4_possible_flags | \ - v4wbi_possible_flags | \ - fr_possible_flags | \ - v4wb_possible_flags | \ - fa_possible_flags | \ - v6wbi_possible_flags | \ - v7wbi_possible_flags) - -#define always_tlb_flags (v4_always_flags & \ - v4wbi_always_flags & \ - fr_always_flags & \ - v4wb_always_flags & \ - fa_always_flags & \ - v6wbi_always_flags & \ - v7wbi_always_flags) - -#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) - -#define __tlb_op(f, insnarg, arg) \ - do { \ - if (always_tlb_flags & (f)) \ - asm("mcr " insnarg \ - : : "r" (arg) : "cc"); \ - else if (possible_tlb_flags & (f)) \ - asm("tst %1, %2\n\t" \ - "mcrne " insnarg \ - : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \ - : "cc"); \ - } while (0) - -#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) -#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) - -static inline void __local_flush_tlb_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); - tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); - tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); -} - -static inline void local_flush_tlb_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - dsb(nshst); - - __local_flush_tlb_all(); - tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero); - - if (tlb_flag(TLB_BARRIER)) { - dsb(nsh); - isb(); - } -} - -static inline void __flush_tlb_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - dsb(ishst); - - __local_flush_tlb_all(); - tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); - - if (tlb_flag(TLB_BARRIER)) { - dsb(ish); - isb(); - } -} - -static inline void __local_flush_tlb_mm(struct mm_struct *mm) -{ - const int zero = 0; - const int asid = ASID(mm); - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { - tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); - tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); - tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); - } - } - - tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); - tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); - tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); -} - -static inline void local_flush_tlb_mm(struct mm_struct *mm) -{ - const int asid = ASID(mm); - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - dsb(nshst); - - __local_flush_tlb_mm(mm); - tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid); - - if (tlb_flag(TLB_BARRIER)) - dsb(nsh); -} - -static inline void __flush_tlb_mm(struct mm_struct *mm) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - dsb(ishst); - - __local_flush_tlb_mm(mm); -#ifdef 
CONFIG_ARM_ERRATA_720789 - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0); -#else - tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); -#endif - - if (tlb_flag(TLB_BARRIER)) - dsb(ish); -} - -static inline void -__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - - if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && - cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); - tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); - tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); - if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); - } - - tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); - tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); - tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); -} - -static inline void -local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - - if (tlb_flag(TLB_WB)) - dsb(nshst); - - __local_flush_tlb_page(vma, uaddr); - tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); - - if (tlb_flag(TLB_BARRIER)) - dsb(nsh); -} - -static inline void -__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - - if (tlb_flag(TLB_WB)) - dsb(ishst); - - __local_flush_tlb_page(vma, uaddr); -#ifdef CONFIG_ARM_ERRATA_720789 - tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); -#else - tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr); -#endif - - if (tlb_flag(TLB_BARRIER)) - dsb(ish); -} - -static inline void __local_flush_tlb_kernel_page(unsigned long kaddr) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); - tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); - tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); - if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) - asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); - - tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); - tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); - tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); -} - -static inline void local_flush_tlb_kernel_page(unsigned long kaddr) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - kaddr &= PAGE_MASK; - - if (tlb_flag(TLB_WB)) - dsb(nshst); - - __local_flush_tlb_kernel_page(kaddr); - tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); - - if (tlb_flag(TLB_BARRIER)) { - dsb(nsh); - isb(); - } -} - -static inline void __flush_tlb_kernel_page(unsigned long kaddr) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - kaddr &= PAGE_MASK; - - if (tlb_flag(TLB_WB)) - dsb(ishst); - - __local_flush_tlb_kernel_page(kaddr); - tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); - - if (tlb_flag(TLB_BARRIER)) { - dsb(ish); - isb(); - } -} - -/* - * Branch predictor maintenance is paired with full TLB invalidation, so - * there is no need for any barriers here. 
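The payoff of the always/possible flag sets driving tlb_op above: both sets are compile-time constants, so each operation folds to an unconditional instruction, a runtime-tested instruction, or nothing at all. A rough standalone model of that narrowing (printf stands in for the mcr; the flag values are arbitrary):

#include <stdio.h>

#define ALWAYS_TLB_FLAGS	0x1u	/* set on every supported TLB  */
#define POSSIBLE_TLB_FLAGS	0x3u	/* set on at least one of them */

static void tlb_op(unsigned int f, unsigned int cpu_flags, const char *op)
{
	if (ALWAYS_TLB_FLAGS & f)		/* test folds away */
		printf("%s: unconditional\n", op);
	else if (POSSIBLE_TLB_FLAGS & f) {	/* cheap runtime test kept */
		if (cpu_flags & f)
			printf("%s: after runtime test\n", op);
	}
	/* neither: the whole call is compiled out */
}

int main(void)
{
	tlb_op(0x1, 0x1, "full flush");	/* always-set path   */
	tlb_op(0x2, 0x3, "page flush");	/* runtime-test path */
	tlb_op(0x4, 0x3, "asid flush");	/* folds to nothing  */
	return 0;
}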
- */ -static inline void __local_flush_bp_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_V6_BP)) - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); -} - -static inline void local_flush_bp_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - __local_flush_bp_all(); - if (tlb_flag(TLB_V7_UIS_BP)) - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); -} - -static inline void __flush_bp_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - __local_flush_bp_all(); - if (tlb_flag(TLB_V7_UIS_BP)) - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); -} - -/* - * flush_pmd_entry - * - * Flush a PMD entry (word aligned, or double-word aligned) to - * RAM if the TLB for the CPU we are running on requires this. - * This is typically used when we are creating PMD entries. - * - * clean_pmd_entry - * - * Clean (but don't drain the write buffer) if the CPU requires - * these operations. This is typically used when we are removing - * PMD entries. - */ -static inline void flush_pmd_entry(void *pmd) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); - tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); - - if (tlb_flag(TLB_WB)) - dsb(ishst); -} - -static inline void clean_pmd_entry(void *pmd) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); - tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); -} - -#undef tlb_op -#undef tlb_flag -#undef always_tlb_flags -#undef possible_tlb_flags - -/* - * Convert calls to our calling convention. - */ -#define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) -#define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) - -#ifndef CONFIG_SMP -#define flush_tlb_all local_flush_tlb_all -#define flush_tlb_mm local_flush_tlb_mm -#define flush_tlb_page local_flush_tlb_page -#define flush_tlb_kernel_page local_flush_tlb_kernel_page -#define flush_tlb_range local_flush_tlb_range -#define flush_tlb_kernel_range local_flush_tlb_kernel_range -#define flush_bp_all local_flush_bp_all -#else -extern void flush_tlb_all(void); -extern void flush_tlb_mm(struct mm_struct *mm); -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); -extern void flush_tlb_kernel_page(unsigned long kaddr); -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); -extern void flush_bp_all(void); -#endif - -/* - * If PG_dcache_clean is not set for the page, we need to ensure that any - * cache entries for the kernels virtual memory range are written - * back to the page. On ARMv6 and later, the cache coherency is handled via - * the set_pte_at() function. 
- */ -#if __LINUX_ARM_ARCH__ < 6 -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, - pte_t *ptep); -#else -static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) -{ -} -#endif - -#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) - -#endif - -#elif defined(CONFIG_SMP) /* !CONFIG_MMU */ - -#ifndef __ASSEMBLY__ - -#include - -static inline void local_flush_tlb_all(void) { } -static inline void local_flush_tlb_mm(struct mm_struct *mm) { } -static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } -static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { } -static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } -static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { } -static inline void local_flush_bp_all(void) { } - -extern void flush_tlb_all(void); -extern void flush_tlb_mm(struct mm_struct *mm); -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); -extern void flush_tlb_kernel_page(unsigned long kaddr); -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); -extern void flush_bp_all(void); -#endif /* __ASSEMBLY__ */ - -#endif - -#ifndef __ASSEMBLY__ -#ifdef CONFIG_ARM_ERRATA_798181 -extern void erratum_a15_798181_init(void); -#else -static inline void erratum_a15_798181_init(void) {} -#endif -extern bool (*erratum_a15_798181_handler)(void); - -static inline bool erratum_a15_798181(void) -{ - if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) && - erratum_a15_798181_handler)) - return erratum_a15_798181_handler(); - return false; -} -#endif - -#endif diff --git a/arch/lib/include/asm/tlbflush.h b/arch/lib/include/asm/tlbflush.h new file mode 120000 index 000000000000..03e072b029ab --- /dev/null +++ b/arch/lib/include/asm/tlbflush.h @@ -0,0 +1 @@ +../../../arm/include/asm/tlbflush.h \ No newline at end of file diff --git a/arch/lib/lib.h b/arch/lib/lib.h index abf2a2628bb2..a8be78ea69f9 100644 --- a/arch/lib/lib.h +++ b/arch/lib/lib.h @@ -18,4 +18,8 @@ struct SimTask { void *private; }; +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +extern int memblock_is_memory(phys_addr_t addr); +#endif + #endif /* LIB_H */ diff --git a/mm/bootmem.c b/mm/bootmem.c index ae9d08f1f0e3..24131ca5013a 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -281,7 +281,6 @@ unsigned long __init free_all_bootmem(void) total_pages += free_all_bootmem_core(bdata); totalram_pages += total_pages; - return total_pages; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c2802fe7b857..ad4a56d30092 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -117,7 +117,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock); unsigned long totalram_pages __read_mostly; #endif - static void print_buddy_freelist(void) { struct zone *zone; @@ -127,28 +126,26 @@ static void print_buddy_freelist(void) int i = 0; for_each_zone(zone) { - printk(KERN_INFO "I am zone %s %lu\n", zone->name, zone->present_pages); + pr_info("For zone %s %lu\n", zone->name, + zone->present_pages); if (zone->present_pages == 0) goto out; - + for_each_migratetype_order(order, t) { - list_for_each(curr, &zone->free_area[order].free_list[t]) { - pfn = page_to_pfn(list_entry(curr, struct page, lru)); + struct free_area area = zone->free_area[order]; - printk(KERN_INFO "%lu %d %d %d\n",pfn, order, t, i); + 
list_for_each(curr, &area.free_list[t]) { + pfn = page_to_pfn(list_entry(curr, + struct page, lru)); + pr_info("%lu %d %d %d\n", pfn, order, t, i); i++; } - } } out: - printk(KERN_INFO "Totoal free page2: %d\n", i); + pr_info("Total free pages: %d\n", i); } - - - - unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; /* @@ -3202,10 +3199,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, return page; } -#ifdef CONFIG_LIB -extern char *total_ram; -#endif - /* * This is the 'heart' of the zoned buddy allocator. */ @@ -3287,7 +3280,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, goto retry_cpuset; #ifdef CONFIG_LIB - printk(KERN_INFO "Done: I am %s %lu\n", __func__, page_to_pfn(page)); page->virtual = (void *)total_ram + (page_to_pfn(page) << PAGE_SHIFT); #endif return page; @@ -3346,11 +3338,10 @@ EXPORT_SYMBOL(free_pages); void free_pages(unsigned long addr, unsigned int order) { unsigned long pfn = addr - (unsigned long) total_ram; - pfn = pfn >> PAGE_SHIFT; - if (pfn != 0) { + pfn = pfn >> PAGE_SHIFT; + if (pfn != 0) __free_pages(pfn_to_page(pfn), order); - } } EXPORT_SYMBOL(free_pages); #endif @@ -3656,6 +3647,7 @@ void si_meminfo(struct sysinfo *val) val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } + EXPORT_SYMBOL(si_meminfo); #endif @@ -6067,7 +6059,7 @@ void __init mem_init_print_info(const char *str) #endif str ? ", " : "", str ? str : ""); } -#endif +#endif /** * set_dma_reserve - set the specified number of pages reserved in the first zone diff --git a/mm/slib_env.c b/mm/slib_env.c index 8a97fd3ece61..e2b7de74cc8c 100644 --- a/mm/slib_env.c +++ b/mm/slib_env.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -24,7 +24,7 @@ struct meminfo meminfo; static void * __initdata vmalloc_min = (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); -phys_addr_t arm_lowmem_limit __initdata = 0; +phys_addr_t arm_lowmem_limit __initdata; unsigned int cacheid __read_mostly; @@ -109,20 +109,11 @@ static void __init free_highpages(void) unsigned long max_low = max_low_pfn; struct memblock_region *mem, *res; - printk("max_low_pfn:%lu\n", max_low_pfn); - printk("min_low_pfn:%lu\n", min_low_pfn); - printk("max_pfn:%lu\n", max_pfn); - - /* set highmem page free */ for_each_memblock(memory, mem) { unsigned long start = memblock_region_memory_base_pfn(mem); unsigned long end = memblock_region_memory_end_pfn(mem); - printk("start:%lu\n", start); - printk("end:%lu\n", end); - - /* Ignore complete lowmem entries */ if (end <= max_low) continue; @@ -172,11 +163,7 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ free_unused_memmap(); free_all_bootmem(); - free_highpages(); - - //mem_init_print_info(NULL); - } static void __init zone_sizes_init(unsigned long min, unsigned long max_low, @@ -211,11 +198,13 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, if (start < max_low) { unsigned long low_end = min(end, max_low); + zhole_size[0] -= low_end - start; } #ifdef CONFIG_HIGHMEM if (end > max_low) { unsigned long high_start = max(start, max_low); + zhole_size[ZONE_HIGHMEM] -= end - high_start; } #endif @@ -260,7 +249,7 @@ int __init arm_add_memory(u64 start, u64 size) start = aligned_start; size = size & ~(phys_addr_t)(PAGE_SIZE - 1); - printk("I am %s start:%llu, size:%llu\n", __func__, start, size); + pr_info("[%s] start:%llu, size:%llu\n", __func__, start, size); /* * Check whether this memory region has non-zero size
or @@ -329,7 +318,7 @@ static void __init arm_bootmem_init(unsigned long start_pfn, if (start >= end) break; reserve_bootmem(__pfn_to_phys(start), - (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); + (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); } } @@ -339,10 +328,9 @@ void __init bootmem_init(void) find_limits(&min, &max_low, &max_high); - printk("min:%lu\n", min); - printk("max_low:%lu\n", max_low); - printk("max_high:%lu\n", max_high); - + pr_info("min:%lu\n", min); + pr_info("max_low:%lu\n", max_low); + pr_info("max_high:%lu\n", max_high); zone_sizes_init(min, max_low, max_high); @@ -388,10 +376,11 @@ void __init sanity_check_meminfo(void) } if (reg->size > size_limit) { - phys_addr_t overlap_size = reg->size - size_limit; + phys_addr_t overlap_size = + reg->size - size_limit; pr_notice("Truncating RAM at %pa-%pa to -%pa", - &block_start, &block_end, &vmalloc_limit); + &block_start, &block_end, &vmalloc_limit); memblock_remove(vmalloc_limit, overlap_size); block_end = vmalloc_limit; } @@ -442,18 +431,19 @@ void __init sanity_check_meminfo(void) memblock_set_current_limit(memblock_limit); } -char *total_ram = NULL; +char *total_ram; void __init setup_arch(char **cmd) { int ret; + ret = arm_add_memory(0, 1024 * 1024 * 1024 * 1); if (ret) - printk("arm_add_memory failed in %s\n", __func__); + pr_info("arm_add_memory failed in %s\n", __func__); - total_ram = lib_malloc(1024 * 1024 * 1024 * 1); + total_ram = lib_malloc(1024 * 1024 * 1024 * 1); if (total_ram == NULL) - printk("Alloc memory failed in %s\n", __func__); + pr_info("Alloc memory failed in %s\n", __func__); sanity_check_meminfo(); arm_memblock_init(); @@ -489,9 +479,6 @@ void __init init_memory_system(void) void test(void) { pg_data_t *pgdat = NODE_DATA(nid); + alloc_pages(GFP_KERNEL, 1); - - //printk("I am printk: %p, %p, %d\n", pgdat->node_zones, - // pgdat->node_zonelists, - // pgdat->nr_zones); } diff --git a/mm/slib_env.h b/mm/slib_env.h index e67103ec7326..09693f3ff4b7 100644 --- a/mm/slib_env.h +++ b/mm/slib_env.h @@ -16,7 +16,6 @@ #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Memory map description: from arm/include/asm/setup.h */ @@ -27,35 +26,29 @@ #endif struct membank { - phys_addr_t start; - unsigned long size; - unsigned int highmem; + phys_addr_t start; + unsigned long size; + unsigned int highmem; }; struct meminfo { - int nr_banks; - struct membank bank[NR_BANKS]; + int nr_banks; + struct membank bank[NR_BANKS]; }; extern struct meminfo meminfo; -#define for_each_bank(iter,mi) \ - for (iter = 0; iter < (mi)->nr_banks; iter++) +#define for_each_bank(iter, mi) \ + for (iter = 0; iter < (mi)->nr_banks; iter++) #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) #define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT) -#define bank_phys_start(bank) (bank)->start +#define bank_phys_start(bank) ((bank)->start) #define bank_phys_end(bank) ((bank)->start + (bank)->size) -#define bank_phys_size(bank) (bank)->size - -/* From arm/mm/mmu.c */ -//pgprot_t pgprot_user; -//pgprot_t pgprot_kernel; -//pgprot_t pgprot_hyp_device; -//pgprot_t pgprot_s2; -//pgprot_t pgprot_s2_device; +#define bank_phys_size(bank) ((bank)->size) void __init init_memory_system(void); +extern char *total_ram; #endif From 34af0b6f0349e14f7d7e5a6da55455c71409584c Mon Sep 17 00:00:00 2001 From: jyizheng Date: Mon, 7 Sep 2015 01:29:12 -0400 Subject: [PATCH 26/29] Lib: 1) resolve undefined
symbol rumpns_irq_stat --- arch/lib/include/asm/hwcap.h | 1 + arch/lib/include/asm/ptrace-arm.h | 1 + arch/lib/include/asm/ptrace.h | 2 ++ arch/lib/include/asm/user.h | 1 + arch/lib/include/asm/vdso_datapage.h | 1 + arch/lib/include/uapi/asm/hwcap.h | 1 + arch/lib/include/uapi/asm/ptrace.h | 1 + arch/lib/softirq.c | 4 ++++ 8 files changed, 12 insertions(+) create mode 120000 arch/lib/include/asm/hwcap.h create mode 120000 arch/lib/include/asm/ptrace-arm.h create mode 120000 arch/lib/include/asm/user.h create mode 120000 arch/lib/include/asm/vdso_datapage.h create mode 120000 arch/lib/include/uapi/asm/hwcap.h create mode 120000 arch/lib/include/uapi/asm/ptrace.h diff --git a/arch/lib/include/asm/hwcap.h b/arch/lib/include/asm/hwcap.h new file mode 120000 index 000000000000..637fa7d2cc6b --- /dev/null +++ b/arch/lib/include/asm/hwcap.h @@ -0,0 +1 @@ +../../../arm/include/asm/hwcap.h \ No newline at end of file diff --git a/arch/lib/include/asm/ptrace-arm.h b/arch/lib/include/asm/ptrace-arm.h new file mode 120000 index 000000000000..1e13abc0f023 --- /dev/null +++ b/arch/lib/include/asm/ptrace-arm.h @@ -0,0 +1 @@ +../../../arm/include/asm/ptrace.h \ No newline at end of file diff --git a/arch/lib/include/asm/ptrace.h b/arch/lib/include/asm/ptrace.h index ddd97080b759..46551c8fdda0 100644 --- a/arch/lib/include/asm/ptrace.h +++ b/arch/lib/include/asm/ptrace.h @@ -1,4 +1,6 @@ #ifndef _ASM_SIM_PTRACE_H #define _ASM_SIM_PTRACE_H +#include + #endif /* _ASM_SIM_PTRACE_H */ diff --git a/arch/lib/include/asm/user.h b/arch/lib/include/asm/user.h new file mode 120000 index 000000000000..36a6cda21e4a --- /dev/null +++ b/arch/lib/include/asm/user.h @@ -0,0 +1 @@ +../../../arm/include/asm/user.h \ No newline at end of file diff --git a/arch/lib/include/asm/vdso_datapage.h b/arch/lib/include/asm/vdso_datapage.h new file mode 120000 index 000000000000..46b4b5e75db8 --- /dev/null +++ b/arch/lib/include/asm/vdso_datapage.h @@ -0,0 +1 @@ +../../../arm/include/asm/vdso_datapage.h \ No newline at end of file diff --git a/arch/lib/include/uapi/asm/hwcap.h b/arch/lib/include/uapi/asm/hwcap.h new file mode 120000 index 000000000000..f4557b5629aa --- /dev/null +++ b/arch/lib/include/uapi/asm/hwcap.h @@ -0,0 +1 @@ +../../../../arm/include/uapi/asm/hwcap.h \ No newline at end of file diff --git a/arch/lib/include/uapi/asm/ptrace.h b/arch/lib/include/uapi/asm/ptrace.h new file mode 120000 index 000000000000..3eb86732ae4c --- /dev/null +++ b/arch/lib/include/uapi/asm/ptrace.h @@ -0,0 +1 @@ +../../../../arm/include/uapi/asm/ptrace.h \ No newline at end of file diff --git a/arch/lib/softirq.c b/arch/lib/softirq.c index 3f6363a70317..88699a381562 100644 --- a/arch/lib/softirq.c +++ b/arch/lib/softirq.c @@ -11,6 +11,10 @@ #include "sim.h" #include "sim-assert.h" +#ifndef __ARCH_IRQ_STAT +irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; +EXPORT_SYMBOL(irq_stat); +#endif static struct softirq_action softirq_vec[NR_SOFTIRQS]; static struct SimTask *g_softirq_task = 0; From 7b04e147da7ada539e5d610ab00b6da3f4957179 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Mon, 7 Sep 2015 01:44:58 -0400 Subject: [PATCH 27/29] disable compile time check --- arch/lib/include/asm/barrier.h | 89 +++++++++++++++++++++++++++++++++- 1 file changed, 88 insertions(+), 1 deletion(-) mode change 120000 => 100644 arch/lib/include/asm/barrier.h diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h deleted file mode 120000 index 472ea0ae7c94..000000000000 --- a/arch/lib/include/asm/barrier.h +++ /dev/null @@ -1 +0,0 @@ 
-../../../arm/include/asm/barrier.h \ No newline at end of file diff --git a/arch/lib/include/asm/barrier.h b/arch/lib/include/asm/barrier.h new file mode 100644 index 000000000000..d7a40f4040c8 --- /dev/null +++ b/arch/lib/include/asm/barrier.h @@ -0,0 +1,88 @@ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#include + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#else +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() do { dsb(st); outer_sync(); } while (0) +#define dma_rmb() dmb(osh) +#define dma_wmb() dmb(oshst) +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() dmb(ish) +#define smp_rmb() smp_mb() +#define smp_wmb() dmb(ishst) +#endif + +#define smp_store_release(p, v) \ +do { \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) + +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ From a7c9ded5be7d747e39eb8fec7f5ee6200715331f Mon Sep 17 00:00:00 2001 From: jyizheng Date: Mon, 7 Sep 2015 23:14:43 -0400 Subject: [PATCH 28/29] resolve pull request comments --- arch/lib/Makefile | 2 +- mm/bootmem.c | 1 + mm/highmem.c | 2 ++ mm/page_alloc.c | 1 + mm/slib_env.c | 2 +- mm/slib_env.h | 2 +- 6 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index b9a74506ce71..6f422f773c79 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -156,7 +156,7 @@ define cmd_lib_bounds echo "#define 
GENERATED_BOUNDS_H"; \ echo ""; \ echo "#define NR_PAGEFLAGS (__NR_PAGEFLAGS)"; \ - echo "#define MAX_NR_ZONES __MAX_NR_ZONES"; \ + echo "#define MAX_NR_ZONES (__MAX_NR_ZONES)"; \ echo ""; \ echo "#endif /* GENERATED_BOUNDS_H */") > $@ endef diff --git a/mm/bootmem.c b/mm/bootmem.c index 24131ca5013a..a23dd1934654 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -281,6 +281,7 @@ unsigned long __init free_all_bootmem(void) total_pages += free_all_bootmem_core(bdata); totalram_pages += total_pages; + return total_pages; } diff --git a/mm/highmem.c b/mm/highmem.c index b606040d5db9..5e72abe70642 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -29,6 +29,7 @@ #include #include + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif @@ -296,6 +297,7 @@ void *kmap_high(struct page *page) unlock_kmap(); return (void*) vaddr; } + EXPORT_SYMBOL(kmap_high); #ifdef ARCH_NEEDS_KMAP_HIGH_GET diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ad4a56d30092..27fe23b169a9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4699,6 +4699,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, */ if (!(pfn & (pageblock_nr_pages - 1))) { struct page *page = pfn_to_page(pfn); + __init_single_page(page, pfn, zone, nid); set_pageblock_migratetype(page, MIGRATE_MOVABLE); } else { diff --git a/mm/slib_env.c b/mm/slib_env.c index e2b7de74cc8c..0f253d110039 100644 --- a/mm/slib_env.c +++ b/mm/slib_env.c @@ -1,7 +1,7 @@ /* * Library Slab Allocator (SLIB) * - * Copyright (c) 2015 INRIA, Hajime Tazaki + * Copyright (c) 2015 Yizheng Jiao * * Author: Yizheng Jiao */ diff --git a/mm/slib_env.h b/mm/slib_env.h index 09693f3ff4b7..6968b746eebc 100644 --- a/mm/slib_env.h +++ b/mm/slib_env.h @@ -1,7 +1,7 @@ /* * Library Slab Allocator (SLIB) * - * Copyright (c) 2015 INRIA, Hajime Tazaki + * Copyright (c) 2015 Yizheng Jiao * * Author: Yizheng Jiao */ From a68bf58ae803f1c5b2e345ec89c00fefa58b3399 Mon Sep 17 00:00:00 2001 From: jyizheng Date: Mon, 7 Sep 2015 23:20:15 -0400 Subject: [PATCH 29/29] add something missing in the last commit --- arch/lib/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/lib/Makefile b/arch/lib/Makefile index 6f422f773c79..50e25007fa60 100644 --- a/arch/lib/Makefile +++ b/arch/lib/Makefile @@ -128,7 +128,7 @@ quiet_cmd_objsmk = OBJS-MK $@ done > $@ $(ARCH_DIR)/objs.mk: $(ARCH_DIR)/Makefile.print $(srctree)/.config $(ARCH_DIR)/Makefile - +$(call if_changed,objsmk); \ + +$(call if_changed,objsmk); quiet_cmd_linker = GEN $@ cmd_linker = ld -shared --verbose | ./$^ > $@
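One closing note on the barrier.h rewrite in patch 27: smp_store_release()/smp_load_acquire() exist to pair a flag update with the data it publishes. A sketch of the intended usage pattern in plain C11 atomics (producer/consumer, flag and data are hypothetical names; this is an analogue of the macros, not the kernel code itself):

#include <stdatomic.h>

static int data;
static atomic_int flag;

/* writer side: analogous to  data = 42; smp_store_release(&flag, 1); */
static void producer(void)
{
	data = 42;
	atomic_store_explicit(&flag, 1, memory_order_release);
}

/* reader side: analogous to  if (smp_load_acquire(&flag)) use(data); */
static int consumer(void)
{
	if (atomic_load_explicit(&flag, memory_order_acquire))
		return data;	/* ordering guarantees this sees 42 */
	return -1;
}

int main(void)
{
	producer();	/* in real use these run on different CPUs */
	return consumer() == 42 ? 0 : 1;
}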