Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h              |   6
-rw-r--r--  include/asm-generic/checksum.h            |   4
-rw-r--r--  include/asm-generic/cmpxchg-local.h       |   8
-rw-r--r--  include/asm-generic/cmpxchg.h             |  10
-rw-r--r--  include/asm-generic/cputime.h             |  66
-rw-r--r--  include/asm-generic/cputime_jiffies.h     |  72
-rw-r--r--  include/asm-generic/cputime_nsecs.h       | 104
-rw-r--r--  include/asm-generic/dma-mapping-broken.h  |  16
-rw-r--r--  include/asm-generic/gpio.h                |  64
-rw-r--r--  include/asm-generic/io.h                  |  94
-rw-r--r--  include/asm-generic/mmu.h                 |   6
-rw-r--r--  include/asm-generic/parport.h             |   4
-rw-r--r--  include/asm-generic/pgtable.h             | 144
-rw-r--r--  include/asm-generic/signal.h              |   2
-rw-r--r--  include/asm-generic/syscalls.h            |  34
-rw-r--r--  include/asm-generic/tlb.h                 |   9
-rw-r--r--  include/asm-generic/trace_clock.h         |  16
-rw-r--r--  include/asm-generic/uaccess.h             |  14
-rw-r--r--  include/asm-generic/unistd.h              |  12
-rw-r--r--  include/asm-generic/vmlinux.lds.h         |  32
20 files changed, 539 insertions, 178 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1ced6413ea03..33bd2de3bc1e 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -136,12 +136,6 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index c084767c88bc..59811df58c5b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
 	csum_partial_copy((src), (dst), (len), (sum))
 #endif
 
+#ifndef ip_fast_csum
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
 
+#ifndef csum_fold
 /*
  * Fold a partial checksum
  */
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
 	sum = (sum & 0xffff) + (sum >> 16);
 	return (__force __sum16)~sum;
 }
+#endif
 
 #ifndef csum_tcpudp_nofold
 /*
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 2533fddd34a6..d8d4c898c1bb 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	if (size == 8 && sizeof(unsigned long) != 8)
 		wrong_size_cmpxchg(ptr);
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
 		if (prev == old)
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	default:
 		wrong_size_cmpxchg(ptr);
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 	return prev;
 }
 
@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
 	u64 prev;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	prev = *(u64 *)ptr;
 	if (prev == old)
 		*(u64 *)ptr = new;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 	return prev;
 }
 
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 14883026015d..811fb1e9b061 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -92,6 +92,16 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
  */
 #include <asm-generic/cmpxchg-local.h>
 
+#ifndef cmpxchg_local
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifndef cmpxchg64_local
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
 #define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
 
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 9a62937c56ca..51969436b8b8 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,66 +4,12 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long __nocast cputime_t;
-
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
-#define cputime_to_scaled(__ct)		(__ct)
-#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
-
-typedef u64 __nocast cputime64_t;
-
-#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
-#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
-
-#define nsecs_to_cputime64(__ct)	\
-	jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
-
-
-/*
- * Convert cputime to microseconds and back.
- */
-#define cputime_to_usecs(__ct)		\
-	jiffies_to_usecs(cputime_to_jiffies(__ct))
-#define usecs_to_cputime(__usec)	\
-	jiffies_to_cputime(usecs_to_jiffies(__usec))
-#define usecs_to_cputime64(__usec)	\
-	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
-
-/*
- * Convert cputime to seconds and back.
- */
-#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
-#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
-
-/*
- * Convert cputime to timespec and back.
- */
-#define timespec_to_cputime(__val)	\
-	jiffies_to_cputime(timespec_to_jiffies(__val))
-#define cputime_to_timespec(__ct,__val)	\
-	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to timeval and back.
- */
-#define timeval_to_cputime(__val)	\
-	jiffies_to_cputime(timeval_to_jiffies(__val))
-#define cputime_to_timeval(__ct,__val)	\
-	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to clock and back.
- */
-#define cputime_to_clock_t(__ct)	\
-	jiffies_to_clock_t(cputime_to_jiffies(__ct))
-#define clock_t_to_cputime(__x)		\
-	jiffies_to_cputime(clock_t_to_jiffies(__x))
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+# include <asm-generic/cputime_jiffies.h>
+#endif
 
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)	\
-	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# include <asm-generic/cputime_nsecs.h>
+#endif
 
 #endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
new file mode 100644
index 000000000000..272ecba9f588
--- /dev/null
+++ b/include/asm-generic/cputime_jiffies.h
@@ -0,0 +1,72 @@
+#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
+#define _ASM_GENERIC_CPUTIME_JIFFIES_H
+
+typedef unsigned long __nocast cputime_t;
+
+#define cputime_one_jiffy		jiffies_to_cputime(1)
+#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
+#define cputime_to_scaled(__ct)		(__ct)
+#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
+
+typedef u64 __nocast cputime64_t;
+
+#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
+
+
+/*
+ * Convert nanoseconds to cputime
+ */
+#define nsecs_to_cputime64(__nsec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
+#define nsecs_to_cputime(__nsec)	\
+	jiffies_to_cputime(nsecs_to_jiffies(__nsec))
+
+
+/*
+ * Convert cputime to microseconds and back.
+ */
+#define cputime_to_usecs(__ct)		\
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)	\
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val)	\
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)	\
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val)	\
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)	\
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct)	\
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)		\
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)	\
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+
+#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
new file mode 100644
index 000000000000..a8ece9a33aef
--- /dev/null
+++ b/include/asm-generic/cputime_nsecs.h
@@ -0,0 +1,104 @@
+/*
+ * Definitions for measuring cputime in nsecs resolution.
+ *
+ * Based on <arch/ia64/include/asm/cputime.h>
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
+#define _ASM_GENERIC_CPUTIME_NSECS_H
+
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
+
+#define cputime_one_jiffy		jiffies_to_cputime(1)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define cputime_to_scaled(__ct)		(__ct)
+#define jiffies_to_cputime(__jif)	\
+	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	\
+	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
+
+
+/*
+ * Convert cputime <-> nanoseconds
+ */
+#define nsecs_to_cputime(__nsecs)	((__force u64)(__nsecs))
+
+
+/*
+ * Convert cputime <-> microseconds
+ */
+#define cputime_to_usecs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)	\
+	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)	\
+	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		\
+	(__force cputime_t)((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+	return (__force cputime_t) ret;
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(const struct timeval *val)
+{
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+	return (__force cputime_t) ret;
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		\
+	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)	\
+	cputime_to_clock_t((__force cputime_t)__ct)
+
+#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index ccf7b4f34a3c..6c32af918c2f 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -16,6 +16,22 @@ extern void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle);
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
+{
+	/* attrs is not supported and ignored */
+	return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
+{
+	/* attrs is not supported and ignored */
+	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index a9432fc6b8ba..bde646995d10 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
 
 #ifdef CONFIG_GPIOLIB
 
@@ -46,16 +47,20 @@ struct gpio;
 struct seq_file;
 struct module;
 struct device_node;
+struct gpio_desc;
 
 /**
  * struct gpio_chip - abstract a GPIO controller
  * @label: for diagnostics
  * @dev: optional device providing the GPIOs
  * @owner: helps prevent removal of modules exporting active GPIOs
+ * @list: links gpio_chips together for traversal
  * @request: optional hook for chip-specific activation, such as
  *	enabling module power and clock; may sleep
  * @free: optional hook for chip-specific deactivation, such as
  *	disabling module power and clock; may sleep
+ * @get_direction: returns direction for signal "offset", 0=out, 1=in,
+ *	(same as GPIOF_DIR_XXX), or negative error
  * @direction_input: configures signal "offset" as input, or returns error
  * @get: returns value for signal "offset"; for output signals this
  *	returns either the value actually sensed, or zero
@@ -72,6 +77,7 @@ struct device_node;
  *	negative during registration, requests dynamic ID allocation.
  * @ngpio: the number of GPIOs handled by this controller; the last GPIO
  *	handled is (base + ngpio - 1).
+ * @desc: array of ngpio descriptors. Private.
  * @can_sleep: flag must be set iff get()/set() methods sleep, as they
  *	must while accessing GPIO expander chips over I2C or SPI
  * @names: if set, must be an array of strings to use as alternative
@@ -95,12 +101,14 @@ struct gpio_chip {
 	const char		*label;
 	struct device		*dev;
 	struct module		*owner;
+	struct list_head        list;
 
 	int			(*request)(struct gpio_chip *chip,
						unsigned offset);
 	void			(*free)(struct gpio_chip *chip,
						unsigned offset);
-
+	int			(*get_direction)(struct gpio_chip *chip,
+						unsigned offset);
 	int			(*direction_input)(struct gpio_chip *chip,
						unsigned offset);
 	int			(*get)(struct gpio_chip *chip,
@@ -120,6 +128,7 @@ struct gpio_chip {
					struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
+	struct gpio_desc	*desc;
 	const char		*const *names;
 	unsigned		can_sleep:1;
 	unsigned		exported:1;
@@ -134,12 +143,20 @@ struct gpio_chip {
 	int (*of_xlate)(struct gpio_chip *gc,
		        const struct of_phandle_args *gpiospec, u32 *flags);
 #endif
+#ifdef CONFIG_PINCTRL
+	/*
+	 * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
+	 * describe the actual pin range which they serve in an SoC. This
+	 * information would be used by pinctrl subsystem to configure
+	 * corresponding pins for gpio usage.
+	 */
+	struct list_head pin_ranges;
+#endif
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
			unsigned offset);
 extern struct gpio_chip *gpio_to_chip(unsigned gpio);
-extern int __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
@@ -179,12 +196,6 @@ extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *labe
 extern int gpio_request_array(const struct gpio *array, size_t num);
 extern void gpio_free_array(const struct gpio *array, size_t num);
 
-/* bindings for managed devices that want to request gpios */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
-int devm_gpio_request_one(struct device *dev, unsigned gpio,
-			  unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
-
 #ifdef CONFIG_GPIO_SYSFS
 
 /*
@@ -199,6 +210,43 @@ extern void gpio_unexport(unsigned gpio);
 
 #endif	/* CONFIG_GPIO_SYSFS */
 
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @head: list for maintaining set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+	struct list_head node;
+	struct pinctrl_dev *pctldev;
+	struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+			   unsigned int gpio_offset, unsigned int pin_offset,
+			   unsigned int npins);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+		       unsigned int gpio_offset, unsigned int pin_offset,
+		       unsigned int npins)
+{
+	return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
 #else	/* !CONFIG_GPIOLIB */
 
 static inline bool gpio_is_valid(int number)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 448303bdb85f..ac9da00e9f2c 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,7 +12,6 @@
 #define __ASM_GENERIC_IO_H
 
 #include <asm/page.h> /* I/O is all done through memory accesses */
-#include <asm/cacheflush.h>
 #include <linux/types.h>
 
 #ifdef CONFIG_GENERIC_IOMAP
@@ -54,8 +53,18 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 #endif
 
 #define readb __raw_readb
-#define readw(addr) __le16_to_cpu(__raw_readw(addr))
-#define readl(addr) __le32_to_cpu(__raw_readl(addr))
+
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+	return __le16_to_cpu(__raw_readw(addr));
+}
+
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+	return __le32_to_cpu(__raw_readl(addr));
+}
 
 #ifndef __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
@@ -83,19 +92,29 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
 #define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
 
 #ifdef CONFIG_64BIT
+#ifndef __raw_readq
 static inline u64 __raw_readq(const volatile void __iomem *addr)
 {
	return *(const volatile u64 __force *) addr;
 }
-#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#endif
+
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return __le64_to_cpu(__raw_readq(addr));
+}
 
+#ifndef __raw_writeq
 static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
 {
	*(volatile u64 __force *) addr = b;
 }
-#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
 #endif
+
+#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
+#endif /* CONFIG_64BIT */
+
 #ifndef PCI_IOBASE
 #define PCI_IOBASE ((void __iomem *) 0)
 #endif
@@ -148,7 +167,7 @@ static inline void insb(unsigned long addr, void *buffer, int count)
	if (count) {
		u8 *buf = buffer;
		do {
-			u8 x = inb(addr);
+			u8 x = __raw_readb(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
@@ -161,7 +180,7 @@ static inline void insw(unsigned long addr, void *buffer, int count)
	if (count) {
		u16 *buf = buffer;
		do {
-			u16 x = inw(addr);
+			u16 x = __raw_readw(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
@@ -174,7 +193,7 @@ static inline void insl(unsigned long addr, void *buffer, int count)
	if (count) {
		u32 *buf = buffer;
		do {
-			u32 x = inl(addr);
+			u32 x = __raw_readl(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
@@ -187,7 +206,7 @@ static inline void outsb(unsigned long addr, const void *buffer, int count)
	if (count) {
		const u8 *buf = buffer;
		do {
-			outb(*buf++, addr);
+			__raw_writeb(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
 }
@@ -199,7 +218,7 @@ static inline void outsw(unsigned long addr, const void *buffer, int count)
	if (count) {
		const u16 *buf = buffer;
		do {
-			outw(*buf++, addr);
+			__raw_writew(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
 }
@@ -211,54 +230,24 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
	if (count) {
		const u32 *buf = buffer;
		do {
-			outl(*buf++, addr);
+			__raw_writel(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
 }
 #endif
 
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
-	insl(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
-	insw(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
-	insb(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
-	outsl(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
-	outsw(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
-	outsb(addr - PCI_IOBASE, buf, len);
-}
-
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr)		readb(addr)
 #define ioread16(addr)		readw(addr)
-#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
+#define ioread16be(addr)	__be16_to_cpu(__raw_readw(addr))
 #define ioread32(addr)		readl(addr)
-#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
+#define ioread32be(addr)	__be32_to_cpu(__raw_readl(addr))
 
 #define iowrite8(v, addr)	writeb((v), (addr))
 #define iowrite16(v, addr)	writew((v), (addr))
-#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
+#define iowrite16be(v, addr)	__raw_writew(__cpu_to_be16(v), addr)
 #define iowrite32(v, addr)	writel((v), (addr))
-#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))
+#define iowrite32be(v, addr)	__raw_writel(__cpu_to_be32(v), addr)
 
 #define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
@@ -286,15 +275,20 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
 
 #ifndef CONFIG_GENERIC_IOMAP
 struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef pci_iounmap
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 {
 }
+#endif
 #endif /* CONFIG_GENERIC_IOMAP */
 
 /*
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
  */
+#ifndef virt_to_phys
 static inline unsigned long virt_to_phys(volatile void *address)
 {
	return __pa((unsigned long)address);
@@ -304,6 +298,7 @@ static inline void *phys_to_virt(unsigned long address)
 {
	return __va(address);
 }
+#endif
 
 /*
  * Change "struct page" to physical address.
@@ -351,6 +346,7 @@ extern void ioport_unmap(void __iomem *p);
 #define xlate_dev_kmem_ptr(p)	p
 #define xlate_dev_mem_ptr(p)	__va(p)
 
+#ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
 static inline unsigned long virt_to_bus(volatile void *address)
 {
@@ -362,10 +358,18 @@ static inline void *bus_to_virt(unsigned long address)
	return (void *) address;
 }
 #endif
+#endif
 
+#ifndef memset_io
 #define memset_io(a, b, c)	memset(__io_virt(a), (b), (c))
+#endif
+
+#ifndef memcpy_fromio
 #define memcpy_fromio(a, b, c)	memcpy((a), __io_virt(b), (c))
+#endif
+#ifndef memcpy_toio
 #define memcpy_toio(a, b, c)	memcpy(__io_virt(a), (b), (c))
+#endif
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
index 4f4aa56d6b52..0ed3f1cfb854 100644
--- a/include/asm-generic/mmu.h
+++ b/include/asm-generic/mmu.h
@@ -7,8 +7,12 @@
  */
 #ifndef __ASSEMBLY__
 typedef struct {
-	struct vm_list_struct	*vmlist;
	unsigned long		end_brk;
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+	unsigned long		exec_fdpic_loadmap;
+	unsigned long		interp_fdpic_loadmap;
+#endif
 } mm_context_t;
 #endif
 
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
index 40528cb977e8..2c9f9d4336ca 100644
--- a/include/asm-generic/parport.h
+++ b/include/asm-generic/parport.h
@@ -10,8 +10,8 @@
  * to devices on the PCI bus.
  */
 
-static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
 {
 #ifdef CONFIG_ISA
	return parport_pc_find_isa_ports(autoirq, autodma);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b36ce40bd1c6..bfd87685fc1f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -197,16 +197,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(pfn, mapped)	(0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define pte_maybe_dirty(pte)		pte_dirty(pte)
-#else
-#define pte_maybe_dirty(pte)		(1)
-#endif
-
 #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #define page_test_and_clear_young(pfn) (0)
 #endif
@@ -219,6 +209,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef pte_accessible
+# define pte_accessible(pte)		((void)(pte),1)
+#endif
+
 #ifndef flush_tlb_fix_spurious_fault
 #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
 #endif
@@ -449,6 +443,30 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
 #endif
 
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long zero_pfn;
+	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long zero_pfn;
+	return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+	extern unsigned long zero_pfn;
+	return zero_pfn;
+}
+#endif
+
 #ifdef CONFIG_MMU
 
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
@@ -554,6 +572,112 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 #endif
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the
+ * same bit too). It's set only when _PAGE_PRESET is not set and it's
+ * never set if _PAGE_PRESENT is set.
+ *
+ * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
+ * fault triggers on those regions if pte/pmd_numa returns true
+ * (because _PAGE_PRESENT is not set).
+ */
+#ifndef pte_numa
+static inline int pte_numa(pte_t pte)
+{
+	return (pte_flags(pte) &
+		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+#ifndef pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+	return (pmd_flags(pmd) &
+		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+/*
+ * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
+ * because they're called by the NUMA hinting minor page fault. If we
+ * wouldn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
+ * would be forced to set it later while filling the TLB after we
+ * return to userland. That would trigger a second write to memory
+ * that we optimize away by setting _PAGE_ACCESSED here.
+ */
+#ifndef pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+	pte = pte_clear_flags(pte, _PAGE_NUMA);
+	return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+	pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
+	return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+	pte = pte_set_flags(pte, _PAGE_NUMA);
+	return pte_clear_flags(pte, _PAGE_PRESENT);
+}
+#endif
+
+#ifndef pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+	pmd = pmd_set_flags(pmd, _PAGE_NUMA);
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+#endif
+#else
+extern int pte_numa(pte_t pte);
+extern int pmd_numa(pmd_t pmd);
+extern pte_t pte_mknonnuma(pte_t pte);
+extern pmd_t pmd_mknonnuma(pmd_t pmd);
+extern pte_t pte_mknuma(pte_t pte);
+extern pmd_t pmd_mknuma(pmd_t pmd);
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#else
+static inline int pmd_numa(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline int pte_numa(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline pte_t pte_mknuma(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+	return pmd;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 #endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index 98caa306122a..d840c90a157a 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -10,7 +10,5 @@
 #include <asm/sigcontext.h>
 #undef __HAVE_ARCH_SIG_BITOPS
 
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_GENERIC_SIGNAL_H */
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index d89dec864d42..1f74be5113b2 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -8,26 +8,6 @@
  * Calling conventions for these system calls can differ, so
  * it's possible to override them.
  */
-#ifndef sys_clone
-asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
-			void __user *parent_tid, void __user *child_tid,
-			struct pt_regs *regs);
-#endif
-
-#ifndef sys_fork
-asmlinkage long sys_fork(struct pt_regs *regs);
-#endif
-
-#ifndef sys_vfork
-asmlinkage long sys_vfork(struct pt_regs *regs);
-#endif
-
-#ifndef sys_execve
-asmlinkage long sys_execve(const char __user *filename,
-		const char __user *const __user *argv,
-		const char __user *const __user *envp,
-		struct pt_regs *regs);
-#endif
 
 #ifndef sys_mmap2
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -41,22 +21,8 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
			unsigned long fd, off_t pgoff);
 #endif
 
-#ifndef sys_sigaltstack
-asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
-			struct pt_regs *);
-#endif
-
 #ifndef sys_rt_sigreturn
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
 #endif
 
-#ifndef sys_rt_sigsuspend
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-#endif
-
-#ifndef sys_rt_sigaction
-asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
-			 struct sigaction __user *oact, size_t sigsetsize);
-#endif
-
 #endif /* __ASM_GENERIC_SYSCALLS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ed6642ad03e0..25f01d0bc149 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -78,6 +78,14 @@ struct mmu_gather_batch {
 #define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
+
 /* struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
@@ -96,6 +104,7 @@ struct mmu_gather {
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
+	unsigned int		batch_count;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 000000000000..6726f1bafb5e
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif  /* _ASM_GENERIC_TRACE_CLOCK_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 9788568f7978..c184aa8ec8cd 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -7,7 +7,6 @@
  * address space, e.g. all NOMMU machines.
  */
 #include <linux/sched.h>
-#include <linux/mm.h>
 #include <linux/string.h>
 
 #include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
 }
 #endif
 
+#ifndef segment_eq
 #define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
 
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
		-EFAULT;					\
 })
 
+#ifndef __put_user_fn
+
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
 }
 
+#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)
+
+#endif
+
 extern int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)					\
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
		-EFAULT;					\
 })
 
+#ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
 }
 
+#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
+
+#endif
+
 extern int __get_user_bad(void) __attribute__((noreturn));
 
 #ifndef __copy_from_user_inatomic
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index a36991ab334e..4077b5d9ff81 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -9,9 +9,6 @@
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 
 /*
  * "Conditional" syscalls
  *
@@ -20,5 +17,12 @@
  * but it doesn't work on all toolchains, so we just do it by hand
  */
 #ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#ifdef CONFIG_SYMBOL_PREFIX
+#define __SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
+#define __SYMBOL_PREFIX
+#endif
+#define cond_syscall(x) asm(".weak\t" __SYMBOL_PREFIX #x "\n\t"	\
+			    ".set\t" __SYMBOL_PREFIX #x ","		\
+			    __SYMBOL_PREFIX "sys_ni_syscall")
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d1ea7ce0b4cb..afa12c7a025c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -149,6 +149,33 @@
 #define TRACE_SYSCALLS()
 #endif
 
+#ifdef CONFIG_CLKSRC_OF
+#define CLKSRC_OF_TABLES() . = ALIGN(8);				\
+			   VMLINUX_SYMBOL(__clksrc_of_table) = .;	\
+			   *(__clksrc_of_table)				\
+			   *(__clksrc_of_table_end)
+#else
+#define CLKSRC_OF_TABLES()
+#endif
+
+#ifdef CONFIG_IRQCHIP
+#define IRQCHIP_OF_MATCH_TABLE()					\
+	. = ALIGN(8);							\
+	VMLINUX_SYMBOL(__irqchip_begin) = .;				\
+	*(__irqchip_of_table)						\
+	*(__irqchip_of_end)
+#else
+#define IRQCHIP_OF_MATCH_TABLE()
+#endif
+
+#ifdef CONFIG_COMMON_CLK
+#define CLK_OF_TABLES() . = ALIGN(8);				\
+			VMLINUX_SYMBOL(__clk_of_table) = .;	\
+			*(__clk_of_table)			\
+			*(__clk_of_table_end)
+#else
+#define CLK_OF_TABLES()
+#endif
+
 #define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
@@ -493,7 +520,10 @@
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)					\
-	KERNEL_DTB()
+	CLK_OF_TABLES()							\
+	CLKSRC_OF_TABLES()						\
+	KERNEL_DTB()							\
+	IRQCHIP_OF_MATCH_TABLE()
 
 #define INIT_TEXT							\
	*(.init.text)							\
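
Note on the gpio.h change: the gpiochip_add_pin_range()/gpiochip_remove_pin_ranges() declarations added above are intended to be called by a GPIO controller driver once its chip is registered, to hand the pin mapping over to pinctrl. The fragment below is only a sketch of that usage; the "foo" driver name, the "pinctrl-foo" pin controller name, and the offset/count values are made-up placeholders, and only the two function signatures come from the header change itself.

#include <linux/gpio.h>

/* Hypothetical helper, called after gpiochip_add() has succeeded. */
static int foo_gpio_setup_pin_ranges(struct gpio_chip *chip)
{
	/*
	 * Map GPIOs 0..15 of this chip onto pins 32..47 of the
	 * "pinctrl-foo" pin controller (all numbers illustrative).
	 * With CONFIG_PINCTRL disabled this compiles to the stub in
	 * the header above and simply returns 0.
	 */
	return gpiochip_add_pin_range(chip, "pinctrl-foo", 0, 32, 16);
}

/* Hypothetical teardown path, run before unregistering the chip. */
static void foo_gpio_teardown_pin_ranges(struct gpio_chip *chip)
{
	gpiochip_remove_pin_ranges(chip);
}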
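Note on the cputime split: cputime.h now just selects a backing unit, jiffies (cputime_jiffies.h) or nanoseconds (cputime_nsecs.h), for cputime_t. The nsec variant's conversions are plain integer scaling; the standalone program below mirrors that arithmetic outside the kernel as a sanity check. The HZ and USER_HZ values here are assumptions for the example only; in the kernel they come from the configuration, and the real macros are the ones in the new header above.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
#define HZ		250		/* assumed config value */
#define USER_HZ		100		/* assumed value */

int main(void)
{
	uint64_t ct = 3 * NSEC_PER_SEC + 500000000ULL;	/* 3.5 s of cputime in ns */

	/* mirrors cputime_to_jiffies(), cputime_to_usecs(), cputime_to_clock_t() */
	printf("jiffies: %llu\n", (unsigned long long)(ct / (NSEC_PER_SEC / HZ)));
	printf("usecs:   %llu\n", (unsigned long long)(ct / NSEC_PER_USEC));
	printf("clock_t: %llu\n", (unsigned long long)(ct / (NSEC_PER_SEC / USER_HZ)));
	return 0;
}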