author    Ingo Molnar <mingo@kernel.org>  2017-07-18 11:36:53 +0200
committer Ingo Molnar <mingo@kernel.org>  2017-07-18 11:36:53 +0200
commit    1ed7d32763857fbdc8e406352404538e41050d22 (patch)
tree      f97c5d8f548141bf7cdec0a9d7f22adb61da6efc /arch/ia64
parent    660da7c9228f685b2ebe664f9fd69aaddcc420b5 (diff)
parent    27aac20574110abfd594175a668dc58b23b2b14a (diff)
Merge branch 'x86/boot' into x86/mm, to pick up interacting changes
The SME patches we are about to apply add some E820 logic, so merge in
pending E820 code changes first, to have a single code base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/sim/simserial.c         |  13
-rw-r--r--  arch/ia64/include/asm/Kbuild         |   2
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h  |   2
-rw-r--r--  arch/ia64/include/asm/io.h           |   2
-rw-r--r--  arch/ia64/include/asm/uaccess.h      |  48
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h  |   4
-rw-r--r--  arch/ia64/kernel/Makefile.gate       |  15
-rw-r--r--  arch/ia64/kernel/machine_kexec.c     |   5
-rw-r--r--  arch/ia64/kernel/mca.c               |   2
-rw-r--r--  arch/ia64/lib/Makefile               |   2
-rw-r--r--  arch/ia64/lib/strlen_user.S          | 200
-rw-r--r--  arch/ia64/mm/hugetlbpage.c           |   4
-rw-r--r--  arch/ia64/mm/init.c                  |  11
-rw-r--r--  arch/ia64/sn/kernel/iomv.c           |   2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c   |   2
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c    |   4
16 files changed, 23 insertions(+), 295 deletions(-)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index de8cba121013..70d52e9bb575 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -387,19 +387,6 @@ static int activate(struct tty_port *port, struct tty_struct *tty)
}
state->xmit.head = state->xmit.tail = 0;
-
- /*
- * Set up the tty->alt_speed kludge
- */
- if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- tty->alt_speed = 57600;
- if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- tty->alt_speed = 115200;
- if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- tty->alt_speed = 230400;
- if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- tty->alt_speed = 460800;
-
errout:
local_irq_restore(flags);
return retval;
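The block removed above was the driver-local tty->alt_speed kludge; the
deprecated ASYNC_SPD_* speed overrides are presumably handled by the tty core
from this point on, so each driver no longer needs its own copy. For
reference, a table-driven C sketch of exactly the mapping the deleted lines
encoded (spd_table and spd_flag_to_speed are illustrative names, not kernel
API):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/tty_flags.h>	/* ASYNC_SPD_* (uapi tty_flags.h) */

/* the four deprecated ASYNC_SPD_* overrides and their baud rates */
static const struct {
	unsigned int flag;
	int speed;
} spd_table[] = {
	{ ASYNC_SPD_HI,   57600  },
	{ ASYNC_SPD_VHI,  115200 },
	{ ASYNC_SPD_SHI,  230400 },
	{ ASYNC_SPD_WARP, 460800 },
};

static int spd_flag_to_speed(unsigned int port_flags)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(spd_table); i++)
		if ((port_flags & ASYNC_SPD_MASK) == spd_table[i].flag)
			return spd_table[i].speed;
	return 0;	/* no alternate speed requested */
}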
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 502a91d8dbbd..1d7641f891e1 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,8 +1,6 @@
-
generic-y += clkdev.h
generic-y += exec.h
generic-y += irq_work.h
-generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 73ec3c6f4cfe..3ce5ab4339f3 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE 0
-
extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 5de673ac9cb1..a2540e21f919 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -117,7 +117,7 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
* following the barrier will arrive after all previous writes. For most
* ia64 platforms, this is a simple 'mf.a' instruction.
*
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ * See Documentation/driver-api/device-io.rst for more information.
*/
static inline void ___ia64_mmiowb(void)
{
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 82a7646c4416..0890ded638f0 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -87,42 +87,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __put_user((x), (ptr)); break; \
- case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
- | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
- | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
- | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __put_user_unaligned_unknown(); \
- } \
- __ret; \
-})
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr) \
-({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __get_user((x), (ptr)); break; \
- case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
- | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
- | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
- | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __get_user_unaligned_unknown(); \
- } \
- __ret; \
-})
-
#ifdef ASM_SUPPORTED
struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))
@@ -277,18 +241,6 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
__sfu_ret; \
})
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern unsigned long __strlen_user (const char __user *);
-
-#define strlen_user(str) \
-({ \
- const char __user *__su_str = (str); \
- unsigned long __su_ret = 0; \
- if (__access_ok(__su_str, 0)) \
- __su_ret = __strlen_user(__su_str); \
- __su_ret; \
-})
-
/*
* Returns: 0 if exception before NUL or reaching the supplied limit
* (N), a value greater than N if the limit would be exceeded, else
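The two macros deleted above split an unaligned user access into naturally
aligned halves, OR-ing together the fault codes of the partial accesses. A
plain-C sketch of the 4-byte put case, mirroring the structure of the removed
code (illustrative only; the byte order follows the little-endian shifts used
above, and plain stores stand in for __put_user()):

#include <stdint.h>

/*
 * Store a 32-bit value as two 16-bit halves, as the removed
 * __put_user_unaligned() did for case 4. In the kernel each half
 * went through __put_user(), whose -EFAULT returns were OR-ed
 * together; this sketch performs the same two half-width stores.
 */
static int put_u32_as_halves(uint32_t val, uint16_t *p)
{
	p[0] = (uint16_t)val;		/* low half first: little endian */
	p[1] = (uint16_t)(val >> 16);	/* then the high half */
	return 0;			/* the macro returned -EFAULT on a fault */
}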
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 2c3f4b48042a..5dd5c5d0d642 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -107,4 +107,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_IA64_SOCKET_H */
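SCM_TIMESTAMPING_PKTINFO and SO_PEERGROUPS are new generic socket options
being propagated into each arch's uapi header. SO_PEERGROUPS reports the
supplementary groups of the peer of a connected AF_UNIX socket; a userspace
usage sketch, assuming the semantics introduced in this cycle (a too-small
buffer makes getsockopt() fail with ERANGE and report the needed length;
verify against socket(7), and note older libcs may not define the constant):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>

static int print_peer_groups(int fd)
{
	socklen_t len = 0;
	socklen_t i;
	gid_t *gids;

	/* probe with a zero-length buffer to learn the required size */
	if (getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS, NULL, &len) < 0 &&
	    errno != ERANGE)
		return -1;

	gids = malloc(len);
	if (!gids)
		return -1;
	if (getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS, gids, &len) < 0) {
		free(gids);
		return -1;
	}
	for (i = 0; i < len / sizeof(gid_t); i++)
		printf("peer gid: %u\n", (unsigned int)gids[i]);
	free(gids);
	return 0;
}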
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index a32903ada016..7da7c65a9bb1 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -1,8 +1,8 @@
# The gate DSO image is built using a special linker script.
-targets += gate.so gate-syms.o
+targets += gate.so gate.lds gate.o gate-dummy.o
-extra-y += gate.so gate-syms.o gate.lds gate.o
+obj-y += gate-syms.o
CPPFLAGS_gate.lds := -P -C -U$(ARCH)
@@ -14,13 +14,14 @@ GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
-$(obj)/built-in.o: $(obj)/gate-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
-
-GATECFLAGS_gate-syms.o = -r
-$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
+GATECFLAGS_gate-dummy.o = -r
+$(obj)/gate-dummy.o: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
+LDFLAGS_gate-syms.o := -r -R
+$(obj)/gate-syms.o: $(obj)/gate-dummy.o FORCE
+ $(call if_changed,ld)
+
# gate-data.o contains the gate DSO image as data in section .data..gate.
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 599507bcec91..c14815dca747 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void)
#endif
}
-phys_addr_t paddr_vmcoreinfo_note(void)
-{
- return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
-}
-
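The ia64-specific paddr_vmcoreinfo_note() override is dropped here;
presumably the generic kexec/crash code now supplies a definition that is
correct for ia64 as well. The customary kernel pattern for such arch hooks is
a weak default, roughly as below (a sketch, not the verbatim generic code):

#include <linux/crash_core.h>	/* vmcoreinfo_note */
#include <linux/mm.h>		/* __pa() */

/* weak default: an architecture overrides it by defining a strong symbol */
phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
	return __pa(vmcoreinfo_note);
}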
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 79c7c46d7dc1..555b11180156 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -334,7 +334,7 @@ static void ia64_mlogbuf_dump_from_init(void)
ia64_mlogbuf_dump();
}
-static void inline
+static inline void
ia64_mca_spin(const char *func)
{
if (monarch_cpu == smp_processor_id())
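This hunk, and the two identical sn/pci hunks further down, is purely a
declaration-style fix: function specifiers conventionally sit between the
storage class and the return type, and static checkers warn about the old
spelling. In short:

static void inline f(void);	/* legal C, but the ordering tools warn about */
static inline void f(void);	/* canonical kernel style */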
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 0a40b14407b1..1a36a3a39624 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -5,7 +5,7 @@
lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o \
- clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
+ clear_user.o strncpy_from_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o xor.o
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
deleted file mode 100644
index 9d257684e733..000000000000
--- a/arch/ia64/lib/strlen_user.S
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Optimized version of the strlen_user() function
- *
- * Inputs:
- * in0 address of buffer
- *
- * Outputs:
- * ret0 0 in case of fault, strlen(buffer)+1 otherwise
- *
- * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * 01/19/99 S.Eranian heavily enhanced version (see details below)
- * 09/24/99 S.Eranian added speculation recovery code
- */
-
-#include <asm/asmmacro.h>
-#include <asm/export.h>
-
-//
-// int strlen_user(char *)
-// ------------------------
-// Returns:
-// - length of string + 1
-// - 0 in case an exception is raised
-//
-// This is an enhanced version of the basic strlen_user. it includes a
-// combination of compute zero index (czx), parallel comparisons, speculative
-// loads and loop unroll using rotating registers.
-//
-// General Ideas about the algorithm:
-// The goal is to look at the string in chunks of 8 bytes.
-// so we need to do a few extra checks at the beginning because the
-// string may not be 8-byte aligned. In this case we load the 8byte
-// quantity which includes the start of the string and mask the unused
-// bytes with 0xff to avoid confusing czx.
-// We use speculative loads and software pipelining to hide memory
-// latency and do read ahead safely. This way we defer any exception.
-//
-// Because we don't want the kernel to be relying on particular
-// settings of the DCR register, we provide recovery code in case
-// speculation fails. The recovery code is going to "redo" the work using
-// only normal loads. If we still get a fault then we return an
-// error (ret0=0). Otherwise we return the strlen+1 as usual.
-// The fact that speculation may fail can be caused, for instance, by
-// the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
-// a NaT bit will be set if the translation is not present. The normal
-// load, on the other hand, will cause the translation to be inserted
-// if the mapping exists.
-//
-// It should be noted that we execute recovery code only when we need
-// to use the data that has been speculatively loaded: we don't execute
-// recovery code on pure read ahead data.
-//
-// Remarks:
-// - the cmp r0,r0 is used as a fast way to initialize a predicate
-// register to 1. This is required to make sure that we get the parallel
-// compare correct.
-//
-// - we don't use the epilogue counter to exit the loop but we need to set
-// it to zero beforehand.
-//
-// - after the loop we must test for Nat values because neither the
-// czx nor cmp instruction raise a NaT consumption fault. We must be
-// careful not to look too far for a Nat for which we don't care.
-// For instance we don't need to look at a NaT in val2 if the zero byte
-// was in val1.
-//
-// - Clearly performance tuning is required.
-//
-
-#define saved_pfs r11
-#define tmp r10
-#define base r16
-#define orig r17
-#define saved_pr r18
-#define src r19
-#define mask r20
-#define val r21
-#define val1 r22
-#define val2 r23
-
-GLOBAL_ENTRY(__strlen_user)
- .prologue
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,11,0,0,8
-
- .rotr v[2], w[2] // declares our 4 aliases
-
- extr.u tmp=in0,0,3 // tmp=least significant 3 bits
- mov orig=in0 // keep trackof initial byte address
- dep src=0,in0,0,3 // src=8byte-aligned in0 address
- .save pr, saved_pr
- mov saved_pr=pr // preserve predicates (rotation)
- ;;
-
- .body
-
- ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate)
- shl tmp=tmp,3 // multiply by 8bits/byte
- mov mask=-1 // our mask
- ;;
- ld8.s w[1]=[src],8 // load next 8 bytes in 2nd pipeline
- cmp.eq p6,p0=r0,r0 // sets p6 (required because of // cmp.and)
- sub tmp=64,tmp // how many bits to shift our mask on the right
- ;;
- shr.u mask=mask,tmp // zero enough bits to hold v[1] valuable part
- mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
- ;;
- add base=-16,src // keep track of aligned base
- chk.s v[1], .recover // if already NaT, then directly skip to recover
- or v[1]=v[1],mask // now we have a safe initial byte pattern
- ;;
-1:
- ld8.s v[0]=[src],8 // speculatively load next
- czx1.r val1=v[1] // search 0 byte from right
- czx1.r val2=w[1] // search 0 byte from right following 8bytes
- ;;
- ld8.s w[0]=[src],8 // speculatively load next to next
- cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
- cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
-(p6) br.wtop.dptk.few 1b // loop until p6 == 0
- ;;
- //
- // We must return try the recovery code iff
- // val1_is_nat || (val1==8 && val2_is_nat)
- //
- // XXX Fixme
- // - there must be a better way of doing the test
- //
- cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate)
- tnat.nz p6,p7=val1 // test NaT on val1
-(p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
- ;;
- //
- // if we come here p7 is true, i.e., initialized for // cmp
- //
- cmp.eq.and p7,p0=8,val1// val1==8?
- tnat.nz.and p7,p0=val2 // test NaT if val2
-(p7) br.cond.spnt .recover // jump to recovery if val2 is NaT
- ;;
-(p8) mov val1=val2 // val2 contains the value
-(p8) adds src=-16,src // correct position when 3 ahead
-(p9) adds src=-24,src // correct position when 4 ahead
- ;;
- sub ret0=src,orig // distance from origin
- sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // length=now - back -1
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of normal execution
-
- //
- // Outlined recovery code when speculation failed
- //
- // This time we don't use speculation and rely on the normal exception
- // mechanism. that's why the loop is not as good as the previous one
- // because read ahead is not possible
- //
- // XXX Fixme
- // - today we restart from the beginning of the string instead
- // of trying to continue where we left off.
- //
-.recover:
- EX(.Lexit1, ld8 val=[base],8) // load the initial bytes
- ;;
- or val=val,mask // remask first bytes
- cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
- ;;
- //
- // ar.ec is still zero here
- //
-2:
- EX(.Lexit1, (p6) ld8 val=[base],8)
- ;;
- czx1.r val1=val // search 0 byte from right
- ;;
- cmp.eq p6,p0=8,val1 // val1==8 ?
-(p6) br.wtop.dptk.few 2b // loop until p6 == 0
- ;;
- sub ret0=base,orig // distance from base
- sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // length=now - back -1
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of successful recovery code
-
- //
- // We failed even on the normal load (called from exception handler)
- //
-.Lexit1:
- mov ret0=0
- mov pr=saved_pr,0xffffffffffff0000
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp
-END(__strlen_user)
-EXPORT_SYMBOL(__strlen_user)
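The deleted file's own comments describe the algorithm well: scan the string
in 8-byte chunks, force the bytes before an unaligned start to 0xff so they
cannot match, find the first zero byte in each chunk (the ia64 czx
instruction), and hide latency with speculative loads backed by a
non-speculative recovery path. A portable C sketch of the same chunked scan,
with the classic bitwise zero-byte test standing in for czx and ordinary
loads standing in for speculation:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* true if any byte of x is zero (standard bit trick) */
static int has_zero_byte(uint64_t x)
{
	return ((x - 0x0101010101010101ULL) & ~x &
		0x8080808080808080ULL) != 0;
}

/* returns strlen(s) + 1, matching the deleted routine's contract */
static size_t strlen_chunked(const char *s)
{
	uintptr_t misalign = (uintptr_t)s & 7;
	const char *p = s - misalign;	/* round down to an 8-byte chunk */
	uint64_t w;
	size_t i;

	memcpy(&w, p, 8);
	/* mask the bytes before the string with 0xff (little endian),
	 * as the assembly does with its shifted mask */
	w |= (1ULL << (misalign * 8)) - 1;

	while (!has_zero_byte(w)) {	/* reads stay inside aligned chunks */
		p += 8;
		memcpy(&w, p, 8);
	}
	for (i = 0; ((w >> (i * 8)) & 0xff) != 0; i++)
		;			/* locate the zero byte, czx-style */
	return (size_t)(p + i - s) + 1;
}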
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 85de86d36fdf..ae35140332f7 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
}
pte_t *
-huge_pte_offset (struct mm_struct *mm, unsigned long addr)
+huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
@@ -92,7 +92,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
if (REGION_NUMBER(addr) != RGN_HPAGE)
return ERR_PTR(-EINVAL);
- ptep = huge_pte_offset(mm, addr);
+ ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
if (!ptep || pte_none(*ptep))
return NULL;
page = pte_page(*ptep);
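The new sz argument comes from a tree-wide interface change: callers now pass
the huge page size to huge_pte_offset(), presumably so architectures with
several huge page sizes can walk to the matching page-table level; ia64's
implementation ignores it, and the caller above simply passes HPAGE_SIZE. A
hypothetical sketch of how an architecture with two sizes might use it (names
and levels illustrative, not ia64 code; none-checks below the pgd are elided):

/* hypothetical two-size implementation, not ia64 code */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
		       unsigned long sz)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)		/* e.g. a 1 GiB mapping: stop at the pud */
		return (pte_t *)pud;
	return (pte_t *)pmd_offset(pud, addr);	/* e.g. 2 MiB: one level down */
}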
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8f3efa682ee8..a4e8d6bd9cfa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -646,20 +646,13 @@ mem_init (void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- pg_data_t *pgdat;
- struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(nid);
-
- zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
-
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
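The zone lookup deleted here tracks the memory-hotplug rework merged in this
cycle: hot-added memory is no longer bound to a zone when it is added, only
when it is onlined, so the arch hook reduces to a thin wrapper. A sketch of
the resulting shape (mirroring the hunk above, with the error printk
dropped):

/* post-rework arch hook: no zone is chosen at add time any more */
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* zone selection is deferred until the memory is onlined */
	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}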
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index c77ebdf98119..2b22a71663c1 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(sn_io_addr);
/**
* __sn_mmiowb - I/O space memory barrier
*
- * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * See arch/ia64/include/asm/io.h and Documentation/driver-api/device-io.rst
* for details.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 5bc34eac9e01..b67bb4cb73ff 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -140,7 +140,7 @@ static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
/*
* Update the ate.
*/
-void inline
+inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 46d3df4b03a1..3bd9abc35485 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -52,7 +52,7 @@
* All registers defined in struct tioce will meet that criteria.
*/
-static void inline
+static inline void
tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;
@@ -78,7 +78,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
}
}
-static void inline
+static inline void
tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;