author    Franck Bui-Huu <fbuihuu@gmail.com>       2007-10-18 09:11:14 +0200
committer Ralf Baechle <ralf@linux-mips.org>       2008-01-29 10:14:56 +0000
commit    a9af6041e9e7ed0278d03333c3a22a8509f33615 (patch)
tree      1d05905c82e0a36c5c4c5d04c1933224fb82ae92 /arch
parent    3f4ac2dc0504962b6cb9367adbc13272393b80b3 (diff)
[MIPS] tlbex.c: Cleanup __init usages.
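
What this cleans up: __init moves from before the return type to between
the type and the function name, and __initdata moves from before the type
to after the declarator, matching the usual kernel placement for section
attributes. The macro-generated i##op() helpers also drop `inline` and
gain __maybe_unused, presumably so that helpers a given configuration
never calls do not trigger unused-function warnings. A minimal sketch of
the before/after pattern (foo() and bar[] are illustrative names, not
identifiers from tlbex.c):

    /* before: attribute placement is inconsistent */
    static __init int foo(void);
    static __initdata u32 bar[128];

    /* after: __init between type and name, __initdata after the declarator */
    static int __init foo(void);
    static u32 bar[128] __initdata;
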
Signed-off-by: Franck Bui-Huu <fbuihuu@gmail.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/mm/tlbex.c | 98
1 file changed, 49 insertions(+), 49 deletions(-)
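
For reference, a rough sketch of how these annotations are typically
defined (see include/linux/init.h and include/linux/compiler.h; the
exact definitions vary across kernel versions):

    #define __init         __attribute__((__section__(".init.text")))
    #define __initdata     __attribute__((__section__(".init.data")))
    #define __maybe_unused __attribute__((unused))

Everything tagged this way is placed in the .init sections and freed once
booting completes, which is why the boot-time TLB-handler synthesizer
below is annotated throughout.
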
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index f8925ba0b39e..77530fd96f1c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -67,7 +67,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
* why; it's not an issue caused by the core RTL.
*
*/
-static __init int __attribute__((unused)) m4kc_tlbp_war(void)
+static int __init m4kc_tlbp_war(void)
{
return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -141,7 +141,7 @@ struct insn {
| (e) << RE_SH \
| (f) << FUNC_SH)
-static __initdata struct insn insn_table[] = {
+static struct insn insn_table[] __initdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -194,7 +194,7 @@ static __initdata struct insn insn_table[] = {
#undef M
-static __init u32 build_rs(u32 arg)
+static u32 __init build_rs(u32 arg)
{
if (arg & ~RS_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -202,7 +202,7 @@ static __init u32 build_rs(u32 arg)
return (arg & RS_MASK) << RS_SH;
}
-static __init u32 build_rt(u32 arg)
+static u32 __init build_rt(u32 arg)
{
if (arg & ~RT_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -210,7 +210,7 @@ static __init u32 build_rt(u32 arg)
return (arg & RT_MASK) << RT_SH;
}
-static __init u32 build_rd(u32 arg)
+static u32 __init build_rd(u32 arg)
{
if (arg & ~RD_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -218,7 +218,7 @@ static __init u32 build_rd(u32 arg)
return (arg & RD_MASK) << RD_SH;
}
-static __init u32 build_re(u32 arg)
+static u32 __init build_re(u32 arg)
{
if (arg & ~RE_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -226,7 +226,7 @@ static __init u32 build_re(u32 arg)
return (arg & RE_MASK) << RE_SH;
}
-static __init u32 build_simm(s32 arg)
+static u32 __init build_simm(s32 arg)
{
if (arg > 0x7fff || arg < -0x8000)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -234,7 +234,7 @@ static __init u32 build_simm(s32 arg)
return arg & 0xffff;
}
-static __init u32 build_uimm(u32 arg)
+static u32 __init build_uimm(u32 arg)
{
if (arg & ~IMM_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -242,7 +242,7 @@ static __init u32 build_uimm(u32 arg)
return arg & IMM_MASK;
}
-static __init u32 build_bimm(s32 arg)
+static u32 __init build_bimm(s32 arg)
{
if (arg > 0x1ffff || arg < -0x20000)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -253,7 +253,7 @@ static __init u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
-static __init u32 build_jimm(u32 arg)
+static u32 __init build_jimm(u32 arg)
{
if (arg & ~((JIMM_MASK) << 2))
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -261,7 +261,7 @@ static __init u32 build_jimm(u32 arg)
return (arg >> 2) & JIMM_MASK;
}
-static __init u32 build_func(u32 arg)
+static u32 __init build_func(u32 arg)
{
if (arg & ~FUNC_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -269,7 +269,7 @@ static __init u32 build_func(u32 arg)
return arg & FUNC_MASK;
}
-static __init u32 build_set(u32 arg)
+static u32 __init build_set(u32 arg)
{
if (arg & ~SET_MASK)
printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -316,69 +316,69 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
}
#define I_u1u2u3(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2u1u3(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u3u1u2(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, c, a); \
}
#define I_u1u2s3(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2s3u1(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
signed int b, unsigned int c) \
{ \
build_insn(buf, insn##op, c, a, b); \
}
#define I_u2u1s3(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u1u2(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
unsigned int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1s2(op) \
- static inline void __init i##op(u32 **buf, unsigned int a, \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
signed int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1(op) \
- static inline void __init i##op(u32 **buf, unsigned int a) \
+ static void __init __maybe_unused i##op(u32 **buf, unsigned int a) \
{ \
build_insn(buf, insn##op, a); \
}
#define I_0(op) \
- static inline void __init i##op(u32 **buf) \
+ static void __init __maybe_unused i##op(u32 **buf) \
{ \
build_insn(buf, insn##op); \
}
@@ -458,7 +458,7 @@ struct label {
enum label_id lab;
};
-static __init void build_label(struct label **lab, u32 *addr,
+static void __init build_label(struct label **lab, u32 *addr,
enum label_id l)
{
(*lab)->addr = addr;
@@ -467,7 +467,7 @@ static __init void build_label(struct label **lab, u32 *addr,
}
#define L_LA(lb) \
- static inline void l##lb(struct label **lab, u32 *addr) \
+ static inline void __init l##lb(struct label **lab, u32 *addr) \
{ \
build_label(lab, addr, label##lb); \
}
@@ -526,7 +526,7 @@ L_LA(_r3000_write_probe_fail)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
-static __init int __maybe_unused in_compat_space_p(long addr)
+static int __init __maybe_unused in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -536,7 +536,7 @@ static __init int __maybe_unused in_compat_space_p(long addr)
#endif
}
-static __init int __maybe_unused rel_highest(long val)
+static int __init __maybe_unused rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -545,7 +545,7 @@ static __init int __maybe_unused rel_highest(long val)
#endif
}
-static __init int __maybe_unused rel_higher(long val)
+static int __init __maybe_unused rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -554,17 +554,17 @@ static __init int __maybe_unused rel_higher(long val)
#endif
}
-static __init int rel_hi(long val)
+static int __init rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-static __init int rel_lo(long val)
+static int __init rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!in_compat_space_p(addr)) {
i_lui(buf, rs, rel_highest(addr));
@@ -580,7 +580,7 @@ static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
i_lui(buf, rs, rel_hi(addr));
}
-static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr)
+static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr)
{
i_LA_mostly(buf, rs, addr);
if (rel_lo(addr)) {
@@ -601,7 +601,7 @@ struct reloc {
enum label_id lab;
};
-static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
+static void __init r_mips_pc16(struct reloc **rel, u32 *addr,
enum label_id l)
{
(*rel)->addr = addr;
@@ -626,7 +626,7 @@ static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
}
}
-static __init void resolve_relocs(struct reloc *rel, struct label *lab)
+static void __init resolve_relocs(struct reloc *rel, struct label *lab)
{
struct label *l;
@@ -636,7 +636,7 @@ static __init void resolve_relocs(struct reloc *rel, struct label *lab)
__resolve_relocs(rel, l);
}
-static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
+static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
long off)
{
for (; rel->lab != label_invalid; rel++)
@@ -644,7 +644,7 @@ static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
rel->addr += off;
}
-static __init void move_labels(struct label *lab, u32 *first, u32 *end,
+static void __init move_labels(struct label *lab, u32 *first, u32 *end,
long off)
{
for (; lab->lab != label_invalid; lab++)
@@ -652,7 +652,7 @@ static __init void move_labels(struct label *lab, u32 *first, u32 *end,
lab->addr += off;
}
-static __init void copy_handler(struct reloc *rel, struct label *lab,
+static void __init copy_handler(struct reloc *rel, struct label *lab,
u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
@@ -663,7 +663,7 @@ static __init void copy_handler(struct reloc *rel, struct label *lab,
move_labels(lab, first, end, off);
}
-static __init int __maybe_unused insn_has_bdelay(struct reloc *rel,
+static int __init __maybe_unused insn_has_bdelay(struct reloc *rel,
u32 *addr)
{
for (; rel->lab != label_invalid; rel++) {
@@ -755,11 +755,11 @@ il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
-static __initdata u32 tlb_handler[128];
+static u32 tlb_handler[128] __initdata;
/* simply assume worst case size for labels and relocs */
-static __initdata struct label labels[128];
-static __initdata struct reloc relocs[128];
+static struct label labels[128] __initdata;
+static struct reloc relocs[128] __initdata;
/*
* The R3000 TLB handler is simple.
@@ -813,7 +813,7 @@ static void __init build_r3000_tlb_refill_handler(void)
* other one. To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
-static __initdata u32 final_handler[64];
+static u32 final_handler[64] __initdata;
/*
* Hazards
@@ -837,7 +837,7 @@ static __initdata u32 final_handler[64];
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
-static __init void __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0 needs this, too. */
@@ -861,7 +861,7 @@ static __init void __maybe_unused build_tlb_probe_entry(u32 **p)
*/
enum tlb_write_entry { tlb_random, tlb_indexed };
-static __init void build_tlb_write_entry(u32 **p, struct label **l,
+static void __init build_tlb_write_entry(u32 **p, struct label **l,
struct reloc **r,
enum tlb_write_entry wmode)
{
@@ -1003,7 +1003,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
-static __init void
+static void __init
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
unsigned int tmp, unsigned int ptr)
{
@@ -1064,7 +1064,7 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
-static __init void
+static void __init
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
unsigned int bvaddr, unsigned int ptr)
{
@@ -1134,7 +1134,7 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static __init void __maybe_unused
+static void __init __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
@@ -1169,7 +1169,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif /* !CONFIG_64BIT */
-static __init void build_adjust_context(u32 **p, unsigned int ctx)
+static void __init build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1195,7 +1195,7 @@ static __init void build_adjust_context(u32 **p, unsigned int ctx)
i_andi(p, ctx, ctx, mask);
}
-static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
@@ -1220,7 +1220,7 @@ static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
-static __init void build_update_entries(u32 **p, unsigned int tmp,
+static void __init build_update_entries(u32 **p, unsigned int tmp,
unsigned int ptep)
{
/*